ngram
listlengths
0
67.8k
[ "os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader)", "from jinja2 import Environment, FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH']", "Environment, FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates')", "FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env", "envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader) template", "#!/usr/bin/env/python from jinja2 import Environment, FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash =", "os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader) template = env.get_template('stagectl.rb.j2') output = template.stream(version=envVersion,", "<reponame>auto-staging/homebrew-stagectl #!/usr/bin/env/python from jinja2 import Environment, FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash", "= os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader) template =", "envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader) template = env.get_template('stagectl.rb.j2') output", "jinja2 import Environment, FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader", "= os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader) template = env.get_template('stagectl.rb.j2') output =", "os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = 
FileSystemLoader('templates') env = Environment(loader=file_loader) template = env.get_template('stagectl.rb.j2')", "import Environment, FileSystemLoader import os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader =", "file_loader = FileSystemLoader('templates') env = Environment(loader=file_loader) template = env.get_template('stagectl.rb.j2') output = template.stream(version=envVersion, sha256=envFileHash).dump('stagectl.rb')", "import os envVersion = os.environ['VERSION'] envFileHash = os.environ['FILE_HASH'] file_loader = FileSystemLoader('templates') env =" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "Act desc = 'test description' @actions.action(desc) def _t(x): return x # Assert self.assertIsInstance(actions,", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description = 'description' # Act @short_description(description) def tfunc(x):", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def", "@short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter)", "License. # You may obtain a copy of the License at # #", "= tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): #", "and # limitations under the License. 
import unittest import mock from djhelpers.adminhelpers import", "law or agreed to in writing, software # distributed under the License is", "boolean=mock.sentinel.kwarg) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description,", "the License for the specific language governing permissions and # limitations under the", "# limitations under the License. import unittest import mock from djhelpers.adminhelpers import ActionDecorator", "compliance with the License. # You may obtain a copy of the License", "import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description = 'description' # Act", "Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__ == '__main__':", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "# Arrange description = 'description' # Act @short_description(description) def tfunc(x): return x result", "description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description = 'description' # Act @short_description(description,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "description = 'description' # Act @short_description(description) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter)", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. 
import", "result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self):", "ActionDecorator() # Act desc = 'test description' @actions.action(desc) def _t(x): return x #", "def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "def test_decorator(self): # Arrange description = 'description' # Act @short_description(description) def tfunc(x): return", "self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions", "def _t(x): return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description,", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "Arrange description = 'description' # Act @short_description(description) def tfunc(x): return x result =", "2014 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. # See the License for the specific", "tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self):", "in compliance with the License. 
# You may obtain a copy of the", "import unittest import mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description class", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "Arrange description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "import mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "# Copyright 2014 <NAME> # # Licensed under the Apache License, Version 2.0", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "desc = 'test description' @actions.action(desc) def _t(x): return x # Assert self.assertIsInstance(actions, list)", "short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description = 'description' # Act @short_description(description)", "# Arrange description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x", "See the License for the specific language governing permissions and # limitations under", "= tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License. 
import unittest import mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= 'test description' @actions.action(desc) def _t(x): return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__ == '__main__': suite = unittest.TestLoader().discover('.')", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description = 'description'", "Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "permissions and # limitations under the License. 
import unittest import mock from djhelpers.adminhelpers", "'boolean')) def test_decorator_with_kwargs(self): # Arrange description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def", "return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if", "governing permissions and # limitations under the License. import unittest import mock from", "self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions = ActionDecorator() # Act", "OF ANY KIND, either express or implied. # See the License for the", "mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions =", "2.0 (the \"License\"); # you may not use this file except in compliance", "x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class", "# you may not use this file except in compliance with the License.", "'test description' @actions.action(desc) def _t(x): return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions)", "mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description = 'description' #", "Act @short_description(description) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter)", "agreed to in writing, software # distributed under the License is distributed on", "Assert self.assertEqual(result, 
mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description =", "return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg)", "test_admin_action(self): # Arrange actions = ActionDecorator() # Act desc = 'test description' @actions.action(desc)", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "= 'description' # Act @short_description(description) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) #", "import ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description", "= ActionDecorator() # Act desc = 'test description' @actions.action(desc) def _t(x): return x", "list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__ == '__main__': suite =", "the License. 
import unittest import mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import", "from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): #", "(the \"License\"); # you may not use this file except in compliance with", "self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg)", "ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description =", "# # Unless required by applicable law or agreed to in writing, software", "def test_admin_action(self): # Arrange actions = ActionDecorator() # Act desc = 'test description'", "x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "Copyright 2014 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. 
# You may obtain a copy of", "Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange", "by applicable law or agreed to in writing, software # distributed under the", "return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean'))", "description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions = ActionDecorator() #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result =", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange", "Arrange actions = ActionDecorator() # Act desc = 'test description' @actions.action(desc) def _t(x):", "actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__ == '__main__': suite = unittest.TestLoader().discover('.') unittest.TextTestRunner(verbosity=2).run(suite)", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "# Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert", "mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions = ActionDecorator() # Act desc", "_t(x): return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc)", "'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) #", "file except in compliance with the License. 
# You may obtain a copy", "@actions.action(desc) def _t(x): return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1)", "description' @actions.action(desc) def _t(x): return x # Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions),", "# Arrange actions = ActionDecorator() # Act desc = 'test description' @actions.action(desc) def", "limitations under the License. import unittest import mock from djhelpers.adminhelpers import ActionDecorator from", "# Act @short_description(description) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "test_decorator(self): # Arrange description = 'description' # Act @short_description(description) def tfunc(x): return x", "specific language governing permissions and # limitations under the License. import unittest import", "@short_description(description) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description,", "= 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter)", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions = ActionDecorator() # Act desc =", "the License. 
# You may obtain a copy of the License at #", "djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description = 'description' #", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase):", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "actions = ActionDecorator() # Act desc = 'test description' @actions.action(desc) def _t(x): return", "\"License\"); # you may not use this file except in compliance with the", "mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self):", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "# Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): #", "required by applicable law or agreed to in writing, software # distributed under", "from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description = 'description'", "# Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, 
description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description", "applicable law or agreed to in writing, software # distributed under the License", "language governing permissions and # limitations under the License. import unittest import mock", "unittest import mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers import short_description class ShortDescriptionDecoratorTest(unittest.TestCase):", "ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions = ActionDecorator() # Act desc = 'test", "# Assert self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__ ==", "# Act desc = 'test description' @actions.action(desc) def _t(x): return x # Assert", "tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert self.assertEqual(result, mock.sentinel.func_test_parameter) self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean,", "or agreed to in writing, software # distributed under the License is distributed", "test_decorator_with_kwargs(self): # Arrange description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x): return", "or implied. # See the License for the specific language governing permissions and", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "the specific language governing permissions and # limitations under the License. import unittest", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "self.assertIsInstance(actions, list) self.assertIn(_t, actions) self.assertEqual(len(actions), 1) self.assertEqual(_t.short_description, desc) if __name__ == '__main__': suite", "'description' # Act @short_description(description) def tfunc(x): return x result = tfunc(mock.sentinel.func_test_parameter) # Assert", "under the License. import unittest import mock from djhelpers.adminhelpers import ActionDecorator from djhelpers.modelhelpers", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "class ShortDescriptionDecoratorTest(unittest.TestCase): def test_decorator(self): # Arrange description = 'description' # Act @short_description(description) def", "in writing, software # distributed under the License is distributed on an \"AS", "self.assertEqual(tfunc.short_description, description) self.assertFalse(hasattr(tfunc, 'boolean')) def test_decorator_with_kwargs(self): # Arrange description = 'description' # Act", "def test_decorator_with_kwargs(self): # Arrange description = 'description' # Act @short_description(description, boolean=mock.sentinel.kwarg) def tfunc(x):", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "self.assertEqual(tfunc.short_description, description) self.assertEqual(tfunc.boolean, mock.sentinel.kwarg) class ActionDecoratorTest(unittest.TestCase): def test_admin_action(self): # Arrange actions = ActionDecorator()" ]
[ "def solution(phone_number): phone_number = list(phone_number) phone_number[:-4] = [\"*\"] * (len(phone_number) - 4) return", "<filename>programmers/blind_phone_number.py # https://programmers.co.kr/learn/courses/30/lessons/12948 def solution(phone_number): phone_number = list(phone_number) phone_number[:-4] = [\"*\"] * (len(phone_number)", "# https://programmers.co.kr/learn/courses/30/lessons/12948 def solution(phone_number): phone_number = list(phone_number) phone_number[:-4] = [\"*\"] * (len(phone_number) -", "https://programmers.co.kr/learn/courses/30/lessons/12948 def solution(phone_number): phone_number = list(phone_number) phone_number[:-4] = [\"*\"] * (len(phone_number) - 4)", "solution(phone_number): phone_number = list(phone_number) phone_number[:-4] = [\"*\"] * (len(phone_number) - 4) return \"\".join(phone_number)" ]
[ "{}) current_cache = current_cache[part] current_level = current_level + 1 else: current_cache[part] = source", "current_cache = self._cache current_level = 0 deepest_level = len(parts) - 1 for part", "def add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for transformation in transformations", "= self._cache for part in parts: if part == '': continue if part", "= path[1] target = path[0] parts = target.split(os.sep) current_cache = self._cache current_level =", "= pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def", "current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path = path[1:] if path", "target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths):", "if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths): for path in", "def get_source_path(self, path): if path.startswith(os.sep): path = path[1:] if path in self._full_path_cache: return", "return '' def _transform_paths(self, paths, patterns): for path in paths: for pattern_pair in", "[ (re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ] paths = directory_lister.list_directory() transformed_paths =", "for pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield", "yield source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths): for path in paths: source", "path == os.sep: return list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache for part", "if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part] 
current_level = current_level +", "path): if path.startswith(os.sep): path = path[1:] if path in self._full_path_cache: return self._full_path_cache[path] return", "= self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path == os.sep: return list(self._cache.keys())", "= current_cache[part] current_level = current_level + 1 else: current_cache[part] = source self._full_path_cache[target] =", "if part == '': continue if part not in current_cache: return list(current_cache.keys()) current_cache", "0 deepest_level = len(parts) - 1 for part in parts: if current_level <", "get_source_path(self, path): if path.startswith(os.sep): path = path[1:] if path in self._full_path_cache: return self._full_path_cache[path]", "= self._cache current_level = 0 deepest_level = len(parts) - 1 for part in", "(re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths,", "current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path = path[1:]", "os.sep: return list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache for part in parts:", "patterns): for path in paths: for pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern", "current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep):", "list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path = path[1:] if path in self._full_path_cache:", "paths): for path in paths: source = path[1] target = path[0] parts =", "re class Transformer: def __init__(self): self._cache = {} self._full_path_cache = {} def add_to_cache(self,", "path): if path == os.sep: return 
list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache", "add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ]", "if path == os.sep: return list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache for", "return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path", "pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ] paths = directory_lister.list_directory()", "in parts: if part == '': continue if part not in current_cache: return", "import os import re class Transformer: def __init__(self): self._cache = {} self._full_path_cache =", "part in parts: if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level", "source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths): for path in paths: source =", "part == '': continue if part not in current_cache: return list(current_cache.keys()) current_cache =", "for part in parts: if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part]", "current_cache[part] current_level = current_level + 1 else: current_cache[part] = source self._full_path_cache[target] = source", "parts: if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level = current_level", "path = path[1:] if path in self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self,", "target.split(os.sep) current_cache = self._cache current_level = 0 deepest_level = len(parts) - 1 for", "1 for part in parts: if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache =", "for 
transformation in transformations ] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths)", "in current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if", "transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path == os.sep: return", "pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def _build_cache(self,", "return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path = path[1:] if path in", "{} self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path)", "parts: if part == '': continue if part not in current_cache: return list(current_cache.keys())", "path.split(os.sep) current_cache = self._cache for part in parts: if part == '': continue", "pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path == os.sep: return list(self._cache.keys()) parts =", "current_level < deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level = current_level + 1", "transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ] paths =", "source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break", "pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern,", "def _build_cache(self, paths): for path in paths: source = path[1] 
target = path[0]", "os import re class Transformer: def __init__(self): self._cache = {} self._full_path_cache = {}", "pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths): for path", "path), path break def _build_cache(self, paths): for path in paths: source = path[1]", "= path.split(os.sep) current_cache = self._cache for part in parts: if part == '':", "paths: source = path[1] target = path[0] parts = target.split(os.sep) current_cache = self._cache", "list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache for part in parts: if part", "def _transform_paths(self, paths, patterns): for path in paths: for pattern_pair in patterns: source_regexp", "current_cache = current_cache[part] current_level = current_level + 1 else: current_cache[part] = source self._full_path_cache[target]", "== os.sep: return list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache for part in", "in self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns): for path in", "self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns): for path in paths:", "'': continue if part not in current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return", "break def _build_cache(self, paths): for path in paths: source = path[1] target =", "= directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path ==", "<filename>src/filesystem/transformation/transformer.py import os import re class Transformer: def __init__(self): self._cache = {} self._full_path_cache", "path[1] target = path[0] parts = target.split(os.sep) current_cache = self._cache current_level = 0", "directory_lister, transformations): pattern_pairs = [ 
(re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ] paths", "= path[0] parts = target.split(os.sep) current_cache = self._cache current_level = 0 deepest_level =", "import re class Transformer: def __init__(self): self._cache = {} self._full_path_cache = {} def", "{} def add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for transformation in", "for part in parts: if part == '': continue if part not in", "path.startswith(os.sep): path = path[1:] if path in self._full_path_cache: return self._full_path_cache[path] return '' def", "deepest_level = len(parts) - 1 for part in parts: if current_level < deepest_level:", "path[0] parts = target.split(os.sep) current_cache = self._cache current_level = 0 deepest_level = len(parts)", "class Transformer: def __init__(self): self._cache = {} self._full_path_cache = {} def add_to_cache(self, directory_lister,", "self._cache current_level = 0 deepest_level = len(parts) - 1 for part in parts:", "< deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level = current_level + 1 else:", "= path[1:] if path in self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self, paths,", "return self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns): for path in paths: for", "for path in paths: source = path[1] target = path[0] parts = target.split(os.sep)", "path[1:] if path in self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns):", "current_level = current_level + 1 else: current_cache[part] = source self._full_path_cache[target] = source break", "in patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path),", "current_level = 0 deepest_level = len(parts) - 
1 for part in parts: if", "self._cache for part in parts: if part == '': continue if part not", "= current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path = path[1:] if", "target = path[0] parts = target.split(os.sep) current_cache = self._cache current_level = 0 deepest_level", "current_cache = self._cache for part in parts: if part == '': continue if", "list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self, path): if path.startswith(os.sep): path =", "get_directory_contents(self, path): if path == os.sep: return list(self._cache.keys()) parts = path.split(os.sep) current_cache =", "for path in paths: for pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern =", "directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path == os.sep:", "__init__(self): self._cache = {} self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations): pattern_pairs =", "path break def _build_cache(self, paths): for path in paths: source = path[1] target", "= target.split(os.sep) current_cache = self._cache current_level = 0 deepest_level = len(parts) - 1", "= [ (re.compile(transformation.from_path), transformation.to_path) for transformation in transformations ] paths = directory_lister.list_directory() transformed_paths", "source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths): for path in paths:", "self._build_cache(transformed_paths) def get_directory_contents(self, path): if path == os.sep: return list(self._cache.keys()) parts = path.split(os.sep)", "continue if part not in current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys())", "deepest_level: 
current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level = current_level + 1 else: current_cache[part]", "not in current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self, path):", "def __init__(self): self._cache = {} self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations): pattern_pairs", "in parts: if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level =", "= pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path break def _build_cache(self, paths): for", "paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path", "= 0 deepest_level = len(parts) - 1 for part in parts: if current_level", "in paths: for pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if", "if part not in current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def", "part in parts: if part == '': continue if part not in current_cache:", "self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for", "def get_directory_contents(self, path): if path == os.sep: return list(self._cache.keys()) parts = path.split(os.sep) current_cache", "self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns): for path in paths: for pattern_pair", "transformations ] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path):", "_transform_paths(self, paths, patterns): for path in 
paths: for pattern_pair in patterns: source_regexp =", "transformation.to_path) for transformation in transformations ] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs)", "path in paths: source = path[1] target = path[0] parts = target.split(os.sep) current_cache", "'' def _transform_paths(self, paths, patterns): for path in paths: for pattern_pair in patterns:", "if path in self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns): for", "Transformer: def __init__(self): self._cache = {} self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations):", "_build_cache(self, paths): for path in paths: source = path[1] target = path[0] parts", "current_cache.setdefault(part, {}) current_cache = current_cache[part] current_level = current_level + 1 else: current_cache[part] =", "path in self._full_path_cache: return self._full_path_cache[path] return '' def _transform_paths(self, paths, patterns): for path", "- 1 for part in parts: if current_level < deepest_level: current_cache.setdefault(part, {}) current_cache", "return list(self._cache.keys()) parts = path.split(os.sep) current_cache = self._cache for part in parts: if", "source = path[1] target = path[0] parts = target.split(os.sep) current_cache = self._cache current_level", "path in paths: for pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1]", "patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path): yield source_regexp.sub(target_pattern, path), path", "paths, patterns): for path in paths: for pattern_pair in patterns: source_regexp = pattern_pair[0]", "part not in current_cache: return list(current_cache.keys()) current_cache = current_cache[part] return list(current_cache.keys()) def get_source_path(self,", "parts = path.split(os.sep) current_cache = self._cache for part in parts: if part 
==", "transformation in transformations ] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def", "if path.startswith(os.sep): path = path[1:] if path in self._full_path_cache: return self._full_path_cache[path] return ''", "= {} def add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path), transformation.to_path) for transformation", "] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if", "parts = target.split(os.sep) current_cache = self._cache current_level = 0 deepest_level = len(parts) -", "len(parts) - 1 for part in parts: if current_level < deepest_level: current_cache.setdefault(part, {})", "= len(parts) - 1 for part in parts: if current_level < deepest_level: current_cache.setdefault(part,", "self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self, path): if path == os.sep: return list(self._cache.keys()) parts", "in transformations ] paths = directory_lister.list_directory() transformed_paths = self._transform_paths(paths, pattern_pairs) self._build_cache(transformed_paths) def get_directory_contents(self,", "in paths: source = path[1] target = path[0] parts = target.split(os.sep) current_cache =", "== '': continue if part not in current_cache: return list(current_cache.keys()) current_cache = current_cache[part]", "paths: for pattern_pair in patterns: source_regexp = pattern_pair[0] target_pattern = pattern_pair[1] if source_regexp.match(path):", "= {} self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations): pattern_pairs = [ (re.compile(transformation.from_path),", "self._cache = {} self._full_path_cache = {} def add_to_cache(self, directory_lister, transformations): pattern_pairs = [" ]
[ "res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에", "보관된 해시가 없으면 그냥 보내서 10개 받아오고, 클라에 해시가 보관되어 있으면 그거 보내서", "r) self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고", "QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat =", "= QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ ==", "if __name__ == '__main__': import sys app = QApplication(sys.argv) window = ListView() window.show()", "self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat =", "API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting = QListWidget()", "뷰어에 넣어줌 \"\"\" print(res) for n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if", "res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에", "\"\"\" 클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고, 클라에 해시가 보관되어 있으면", "* from PyQt5.QtWidgets import * from PyQt5.QtGui import * import requests apiserver =", "= QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout()", "= QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) 
self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput =", "self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import sys app = QApplication(sys.argv) window", "부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res", "res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\"", "initUI(self): self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer", "self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting)", "QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout)", "\"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가", "\"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400,", 
"QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__':", "__name__ == '__main__': import sys app = QApplication(sys.argv) window = ListView() window.show() sys.exit(app.exec())", "self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import sys app", "추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥", "self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import sys", "get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고, 클라에 해시가 보관되어", "채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가 없으면", "def save(self): \"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout()", "= QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat", "\"\"\" print(res) for n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n +", "self.appendChatInput = QLineEdit() 
self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN =", "len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout", "[[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\"", "enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\"", "QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text to", "self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__", "text to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat)", "= QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE')", "else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를", "__init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel 
Downloader Test Client') self.resize(400, 100) self.initUI() def append(self):", "from PyQt5.QtWidgets import * from PyQt5.QtGui import * import requests apiserver = \"\"", "해시 뷰어에 넣어줌 \"\"\" print(res) for n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r))", "Test Client') self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\" res =", "requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]]", "넣어줌 \"\"\" print(res) for n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n", "requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고, 클라에", "send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput)", "* from PyQt5.QtGui import * import requests apiserver = \"\" class ListView(QWidget): def", "self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer)", "self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes =", "self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\"", "n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴 \"\"\" 
requests.get(\"http://192.168.127.12:7474/savedb\")", ": ') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat')", "PyQt5.QtGui import * import requests apiserver = \"\" class ListView(QWidget): def __init__(self, parent=None):", "class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100)", "Client') self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json()", "그거 보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json()", "'': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res):", "self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import sys app = QApplication(sys.argv) window =", "self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import", "from PyQt5.QtCore import * from PyQt5.QtWidgets import * from PyQt5.QtGui import * import", "해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for n, r in enumerate(res): print(n, r)", "self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴", "def initUI(self): 
self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ')", "텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res)", "self.setLayout(self.mainLayout) if __name__ == '__main__': import sys app = QApplication(sys.argv) window = ListView()", "requests apiserver = \"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader", "print(n, r) self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에", "= QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit()", "= \"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client')", "self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import sys app = QApplication(sys.argv)", "self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막", "import * from PyQt5.QtWidgets import * from PyQt5.QtGui import * import requests apiserver", "클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() ==", "def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100) self.initUI() def", "시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle =", "ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100) self.initUI()", "QLineEdit() self.hashes = 
QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit()", "받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res =", "append(self): \"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된", "리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for n, r", "addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시", "* import requests apiserver = \"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent)", "self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의", "= requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고,", "self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer =", "\"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌", "QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes", "print(res) for n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n + 1==", "r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def", "QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat 
= QPushButton('appendChat') self.appendChat.clicked.connect(self.append)", "= QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text", "to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes)", "if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res)", "== '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self,", "\"\"\" if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json()", "QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save)", "save(self): \"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting", "self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') 
self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send')", "넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for n, r in", "마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for n, r in enumerate(res): print(n,", "self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def", "보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res)", "requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고", "import * import requests apiserver = \"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView,", "super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을", "self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\"", "저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle", "self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout =", "보내서 10개 받아오고, 클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴 \"\"\"", "self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput", "Downloader Test 
Client') self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\" res", "받아오고, 클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text()", "res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥 보내서 10개", "import * from PyQt5.QtGui import * import requests apiserver = \"\" class ListView(QWidget):", "') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get)", "n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0]))", "= QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat = QPushButton('appendChat')", "in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self):", "그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for n, r in enumerate(res):", "\"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash", "self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if", "self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') 
self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat)", "100) self.initUI() def append(self): \"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self):", "self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat", "def get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고, 클라에 해시가", "def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는", "PyQt5.QtCore import * from PyQt5.QtWidgets import * from PyQt5.QtGui import * import requests", "import requests apiserver = \"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel", "\"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting =", "형식의 리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for", "self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle) self.hashes.addWidget(self.currentLastHashViewer) self.getChat = QPushButton('getChat') self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() self.appendChatInput.setPlaceholderText('Input", "= requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시,", "그냥 보내서 10개 받아오고, 클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴", "self.getChat.clicked.connect(self.get) self.appendChatInput = QLineEdit() 
self.appendChatInput.setPlaceholderText('Input text to send') self.appendChat = QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN", "해시가 없으면 그냥 보내서 10개 받아오고, 클라에 해시가 보관되어 있으면 그거 보내서 필요한", "apiserver = \"\" class ListView(QWidget): def __init__(self, parent=None): super(ListView, self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test", "해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '':", "1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self):", "있으면 그거 보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res =", "from PyQt5.QtGui import * import requests apiserver = \"\" class ListView(QWidget): def __init__(self,", "10개 받아오고, 클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴 \"\"\" if", "requests.get(\"http://192.168.127.12:7474/savedb\") def initUI(self): self.mainLayout = QVBoxLayout() self.chatting = QListWidget() self.currentLastHashTitle = QLabel('currentLastHash :", "def append(self): \"\"\" 채팅을 추가함 \"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에", "QListWidget() self.currentLastHashTitle = QLabel('currentLastHash : ') self.currentLastHashViewer = QLineEdit() self.hashes = QHBoxLayout() self.hashes.addWidget(self.currentLastHashTitle)", "PyQt5.QtWidgets import * from PyQt5.QtGui import * import requests apiserver = \"\" class", "self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN) self.setLayout(self.mainLayout) if __name__ == '__main__': import sys app =", "\"\"\" res = requests.put(f\"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}\").json() def get(self): \"\"\" 클라에 보관된 해시가 없으면 그냥 보내서", "parent=None): super(ListView, 
self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100) self.initUI() def append(self): \"\"\"", "self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else: res = requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def", "리스트를 리스트뷰에 넣어줌 그리고 마지막 해시는 해시 뷰어에 넣어줌 \"\"\" print(res) for n,", "if n + 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴 \"\"\"", "self).__init__(parent) self.setWindowTitle('Asphodel Downloader Test Client') self.resize(400, 100) self.initUI() def append(self): \"\"\" 채팅을 추가함", "클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고, 클라에 해시가 보관되어 있으면 그거", "= QPushButton('appendChat') self.appendChat.clicked.connect(self.append) self.saveBTN = QPushButton('SAVE') self.saveBTN.clicked.connect(self.save) self.mainLayout.addWidget(self.chatting) self.mainLayout.addWidget(self.getChat) self.mainLayout.addLayout(self.hashes) self.mainLayout.addWidget(self.appendChatInput) self.mainLayout.addWidget(self.appendChat) self.mainLayout.addWidget(self.saveBTN)", "없으면 그냥 보내서 10개 받아오고, 클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만", "보관되어 있으면 그거 보내서 필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res", "for n, r in enumerate(res): print(n, r) self.chatting.addItem(str(r)) if n + 1== len(res):", "필요한 부분만 받아옴 \"\"\" if self.currentLastHashViewer.text() == '': res = requests.get(\"http://192.168.127.12:7474/publicchat\").json() self.addItemList(res) else:", "+ 1== len(res): self.currentLastHashViewer.setText(str(r[0])) def save(self): \"\"\" API에 저장하라고 시킴 \"\"\" requests.get(\"http://192.168.127.12:7474/savedb\") def", "= requests.get(f\"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}\").json() self.addItemList(res) def addItemList(self, res): \"\"\" [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌" ]
[ "contexts or situations. For example, # the use of certain words used by", "'__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME>", "\"\"\" from nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot", "<EMAIL> \"\"\" from nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion", "over the years. book.dispersion_plot(words) if __name__ == '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4,", "the years. book.dispersion_plot(words) if __name__ == '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen',", "['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) #", "used by Presidents over the years. book.dispersion_plot(words) if __name__ == '__main__': # Inaugural", "ekholabs Author: <EMAIL> \"\"\" from nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book, words):", "# <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> - Sense and Sensibility", "of natural language in different contexts or situations. 
For example, # the use", "text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot of the use of natural language", "book.dispersion_plot(words) if __name__ == '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom',", "# Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME> -", "plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad'])", "<NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> - Sense and Sensibility plot_changes_in_use_of_words(text2,", "__name__ == '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America'])", "situations. For example, # the use of certain words used by Presidents over", "Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME> - <NAME>", "years. book.dispersion_plot(words) if __name__ == '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy',", "language in different contexts or situations. For example, # the use of certain", "== '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) #", "by Presidents over the years. 
book.dispersion_plot(words) if __name__ == '__main__': # Inaugural Address", "'democracy', 'freedom', 'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME>", "Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1,", "example, # the use of certain words used by Presidents over the years.", "the use of natural language in different contexts or situations. For example, #", "use of natural language in different contexts or situations. For example, # the", "Author: <EMAIL> \"\"\" from nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book, words): #", "from nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot of", "\"\"\" Organisation: ekholabs Author: <EMAIL> \"\"\" from nltk.book import text1, text2, text4 def", "the use of certain words used by Presidents over the years. book.dispersion_plot(words) if", "Organisation: ekholabs Author: <EMAIL> \"\"\" from nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book,", "plot_changes_in_use_of_words(book, words): # Dispersion plot of the use of natural language in different", "in different contexts or situations. For example, # the use of certain words", "For example, # the use of certain words used by Presidents over the", "def plot_changes_in_use_of_words(book, words): # Dispersion plot of the use of natural language in", "# Dispersion plot of the use of natural language in different contexts or", "plot of the use of natural language in different contexts or situations. For", "natural language in different contexts or situations. For example, # the use of", "# the use of certain words used by Presidents over the years. 
book.dispersion_plot(words)", "words): # Dispersion plot of the use of natural language in different contexts", "text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot of the use of natural", "or situations. For example, # the use of certain words used by Presidents", "different contexts or situations. For example, # the use of certain words used", "'freedom', 'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> -", "Dispersion plot of the use of natural language in different contexts or situations.", "certain words used by Presidents over the years. book.dispersion_plot(words) if __name__ == '__main__':", "'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> - Sense", "Presidents over the years. book.dispersion_plot(words) if __name__ == '__main__': # Inaugural Address Corpus", "'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> - Sense and", "<NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> - Sense and Sensibility plot_changes_in_use_of_words(text2, ['happy', 'sad'])", "text1, text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot of the use of", "- <NAME> plot_changes_in_use_of_words(text1, ['happy', 'sad']) # <NAME> - Sense and Sensibility plot_changes_in_use_of_words(text2, ['happy',", "nltk.book import text1, text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot of the", "of certain words used by Presidents over the years. book.dispersion_plot(words) if __name__ ==", "import text1, text2, text4 def plot_changes_in_use_of_words(book, words): # Dispersion plot of the use", "use of certain words used by Presidents over the years. book.dispersion_plot(words) if __name__", "words used by Presidents over the years. 
book.dispersion_plot(words) if __name__ == '__main__': #", "Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America']) # <NAME> - <NAME> plot_changes_in_use_of_words(text1, ['happy',", "if __name__ == '__main__': # Inaugural Address Corpus plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties',", "of the use of natural language in different contexts or situations. For example," ]
[ "('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields = ['comment'] admin.site.register(Project)", "list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields =", "admin from .models import Project,Profile,Review,Comment # Register your models here. class ReviewAdmin(admin.ModelAdmin): model", "model = Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date',", "models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating', 'user_name', 'comment',", "from django.contrib import admin from .models import Project,Profile,Review,Comment # Register your models here.", "class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter", "Register your models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating',", "from .models import Project,Profile,Review,Comment # Register your models here. class ReviewAdmin(admin.ModelAdmin): model =", "'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields = ['comment'] admin.site.register(Project) admin.site.register(Profile)", "Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields", "'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields = ['comment'] admin.site.register(Project) admin.site.register(Profile) admin.site.register(Review)", "= ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields = ['comment']", "import admin from .models import Project,Profile,Review,Comment # Register your models here. 
class ReviewAdmin(admin.ModelAdmin):", "= Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name']", "your models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating', 'user_name',", "# Register your models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine',", "'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields = ['comment'] admin.site.register(Project) admin.site.register(Profile) admin.site.register(Review) admin.site.register(Comment)", "Project,Profile,Review,Comment # Register your models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display =", ".models import Project,Profile,Review,Comment # Register your models here. class ReviewAdmin(admin.ModelAdmin): model = Review", "ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter =", "django.contrib import admin from .models import Project,Profile,Review,Comment # Register your models here. class", "import Project,Profile,Review,Comment # Register your models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display", "here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date')" ]
[ "= n_out self.model = self._net_model() self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units),", "n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out", "= n_units self.n_out = n_out self.model = self._net_model() self.colors = colors def _net_model(self):", "class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red',", "self.n_out = n_out self.model = self._net_model() self.colors = colors def _net_model(self): layer =", "L class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6,", "pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')):", "self._net_model() self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1)", "def _normalize_data(self, image): return image.reshape(100, -1, 3) def predict(self, blob): image = self._normalize_data(blob)", "from abc import ABC import chainer import chainer.functions as F import chainer.links as", "def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units", "__init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out", "_net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy,", "model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1, 3)", "def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = 
layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(),", "n_units self.n_out = n_out self.model = self._net_model() self.colors = colors def _net_model(self): layer", "ABC import chainer import chainer.functions as F import chainer.links as L class AbstractModel(ABC):", "def predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green',", "DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units =", "'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out self.model = self._net_model() self.colors", "chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self,", "'yellow')): self.n_units = n_units self.n_out = n_out self.model = self._net_model() self.colors = colors", "chainer import chainer.functions as F import chainer.links as L class AbstractModel(ABC): def predict(self,", "'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out self.model = self._net_model() self.colors =", "self.model = self._net_model() self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model", "accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1, 3) def predict(self, blob): image =", "AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue',", "= self._net_model() self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model =", "blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta',", "= colors def _net_model(self): layer = 
chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return", "import chainer import chainer.functions as F import chainer.links as L class AbstractModel(ABC): def", "import chainer.links as L class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def", "colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier(", "F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image):", "_normalize_data(self, image): return image.reshape(100, -1, 3) def predict(self, blob): image = self._normalize_data(blob) return", "colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out self.model", "chainer.links as L class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def __init__(self,", "n_out self.model = self._net_model() self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu)", "self.n_units = n_units self.n_out = n_out self.model = self._net_model() self.colors = colors def", "n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out =", "= chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def", "return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1, 3) def", "'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out self.model = self._net_model()", "model = layer.repeat(1) 
model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return", "image): return image.reshape(100, -1, 3) def predict(self, blob): image = self._normalize_data(blob) return self.model.predictor(image[None]).data", "import ABC import chainer import chainer.functions as F import chainer.links as L class", "as L class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100,", "'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out self.model =", "import chainer.functions as F import chainer.links as L class AbstractModel(ABC): def predict(self, blob):", "layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1,", "class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units", "layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy)", "self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out))", "= layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100,", "L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1, 3) def predict(self,", "self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): 
return image.reshape(100, -1, 3) def predict(self, blob):", "lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1, 3) def predict(self, blob): image", "abc import ABC import chainer import chainer.functions as F import chainer.links as L", "F import chainer.links as L class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel:", "predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan',", "chainer.functions as F import chainer.links as L class AbstractModel(ABC): def predict(self, blob): pass", "as F import chainer.links as L class AbstractModel(ABC): def predict(self, blob): pass class" ]
[ "Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def", "if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too ' + request.args.get(\"email\") +", "dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx", "file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")]", "has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as", "(request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\")", "f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as", "file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() -", "import dropbox import json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" 
notify.endpoint=notifyendpoint notify.write_config() from flask", "= dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if", "json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={}", "dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read():", "open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \"", "dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")] >", "dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\"))", "import os import time import dropbox import json dropboxkey=\"\" notify = Notify() 
notifyendpoint=\"\"", "from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app =", "with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx", "20: notify.send('Your email too ' + request.args.get(\"email\") + \" with subject: \" +", "text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) +", "= dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \" if __name__ ==", "return \" NONE TEST VIEW \" if __name__ == \"__main__\": app.run(port= int(os.environ.get('PORT', 5000)),host=\"0.0.0.0\")", "dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t))", "import Counter from notify_run import Notify import os import time import dropbox import", "notify.send('Your email too ' + request.args.get(\"email\") + \" with subject: \" + request.args.get(\"subject\")", "open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with", 
"Notify import os import time import dropbox import json dropboxkey=\"\" notify = Notify()", "== 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")]", "as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt',", "with subject: \" + request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with", "notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\")", "import Notify import os import time import dropbox import json dropboxkey=\"\" notify =", "as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE", "with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return", 
"dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \" if __name__ == \"__main__\": app.run(port= int(os.environ.get('PORT',", "as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey)", "time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\"))", "dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\"))", "f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW", "notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx =", "open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1:", "import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") 
dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\")", "= time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif", "f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite)", "= Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\"))", "maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite)", "- maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too ' + request.args.get(\"email\") + \" with", "notify_run import Notify import os import time import dropbox import json dropboxkey=\"\" notify", "maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as", "= dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")] > 20:", 
"dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email", "dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \" if __name__ == \"__main__\":", "dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey)", "def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with", "dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx =", "dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in", "os import time import dropbox import json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint", "subject: \" + request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with 
open(\"logpixel.txt\",\"w+\")", "collections import Counter from notify_run import Notify import os import time import dropbox", "1: if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too ' + request.args.get(\"email\")", "> 20: notify.send('Your email too ' + request.args.get(\"email\") + \" with subject: \"", "== 1: if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too ' +", "+ \" with subject: \" + request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read()", "pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx =", "notify.write_config() from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app", "f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite)", "import time import dropbox import json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config()", "with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) ==", "home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in 
open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\")", "as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time()", "from notify_run import Notify import os import time import dropbox import json dropboxkey=\"\"", "' + request.args.get(\"email\") + \" with subject: \" + request.args.get(\"subject\") + \" has", "0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] =", "+\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as file:", "\"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \" if", "dropbox import json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import", "+ \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \"", "@app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) 
if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0:", "Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) ==", "import json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request", "email too ' + request.args.get(\"email\") + \" with subject: \" + request.args.get(\"subject\") +", "open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx", "in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\")", "request.args.get(\"email\") + \" with subject: \" + request.args.get(\"subject\") + \" has been opened\")", "dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass", "notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\")", "f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+')", 
"= dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx", "+ request.args.get(\"email\") + \" with subject: \" + request.args.get(\"subject\") + \" has been", "flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__)", "opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\"))", "+ request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f:", "+ \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with", "f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST", "from collections import Counter from notify_run import Notify import os import time import", "with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get(\"id\")] = 
time.time()", "\" + request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as", "maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too ' + request.args.get(\"email\") + \" with subject:", "dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \" if __name__", "open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\") dbx =", "Counter from notify_run import Notify import os import time import dropbox import json", "request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text)", "too ' + request.args.get(\"email\") + \" with subject: \" + request.args.get(\"subject\") + \"", "\" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\")", "maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\") dbx.files_download_to_file(\"logpixel.txt\",\"/logpixel.txt\") dbx.files_download_to_file(\"dictionary.txt\",\"/dictionary.txt\") app = Flask(__name__) @app.route(\"/pixel\") def home():", "dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open(\"bannedpixel.txt\",\"rb\").read(),\"/bannedpixel.txt\",mode=dropbox.files.WriteMode.overwrite) return \" NONE TEST VIEW \" if __name__ == \"__main__\": 
app.run(port=", "\" with subject: \" + request.args.get(\"subject\") + \" has been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\")", "been opened\") text=open(\"logpixel.txt\",\"a+\").read() text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f:", "'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if", "time import dropbox import json dropboxkey=\"\" notify = Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from", "open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too '", "elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\")", "text=text.replace((request.args.get(\"id\")),\"\") with open(\"logpixel.txt\",\"w+\") as f: f.write(text) with open(\"bannedpixel.txt\",\"a+\") as f: f.writelines((request.args.get(\"id\")) + \"\\n\")", "= Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey)", "dropbox.Dropbox(dropboxkey) dbx.files_upload(open(\"dictionary.txt\",\"rb\").read(),\"/dictionary.txt\",mode=dropbox.files.WriteMode.overwrite) elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your", "app = Flask(__name__) @app.route(\"/pixel\") def home(): maindictionary=json.load(open(\"dictionary.txt\")) if (request.args.get(\"id\")) in 
open(\"bannedpixel.txt\").read(): pass elif", "if (request.args.get(\"id\")) in open(\"bannedpixel.txt\").read(): pass elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f:", "time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too ' + request.args.get(\"email\") + \"", "open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 0: with open(\"logpixel.txt\",\"a+\") as f: f.writelines(request.args.get(\"id\") +\"\\n\") dbx = dropbox.Dropbox(\"\") dbx.files_upload(open(\"logpixel.txt\",\"rb\").read(),\"/logpixel.txt\",mode=dropbox.files.WriteMode.overwrite)", "elif open(\"logpixel.txt\").read().count(request.args.get(\"id\")) == 1: if time.time() - maindictionary[request.args.get(\"id\")] > 20: notify.send('Your email too", "Notify() notifyendpoint=\"\" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file(\"bannedpixel.txt\",\"/bannedpixel.txt\")" ]
[ "que não serão informados valores iguais) e escrever o maior deles. ''' print('Digite", "<reponame>GabiDeutner/Python_exercises<filename>Aula02/exercise1.py ''' 1. Escreva um programa para ler 2 valores (considere que não", "valores (considere que não serão informados valores iguais) e escrever o maior deles.", "iguais) e escrever o maior deles. ''' print('Digite o numero 1:') numero1 =", "2 valores (considere que não serão informados valores iguais) e escrever o maior", "1. Escreva um programa para ler 2 valores (considere que não serão informados", "''' 1. Escreva um programa para ler 2 valores (considere que não serão", "(considere que não serão informados valores iguais) e escrever o maior deles. '''", "1:') numero1 = float(input()) print('Digite o numero 2:') numero2 = float(input()) if(numero1>numero2): print(numero1)", "e escrever o maior deles. ''' print('Digite o numero 1:') numero1 = float(input())", "para ler 2 valores (considere que não serão informados valores iguais) e escrever", "''' print('Digite o numero 1:') numero1 = float(input()) print('Digite o numero 2:') numero2", "escrever o maior deles. ''' print('Digite o numero 1:') numero1 = float(input()) print('Digite", "ler 2 valores (considere que não serão informados valores iguais) e escrever o", "informados valores iguais) e escrever o maior deles. ''' print('Digite o numero 1:')", "o maior deles. ''' print('Digite o numero 1:') numero1 = float(input()) print('Digite o", "programa para ler 2 valores (considere que não serão informados valores iguais) e", "print('Digite o numero 1:') numero1 = float(input()) print('Digite o numero 2:') numero2 =", "um programa para ler 2 valores (considere que não serão informados valores iguais)", "o numero 1:') numero1 = float(input()) print('Digite o numero 2:') numero2 = float(input())", "deles. ''' print('Digite o numero 1:') numero1 = float(input()) print('Digite o numero 2:')", "valores iguais) e escrever o maior deles. 
''' print('Digite o numero 1:') numero1", "numero1 = float(input()) print('Digite o numero 2:') numero2 = float(input()) if(numero1>numero2): print(numero1) else:", "maior deles. ''' print('Digite o numero 1:') numero1 = float(input()) print('Digite o numero", "= float(input()) print('Digite o numero 2:') numero2 = float(input()) if(numero1>numero2): print(numero1) else: print(numero2)", "não serão informados valores iguais) e escrever o maior deles. ''' print('Digite o", "numero 1:') numero1 = float(input()) print('Digite o numero 2:') numero2 = float(input()) if(numero1>numero2):", "serão informados valores iguais) e escrever o maior deles. ''' print('Digite o numero", "Escreva um programa para ler 2 valores (considere que não serão informados valores" ]
[ "started (i.e. it must be running or over). \"\"\" @wraps(view) def func(request, *args,", "\"\"\" @wraps(view) def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is", "return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func def competition_started_required(view): \"\"\" View decorator", "if the competition has not yet started (i.e. it must be running or", "which prohibts access to the decorated view if the competition has not yet", "is currently closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func def competition_started_required(view):", "running or over). \"\"\" @wraps(view) def func(request, *args, **kwargs): game_control = GameControl.objects.get() if", "def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.'))", "def func(request, *args, **kwargs): game_control = GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over():", "over). \"\"\" @wraps(view) def func(request, *args, **kwargs): game_control = GameControl.objects.get() if not game_control.competition_running()", "ugettext as _ from django.contrib import messages from .models import GameControl def registration_open_required(view):", "to the decorated view if the competition has not yet started (i.e. it", "from functools import wraps from django.shortcuts import redirect from django.conf import settings from", "has not yet started (i.e. it must be running or over). \"\"\" @wraps(view)", "_ from django.contrib import messages from .models import GameControl def registration_open_required(view): \"\"\" View", "decorated view if registration is closed from the GameControl object. \"\"\" @wraps(view) def", "it must be running or over). 
\"\"\" @wraps(view) def func(request, *args, **kwargs): game_control", "from django.contrib import messages from .models import GameControl def registration_open_required(view): \"\"\" View decorator", "func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.')) return", "and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not available yet.')) return redirect(settings.HOME_URL)", "messages.error(request, _('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return", "competition has not yet started (i.e. it must be running or over). \"\"\"", "view(request, *args, **kwargs) return func def competition_started_required(view): \"\"\" View decorator which prohibts access", "if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL) return view(request,", "(i.e. it must be running or over). \"\"\" @wraps(view) def func(request, *args, **kwargs):", "prohibts access to the decorated view if registration is closed from the GameControl", "\"\"\" View decorator which prohibts access to the decorated view if registration is", "django.shortcuts import redirect from django.conf import settings from django.utils.translation import ugettext as _", "decorated view if the competition has not yet started (i.e. it must be", "functools import wraps from django.shortcuts import redirect from django.conf import settings from django.utils.translation", "import ugettext as _ from django.contrib import messages from .models import GameControl def", ".models import GameControl def registration_open_required(view): \"\"\" View decorator which prohibts access to the", "GameControl object. 
\"\"\" @wraps(view) def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry,", "GameControl def registration_open_required(view): \"\"\" View decorator which prohibts access to the decorated view", "wraps from django.shortcuts import redirect from django.conf import settings from django.utils.translation import ugettext", "view if registration is closed from the GameControl object. \"\"\" @wraps(view) def func(request,", "view if the competition has not yet started (i.e. it must be running", "*args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL)", "def registration_open_required(view): \"\"\" View decorator which prohibts access to the decorated view if", "registration is currently closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func def", "@wraps(view) def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently", "the GameControl object. \"\"\" @wraps(view) def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request,", "= GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is", "redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func def competition_started_required(view): \"\"\" View decorator which", "<reponame>exokortex/kaindorfctf-2018-ctf-gameserver from functools import wraps from django.shortcuts import redirect from django.conf import settings", "not yet started (i.e. it must be running or over). 
\"\"\" @wraps(view) def", "**kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL) return", "be running or over). \"\"\" @wraps(view) def func(request, *args, **kwargs): game_control = GameControl.objects.get()", "game_control = GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard", "the decorated view if the competition has not yet started (i.e. it must", "must be running or over). \"\"\" @wraps(view) def func(request, *args, **kwargs): game_control =", "from django.conf import settings from django.utils.translation import ugettext as _ from django.contrib import", "access to the decorated view if registration is closed from the GameControl object.", "from django.shortcuts import redirect from django.conf import settings from django.utils.translation import ugettext as", "not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL) return view(request, *args,", "redirect from django.conf import settings from django.utils.translation import ugettext as _ from django.contrib", "*args, **kwargs) return func def competition_started_required(view): \"\"\" View decorator which prohibts access to", "decorator which prohibts access to the decorated view if the competition has not", "import wraps from django.shortcuts import redirect from django.conf import settings from django.utils.translation import", "closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func def competition_started_required(view): \"\"\" View", "**kwargs): game_control = GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the", "as _ from django.contrib import messages 
from .models import GameControl def registration_open_required(view): \"\"\"", "from django.utils.translation import ugettext as _ from django.contrib import messages from .models import", "import redirect from django.conf import settings from django.utils.translation import ugettext as _ from", "not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not available yet.'))", "_('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func", "*args, **kwargs): game_control = GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry,", "decorator which prohibts access to the decorated view if registration is closed from", "\"\"\" View decorator which prohibts access to the decorated view if the competition", "from .models import GameControl def registration_open_required(view): \"\"\" View decorator which prohibts access to", "import settings from django.utils.translation import ugettext as _ from django.contrib import messages from", "currently closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func def competition_started_required(view): \"\"\"", "messages from .models import GameControl def registration_open_required(view): \"\"\" View decorator which prohibts access", "access to the decorated view if the competition has not yet started (i.e.", "closed from the GameControl object. \"\"\" @wraps(view) def func(request, *args, **kwargs): if not", "from the GameControl object. \"\"\" @wraps(view) def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open:", "the decorated view if registration is closed from the GameControl object. 
\"\"\" @wraps(view)", "import GameControl def registration_open_required(view): \"\"\" View decorator which prohibts access to the decorated", "competition_started_required(view): \"\"\" View decorator which prohibts access to the decorated view if the", "object. \"\"\" @wraps(view) def func(request, *args, **kwargs): if not GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration", "to the decorated view if registration is closed from the GameControl object. \"\"\"", "\"\"\" @wraps(view) def func(request, *args, **kwargs): game_control = GameControl.objects.get() if not game_control.competition_running() and", "django.contrib import messages from .models import GameControl def registration_open_required(view): \"\"\" View decorator which", "django.conf import settings from django.utils.translation import ugettext as _ from django.contrib import messages", "View decorator which prohibts access to the decorated view if registration is closed", "is closed from the GameControl object. \"\"\" @wraps(view) def func(request, *args, **kwargs): if", "func(request, *args, **kwargs): game_control = GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over(): messages.error(request,", "registration_open_required(view): \"\"\" View decorator which prohibts access to the decorated view if registration", "if not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not available", "View decorator which prohibts access to the decorated view if the competition has", "GameControl.objects.get().registration_open: messages.error(request, _('Sorry, registration is currently closed.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs)", "yet started (i.e. it must be running or over). 
\"\"\" @wraps(view) def func(request,", "GameControl.objects.get() if not game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not", "not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not available yet.')) return redirect(settings.HOME_URL) return", "registration is closed from the GameControl object. \"\"\" @wraps(view) def func(request, *args, **kwargs):", "settings from django.utils.translation import ugettext as _ from django.contrib import messages from .models", "if registration is closed from the GameControl object. \"\"\" @wraps(view) def func(request, *args,", "return func def competition_started_required(view): \"\"\" View decorator which prohibts access to the decorated", "return view(request, *args, **kwargs) return func def competition_started_required(view): \"\"\" View decorator which prohibts", "**kwargs) return func def competition_started_required(view): \"\"\" View decorator which prohibts access to the", "import messages from .models import GameControl def registration_open_required(view): \"\"\" View decorator which prohibts", "def competition_started_required(view): \"\"\" View decorator which prohibts access to the decorated view if", "prohibts access to the decorated view if the competition has not yet started", "scoreboard is not available yet.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return func", "which prohibts access to the decorated view if registration is closed from the", "game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not available yet.')) return redirect(settings.HOME_URL) return view(request,", "the scoreboard is not available yet.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs) return", "the competition has not yet started (i.e. 
it must be running or over).", "game_control.competition_running() and not game_control.competition_over(): messages.error(request, _('Sorry, the scoreboard is not available yet.')) return", "@wraps(view) def func(request, *args, **kwargs): game_control = GameControl.objects.get() if not game_control.competition_running() and not", "messages.error(request, _('Sorry, the scoreboard is not available yet.')) return redirect(settings.HOME_URL) return view(request, *args,", "_('Sorry, the scoreboard is not available yet.')) return redirect(settings.HOME_URL) return view(request, *args, **kwargs)", "django.utils.translation import ugettext as _ from django.contrib import messages from .models import GameControl", "or over). \"\"\" @wraps(view) def func(request, *args, **kwargs): game_control = GameControl.objects.get() if not", "func def competition_started_required(view): \"\"\" View decorator which prohibts access to the decorated view" ]
[ "\"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for", "= DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass # the edge", "the graph. ins_matrix(list): The incidence matrix of the graph. Auto creat if the", "Plays mst in interation. Input [root] first generally. Returns: Min spanning tree. Raises:", "GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst min_ = math.inf for i in", "root_nb = gi.get_nb(root[i]) # find which edges weight is min for j in", "sorted by weight E = sorted(E, key = lambda x: x[1]) for i", "self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges sorted by", "gi.get_weight(e) min_term = root_nb[j] min_edge = e if len(root) == self.N: return self.mst", "Attention: Undirected graph difinition. Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N =", "import GI from .disjoint_set import DS import math class MST(): def __init__(self, adj_matrix,", "# find which edges weight is min for j in range(len(root_nb)): if not(root_nb[j]", "in range(len(root)): root_nb = gi.get_nb(root[i]) # find which edges weight is min for", "of the graph. ins_matrix(list): The incidence matrix of the graph. 
Auto creat if", "[] # contain vertices list now self.mst = [] # min spanning tree", "x: x[1]) for i in range(len(E)): E[i] = E[i][0] for i in range(len(E)):", "ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass # the", "self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b)", "self.N: return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary check loop ###", "disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass #", "unnecessary check loop ### return self.prims_algo(root) def put_all(a, b): for i in a:", "ins_matrix(list): The incidence matrix of the graph. Auto creat if the graph is", "check loop ### return self.prims_algo(root) def put_all(a, b): for i in a: if", "range(len(root_nb)): if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_:", "in interation. Input [root] first generally. Returns: Min spanning tree. Raises: ValueError, TypeError", "if not (v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b) return", "= len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = [] # contain vertices list now", "E[i] = E[i][0] for i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from", "Min spanning tree. Attention: Undirected graph difinition. Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix =", "in self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self,", "get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst min_ = math.inf", "root_nb[j] min_edge = e if len(root) == self.N: return self.mst root.append(min_term) self.con_ver =", "interation. Input [root] first generally. Returns: Min spanning tree. 
Raises: ValueError, TypeError \"\"\"", "ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i,", "if the graph is undirect. Returns: Min spanning tree. Attention: Undirected graph difinition.", "gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E = sorted(E,", "if gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term = root_nb[j] min_edge = e", "E = sorted(E, key = lambda x: x[1]) for i in range(len(E)): E[i]", "self.prims_algo(root) def put_all(a, b): for i in a: if not(i in b): b.append(i)", "ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix of the graph. ins_matrix(list): The incidence", "sorted(E, key = lambda x: x[1]) for i in range(len(E)): E[i] = E[i][0]", "self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary check loop ### return self.prims_algo(root)", "def put_all(a, b): for i in a: if not(i in b): b.append(i) return", "The adjacency matrix of the graph. ins_matrix(list): The incidence matrix of the graph.", "# from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i", "DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass # the edge could", "(v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\" Parameters: root(list): The", "root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary check loop ### return self.prims_algo(root) def", "the min spanning tree. Plays mst in interation. Input [root] first generally. 
Returns:", "edges sorted by weight E = sorted(E, key = lambda x: x[1]) for", "### unnecessary check loop ### return self.prims_algo(root) def put_all(a, b): for i in", "DS import math class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The", "\"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = []", "self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = [] # contain vertices list", "now self.mst = [] # min spanning tree now def kruskal_algo(self): \"\"\" Returns:", "= E[i][0] for i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set", "mst in interation. Input [root] first generally. Returns: Min spanning tree. Raises: ValueError,", "generally. Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" # from get_imformation gi", "\"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst", "make circle else: self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b", "ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors", "v_a, v_b) if ds.same_set(v_a, v_b): pass # the edge could make circle else:", "i in range(len(root)): root_nb = gi.get_nb(root[i]) # find which edges weight is min", "[(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E =", "not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_ =", "= gi.get_weight(e) min_term = root_nb[j] min_edge = e if len(root) == self.N: return", "root of the min spanning tree. Plays mst in interation. Input [root] first", "kruskal_algo(self): \"\"\" Returns: Min spanning tree. 
Raises: ValueError, TypeError \"\"\" # from get_imformation", "in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E = sorted(E, key = lambda", "Undirected graph difinition. Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix)", "root): \"\"\" Parameters: root(list): The root of the min spanning tree. Plays mst", "min for j in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j])", "undirect. Returns: Min spanning tree. Attention: Undirected graph difinition. Raises: ValueError, TypeError \"\"\"", "\"\"\" Parameters: adj_matrix(list): The adjacency matrix of the graph. ins_matrix(list): The incidence matrix", "# edges sorted by weight E = sorted(E, key = lambda x: x[1])", "Input [root] first generally. Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" #", "(v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def", "prims_algo(self, root): \"\"\" Parameters: root(list): The root of the min spanning tree. Plays", "for j in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if", "else: self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver):", "which edges weight is min for j in range(len(root_nb)): if not(root_nb[j] in self.con_ver):", "= lambda x: x[1]) for i in range(len(E)): E[i] = E[i][0] for i", "root_nb[j]) if gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term = root_nb[j] min_edge =", "pass # the edge could make circle else: self.mst.append(E[i]) if not (v_a in", "spanning tree. Raises: ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix)", "graph. Auto creat if the graph is undirect. Returns: Min spanning tree. Attention:", "graph is undirect. Returns: Min spanning tree. Attention: Undirected graph difinition. 
Raises: ValueError,", "from .disjoint_set import DS import math class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\"", "min_ = gi.get_weight(e) min_term = root_nb[j] min_edge = e if len(root) == self.N:", "edge could make circle else: self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a) if", "\"\"\" Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" # from get_imformation gi", "could make circle else: self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a) if not", "v_b): pass # the edge could make circle else: self.mst.append(E[i]) if not (v_a", "__init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix of the graph. ins_matrix(list):", "graph difinition. Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix", "Min spanning tree. Raises: ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix,", "The root of the min spanning tree. Plays mst in interation. Input [root]", "def kruskal_algo(self): \"\"\" Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" # from", "= root_nb[j] min_edge = e if len(root) == self.N: return self.mst root.append(min_term) self.con_ver", "gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst min_ = math.inf for", "put_all(a, b): for i in a: if not(i in b): b.append(i) return b", "Returns: Min spanning tree. 
Raises: ValueError, TypeError \"\"\" # from get_imformation gi =", "return self.prims_algo(root) def put_all(a, b): for i in a: if not(i in b):", "from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i in", "from .get_imformation import GI from .disjoint_set import DS import math class MST(): def", "weight E = sorted(E, key = lambda x: x[1]) for i in range(len(E)):", "self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\"", "find which edges weight is min for j in range(len(root_nb)): if not(root_nb[j] in", "creat if the graph is undirect. Returns: Min spanning tree. Attention: Undirected graph", "ins_matrix self.con_ver = [] # contain vertices list now self.mst = [] #", "range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E = sorted(E, key = lambda x:", "in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\" Parameters: root(list): The root", "= gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if", "# the edge could make circle else: self.mst.append(E[i]) if not (v_a in self.con_ver):", "the edge could make circle else: self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a)", "graph. ins_matrix(list): The incidence matrix of the graph. Auto creat if the graph", "= GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges", "of the graph. Auto creat if the graph is undirect. Returns: Min spanning", "gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term = root_nb[j] min_edge", "spanning tree. Attention: Undirected graph difinition. Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix", "tree. Attention: Undirected graph difinition. 
Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N", "if ds.same_set(v_a, v_b): pass # the edge could make circle else: self.mst.append(E[i]) if", "= [] # contain vertices list now self.mst = [] # min spanning", ".disjoint_set import DS import math class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters:", ".get_imformation import GI from .disjoint_set import DS import math class MST(): def __init__(self,", "difinition. Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix =", "adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = [] # contain vertices", "loop ### return self.prims_algo(root) def put_all(a, b): for i in a: if not(i", "i in range(len(E)): E[i] = E[i][0] for i in range(len(E)): (v_a, v_b) =", "min_edge = e if len(root) == self.N: return self.mst root.append(min_term) self.con_ver = root", "== self.N: return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary check loop", "tree. Raises: ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E", "math.inf for i in range(len(root)): root_nb = gi.get_nb(root[i]) # find which edges weight", "adj_matrix(list): The adjacency matrix of the graph. ins_matrix(list): The incidence matrix of the", "first generally. Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" # from get_imformation", "range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst,", "import DS import math class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list):", "by weight E = sorted(E, key = lambda x: x[1]) for i in", "of the min spanning tree. Plays mst in interation. 
Input [root] first generally.", "self.Insidence_Matrix) # all neighbors of mst min_ = math.inf for i in range(len(root)):", "= [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E", "TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of", "len(root) == self.N: return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary check", "self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term", "GI from .disjoint_set import DS import math class MST(): def __init__(self, adj_matrix, ins_matrix):", "circle else: self.mst.append(E[i]) if not (v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b in", "now def kruskal_algo(self): \"\"\" Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" #", "The incidence matrix of the graph. Auto creat if the graph is undirect.", "the graph is undirect. Returns: Min spanning tree. Attention: Undirected graph difinition. Raises:", "neighbors of mst min_ = math.inf for i in range(len(root)): root_nb = gi.get_nb(root[i])", "the graph. Auto creat if the graph is undirect. Returns: Min spanning tree.", "is min for j in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i],", "min_term = root_nb[j] min_edge = e if len(root) == self.N: return self.mst root.append(min_term)", "gi.get_nb(root[i]) # find which edges weight is min for j in range(len(root_nb)): if", "in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) <", "i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix,", "min spanning tree now def kruskal_algo(self): \"\"\" Returns: Min spanning tree. 
Raises: ValueError,", "if not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\" Parameters:", "import math class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency", "contain vertices list now self.mst = [] # min spanning tree now def", "not (v_a in self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst", "for i in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E = sorted(E, key", "[root] first generally. Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\" # from", "tree now def kruskal_algo(self): \"\"\" Returns: Min spanning tree. Raises: ValueError, TypeError \"\"\"", "spanning tree now def kruskal_algo(self): \"\"\" Returns: Min spanning tree. Raises: ValueError, TypeError", "self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\" Parameters: root(list): The root of", "self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\" Parameters: root(list): The root of the", "root(list): The root of the min spanning tree. Plays mst in interation. Input", "from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst min_ =", "Parameters: root(list): The root of the min spanning tree. Plays mst in interation.", "get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))]", "matrix of the graph. Auto creat if the graph is undirect. Returns: Min", "# min spanning tree now def kruskal_algo(self): \"\"\" Returns: Min spanning tree. Raises:", "spanning tree. Plays mst in interation. Input [root] first generally. 
Returns: Min spanning", "min_: min_ = gi.get_weight(e) min_term = root_nb[j] min_edge = e if len(root) ==", "e if len(root) == self.N: return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ###", "all neighbors of mst min_ = math.inf for i in range(len(root)): root_nb =", "Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix", "= [] # min spanning tree now def kruskal_algo(self): \"\"\" Returns: Min spanning", "def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix of the graph.", "x[1]) for i in range(len(E)): E[i] = E[i][0] for i in range(len(E)): (v_a,", "# from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst min_", "in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_ = gi.get_weight(e)", "< min_: min_ = gi.get_weight(e) min_term = root_nb[j] min_edge = e if len(root)", "self.con_ver): self.con_ver.append(v_a) if not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root):", "vertices list now self.mst = [] # min spanning tree now def kruskal_algo(self):", "GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges sorted", "adjacency matrix of the graph. ins_matrix(list): The incidence matrix of the graph. 
Auto", "Raises: ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E =", "= math.inf for i in range(len(root)): root_nb = gi.get_nb(root[i]) # find which edges", "j in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e)", "e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term =", "Raises: ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all", "return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary check loop ### return", "list now self.mst = [] # min spanning tree now def kruskal_algo(self): \"\"\"", "(v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a,", "MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix of the", "def prims_algo(self, root): \"\"\" Parameters: root(list): The root of the min spanning tree.", "of mst min_ = math.inf for i in range(len(root)): root_nb = gi.get_nb(root[i]) #", "math class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix", "lambda x: x[1]) for i in range(len(E)): E[i] = E[i][0] for i in", "= gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term = root_nb[j]", "gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a,", "class MST(): def __init__(self, adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix of", "TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver =", "= adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = [] # contain", 
"TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i))", "gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] #", "Auto creat if the graph is undirect. Returns: Min spanning tree. Attention: Undirected", "tree. Plays mst in interation. Input [root] first generally. Returns: Min spanning tree.", "range(len(root)): root_nb = gi.get_nb(root[i]) # find which edges weight is min for j", "# from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b):", "i in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight E = sorted(E, key =", "for i in range(len(E)): E[i] = E[i][0] for i in range(len(E)): (v_a, v_b)", "self.con_ver = root self.mst.append(min_edge) ### unnecessary check loop ### return self.prims_algo(root) def put_all(a,", "self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass # the edge could make circle", "for i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set ds =", "edges weight is min for j in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e", "in range(len(E)): E[i] = E[i][0] for i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i])", "= gi.get_nb(root[i]) # find which edges weight is min for j in range(len(root_nb)):", "if len(root) == self.N: return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge) ### unnecessary", "len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = [] # contain vertices list now self.mst", "mst min_ = math.inf for i in range(len(root)): root_nb = gi.get_nb(root[i]) # find", "= e if len(root) == self.N: return self.mst root.append(min_term) self.con_ver = root self.mst.append(min_edge)", "\"\"\" Parameters: root(list): The root of the min spanning tree. Plays mst in", "Returns: Min spanning tree. Attention: Undirected graph difinition. 
Raises: ValueError, TypeError \"\"\" self.Adjacency_Matrix", "### return self.prims_algo(root) def put_all(a, b): for i in a: if not(i in", "self.con_ver = [] # contain vertices list now self.mst = [] # min", "weight is min for j in range(len(root_nb)): if not(root_nb[j] in self.con_ver): e =", "= ins_matrix self.con_ver = [] # contain vertices list now self.mst = []", "min_ = math.inf for i in range(len(root)): root_nb = gi.get_nb(root[i]) # find which", "incidence matrix of the graph. Auto creat if the graph is undirect. Returns:", "ValueError, TypeError \"\"\" self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver", "self.mst = [] # min spanning tree now def kruskal_algo(self): \"\"\" Returns: Min", "min spanning tree. Plays mst in interation. Input [root] first generally. Returns: Min", "= GI(self.Adjacency_Matrix, self.Insidence_Matrix) # all neighbors of mst min_ = math.inf for i", "gi.get_weight(e) < min_: min_ = gi.get_weight(e) min_term = root_nb[j] min_edge = e if", "adj_matrix, ins_matrix): \"\"\" Parameters: adj_matrix(list): The adjacency matrix of the graph. ins_matrix(list): The", "self.Adjacency_Matrix = adj_matrix self.N = len(self.Adjacency_Matrix) self.Insidence_Matrix = ins_matrix self.con_ver = [] #", "from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass", "self.con_ver, self.mst, v_a, v_b) if ds.same_set(v_a, v_b): pass # the edge could make", "ds.same_set(v_a, v_b): pass # the edge could make circle else: self.mst.append(E[i]) if not", "tree. 
Raises: ValueError, TypeError \"\"\" # from get_imformation gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix) #", "# contain vertices list now self.mst = [] # min spanning tree now", "for i in range(len(root)): root_nb = gi.get_nb(root[i]) # find which edges weight is", "= sorted(E, key = lambda x: x[1]) for i in range(len(E)): E[i] =", "v_b) if ds.same_set(v_a, v_b): pass # the edge could make circle else: self.mst.append(E[i])", "matrix of the graph. ins_matrix(list): The incidence matrix of the graph. Auto creat", "not (v_b in self.con_ver): self.con_ver.append(v_b) return self.mst def prims_algo(self, root): \"\"\" Parameters: root(list):", "# all neighbors of mst min_ = math.inf for i in range(len(root)): root_nb", "range(len(E)): E[i] = E[i][0] for i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) #", "if not(root_nb[j] in self.con_ver): e = gi.get_edge(root[i], root_nb[j]) if gi.get_weight(e) < min_: min_", "self.Insidence_Matrix = ins_matrix self.con_ver = [] # contain vertices list now self.mst =", "= root self.mst.append(min_edge) ### unnecessary check loop ### return self.prims_algo(root) def put_all(a, b):", "return self.mst def prims_algo(self, root): \"\"\" Parameters: root(list): The root of the min", "[] # min spanning tree now def kruskal_algo(self): \"\"\" Returns: Min spanning tree.", "self.mst.append(min_edge) ### unnecessary check loop ### return self.prims_algo(root) def put_all(a, b): for i", "E[i][0] for i in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set ds", "Parameters: adj_matrix(list): The adjacency matrix of the graph. 
ins_matrix(list): The incidence matrix of", "self.mst def prims_algo(self, root): \"\"\" Parameters: root(list): The root of the min spanning", "E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))] # edges sorted by weight", "root self.mst.append(min_edge) ### unnecessary check loop ### return self.prims_algo(root) def put_all(a, b): for", "in range(len(E)): (v_a, v_b) = gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver,", "key = lambda x: x[1]) for i in range(len(E)): E[i] = E[i][0] for", "v_b) = gi.edge_term(E[i]) # from disjoint_set ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b)", "is undirect. Returns: Min spanning tree. Attention: Undirected graph difinition. Raises: ValueError, TypeError" ]
[ "= Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path = \"inventory.csv\" csv_file =", "ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws =", "Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path = \"inventory.csv\" csv_file = open(path,", "io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items)", "open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws =", "xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path", "import Path sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io import BytesIO ws =", "= BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path =", "Worksheet() path = \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\") ws.csv_to_dict(csv_file=csv_file, delimiter=\";\") print(\"ALL:\", ws.sheet_items)", "BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path = \"inventory.csv\"", "ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\"", "print(\">>\", ws.header) ws = Worksheet() path = \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\")", "ws.header) ws = Worksheet() path = \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\") ws.csv_to_dict(csv_file=csv_file,", "ws = Worksheet() path = \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\") 
ws.csv_to_dict(csv_file=csv_file, delimiter=\";\")", "ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file", "sys from pathlib import Path sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io import", "= \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file)", "ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path = \"inventory.csv\" csv_file = open(path, \"r\",", "xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header)", "\"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet()", "from io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\",", "print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file =", "= Worksheet() path = \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\") ws.csv_to_dict(csv_file=csv_file, delimiter=\";\") print(\"ALL:\",", "= \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\") ws.csv_to_dict(csv_file=csv_file, delimiter=\";\") print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items)", "path = \"inventory.csv\" csv_file = open(path, \"r\", encoding=\"utf-8-sig\") ws.csv_to_dict(csv_file=csv_file, delimiter=\";\") print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\",", "Path sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io import BytesIO ws = Worksheet()", "\"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = 
Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\",", "import sys from pathlib import Path sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io", "Worksheet from io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items)", "pathlib import Path sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io import BytesIO ws", "sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\")", "ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path,", "BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path =", "import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path", "= open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws", "from sheet2dict import Worksheet from io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\",", "= Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file", "import Worksheet from io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\",", "print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws", "Worksheet() 
ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file =", "ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read())", "from pathlib import Path sys.path.append(str(Path(\".\").absolute().parent)) from sheet2dict import Worksheet from io import BytesIO", "print(\">>\", ws.header) print(\"ALL:\", ws.sheet_items) print(\"SANITIZED:\", ws.sanitize_sheet_items) path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\")", "sheet2dict import Worksheet from io import BytesIO ws = Worksheet() ws.xlsx_to_dict(path=\"inventory.xlsx\") print(\">>\", ws.header)", "path = \"inventory.xlsx\" xlsx_file = open(path, \"rb\") xlsx_file = BytesIO(xlsx_file.read()) ws = Worksheet()", "ws = Worksheet() ws.xlsx_to_dict(path=xlsx_file) print(\">>\", ws.header) ws = Worksheet() path = \"inventory.csv\" csv_file" ]
[ "break player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move) self.remote_process_client.write_move(move) finally:", "try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = [] for", "line enables my custom debugger window debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token)", "time import sleep class Runner: def __init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client", "import sys from MyStrategy import MyStrategy from RemoteProcessClient import RemoteProcessClient from model.Move import", "= RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token =", "= self.remote_process_client.read_player_context() if player_context is None: break player_trooper = player_context.trooper move = Move()", "sys from MyStrategy import MyStrategy from RemoteProcessClient import RemoteProcessClient from model.Move import Move", "#next line enables my custom debugger window debuggerEnabled = True def run(self): try:", "def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies =", "RemoteProcessClient import RemoteProcessClient from model.Move import Move from time import sleep class Runner:", "True def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies", "while True: 
player_context = self.remote_process_client.read_player_context() if player_context is None: break player_trooper = player_context.trooper", "import MyStrategy from RemoteProcessClient import RemoteProcessClient from model.Move import Move from time import", "sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line enables my", "if player_context is None: break player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world,", "MyStrategy import MyStrategy from RemoteProcessClient import RemoteProcessClient from model.Move import Move from time", "run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = []", "else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line enables my custom", "= True def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context()", "import Move from time import sleep class Runner: def __init__(self): sleep(4) if sys.argv.__len__()", "RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line enables my custom debugger window debuggerEnabled", "self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line", "RemoteProcessClient from model.Move import Move from time import sleep class Runner: def __init__(self):", "sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client =", 
"strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if player_context is None: break player_trooper =", "for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if player_context is", "debugger window debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version()", "31001) self.token = \"0<PASSWORD>\" #next line enables my custom debugger window debuggerEnabled =", "\"0<PASSWORD>\" #next line enables my custom debugger window debuggerEnabled = True def run(self):", "= \"0<PASSWORD>\" #next line enables my custom debugger window debuggerEnabled = True def", "strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if player_context is None:", "player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move) self.remote_process_client.write_move(move) finally: self.remote_process_client.close()", "Move from time import sleep class Runner: def __init__(self): sleep(4) if sys.argv.__len__() ==", "Runner: def __init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token", "= [] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if", "if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client", "= RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line enables my custom debugger window", "= 
self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = [] for strategy_index in xrange(team_size):", "self.remote_process_client.read_game_context() strategies = [] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context =", "sleep class Runner: def __init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1],", "True: player_context = self.remote_process_client.read_player_context() if player_context is None: break player_trooper = player_context.trooper move", "= self.remote_process_client.read_game_context() strategies = [] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context", "sleep(4) if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else:", "import sleep class Runner: def __init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client =", "import RemoteProcessClient from model.Move import Move from time import sleep class Runner: def", "my custom debugger window debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token) team_size =", "team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = [] for strategy_index in", "self.remote_process_client.read_player_context() if player_context is None: break player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper,", "self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = [] for strategy_index in xrange(team_size): strategies.append(MyStrategy())", 
"int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next", "window debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game", "player_context is None: break player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game,", "RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\"", "self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = [] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while", "is None: break player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move)", "[] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if player_context", "enables my custom debugger window debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token) team_size", "self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game = self.remote_process_client.read_game_context() strategies = [] for strategy_index", "__init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3]", "strategies = [] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True: player_context = 
self.remote_process_client.read_player_context()", "MyStrategy from RemoteProcessClient import RemoteProcessClient from model.Move import Move from time import sleep", "def __init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token =", "= sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line enables", "from MyStrategy import MyStrategy from RemoteProcessClient import RemoteProcessClient from model.Move import Move from", "None: break player_trooper = player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move) self.remote_process_client.write_move(move)", "game = self.remote_process_client.read_game_context() strategies = [] for strategy_index in xrange(team_size): strategies.append(MyStrategy()) while True:", "== 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\",", "from model.Move import Move from time import sleep class Runner: def __init__(self): sleep(4)", "self.token = \"0<PASSWORD>\" #next line enables my custom debugger window debuggerEnabled = True", "self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token", "self.remote_process_client = RemoteProcessClient(\"localhost\", 31001) self.token = \"0<PASSWORD>\" #next line enables my custom debugger", "4: self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2])) self.token = sys.argv[3] else: self.remote_process_client = RemoteProcessClient(\"localhost\", 31001)", "class Runner: def __init__(self): sleep(4) if sys.argv.__len__() == 4: self.remote_process_client = RemoteProcessClient(sys.argv[1], 
int(sys.argv[2]))", "from RemoteProcessClient import RemoteProcessClient from model.Move import Move from time import sleep class", "in xrange(team_size): strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if player_context is None: break", "model.Move import Move from time import sleep class Runner: def __init__(self): sleep(4) if", "debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size() self.remote_process_client.write_protocol_version() game =", "custom debugger window debuggerEnabled = True def run(self): try: self.remote_process_client.write_token(self.token) team_size = self.remote_process_client.read_team_size()", "= player_context.trooper move = Move() strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move) self.remote_process_client.write_move(move) finally: self.remote_process_client.close() Runner().run()", "from time import sleep class Runner: def __init__(self): sleep(4) if sys.argv.__len__() == 4:", "xrange(team_size): strategies.append(MyStrategy()) while True: player_context = self.remote_process_client.read_player_context() if player_context is None: break player_trooper", "player_context = self.remote_process_client.read_player_context() if player_context is None: break player_trooper = player_context.trooper move =" ]
[ "all selected cells. delete() -> void (Row, Column) [Value] Selection: --> (1,2) [\"AA\"],", "# OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts for", "return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text)", "self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the QTableSelectionMode between the list options. mode", "@decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete operations for cut functionality''' result =", "self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b':", "items for column in range(self.table.columnCount()): # only select visible items if not self.table.isColumnHidden(column):", "between the list options. mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode", "ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode !=", "select_all(self): '''Selects all item in the table''' with MUTEX: model = self.table.selectionModel() mode", "GNU GPL, see licenses/GNU GPLv3.txt for more details. 
''' # load modules/submodules from", "= True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete operations for cut functionality'''", "} for keysequence, mode in modes.items(): fun = partial(self.select_mode, qt.SELECTION_MODE[mode]) self.shortcuts[keysequence] = fun", "for facilitated table use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table = table self.copier", "self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def", "mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self):", "pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods to", "# clear selection model.clearSelection() selection = model.selection() # reset the selection mode for", "operations for cut functionality''' result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def copy(self):", "Controllers/Bindings/table __________________________ Class with designed inheritance for copy/paste methods. 
:copyright: (c) 2015 The", "= copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete()", "self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self):", "delete() -> void (Row, Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2)", "self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show,", "nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods", "result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot)", "import base, threads from xldlib.qt import resources as qt from . import copier,", "= [i for i in items if i is not None] for item", "cut(self): '''Combines copy and delete operations for cut functionality''' result = self.copier.copy() self._delete()", "range(self.table.columnCount()): # only select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode)", "# only select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def", "QTableSelectionMode between the list options. 
mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) '''", "self._delete() return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text()", "i in items if i is not None] for item in filtered: item.setText(blank)", "= { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode in", "MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection", "in selected_indexes) filtered = [i for i in items if i is not", "item in the table''' with MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended'])", "xldlib.definitions import partial from xldlib.onstart.main import APP from xldlib.qt.objects import base, threads from", "University of California. :license: GNU GPL, see licenses/GNU GPLv3.txt for more details. '''", "cells. 
delete() -> void (Row, Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"],", "selection = model.selection() # reset the selection mode for all items for column", "modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode", "'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' }", "load modules/submodules from PySide import QtCore from xldlib.definitions import partial from xldlib.onstart.main import", "class TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts for facilitated table use''' def", "the table''' with MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear", "[] ''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i in selected_indexes)", "self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the QTableSelectionMode between", "threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS", "[\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3) [], (2,2)", "def copy(self): return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self,", "__________________________ Class with designed inheritance for copy/paste methods. 
:copyright: (c) 2015 The Regents", "import QtCore from xldlib.definitions import partial from xldlib.onstart.main import APP from xldlib.qt.objects import", "super(TableBindings, self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() #", "'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m':", "# ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result): pass def", "GPLv3.txt for more details. ''' # load modules/submodules from PySide import QtCore from", "self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes = {", "= mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts", "(1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3) [],", "in range(self.table.columnCount()): # only select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select'])", "in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot)", "to bind to QKeyShortcuts for facilitated table use''' def __init__(self, table): super(TableBindings, self).__init__(table)", "for i in items if i is not None] for item in filtered:", "GPL, see licenses/GNU GPLv3.txt for more details. ''' # load modules/submodules from PySide", "self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC", "California. :license: GNU GPL, see licenses/GNU GPLv3.txt for more details. 
''' # load", "decorators, paster # MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # -----", "import resources as qt from . import copier, decorators, paster # MUTEX #", "items if i is not None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if", "[Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2)", "# ------- class TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts for facilitated table", "Regents of the University of California. :license: GNU GPL, see licenses/GNU GPLv3.txt for", "[], (1,3) [], (2,2) [], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items =", "selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the QTableSelectionMode between the list", "'''Provides methods to bind to QKeyShortcuts for facilitated table use''' def __init__(self, table):", "of California. :license: GNU GPL, see licenses/GNU GPLv3.txt for more details. ''' #", "} modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence,", "table use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table)", "[\"DD\"] --> (1,2) [], (1,3) [], (2,2) [], (2,3) [] ''' selected_indexes =", "copier, decorators, paster # MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS #", "self.table = table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS", "select_mode(self, mode=None): ''' Changes the QTableSelectionMode between the list options. 
mode -- QtGui.QAbstractItemView.<atribute>", "@decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def", "def __init__(self, table): super(TableBindings, self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table) self.paster =", "PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete function.", "selected cells. delete() -> void (Row, Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3)", "mode=None): ''' Changes the QTableSelectionMode between the list options. mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection", "'Extended' } for keysequence, mode in modes.items(): fun = partial(self.select_mode, qt.SELECTION_MODE[mode]) self.shortcuts[keysequence] =", "# MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result):", "if filtered and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines copy", "# reset the selection mode for all items for column in range(self.table.columnCount()): #", "''' Changes the QTableSelectionMode between the list options. mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection", "self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in the", "= (self.table.item(i.row, i.column) for i in selected_indexes) filtered = [i for i in", "(c) 2015 The Regents of the University of California. 
:license: GNU GPL, see", "mode for all items for column in range(self.table.columnCount()): # only select visible items", "'Ctrl+Shift+e': 'Extended' } for keysequence, mode in modes.items(): fun = partial(self.select_mode, qt.SELECTION_MODE[mode]) self.shortcuts[keysequence]", "self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s': 'Single',", "paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"): '''", "FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete function. Deletes", "Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [],", "= threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) #", "{ 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete,", "delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete function. Deletes contents in all", "(2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i in", "def _delete(self, blank=\"\"): ''' Excel-like delete function. Deletes contents in all selected cells.", "as qt from . 
import copier, decorators, paster # MUTEX # ----- MUTEX", "self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in", "(self.table.item(i.row, i.column) for i in selected_indexes) filtered = [i for i in items", "mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection = model.selection() # reset", "set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v':", "function. Deletes contents in all selected cells. delete() -> void (Row, Column) [Value]", "selection model.clearSelection() selection = model.selection() # reset the selection mode for all items", "self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the QTableSelectionMode between the", "= table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot)", "--> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3)", "(2,3) [\"DD\"] --> (1,2) [], (1,3) [], (2,2) [], (2,3) [] ''' selected_indexes", "SLOTS # ----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS # -------", "----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject):", "self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all", "for more details. 
''' # load modules/submodules from PySide import QtCore from xldlib.definitions", "# ----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class", "self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection = model.selection() # reset the selection", "for cut functionality''' result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def copy(self): return", "def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self):", "QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode", "qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c':", "The Regents of the University of California. :license: GNU GPL, see licenses/GNU GPLv3.txt", "partial from xldlib.onstart.main import APP from xldlib.qt.objects import base, threads from xldlib.qt import", "def delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete function. Deletes contents in", "'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s':", "from xldlib.qt.objects import base, threads from xldlib.qt import resources as qt from .", "with designed inheritance for copy/paste methods. 
:copyright: (c) 2015 The Regents of the", "void (Row, Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3)", "self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i in selected_indexes) filtered = [i for", "from . import copier, decorators, paster # MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive)", "item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed = True", "self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines", "mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts =", "= self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item", "{ 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode in modes.items():", "'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode in modes.items(): fun", "'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes =", "is not None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table,", "copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods to bind to", "self.delete, 'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended'", ":license: GNU GPL, see licenses/GNU GPLv3.txt for more details. 
''' # load modules/submodules", "= self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection = model.selection()", "with MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection()", "i in selected_indexes) filtered = [i for i in items if i is", "the QTableSelectionMode between the list options. mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection)", "None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed", "SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single'])", "QtCore from xldlib.definitions import partial from xldlib.onstart.main import APP from xldlib.qt.objects import base,", "in all selected cells. delete() -> void (Row, Column) [Value] Selection: --> (1,2)", "self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete function. Deletes contents in all selected", "[\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3) [], (2,2) [], (2,3)", "reset the selection mode for all items for column in range(self.table.columnCount()): # only", "in items if i is not None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes)", "see licenses/GNU GPLv3.txt for more details. 
''' # load modules/submodules from PySide import", "filtered = [i for i in items if i is not None] for", "def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides", "mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended']", "and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete", "= self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i in selected_indexes) filtered = [i", "[], (2,2) [], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column)", "select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS", "import APP from xldlib.qt.objects import base, threads from xldlib.qt import resources as qt", "licenses/GNU GPLv3.txt for more details. 
''' # load modules/submodules from PySide import QtCore", "filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def", "def copyslot(result): APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods to bind", "'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all }", "i.column) for i in selected_indexes) filtered = [i for i in items if", "self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self): clipboard_text =", "qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the QTableSelectionMode between the list options.", "copy/paste methods. :copyright: (c) 2015 The Regents of the University of California. :license:", "--> (1,2) [], (1,3) [], (2,2) [], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes()", "select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None):", "if i is not None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered", "= paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"):", "qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f':", "paster # MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def", "contents in all selected cells. 
delete() -> void (Row, Column) [Value] Selection: -->", "delete operations for cut functionality''' result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def", "MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result)", "Changes the QTableSelectionMode between the list options. mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection", "@decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete function. Deletes contents", "def cut(self): '''Combines copy and delete operations for cut functionality''' result = self.copier.copy()", "the University of California. :license: GNU GPL, see licenses/GNU GPLv3.txt for more details.", "selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i in selected_indexes) filtered =", "only select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self,", "\"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete operations for", "methods. :copyright: (c) 2015 The Regents of the University of California. :license: GNU", "'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode in modes.items(): fun =", "import partial from xldlib.onstart.main import APP from xldlib.qt.objects import base, threads from xldlib.qt", "not None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"):", "of the University of California. 
:license: GNU GPL, see licenses/GNU GPLv3.txt for more", "copy and delete operations for cut functionality''' result = self.copier.copy() self._delete() return result", "and delete operations for cut functionality''' result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot)", "the selection mode for all items for column in range(self.table.columnCount()): # only select", "for copy/paste methods. :copyright: (c) 2015 The Regents of the University of California.", "not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the QTableSelectionMode", "threads from xldlib.qt import resources as qt from . import copier, decorators, paster", "self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self,", "options. mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or", "GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x':", "return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text)", "MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) #", "import copier, decorators, paster # MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS", ":copyright: (c) 2015 The Regents of the University of California. 
:license: GNU GPL,", "-> void (Row, Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"],", "qt from . import copier, decorators, paster # MUTEX # ----- MUTEX =", "from PySide import QtCore from xldlib.definitions import partial from xldlib.onstart.main import APP from", "_delete(self, blank=\"\"): ''' Excel-like delete function. Deletes contents in all selected cells. delete()", "selected_indexes) filtered = [i for i in items if i is not None]", "self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection = model.selection() #", "facilitated table use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table = table self.copier =", "= model.selection() # reset the selection mode for all items for column in", "APP.clipboard().setText(result) # OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts", "Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] -->", "modules/submodules from PySide import QtCore from xldlib.definitions import partial from xldlib.onstart.main import APP", "def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in the table''' with", "items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes", "APP from xldlib.qt.objects import base, threads from xldlib.qt import resources as qt from", "list options. 
mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode", "table): super(TableBindings, self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts()", "delete function. Deletes contents in all selected cells. delete() -> void (Row, Column)", "(Row, Column) [Value] Selection: --> (1,2) [\"AA\"], (1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"]", "def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut,", "'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode in modes.items(): fun = partial(self.select_mode, qt.SELECTION_MODE[mode])", "Class with designed inheritance for copy/paste methods. :copyright: (c) 2015 The Regents of", "all items for column in range(self.table.columnCount()): # only select visible items if not", "self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in the table''' with MUTEX: model =", "(1,3) [\"BB\"], (2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3) [], (2,2) [],", "use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table) self.paster", "model.selection() # reset the selection mode for all items for column in range(self.table.columnCount()):", "self.select_all } modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for", "!= qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block,", "for all items for column in range(self.table.columnCount()): # only select visible items if", "'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e': 'Extended' } for keysequence, mode in modes.items(): fun = partial(self.select_mode,", "= self.copier.copy() 
self._delete() return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self): clipboard_text", "[], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i", "cut functionality''' result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy()", "Deletes contents in all selected cells. delete() -> void (Row, Column) [Value] Selection:", "for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed =", "table''' with MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection", "functionality''' result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def", "# GETTERS def set_shortcuts(self): self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy,", "the list options. 
mode -- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode =", "if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): ''' Changes the", "hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete operations", "self.shortcuts = { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste,", "self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like", "(1,2) [], (1,3) [], (2,2) [], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items", "in the table''' with MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) #", "visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(), qt.SELECTION_MODEL['Select']) self.table.setSelectionMode(mode) def select_mode(self, mode=None): '''", "= self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection = model.selection() # reset the", "@decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in the table'''", "self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete operations for cut", "xldlib.qt.objects import base, threads from xldlib.qt import resources as qt from . 
import", "'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a':", "from xldlib.qt import resources as qt from . import copier, decorators, paster #", "model = self.table.selectionModel() mode = self.table.selectionMode() self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection =", "''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for i in selected_indexes) filtered", "bind to QKeyShortcuts for facilitated table use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table", "'''Combines copy and delete operations for cut functionality''' result = self.copier.copy() self._delete() return", "MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result): pass", "# load modules/submodules from PySide import QtCore from xldlib.definitions import partial from xldlib.onstart.main", "def select_mode(self, mode=None): ''' Changes the QTableSelectionMode between the list options. mode --", ". import copier, decorators, paster # MUTEX # ----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) #", "''' Excel-like delete function. Deletes contents in all selected cells. delete() -> void", "filtered and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and", "# PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def _delete(self, blank=\"\"): ''' Excel-like delete", "from xldlib.definitions import partial from xldlib.onstart.main import APP from xldlib.qt.objects import base, threads", "''' Controllers/Bindings/table __________________________ Class with designed inheritance for copy/paste methods. 
:copyright: (c) 2015", "self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi',", "resources as qt from . import copier, decorators, paster # MUTEX # -----", "= { 'Ctrl+f': self.table.finder.show, 'Ctrl+b': self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del':", "# SLOTS # ----- def nullslot(result): pass def copyslot(result): APP.clipboard().setText(result) # OBJECTS #", "[i for i in items if i is not None] for item in", "self.table.block, 'Ctrl+c': self.copy, 'Ctrl+x': self.cut, 'Ctrl+v': self.paste, 'Del': self.delete, 'Ctrl+a': self.select_all } modes", "clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in the table''' with MUTEX: model", "2015 The Regents of the University of California. :license: GNU GPL, see licenses/GNU", "PySide import QtCore from xldlib.definitions import partial from xldlib.onstart.main import APP from xldlib.qt.objects", "clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all", "for i in selected_indexes) filtered = [i for i in items if i", "all item in the table''' with MUTEX: model = self.table.selectionModel() mode = self.table.selectionMode()", "(1,3) [], (2,2) [], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row,", "_paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects all item in the table''' with MUTEX:", "selection mode for all items for column in range(self.table.columnCount()): # only select visible", "column in range(self.table.columnCount()): # only select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column) selection.merge(model.selection(),", "or qt.SELECTION_MODE['Extended'] 
self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def set_shortcuts(self): self.shortcuts = {", "[\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3) [], (2,2) [], (2,3) [] '''", "details. ''' # load modules/submodules from PySide import QtCore from xldlib.definitions import partial", "item.setText(blank) self.table.model().delete(selected_indexes) if filtered and hasattr(self.table, \"changed\"): self.table.changed = True @decorators.newspinner(copyslot) def cut(self):", "(2,2) [], (2,3) [] ''' selected_indexes = self.table.get_selected_indexes() items = (self.table.item(i.row, i.column) for", "(2,2) [\"CC\"], (2,3) [\"DD\"] --> (1,2) [], (1,3) [], (2,2) [], (2,3) []", "__init__(self, table): super(TableBindings, self).__init__(table) self.table = table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table)", "------- class TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts for facilitated table use'''", "'Del': self.delete, 'Ctrl+a': self.select_all } modes = { 'Ctrl+Shift+s': 'Single', 'Ctrl+Shift+m': 'Multi', 'Ctrl+Shift+e':", "----- MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive) # SLOTS # ----- def nullslot(result): pass def copyslot(result):", "methods to bind to QKeyShortcuts for facilitated table use''' def __init__(self, table): super(TableBindings,", "items = (self.table.item(i.row, i.column) for i in selected_indexes) filtered = [i for i", "QKeyShortcuts for facilitated table use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table = table", "copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def delete(self): self._delete() def", "Excel-like delete function. Deletes contents in all selected cells. 
delete() -> void (Row,", "''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode) self.table.blockSignals(mode != qt.SELECTION_MODE['Single']) # GETTERS def", "for column in range(self.table.columnCount()): # only select visible items if not self.table.isColumnHidden(column): self.table.selectColumn(column)", "from xldlib.onstart.main import APP from xldlib.qt.objects import base, threads from xldlib.qt import resources", "designed inheritance for copy/paste methods. :copyright: (c) 2015 The Regents of the University", "result = self.copier.copy() self._delete() return result @decorators.newspinner(copyslot) def copy(self): return self.copier.copy() def paste(self):", "more details. ''' # load modules/submodules from PySide import QtCore from xldlib.definitions import", "base, threads from xldlib.qt import resources as qt from . import copier, decorators,", "self.table.setSelectionMode(qt.SELECTION_MODE['Extended']) # clear selection model.clearSelection() selection = model.selection() # reset the selection mode", "True @decorators.newspinner(copyslot) def cut(self): '''Combines copy and delete operations for cut functionality''' result", "inheritance for copy/paste methods. :copyright: (c) 2015 The Regents of the University of", "'''Selects all item in the table''' with MUTEX: model = self.table.selectionModel() mode =", "-- QtGui.QAbstractItemView.<atribute> ExtendedSelection SingleSelection MultiSelection select_mode(QtGui.QAbstractItemView.ExtendedSelection) ''' mode = mode or qt.SELECTION_MODE['Extended'] self.table.setSelectionMode(mode)", "blank=\"\"): ''' Excel-like delete function. Deletes contents in all selected cells. delete() ->", "i is not None] for item in filtered: item.setText(blank) self.table.model().delete(selected_indexes) if filtered and", "xldlib.qt import resources as qt from . 
import copier, decorators, paster # MUTEX", "def select_all(self): '''Selects all item in the table''' with MUTEX: model = self.table.selectionModel()", "copy(self): return self.copier.copy() def paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text):", "table self.copier = copier.HandleCopy(self.table) self.paster = paster.HandlePaste(self.table) self.set_shortcuts() # PUBLIC FUNCTIONS @decorators.newspinner(nullslot) def", "paste(self): clipboard_text = self.app.clipboard().text() self._paste(clipboard_text) @decorators.newspinner(nullslot) def _paste(self, clipboard_text): self.paster.paste(clipboard_text) def select_all(self): '''Selects", "OBJECTS # ------- class TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts for facilitated", "xldlib.onstart.main import APP from xldlib.qt.objects import base, threads from xldlib.qt import resources as", "clear selection model.clearSelection() selection = model.selection() # reset the selection mode for all", "''' # load modules/submodules from PySide import QtCore from xldlib.definitions import partial from", "to QKeyShortcuts for facilitated table use''' def __init__(self, table): super(TableBindings, self).__init__(table) self.table =", "model.clearSelection() selection = model.selection() # reset the selection mode for all items for", "TableBindings(base.BaseObject): '''Provides methods to bind to QKeyShortcuts for facilitated table use''' def __init__(self," ]
[ "um MP3 from pygame import mixer mixer.init() mixer.music.load('EX021.mp3') #Adicione o nome da musica", "#Tocando um MP3 from pygame import mixer mixer.init() mixer.music.load('EX021.mp3') #Adicione o nome da", "MP3 from pygame import mixer mixer.init() mixer.music.load('EX021.mp3') #Adicione o nome da musica mixer.music.play()" ]
[ "<reponame>zamoose/wp-ansible #!/usr/bin/python # -*- coding: utf-8 -*- def main(): module = AnsibleModule( argument_spec", "= module.params['state'] before = None after = None changed = False module.exit_json(changed=changed, before=before,", "None changed = False module.exit_json(changed=changed, before=before, after=after) # import module snippets from ansible.module_utils.basic", "None after = None changed = False module.exit_json(changed=changed, before=before, after=after) # import module", "def main(): module = AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(),", "plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path'] state =", "= module.params['path'] state = module.params['state'] before = None after = None changed =", "changed = False module.exit_json(changed=changed, before=before, after=after) # import module snippets from ansible.module_utils.basic import", "= None after = None changed = False module.exit_json(changed=changed, before=before, after=after) # import", "module.params['state'] before = None after = None changed = False module.exit_json(changed=changed, before=before, after=after)", "utf-8 -*- def main(): module = AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(),", "module = AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None),", "before = None after = None changed = False module.exit_json(changed=changed, before=before, after=after) #", "#!/usr/bin/python # -*- coding: utf-8 -*- def main(): module = AnsibleModule( argument_spec =", "module.params['path'] state = module.params['state'] before = None after = None changed = False", "-*- coding: utf-8 -*- def main(): module = AnsibleModule( argument_spec = dict( 
path=dict(required=True),", "after = None changed = False module.exit_json(changed=changed, before=before, after=after) # import module snippets", "state = module.params['state'] before = None after = None changed = False module.exit_json(changed=changed,", "theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path'] state = module.params['state']", "), supports_check_mode=True ) path = module.params['path'] state = module.params['state'] before = None after", "user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path'] state = module.params['state'] before", ") path = module.params['path'] state = module.params['state'] before = None after = None", "= None changed = False module.exit_json(changed=changed, before=before, after=after) # import module snippets from", "= dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path", "path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path']", "state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path'] state", "argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True )", "= False module.exit_json(changed=changed, before=before, after=after) # import module snippets from ansible.module_utils.basic import *", "-*- def main(): module = AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(),", "False module.exit_json(changed=changed, before=before, after=after) # import module snippets 
from ansible.module_utils.basic import * main()", "AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True", "dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path =", "executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path'] state = module.params['state'] before = None", "path = module.params['path'] state = module.params['state'] before = None after = None changed", "coding: utf-8 -*- def main(): module = AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(),", "main(): module = AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(),", "version=dict(), executable=dict(default=None), ), supports_check_mode=True ) path = module.params['path'] state = module.params['state'] before =", "supports_check_mode=True ) path = module.params['path'] state = module.params['state'] before = None after =", "# -*- coding: utf-8 -*- def main(): module = AnsibleModule( argument_spec = dict(", "= AnsibleModule( argument_spec = dict( path=dict(required=True), state=dict(), plugin=dict(), theme=dict(), user=dict(), version=dict(), executable=dict(default=None), )," ]
[ "field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet',", "Django 3.1.3 on 2020-12-01 14:32 from django.conf import settings from django.db import migrations,", "('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes',", "name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user',", "migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes', field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLikes', to=settings.AUTH_USER_MODEL), ), ]", "('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes', field=models.ManyToManyField(blank=True, related_name='tweet_user',", "on 2020-12-01 14:32 from django.conf import settings from django.db import migrations, models import", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion class 
Migration(migrations.Migration):", "# Generated by Django 3.1.3 on 2020-12-01 14:32 from django.conf import settings from", "= [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations", "Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet',", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet', name='created',", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True),", "] operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)),", 
"migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')),", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets',", "3.1.3 on 2020-12-01 14:32 from django.conf import settings from django.db import migrations, models", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'),", "by Django 3.1.3 on 2020-12-01 14:32 from django.conf import settings from django.db import", "), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel(", "14:32 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class", "verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet',", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "2020-12-01 14:32 from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "Generated by Django 3.1.3 on 2020-12-01 14:32 from django.conf import settings from django.db", "model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp',", "models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes', field=models.ManyToManyField(blank=True,", "[ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('timestamp', 
models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ),", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations =", "('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes', field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLikes', to=settings.AUTH_USER_MODEL), ),", "django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ]", "import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0003_auto_20201201_0211'), ] operations = [ migrations.AlterField(", "settings from django.db import migrations, models import django.db.models.deletion class 
Migration(migrations.Migration): dependencies = [", "'0003_auto_20201201_0211'), ] operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes', field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLikes',", "('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes',", "to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='tweet', name='likes', field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLikes', to=settings.AUTH_USER_MODEL),", "operations = [ migrations.AlterField( model_name='tweet', name='created', field=models.DateTimeField(auto_now_add=True), ), migrations.CreateModel( name='TweetLikes', fields=[ ('id', models.AutoField(auto_created=True,", "serialize=False, verbose_name='ID')), ('timestamp', models.DateTimeField(auto_now_add=True)), ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField(" ]
[ "always having a start date, but one so far back # that it", "repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ] for repo", "date_arg, make_timezone_aware from repos import Repo from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce", "collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:])", "args.since is None: # Simplify the logic by always having a start date,", "get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of external pull requests.\"\"\" for issue in", "reverse=True) for i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count, user_login))", "etc\" ) args = parser.parse_args(argv[1:]) if args.start is None: if args.since is None:", "ahead # that it is like having no end date. args.end = make_timezone_aware(datetime.datetime(2040,", "helpers import date_arg, make_timezone_aware from repos import Repo from webhookdb import get_pulls def", "board[pull.user_login] += 1 return board def main(argv): parser = argparse.ArgumentParser(description=\"Count external pull requests", "else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None: # Simplify the", "user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count, user_login)) if __name__ == \"__main__\":", "the logic by always having an end date, but one so far ahead", "repos import Repo from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of", "ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting, format is flexible: \"", "k,v in board.items()), reverse=True) for i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d}", "logic by always having a start date, but one so far back #", "args.end is None: # Simplify the logic by always having an end date,", "by always having an end date, but one so far 
ahead # that", "date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting, format is", "etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting, format is flexible: \"", "if r.track_pulls ] for repo in repos: for pull in get_external_pulls(repo): yield pull", "1, 1)) pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board = sorted(((v, k)", "get_contributor_counts(pulls) board = sorted(((v, k) for k,v in board.items()), reverse=True) for i, (count,", "help=\"Date to end collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" )", "back # that it is like having no start date. args.start = make_timezone_aware(datetime.datetime(2000,", "parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date DAYS ago\" ) parser.add_argument( \"--start\",", "issue in get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external': yield issue def get_all_external_pulls():", "for pull in pulls: board[pull.user_login] += 1 return board def main(argv): parser =", "collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg,", "\"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting, format", "end date, but one so far ahead # that it is like having", "get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end: yield", "args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board", "end collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args =", "to start collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument(", "pull in get_external_pulls(repo): yield pull def 
get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if", "end: yield pull def get_contributor_counts(pulls): board = collections.Counter() for pull in pulls: board[pull.user_login]", "r in Repo.from_yaml() if r.track_pulls ] for repo in repos: for pull in", "+= 1 return board def main(argv): parser = argparse.ArgumentParser(description=\"Count external pull requests opened", "issue def get_all_external_pulls(): repos = [ r.name for r in Repo.from_yaml() if r.track_pulls", "def main(argv): parser = argparse.ArgumentParser(description=\"Count external pull requests opened by person\") parser.add_argument( \"--since\",", "having no start date. args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now()", "the logic by always having a start date, but one so far back", "collections import datetime import sys from helpers import date_arg, make_timezone_aware from repos import", "None: if args.since is None: # Simplify the logic by always having a", "so far back # that it is like having no start date. 
args.start", "<gh_stars>0 #!/usr/bin/env python from __future__ import print_function import argparse import collections import datetime", "import date_arg, make_timezone_aware from repos import Repo from webhookdb import get_pulls def get_external_pulls(repo):", "= argparse.ArgumentParser(description=\"Count external pull requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use", "a stream of external pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True): if", "collections.Counter() for pull in pulls: board[pull.user_login] += 1 return board def main(argv): parser", ") parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting, format is flexible: \" \"20141225,", "for pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end: yield pull def", "2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if args.start is None: if args.since is", "far ahead # that it is like having no end date. args.end =", "argparse import collections import datetime import sys from helpers import date_arg, make_timezone_aware from", "= [ r.name for r in Repo.from_yaml() if r.track_pulls ] for repo in", "no end date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end) board", "date, but one so far ahead # that it is like having no", "make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board = sorted(((v,", "is None: # Simplify the logic by always having a start date, but", "1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None: # Simplify", "pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls):", "= get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board = sorted(((v, k) for k,v in", "for i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count, user_login)) if", "Simplify the logic by always having a start date, but one so far", "issue.intext == 'external': yield issue def get_all_external_pulls(): repos = [ r.name for r", "import datetime import sys from helpers import date_arg, make_timezone_aware from repos import Repo", "in Repo.from_yaml() if r.track_pulls ] for repo in repos: for pull in get_external_pulls(repo):", "in get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external': yield issue def get_all_external_pulls(): repos", "'external': yield issue def get_all_external_pulls(): repos = [ r.name for r in Repo.from_yaml()", "] for repo in repos: for pull in get_external_pulls(repo): yield pull def get_pulls_in_window(start,", "# that it is like having no end date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1,", "of external pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True): if issue.intext ==", "get_external_pulls(repo): yield pull def get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if start <", "None: # Simplify the logic by always having an end date, but one", "board = get_contributor_counts(pulls) board = sorted(((v, k) for k,v in board.items()), reverse=True) for", "it is like having no start date. args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else:", "having a start date, but one so far back # that it is", "= make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None: # Simplify the logic by", "sorted(((v, k) for k,v in board.items()), reverse=True) for i, (count, user_login) in enumerate(board,", "repo in repos: for pull in get_external_pulls(repo): yield pull def get_pulls_in_window(start, end): for", "but one so far back # that it is like having no start", "date, but one so far back # that it is like having no", "args = parser.parse_args(argv[1:]) if args.start is None: if args.since is None: # Simplify", "metavar=\"DAYS\", type=int, help=\"Use a start date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date", "requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external': yield issue", "requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date DAYS", "sys from helpers import date_arg, make_timezone_aware from repos import Repo from webhookdb import", "import argparse import collections import datetime import sys from helpers import date_arg, make_timezone_aware", "make_timezone_aware from repos import Repo from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a", "1 return board def main(argv): parser = argparse.ArgumentParser(description=\"Count external 
pull requests opened by", "from __future__ import print_function import argparse import collections import datetime import sys from", "# that it is like having no start date. args.start = make_timezone_aware(datetime.datetime(2000, 1,", "logic by always having an end date, but one so far ahead #", "parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting, format is flexible: \" \"20141225, Dec/25/2014,", "pull def get_contributor_counts(pulls): board = collections.Counter() for pull in pulls: board[pull.user_login] += 1", "\" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if args.start is None:", "no start date. args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() -", "opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date DAYS ago\"", "for repo in repos: for pull in get_external_pulls(repo): yield pull def get_pulls_in_window(start, end):", "def get_external_pulls(repo): \"\"\"Produce a stream of external pull requests.\"\"\" for issue in get_pulls(repo,", "type=date_arg, help=\"Date to end collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\"", "__future__ import print_function import argparse import collections import datetime import sys from helpers", "board def main(argv): parser = argparse.ArgumentParser(description=\"Count external pull requests opened by person\") parser.add_argument(", "to end collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args", "far back # that it is like having no start date. 
args.start =", "type=date_arg, help=\"Date to start collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\"", "make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None: # Simplify the logic by always", "make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None:", "from helpers import date_arg, make_timezone_aware from repos import Repo from webhookdb import get_pulls", "r.track_pulls ] for repo in repos: for pull in get_external_pulls(repo): yield pull def", "import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of external pull requests.\"\"\" for issue", "argparse.ArgumentParser(description=\"Count external pull requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a", "by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date DAYS ago\" )", "start date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting, format", "external pull requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start", "always having an end date, but one so far ahead # that it", "pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external': yield", "stream of external pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True): if issue.intext", "yield pull def get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at)", "make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls): board = collections.Counter() for pull in", "state=\"all\", org=True): if issue.intext == 'external': yield issue def get_all_external_pulls(): repos = [", "main(argv): parser = 
argparse.ArgumentParser(description=\"Count external pull requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\",", "end): for pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end: yield pull", "person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date DAYS ago\" ) parser.add_argument(", "type=int, help=\"Use a start date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to", "if args.start is None: if args.since is None: # Simplify the logic by", "date. args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if", "an end date, but one so far ahead # that it is like", "board = sorted(((v, k) for k,v in board.items()), reverse=True) for i, (count, user_login)", "Repo from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of external pull", "in pulls: board[pull.user_login] += 1 return board def main(argv): parser = argparse.ArgumentParser(description=\"Count external", "i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count, user_login)) if __name__", "# Simplify the logic by always having an end date, but one so", "yield issue def get_all_external_pulls(): repos = [ r.name for r in Repo.from_yaml() if", ") parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting, format is flexible: \" \"20141225,", "- datetime.timedelta(days=args.since)) if args.end is None: # Simplify the logic by always having", "in board.items()), reverse=True) for i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i,", "for r in Repo.from_yaml() if r.track_pulls ] for repo in repos: for pull", "import sys from helpers import date_arg, make_timezone_aware from repos import Repo from webhookdb", "like having no end date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start,", "external pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external':", "start collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\",", "= parser.parse_args(argv[1:]) if args.start is None: if args.since is None: # Simplify the", "from repos import Repo from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream", "start date, but one so far back # that it is like having", "import Repo from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of external", "\"20141225, Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if args.start is None: if", "pull requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date", "is None: if args.since is None: # Simplify the logic by always having", "DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting, format is flexible:", "is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to", "\"--end\", type=date_arg, help=\"Date to end collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25,", "parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting, format is flexible: \" \"20141225, Dec/25/2014,", "one so far ahead # that it is like having no end date.", "that it is like having no end date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1))", "args.start is None: if args.since is None: # Simplify the logic by always", "if args.since is None: # Simplify the logic by always having a start", "if args.end is None: # Simplify the logic by always having an end", "1)) pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board = sorted(((v, k) for", "get_external_pulls(repo): \"\"\"Produce a stream of external pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\",", "get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls): board =", "\"--start\", type=date_arg, help=\"Date to start collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25,", "k) for k,v in board.items()), reverse=True) for i, (count, user_login) in enumerate(board, start=1):", "< make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls): board = collections.Counter() for pull", "format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date", "[ r.name for r in Repo.from_yaml() if r.track_pulls ] for repo in repos:", "having an end date, but one so far ahead # that it is", "is None: # Simplify the logic by always having an end date, but", "a start date, but one so far back # that it is like", "start < make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls): board = collections.Counter() for", "get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board = sorted(((v, k) for k,v in board.items()),", "datetime.timedelta(days=args.since)) if args.end is None: # Simplify the logic by always having an", "having no end date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end)", "Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if args.start is None: if args.since", "so far ahead # that it is like having no end date. args.end", "parser.parse_args(argv[1:]) if args.start is None: if args.since is None: # Simplify the logic", "board = collections.Counter() for pull in pulls: board[pull.user_login] += 1 return board def", "pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board = sorted(((v, k) for k,v", "by always having a start date, but one so far back # that", "org=True): if issue.intext == 'external': yield issue def get_all_external_pulls(): repos = [ r.name", "webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of external pull requests.\"\"\" for", "r.name for r in Repo.from_yaml() if r.track_pulls ] for repo in repos: for", ") args = parser.parse_args(argv[1:]) if args.start is None: if args.since is None: #", "repos: for pull in get_external_pulls(repo): yield pull def get_pulls_in_window(start, end): for pull in", "= sorted(((v, k) for k,v in board.items()), reverse=True) for i, (count, user_login) in", "date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls)", "datetime import sys from helpers import date_arg, make_timezone_aware from repos import Repo from", "flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to end", "return board def main(argv): parser = argparse.ArgumentParser(description=\"Count external pull requests opened by person\")", "None: # Simplify the logic by always having a start date, but one", "args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None: # Simplify the logic", "pull def get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) <", "def get_contributor_counts(pulls): board = collections.Counter() for pull in pulls: board[pull.user_login] += 1 return", "yield pull def get_contributor_counts(pulls): board = collections.Counter() for pull in pulls: board[pull.user_login] +=", "\"--since\", metavar=\"DAYS\", type=int, help=\"Use a start date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg,", "start date. args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since))", "end date. 
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end) board =", "Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting, format is", "for issue in get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external': yield issue def", "get_pulls(repo, state=\"all\", org=True): if issue.intext == 'external': yield issue def get_all_external_pulls(): repos =", "pulls: board[pull.user_login] += 1 return board def main(argv): parser = argparse.ArgumentParser(description=\"Count external pull", "python from __future__ import print_function import argparse import collections import datetime import sys", "board.items()), reverse=True) for i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count,", "1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is None: #", "import print_function import argparse import collections import datetime import sys from helpers import", "(count, user_login) in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count, user_login)) if __name__ ==", "def get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end:", "< end: yield pull def get_contributor_counts(pulls): board = collections.Counter() for pull in pulls:", "in get_external_pulls(repo): yield pull def get_pulls_in_window(start, end): for pull in get_all_external_pulls(): if start", "pull in pulls: board[pull.user_login] += 1 return board def main(argv): parser = argparse.ArgumentParser(description=\"Count", "is like having no start date. 
args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start", "Repo.from_yaml() if r.track_pulls ] for repo in repos: for pull in get_external_pulls(repo): yield", "a start date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to start collecting,", "help=\"Date to start collecting, format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" )", "it is like having no end date. args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls", "= make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end is", "flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if args.start is", "#!/usr/bin/env python from __future__ import print_function import argparse import collections import datetime import", "print_function import argparse import collections import datetime import sys from helpers import date_arg,", "in repos: for pull in get_external_pulls(repo): yield pull def get_pulls_in_window(start, end): for pull", "from webhookdb import get_pulls def get_external_pulls(repo): \"\"\"Produce a stream of external pull requests.\"\"\"", "if start < make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls): board = collections.Counter()", "is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if args.start", "= get_contributor_counts(pulls) board = sorted(((v, k) for k,v in board.items()), reverse=True) for i,", "help=\"Use a start date DAYS ago\" ) parser.add_argument( \"--start\", type=date_arg, help=\"Date to start", "format is flexible: \" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) args = parser.parse_args(argv[1:]) if", "like having no start date. 
args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start =", "args.end) board = get_contributor_counts(pulls) board = sorted(((v, k) for k,v in board.items()), reverse=True)", "one so far back # that it is like having no start date.", "if issue.intext == 'external': yield issue def get_all_external_pulls(): repos = [ r.name for", "\"\"\"Produce a stream of external pull requests.\"\"\" for issue in get_pulls(repo, state=\"all\", org=True):", "in get_all_external_pulls(): if start < make_timezone_aware(pull.created_at) < end: yield pull def get_contributor_counts(pulls): board", "# Simplify the logic by always having a start date, but one so", "import collections import datetime import sys from helpers import date_arg, make_timezone_aware from repos", "2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting, format is flexible:", "= collections.Counter() for pull in pulls: board[pull.user_login] += 1 return board def main(argv):", "= make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls = get_pulls_in_window(args.start, args.end) board = get_contributor_counts(pulls) board =", "get_contributor_counts(pulls): board = collections.Counter() for pull in pulls: board[pull.user_login] += 1 return board", "but one so far ahead # that it is like having no end", "parser = argparse.ArgumentParser(description=\"Count external pull requests opened by person\") parser.add_argument( \"--since\", metavar=\"DAYS\", type=int,", "that it is like having no start date. 
args.start = make_timezone_aware(datetime.datetime(2000, 1, 1))", "for pull in get_external_pulls(repo): yield pull def get_pulls_in_window(start, end): for pull in get_all_external_pulls():", "\" \"20141225, Dec/25/2014, 2014-12-25, etc\" ) parser.add_argument( \"--end\", type=date_arg, help=\"Date to end collecting,", "== 'external': yield issue def get_all_external_pulls(): repos = [ r.name for r in", "for k,v in board.items()), reverse=True) for i, (count, user_login) in enumerate(board, start=1): print(\"{:4d}:", "get_all_external_pulls(): repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ] for", "def get_all_external_pulls(): repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ]", "in enumerate(board, start=1): print(\"{:4d}: {:4d} {}\".format(i, count, user_login)) if __name__ == \"__main__\": sys.exit(main(sys.argv))", "Simplify the logic by always having an end date, but one so far", "is like having no end date. args.end = make_timezone_aware(datetime.datetime(2040, 1, 1)) pulls =", "args.start = make_timezone_aware(datetime.datetime(2000, 1, 1)) else: args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since)) if args.end" ]
[ "python from setuptools import setup from os.path import dirname, abspath, join setup(name='drunkardswalk', packages=['drunkardswalk'],", "from setuptools import setup from os.path import dirname, abspath, join setup(name='drunkardswalk', packages=['drunkardswalk'], )", "#!/usr/bin/env python from setuptools import setup from os.path import dirname, abspath, join setup(name='drunkardswalk'," ]
[ "from docs_snippets.concepts.io_management.subselection import ( execute_full, execute_subselection, ) def test_execute_job(): execute_full() def test_execute_subselection(): execute_subselection()" ]
[ "from _corekeywords import _ScapyKeywords from _runonfailure import _RunOnFailureKeywords from _logging import _LoggingKeywords __all__", "import _ScapyKeywords from _runonfailure import _RunOnFailureKeywords from _logging import _LoggingKeywords __all__ = [\"_ScapyKeywords\",", "from _runonfailure import _RunOnFailureKeywords from _logging import _LoggingKeywords __all__ = [\"_ScapyKeywords\", \"_RunOnFailureKeywords\", \"_LoggingKeywords\"]", "<filename>src/Scapy2Library/keywords/__init__.py from _corekeywords import _ScapyKeywords from _runonfailure import _RunOnFailureKeywords from _logging import _LoggingKeywords", "_corekeywords import _ScapyKeywords from _runonfailure import _RunOnFailureKeywords from _logging import _LoggingKeywords __all__ =", "_ScapyKeywords from _runonfailure import _RunOnFailureKeywords from _logging import _LoggingKeywords __all__ = [\"_ScapyKeywords\", \"_RunOnFailureKeywords\"," ]
[ "@return: the number of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S): # Write", "sum @return: the number of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S): #", "(len(A) + 1) table[0] = 1 curr = count = 0 for a", "A, S): # Write your code here. table = [0] * (len(A) +", "1 curr = count = 0 for a in A: curr += a", "in A: curr += a if curr >= S: count += table[curr -", "+ 1) table[0] = 1 curr = count = 0 for a in", "table[0] = 1 curr = count = 0 for a in A: curr", "here. table = [0] * (len(A) + 1) table[0] = 1 curr =", "for a in A: curr += a if curr >= S: count +=", "array @param S: the sum @return: the number of non-empty subarrays \"\"\" def", "[0] * (len(A) + 1) table[0] = 1 curr = count = 0", "if curr >= S: count += table[curr - S] table[curr] += 1 return", "class Solution: \"\"\" @param A: an array @param S: the sum @return: the", "A: an array @param S: the sum @return: the number of non-empty subarrays", "subarrays \"\"\" def numSubarraysWithSum(self, A, S): # Write your code here. table =", "# Write your code here. table = [0] * (len(A) + 1) table[0]", "= [0] * (len(A) + 1) table[0] = 1 curr = count =", "curr += a if curr >= S: count += table[curr - S] table[curr]", "number of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S): # Write your code", "def numSubarraysWithSum(self, A, S): # Write your code here. table = [0] *", "code here. table = [0] * (len(A) + 1) table[0] = 1 curr", "* (len(A) + 1) table[0] = 1 curr = count = 0 for", "A: curr += a if curr >= S: count += table[curr - S]", "Write your code here. table = [0] * (len(A) + 1) table[0] =", "the number of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S): # Write your", "\"\"\" def numSubarraysWithSum(self, A, S): # Write your code here. 
table = [0]", "count = 0 for a in A: curr += a if curr >=", "@param S: the sum @return: the number of non-empty subarrays \"\"\" def numSubarraysWithSum(self,", "numSubarraysWithSum(self, A, S): # Write your code here. table = [0] * (len(A)", "Solution: \"\"\" @param A: an array @param S: the sum @return: the number", "non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S): # Write your code here. table", "= 1 curr = count = 0 for a in A: curr +=", "a if curr >= S: count += table[curr - S] table[curr] += 1", "table = [0] * (len(A) + 1) table[0] = 1 curr = count", "curr = count = 0 for a in A: curr += a if", "S): # Write your code here. table = [0] * (len(A) + 1)", "= 0 for a in A: curr += a if curr >= S:", "of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S): # Write your code here.", "@param A: an array @param S: the sum @return: the number of non-empty", "0 for a in A: curr += a if curr >= S: count", "an array @param S: the sum @return: the number of non-empty subarrays \"\"\"", "S: the sum @return: the number of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A,", "= count = 0 for a in A: curr += a if curr", "\"\"\" @param A: an array @param S: the sum @return: the number of", "<reponame>jiadaizhao/LintCode class Solution: \"\"\" @param A: an array @param S: the sum @return:", "a in A: curr += a if curr >= S: count += table[curr", "your code here. table = [0] * (len(A) + 1) table[0] = 1", "1) table[0] = 1 curr = count = 0 for a in A:", "curr >= S: count += table[curr - S] table[curr] += 1 return count", "the sum @return: the number of non-empty subarrays \"\"\" def numSubarraysWithSum(self, A, S):", "+= a if curr >= S: count += table[curr - S] table[curr] +=" ]
[ "import ImproperlyConfigured from requests import HTTPError from rest_framework import status from app.enquiries.common.client import", "} try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data) return True except Exception: return", "APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs): if", "must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=(", "from datetime import datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured from", "\"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data) return True except", "settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client = APIClient(", "be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT,", "= request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response and e.response.status_code", "]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth(", "and e.response.status_code == status.HTTP_404_NOT_FOUND: return False return False def set_consent(key, value=True): if not", "from app.enquiries.common.client import 
APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url,", "not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\")", "{ \"consents\": [f\"{key_type}_marketing\"] if value else [], key_type: key, \"modified_at\": datetime.now().isoformat(), } try:", "settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client =", "environment variables must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE,", "\"/api/v1/person/\" def request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise", "from requests import HTTPError from rest_framework import status from app.enquiries.common.client import APIClient from", "import APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs):", "if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return False return False def set_consent(key, value=True):", "key.lower() key_type = \"email\" if \"@\" in key else \"phone\" data = {", "= \"email\" if \"@\" in key else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"]", "= APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, 
settings.CONSENT_SERVICE_READ_TIMEOUT, ), )", "api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method, **kwargs)", "set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type = \"email\"", "verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method, **kwargs) def check_consent(key):", "if \"@\" in key else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if value", "from rest_framework import status from app.enquiries.common.client import APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON", "bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return False", "else [], key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\",", "django.core.exceptions import ImproperlyConfigured from requests import HTTPError from rest_framework import status from app.enquiries.common.client", "if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\"", "request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response and e.response.status_code ==", "datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured from requests import HTTPError", "from 
django.core.exceptions import ImproperlyConfigured from requests import HTTPError from rest_framework import status from", "**kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \", \"\")", "e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return False return False def set_consent(key,", "= key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return", "CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL,", "HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY,", "value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type = \"email\" if", "\"consents\": [f\"{key_type}_marketing\"] if value else [], key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url", "settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type = \"email\" if \"@\" in key", "method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \",", "key_type = \"email\" if \"@\" in key else \"phone\" data = { \"consents\":", "key = key.lower() key_type = \"email\" if \"@\" in key else \"phone\" data", "False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type", "default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method, **kwargs) def check_consent(key): if not", 
"key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"]))", "url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as", "not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type = \"email\" if \"@\" in", "datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data) return True except Exception:", "client.request(path=url, method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\"", "return bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return", "None key = key.lower() key_type = \"email\" if \"@\" in key else \"phone\"", "method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables", "def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \", \"\") url", "as e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return False return False def", "raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID,", "key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data) 
return", "in key else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if value else [],", "from django.conf import settings from django.core.exceptions import ImproperlyConfigured from requests import HTTPError from", "return None key = key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response =", "key, \"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data) return True", "key else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if value else [], key_type:", "response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response and", "= key.lower() key_type = \"email\" if \"@\" in key else \"phone\" data =", "def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type =", "check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \", \"\") url =", "\"email\" if \"@\" in key else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if", "import settings from django.core.exceptions import ImproperlyConfigured from requests import HTTPError from rest_framework import", "datetime import datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured from requests", "if value else [], key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\"", "False return False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key =", "try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response", "settings from 
django.core.exceptions import ImproperlyConfigured from requests import HTTPError from rest_framework import status", "), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method, **kwargs) def check_consent(key): if", "== status.HTTP_404_NOT_FOUND: return False return False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return", "), ) return client.request(path=url, method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None", "if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be", "app.enquiries.common.client import APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method,", "not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try:", "if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower() key_type = \"email\" if \"@\"", "e.response.status_code == status.HTTP_404_NOT_FOUND: return False return False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]:", "ImproperlyConfigured from requests import HTTPError from rest_framework import status from app.enquiries.common.client import APIClient", "settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response", "else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if value else [], key_type: key,", "api_url=settings.CONSENT_SERVICE_BASE_URL, 
auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url,", "return False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key = key.lower()", "set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT,", "except HTTPError as e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return False return", "[f\"{key_type}_marketing\"] if value else [], key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url =", "**kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must", "variables must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ),", "return False return False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key", "value else [], key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url,", "api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) 
return client.request(path=url, method=method, **kwargs) def", "auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method,", "HTTPError as e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND: return False return False", "requests import HTTPError from rest_framework import status from app.enquiries.common.client import APIClient from app.enquiries.common.hawk", "import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID,", "import status from app.enquiries.common.client import APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\"", "settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL,", "= { \"consents\": [f\"{key_type}_marketing\"] if value else [], key_type: key, \"modified_at\": datetime.now().isoformat(), }", "from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs): if not", "method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e: if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND:", "try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data) return True except Exception: return None", "\"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if value else [], key_type: key, \"modified_at\":", "\"@\" in key else \"phone\" data = { \"consents\": [f\"{key_type}_marketing\"] if value else", "e.response and e.response.status_code == 
status.HTTP_404_NOT_FOUND: return False return False def set_consent(key, value=True): if", "settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]:", "[], key_type: key, \"modified_at\": datetime.now().isoformat(), } try: url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}\" request(url=url, method=\"POST\", json=data)", "return client.request(path=url, method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key =", "None key = key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url,", "request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment", "import HTTPError from rest_framework import status from app.enquiries.common.client import APIClient from app.enquiries.common.hawk import", "client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ),", "= f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e:", ") return client.request(path=url, method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None key", "HTTPError from rest_framework import status from app.enquiries.common.client import APIClient from 
app.enquiries.common.hawk import HawkAuth", "def request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_*", "app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def request(url, method, **kwargs): if not all([", "\", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except", "all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]): raise ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client", "ImproperlyConfigured(\"CONSENT_SERVICE_* environment variables must be set\") client = APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY,", "status from app.enquiries.common.client import APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON = \"/api/v1/person/\" def", "django.conf import settings from django.core.exceptions import ImproperlyConfigured from requests import HTTPError from rest_framework", "APIClient( api_url=settings.CONSENT_SERVICE_BASE_URL, auth=HawkAuth( api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.CONSENT_SERVICE_HAWK_KEY, verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE, ), default_timeout=( settings.CONSENT_SERVICE_CONNECT_TIMEOUT, settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return", "status.HTTP_404_NOT_FOUND: return False return False def set_consent(key, value=True): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return None", "import datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured from requests import", "rest_framework import status from 
app.enquiries.common.client import APIClient from app.enquiries.common.hawk import HawkAuth CONSENT_SERVICE_PATH_PERSON =", "settings.CONSENT_SERVICE_READ_TIMEOUT, ), ) return client.request(path=url, method=method, **kwargs) def check_consent(key): if not settings.FEATURE_FLAGS[\"ENFORCE_CONSENT_SERVICE\"]: return", "\"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError", "key = key.lower().replace(\" \", \"\") url = f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\")", "= \"/api/v1/person/\" def request(url, method, **kwargs): if not all([ settings.CONSENT_SERVICE_HAWK_ID, settings.CONSENT_SERVICE_HAWK_KEY, settings.CONSENT_SERVICE_BASE_URL, ]):", "f\"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/\" try: response = request(url=url, method=\"GET\") return bool(len(response.json()[\"consents\"])) except HTTPError as e: if", "data = { \"consents\": [f\"{key_type}_marketing\"] if value else [], key_type: key, \"modified_at\": datetime.now().isoformat(),", "return None key = key.lower() key_type = \"email\" if \"@\" in key else" ]
[]
[]
[ "rejected by a moderator, with the following comment: %s \"\"\" % (objtype, txt)", "on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'):", "request, object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything that sends notification supports manual", "moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj,", "postgresql.org. During moderation, this item has received comments that need to be addressed", "moderation notification\", msgstr) # Also generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL,", "QuerySet, which bypasses # the delete() operation on the model, and thus won't", "# conflict with the custom one. def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request)", "notifications, or done with notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model,", "'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return fld", "from django.conf import settings from pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail class", "super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt):", "need to be addressed before it can be approved. 
The comment given by", "this # is something that happens often enough that we care about performance.", "wrapper that will enable a few pg specific things: * Markdown preview for", "course. if not obj.org.email: # Should not happen because we remove the form", "* Add an admin field for \"notification\", that can be sent to the", "that we care about performance. def custom_delete_selected(self, request, queryset): for x in queryset:", "# Define a custom delete_selected action. This is required because the # default", "for x in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self,", "https://www.postgresql.org/account/ and make any changes request, and your submission will be re-moderated. \"\"\"", "(objtype, txt) else: return \"\"\"You recently submitted a %s to postgresql.org. During moderation,", "* Markdown preview for markdown capable textfields (specified by including them in a", "obj.org.email: # Should not happen because we remove the form field. Thus #", "only do processing if something changed, not when adding # a new object.", "about performance. def custom_delete_selected(self, request, queryset): for x in queryset: x.delete() custom_delete_selected.short_description =", "dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove", "to the submitter of an item to inform them of moderation issues. \"\"\"", "to inform them of moderation issues. 
\"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field,", "Manually calling delete() on each one will be slightly # slower, but will", "deleted obj.delete() return # Either no notifications, or done with notifications super(PgwebAdmin, self).save_model(request,", "admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You", "not when adding # a new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need", "custom one. def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions", "_get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s made a comment to a pending", "action, so it doesn't # conflict with the custom one. def get_actions(self, request):", "send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'],", "any changes request, and your submission will be re-moderated. \"\"\" % (objtype, txt)", "change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything that sends notification supports", "uses the delete functionality in QuerySet, which bypasses # the delete() operation on", "item to inform them of moderation issues. 
\"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self,", "the delete() operation on the model, and thus won't send out our #", "like this # is something that happens often enough that we care about", "markdown capable textfields (specified by including them in a class variable named markdown_capable", "selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form, change): if change and self.model.send_notification:", "register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if remove: return", "not be saved, it should be deleted obj.delete() return # Either no notifications,", "a custom delete_selected action. This is required because the # default one uses", "actions['delete_selected'] return actions # Define a custom delete_selected action. This is required because", "won't send out our # notifications. Manually calling delete() on each one will", "too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) #", "that will enable a few pg specific things: * Markdown preview for markdown", "should be deleted obj.delete() return # Either no notifications, or done with notifications", "id: %s Comment: %s Delete after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id,", "Either no notifications, or done with notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def", "field. Thus # a hard exception is ok. raise Exception(\"Organization does not have", "the submitter of an item to inform them of moderation issues. \"\"\" change_form_template", "# Should not happen because we remove the form field. Thus # a", "# a hard exception is ok. 
raise Exception(\"Organization does not have an email,", "extra_context=None): if self.model.send_notification: # Anything that sends notification supports manual notifications if extra_context", "txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s made a comment to", "request.POST['new_notification'] n.author = request.user.username n.save() # Now send an email too msgstr =", "import admin from django.conf import settings from pgweb.core.models import ModerationNotification from mailqueue.util import", "field names) * Add an admin field for \"notification\", that can be sent", "notifications. Manually calling delete() on each one will be slightly # slower, but", "settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username", "when adding # a new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to", "following comment: %s \"\"\" % (objtype, txt) else: return \"\"\"You recently submitted a", "is required because the # default one uses the delete functionality in QuerySet,", "will be re-moderated. 
\"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return", "markdown_capable that is a tuple of field names) * Add an admin field", "request, queryset): for x in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected']", "= request.POST['new_notification'] n.author = request.user.username n.save() # Now send an email too msgstr", "with the following comment: %s \"\"\" % (objtype, txt) else: return \"\"\"You recently", "mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable a few", "send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also generate a mail to the", "\"\"\" % (objtype, txt) else: return \"\"\"You recently submitted a %s to postgresql.org.", "changes request, and your submission will be re-moderated. \"\"\" % (objtype, txt) def", "email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr)", "%s Comment: %s Delete after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt,", "return fld def change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything that", "a few pg specific things: * Markdown preview for markdown capable textfields (specified", "form_url, extra_context) # Remove the builtin delete_selected action, so it doesn't # conflict", "by the moderator is: %s Please go to https://www.postgresql.org/account/ and make any changes", "has been rejected by a moderator, with the following comment: %s \"\"\" %", "slightly # slower, but will send proper notifications - and it's not like", "% (objtype, txt) else: return \"\"\"You recently submitted a %s to 
postgresql.org. During", "comments that need to be addressed before it can be approved. The comment", "extra_context) # Remove the builtin delete_selected action, so it doesn't # conflict with", "= obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification'] n.author = request.user.username n.save() #", "often enough that we care about performance. def custom_delete_selected(self, request, queryset): for x", "n.author = request.user.username n.save() # Now send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'),", "that can be sent to the submitter of an item to inform them", "done with notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def", "that happens often enough that we care about performance. def custom_delete_selected(self, request, queryset):", "n.save() # Now send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM,", "request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should not be saved, it should", "to https://www.postgresql.org/account/ and make any changes request, and your submission will be re-moderated.", "This is required because the # default one uses the delete functionality in", "%s to postgresql.org. 
This submission has been rejected by a moderator, with the", "from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable a", "Anything that sends notification supports manual notifications if extra_context == None: extra_context =", "def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if remove:", "notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification']", "item has received comments that need to be addressed before it can be", "We only do processing if something changed, not when adding # a new", "Also generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s", "= ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove the builtin", "n.text = request.POST['new_notification'] n.author = request.user.username n.save() # Now send an email too", "go to https://www.postgresql.org/account/ and make any changes request, and your submission will be", "super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove the builtin delete_selected action, so it", "# Need to send off a new notification. We'll also store # it", "names) * Add an admin field for \"notification\", that can be sent to", "be approved. 
The comment given by the moderator is: %s Please go to", "field for \"notification\", that can be sent to the submitter of an item", "n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification'] n.author = request.user.username n.save()", "_get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted a", "will enable a few pg specific things: * Markdown preview for markdown capable", "supports manual notifications if extra_context == None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__,", "be re-moderated. \"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator", "request.user.username n.save() # Now send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'])", "ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification'] n.author = request.user.username", "a tuple of field names) * Add an admin field for \"notification\", that", "def change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything that sends notification", "- and it's not like this # is something that happens often enough", "the model, and thus won't send out our # notifications. Manually calling delete()", "\"\"\"You recently submitted a %s to postgresql.org. This submission has been rejected by", "is something that happens often enough that we care about performance. def custom_delete_selected(self,", "database for future reference, of course. 
if not obj.org.email: # Should not happen", "send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation", "a class variable named markdown_capable that is a tuple of field names) *", "db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in", "or done with notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin)", "# Object should not be saved, it should be deleted obj.delete() return #", "Object should not be saved, it should be deleted obj.delete() return # Either", "objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove the builtin delete_selected action,", "does not have an email, canot send notification!\") n = ModerationNotification() n.objecttype =", "comment to a pending object: Object type: %s Object id: %s Comment: %s", "which bypasses # the delete() operation on the model, and thus won't send", "During moderation, this item has received comments that need to be addressed before", "things: * Markdown preview for markdown capable textfields (specified by including them in", "\"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username ))", "submitted a %s to postgresql.org. This submission has been rejected by a moderator,", "after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt, remove and \"Yes\" or", "also store # it in the database for future reference, of course. 
if", "no notifications, or done with notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model):", "do processing if something changed, not when adding # a new object. if", "<reponame>ChristophBerg/pgweb from django.contrib import admin from django.conf import settings from pgweb.core.models import ModerationNotification", "hard exception is ok. raise Exception(\"Organization does not have an email, canot send", "should not be saved, it should be deleted obj.delete() return # Either no", "settings from pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin", "your submission will be re-moderated. \"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj, txt,", "Exception(\"Organization does not have an email, canot send notification!\") n = ModerationNotification() n.objecttype", "that is a tuple of field names) * Add an admin field for", "return \"\"\"You recently submitted a %s to postgresql.org. During moderation, this item has", "because we remove the form field. Thus # a hard exception is ok.", "if change and self.model.send_notification: # We only do processing if something changed, not", "% (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should", "re-moderated. \"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s", "extra_context == None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request,", "request.POST['new_notification']: # Need to send off a new notification. 
We'll also store #", "submitter of an item to inform them of moderation issues. \"\"\" change_form_template =", "sends notification supports manual notifications if extra_context == None: extra_context = dict() extra_context['notifications']", "a pending object: Object type: %s Object id: %s Comment: %s Delete after", "django.contrib import admin from django.conf import settings from pgweb.core.models import ModerationNotification from mailqueue.util", "and thus won't send out our # notifications. Manually calling delete() on each", "to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id),", "tuple of field names) * Add an admin field for \"notification\", that can", "return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove the builtin delete_selected action, so", "to be addressed before it can be approved. The comment given by the", "request, obj, form, change): if change and self.model.send_notification: # We only do processing", "performance. def custom_delete_selected(self, request, queryset): for x in queryset: x.delete() custom_delete_selected.short_description = \"Delete", "postgresql.org. This submission has been rejected by a moderator, with the following comment:", "extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context)", "so it doesn't # conflict with the custom one. 
def get_actions(self, request): actions", "if not obj.org.email: # Should not happen because we remove the form field.", "%s made a comment to a pending object: Object type: %s Object id:", "in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj,", "the custom one. def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return", "# a new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send off", "preview for markdown capable textfields (specified by including them in a class variable", "a comment to a pending object: Object type: %s Object id: %s Comment:", "obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted a %s to postgresql.org. This submission", "with notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove,", "self).get_actions(request) del actions['delete_selected'] return actions # Define a custom delete_selected action. This is", "is a tuple of field names) * Add an admin field for \"notification\",", "a %s to postgresql.org. This submission has been rejected by a moderator, with", "request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions # Define a custom", "it's not like this # is something that happens often enough that we", "it in the database for future reference, of course. 
if not obj.org.email: #", "def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if", "one uses the delete functionality in QuerySet, which bypasses # the delete() operation", "and it's not like this # is something that happens often enough that", "= obj.id n.text = request.POST['new_notification'] n.author = request.user.username n.save() # Now send an", "return \"\"\"Moderator %s made a comment to a pending object: Object type: %s", "out our # notifications. Manually calling delete() on each one will be slightly", "is ok. raise Exception(\"Organization does not have an email, canot send notification!\") n", "be sent to the submitter of an item to inform them of moderation", "from pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper", "our # notifications. Manually calling delete() on each one will be slightly #", "the database for future reference, of course. if not obj.org.email: # Should not", "x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form, change):", "'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'):", "new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send off a new", "Delete after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt, remove and \"Yes\"", "Comment: %s Delete after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt, remove", "a hard exception is ok. 
raise Exception(\"Organization does not have an email, canot", "custom_delete_selected(self, request, queryset): for x in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\"", "and self.model.send_notification: # We only do processing if something changed, not when adding", "# slower, but will send proper notifications - and it's not like this", "object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send off a new notification.", "one. def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions #", "send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable a few pg specific", "actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions # Define a custom delete_selected", "def save_model(self, request, obj, form, change): if change and self.model.send_notification: # We only", "save_model(self, request, obj, form, change): if change and self.model.send_notification: # We only do", ")) if request.POST.has_key('remove_after_notify'): # Object should not be saved, it should be deleted", "named markdown_capable that is a tuple of field names) * Add an admin", "store # it in the database for future reference, of course. if not", "del actions['delete_selected'] return actions # Define a custom delete_selected action. 
This is required", "that sends notification supports manual notifications if extra_context == None: extra_context = dict()", "not like this # is something that happens often enough that we care", "**kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields:", "admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class']", "custom delete_selected action. This is required because the # default one uses the", "recently submitted a %s to postgresql.org. This submission has been rejected by a", "Please go to https://www.postgresql.org/account/ and make any changes request, and your submission will", "request, and your submission will be re-moderated. \"\"\" % (objtype, txt) def _get_moderator_notification_text(remove,", "an admin field for \"notification\", that can be sent to the submitter of", "= _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also generate", "Remove the builtin delete_selected action, so it doesn't # conflict with the custom", "obj.delete() return # Either no notifications, or done with notifications super(PgwebAdmin, self).save_model(request, obj,", "= admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] =", "n.objectid = obj.id n.text = request.POST['new_notification'] n.author = request.user.username n.save() # Now send", "custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form, change): if", "be addressed 
before it can be approved. The comment given by the moderator", "sent to the submitter of an item to inform them of moderation issues.", "# notifications. Manually calling delete() on each one will be slightly # slower,", "if self.model.send_notification: # Anything that sends notification supports manual notifications if extra_context ==", "obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also generate a mail to the moderators", "form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name", "' markdown_preview' return fld def change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification: #", "request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should not be saved, it should be", "notification supports manual notifications if extra_context == None: extra_context = dict() extra_context['notifications'] =", "builtin delete_selected action, so it doesn't # conflict with the custom one. def", "self).change_view(request, object_id, form_url, extra_context) # Remove the builtin delete_selected action, so it doesn't", "**kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + '", "markdown_preview' return fld def change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything", "else: return \"\"\"You recently submitted a %s to postgresql.org. 
During moderation, this item", "self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype", "the builtin delete_selected action, so it doesn't # conflict with the custom one.", "the following comment: %s \"\"\" % (objtype, txt) else: return \"\"\"You recently submitted", "one will be slightly # slower, but will send proper notifications - and", "that need to be addressed before it can be approved. The comment given", "from django.contrib import admin from django.conf import settings from pgweb.core.models import ModerationNotification from", "will be slightly # slower, but will send proper notifications - and it's", "the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'),", "given by the moderator is: %s Please go to https://www.postgresql.org/account/ and make any", "something changed, not when adding # a new object. 
if request.POST.has_key('new_notification') and request.POST['new_notification']:", "objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted a %s to postgresql.org.", "Add an admin field for \"notification\", that can be sent to the submitter", "get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions # Define a", "if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview'", "class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable a few pg specific things:", "Now send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org", "required because the # default one uses the delete functionality in QuerySet, which", "PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable a few pg specific things: *", "conflict with the custom one. def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del", "issues. \"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field,", "return \"\"\"You recently submitted a %s to postgresql.org. This submission has been rejected", "enough that we care about performance. def custom_delete_selected(self, request, queryset): for x in", "if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send off a new notification. We'll", "fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return fld def change_view(self, request, object_id, form_url='',", "# is something that happens often enough that we care about performance. 
def", "variable named markdown_capable that is a tuple of field names) * Add an", "the moderator is: %s Please go to https://www.postgresql.org/account/ and make any changes request,", "import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable a few pg", "\"notification\", that can be sent to the submitter of an item to inform", "change): if change and self.model.send_notification: # We only do processing if something changed,", "if remove: return \"\"\"You recently submitted a %s to postgresql.org. This submission has", "Markdown preview for markdown capable textfields (specified by including them in a class", "it can be approved. The comment given by the moderator is: %s Please", "# Either no notifications, or done with notifications super(PgwebAdmin, self).save_model(request, obj, form, change)", "new notification. We'll also store # it in the database for future reference,", "we remove the form field. Thus # a hard exception is ok. raise", "model, and thus won't send out our # notifications. Manually calling delete() on", "obj.id n.text = request.POST['new_notification'] n.author = request.user.username n.save() # Now send an email", "_get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also generate a", "\"\"\" ModelAdmin wrapper that will enable a few pg specific things: * Markdown", "self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return fld def change_view(self, request, object_id,", "remove the form field. Thus # a hard exception is ok. 
raise Exception(\"Organization", "obj, form, change): if change and self.model.send_notification: # We only do processing if", "fld def change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything that sends", "admin field for \"notification\", that can be sent to the submitter of an", "super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions # Define a custom delete_selected action. This", "because the # default one uses the delete functionality in QuerySet, which bypasses", "remove: return \"\"\"You recently submitted a %s to postgresql.org. This submission has been", "proper notifications - and it's not like this # is something that happens", "form, change): if change and self.model.send_notification: # We only do processing if something", "send out our # notifications. Manually calling delete() on each one will be", "obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should not be saved, it", "send proper notifications - and it's not like this # is something that", "moderator is: %s Please go to https://www.postgresql.org/account/ and make any changes request, and", "functionality in QuerySet, which bypasses # the delete() operation on the model, and", "processing if something changed, not when adding # a new object. if request.POST.has_key('new_notification')", "by a moderator, with the following comment: %s \"\"\" % (objtype, txt) else:", "comment: %s \"\"\" % (objtype, txt) else: return \"\"\"You recently submitted a %s", "of moderation issues. \"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld =", "notification. 
We'll also store # it in the database for future reference, of", "%s \"\"\" % (objtype, txt) else: return \"\"\"You recently submitted a %s to", "including them in a class variable named markdown_capable that is a tuple of", "We'll also store # it in the database for future reference, of course.", "obj, txt, moderator): return \"\"\"Moderator %s made a comment to a pending object:", "for \"notification\", that can be sent to the submitter of an item to", "This submission has been rejected by a moderator, with the following comment: %s", "# default one uses the delete functionality in QuerySet, which bypasses # the", "a %s to postgresql.org. During moderation, this item has received comments that need", "happen because we remove the form field. Thus # a hard exception is", "received comments that need to be addressed before it can be approved. The", "hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return", "an item to inform them of moderation issues. \"\"\" change_form_template = 'admin/change_form_pgweb.html' def", "= \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form, change): if change", "obj, txt): objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted a %s", "operation on the model, and thus won't send out our # notifications. Manually", "thus won't send out our # notifications. Manually calling delete() on each one", "be deleted obj.delete() return # Either no notifications, or done with notifications super(PgwebAdmin,", "Need to send off a new notification. We'll also store # it in", "moderator): return \"\"\"Moderator %s made a comment to a pending object: Object type:", "delete_selected action, so it doesn't # conflict with the custom one. def get_actions(self,", "of course. 
if not obj.org.email: # Should not happen because we remove the", "return actions # Define a custom delete_selected action. This is required because the", "manual notifications if extra_context == None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date')", "# Now send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email,", "Object type: %s Object id: %s Comment: %s Delete after comment: %s \"\"\"", "canot send notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text", "None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url,", "a moderator, with the following comment: %s \"\"\" % (objtype, txt) else: return", "have an email, canot send notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid", "by including them in a class variable named markdown_capable that is a tuple", "an email, canot send notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid =", "for future reference, of course. if not obj.org.email: # Should not happen because", "the # default one uses the delete functionality in QuerySet, which bypasses #", "moderator, with the following comment: %s \"\"\" % (objtype, txt) else: return \"\"\"You", "\"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form, change): if change and", "request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send off a new notification. We'll also", "exception is ok. 
raise Exception(\"Organization does not have an email, canot send notification!\")", "actions=['custom_delete_selected'] def save_model(self, request, obj, form, change): if change and self.model.send_notification: # We", "request.POST.has_key('remove_after_notify'): # Object should not be saved, it should be deleted obj.delete() return", "something that happens often enough that we care about performance. def custom_delete_selected(self, request,", "delete() operation on the model, and thus won't send out our # notifications.", "ok. raise Exception(\"Organization does not have an email, canot send notification!\") n =", "= obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted a %s to postgresql.org. This", "send notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text =", "and your submission will be re-moderated. \"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj,", "bypasses # the delete() operation on the model, and thus won't send out", "form field. Thus # a hard exception is ok. 
raise Exception(\"Organization does not", "comment on %s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if", "be saved, it should be deleted obj.delete() return # Either no notifications, or", "admin from django.conf import settings from pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail", "an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\",", "moderation, this item has received comments that need to be addressed before it", "# Also generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on", "send off a new notification. We'll also store # it in the database", "not obj.org.email: # Should not happen because we remove the form field. Thus", "Should not happen because we remove the form field. Thus # a hard", "with the custom one. def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected']", "make any changes request, and your submission will be re-moderated. \"\"\" % (objtype,", "queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form,", "in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return fld def change_view(self, request,", "to postgresql.org. 
During moderation, this item has received comments that need to be", "# the delete() operation on the model, and thus won't send out our", "this item has received comments that need to be addressed before it can", "def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted", "on each one will be slightly # slower, but will send proper notifications", "to a pending object: Object type: %s Object id: %s Comment: %s Delete", "but will send proper notifications - and it's not like this # is", "= 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model,", "action. This is required because the # default one uses the delete functionality", "import ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will", "object_id, form_url='', extra_context=None): if self.model.send_notification: # Anything that sends notification supports manual notifications", "items\" actions=['custom_delete_selected'] def save_model(self, request, obj, form, change): if change and self.model.send_notification: #", "email, canot send notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id", "(specified by including them in a class variable named markdown_capable that is a", "delete_selected action. This is required because the # default one uses the delete", "be slightly # slower, but will send proper notifications - and it's not", "type: %s Object id: %s Comment: %s Delete after comment: %s \"\"\" %", "_get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should not be saved,", "care about performance. 
def custom_delete_selected(self, request, queryset): for x in queryset: x.delete() custom_delete_selected.short_description", "ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that will enable", "# Remove the builtin delete_selected action, so it doesn't # conflict with the", "Thus # a hard exception is ok. raise Exception(\"Organization does not have an", "== None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id,", "object_id, form_url, extra_context) # Remove the builtin delete_selected action, so it doesn't #", "# it in the database for future reference, of course. if not obj.org.email:", "slower, but will send proper notifications - and it's not like this #", "\"\"\"You recently submitted a %s to postgresql.org. During moderation, this item has received", "n = ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification'] n.author", "raise Exception(\"Organization does not have an email, canot send notification!\") n = ModerationNotification()", "not happen because we remove the form field. Thus # a hard exception", "in the database for future reference, of course. if not obj.org.email: # Should", "reference, of course. 
if not obj.org.email: # Should not happen because we remove", "txt, moderator): return \"\"\"Moderator %s made a comment to a pending object: Object", "been rejected by a moderator, with the following comment: %s \"\"\" % (objtype,", "will send proper notifications - and it's not like this # is something", "(obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should not", "msgstr) # Also generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment", "self.model.send_notification: # Anything that sends notification supports manual notifications if extra_context == None:", "import settings from pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\"", "generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\"", "pg specific things: * Markdown preview for markdown capable textfields (specified by including", "them in a class variable named markdown_capable that is a tuple of field", "%s Please go to https://www.postgresql.org/account/ and make any changes request, and your submission", "txt) else: return \"\"\"You recently submitted a %s to postgresql.org. During moderation, this", "%s to postgresql.org. 
During moderation, this item has received comments that need to", "delete() on each one will be slightly # slower, but will send proper", "textfields (specified by including them in a class variable named markdown_capable that is", "notifications if extra_context == None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return", "(objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s made a comment", "obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object should not be", "form_url='', extra_context=None): if self.model.send_notification: # Anything that sends notification supports manual notifications if", "actions # Define a custom delete_selected action. This is required because the #", "x in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def save_model(self, request,", "if request.POST.has_key('remove_after_notify'): # Object should not be saved, it should be deleted obj.delete()", "off a new notification. We'll also store # it in the database for", "+ ' markdown_preview' return fld def change_view(self, request, object_id, form_url='', extra_context=None): if self.model.send_notification:", "has received comments that need to be addressed before it can be approved.", "future reference, of course. if not obj.org.email: # Should not happen because we", "= dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) #", "it doesn't # conflict with the custom one. 
def get_actions(self, request): actions =", "moderation issues. \"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self,", "saved, it should be deleted obj.delete() return # Either no notifications, or done", "queryset): for x in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected items\" actions=['custom_delete_selected'] def", "a new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send off a", "can be sent to the submitter of an item to inform them of", "before it can be approved. The comment given by the moderator is: %s", "fld.widget.attrs['class'] + ' markdown_preview' return fld def change_view(self, request, object_id, form_url='', extra_context=None): if", "if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return fld def", "request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also generate a mail to", "PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You recently", "pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin): \"\"\" ModelAdmin wrapper that", "submission has been rejected by a moderator, with the following comment: %s \"\"\"", "it should be deleted obj.delete() return # Either no notifications, or done with", "django.conf import settings from pgweb.core.models import ModerationNotification from mailqueue.util import send_simple_mail class PgwebAdmin(admin.ModelAdmin):", "notifications super(PgwebAdmin, self).save_model(request, obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def 
_get_notification_text(remove, obj,", "is: %s Please go to https://www.postgresql.org/account/ and make any changes request, and your", "and make any changes request, and your submission will be re-moderated. \"\"\" %", "in QuerySet, which bypasses # the delete() operation on the model, and thus", "them of moderation issues. \"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld", "%s Delete after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt, remove and", "can be approved. The comment given by the moderator is: %s Please go", "obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification'] n.author = request.user.username n.save() # Now", "pending object: Object type: %s Object id: %s Comment: %s Delete after comment:", "# Anything that sends notification supports manual notifications if extra_context == None: extra_context", "default one uses the delete functionality in QuerySet, which bypasses # the delete()", "each one will be slightly # slower, but will send proper notifications -", "= request.user.username n.save() # Now send an email too msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj,", "submitted a %s to postgresql.org. During moderation, this item has received comments that", "and request.POST['new_notification']: # Need to send off a new notification. 
We'll also store", "notification\", msgstr) # Also generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation", "change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype = obj.__class__._meta.verbose_name if", "formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name", "not have an email, canot send notification!\") n = ModerationNotification() n.objecttype = obj.__class__.__name__", "the form field. Thus # a hard exception is ok. raise Exception(\"Organization does", "doesn't # conflict with the custom one. def get_actions(self, request): actions = super(PgwebAdmin,", "enable a few pg specific things: * Markdown preview for markdown capable textfields", "recently submitted a %s to postgresql.org. During moderation, this item has received comments", "= ModerationNotification() n.objecttype = obj.__class__.__name__ n.objectid = obj.id n.text = request.POST['new_notification'] n.author =", "of an item to inform them of moderation issues. \"\"\" change_form_template = 'admin/change_form_pgweb.html'", "adding # a new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: # Need to send", "Object id: %s Comment: %s Delete after comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name,", "# We only do processing if something changed, not when adding # a", "happens often enough that we care about performance. def custom_delete_selected(self, request, queryset): for", "to send off a new notification. We'll also store # it in the", "return # Either no notifications, or done with notifications super(PgwebAdmin, self).save_model(request, obj, form,", "addressed before it can be approved. 
The comment given by the moderator is:", "class variable named markdown_capable that is a tuple of field names) * Add", "obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also generate a mail", "comment given by the moderator is: %s Please go to https://www.postgresql.org/account/ and make", "made a comment to a pending object: Object type: %s Object id: %s", "change and self.model.send_notification: # We only do processing if something changed, not when", "%s %s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): #", "def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s made a comment to a", "%s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt, remove and \"Yes\" or \"No\", )", "extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove the", "if something changed, not when adding # a new object. 
if request.POST.has_key('new_notification') and", "ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context) # Remove the builtin delete_selected", "obj, form, change) def register_pgwebadmin(model): admin.site.register(model, PgwebAdmin) def _get_notification_text(remove, obj, txt): objtype =", "if extra_context == None: extra_context = dict() extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date') return super(PgwebAdmin,", "of field names) * Add an admin field for \"notification\", that can be", "\"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s made", "in a class variable named markdown_capable that is a tuple of field names)", "fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class']", "def get_actions(self, request): actions = super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions # Define", "db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview' return fld def change_view(self,", "txt): objtype = obj.__class__._meta.verbose_name if remove: return \"\"\"You recently submitted a %s to", "ModelAdmin wrapper that will enable a few pg specific things: * Markdown preview", "submission will be re-moderated. \"\"\" % (objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator):", "delete functionality in QuerySet, which bypasses # the delete() operation on the model,", "capable textfields (specified by including them in a class variable named markdown_capable that", "approved. 
The comment given by the moderator is: %s Please go to https://www.postgresql.org/account/", "\"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs)", "Define a custom delete_selected action. This is required because the # default one", "calling delete() on each one will be slightly # slower, but will send", "self.model.send_notification: # We only do processing if something changed, not when adding #", "% (objtype, txt) def _get_moderator_notification_text(remove, obj, txt, moderator): return \"\"\"Moderator %s made a", "changed, not when adding # a new object. if request.POST.has_key('new_notification') and request.POST['new_notification']: #", "on the model, and thus won't send out our # notifications. Manually calling", "few pg specific things: * Markdown preview for markdown capable textfields (specified by", "db_field, **kwargs) if hasattr(self.model, 'markdown_fields'): if db_field.name in self.model.markdown_fields: fld.widget.attrs['class'] = fld.widget.attrs['class'] +", "specific things: * Markdown preview for markdown capable textfields (specified by including them", "\"postgresql.org moderation notification\", msgstr) # Also generate a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM,", "= super(PgwebAdmin, self).get_actions(request) del actions['delete_selected'] return actions # Define a custom delete_selected action.", "a new notification. We'll also store # it in the database for future", "we care about performance. def custom_delete_selected(self, request, queryset): for x in queryset: x.delete()", "to postgresql.org. This submission has been rejected by a moderator, with the following", "The comment given by the moderator is: %s Please go to https://www.postgresql.org/account/ and", "notifications - and it's not like this # is something that happens often", "inform them of moderation issues. 
\"\"\" change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs):", "= fld.widget.attrs['class'] + ' markdown_preview' return fld def change_view(self, request, object_id, form_url='', extra_context=None):", "%s Object id: %s Comment: %s Delete after comment: %s \"\"\" % (moderator,", "the delete functionality in QuerySet, which bypasses # the delete() operation on the", "change_form_template = 'admin/change_form_pgweb.html' def formfield_for_dbfield(self, db_field, **kwargs): fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs) if", "comment: %s \"\"\" % (moderator, obj.__class__._meta.verbose_name, obj.id, txt, remove and \"Yes\" or \"No\",", "mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" % (obj.__class__._meta.verbose_name,", "for markdown capable textfields (specified by including them in a class variable named", "object: Object type: %s Object id: %s Comment: %s Delete after comment: %s", "def custom_delete_selected(self, request, queryset): for x in queryset: x.delete() custom_delete_selected.short_description = \"Delete selected", "a mail to the moderators send_simple_mail(settings.NOTIFICATION_FROM, settings.NOTIFICATION_EMAIL, \"Moderation comment on %s %s\" %", "msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification']) send_simple_mail(settings.NOTIFICATION_FROM, obj.org.email, \"postgresql.org moderation notification\", msgstr) # Also", "%s\" % (obj.__class__._meta.verbose_name, obj.id), _get_moderator_notification_text(request.POST.has_key('remove_after_notify'), obj, request.POST['new_notification'], request.user.username )) if request.POST.has_key('remove_after_notify'): # Object", "\"\"\"Moderator %s made a comment to a pending object: Object type: %s Object" ]
[ "new units. Parameters ---------- old_units : str Current units in SI format. new_units", ": str Target units in SI format. Returns ------- factor : float A", "str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial application", "encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns", "\"\"\" # pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using one-hot encoding. Parameters", ": 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D", ": 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y =", "SI format. new_units : str Target units in SI format. Returns ------- factor", "numbers even for binary problems. \"\"\" # pylint: disable=invalid-name def transform(self, y): \"\"\"Transform", "== 1: Y = 1 - Y if len(self.classes_) == 2: Y =", "---------- old_units : str Current units in SI format. new_units : str Target", "``n_classes`` numbers even for binary problems. \"\"\" # pylint: disable=invalid-name def transform(self, y):", "is assumed to be half way between ``neg_label`` and ``pos_label``. Returns ------- y", "from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return", "def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial application of given positional", "UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + \": \" + str(error)) return", "label will be encoded using ``n_classes`` numbers even for binary problems. 
\"\"\" #", "*frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial application of given positional and keyword", "ECG Batch utils.\"\"\" import functools import pint import numpy as np from sklearn.preprocessing", "import pint import numpy as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY", "Target units in SI format. Returns ------- factor : float A factor to", "they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise", "1: Y = 1 - Y if len(self.classes_) == 2: Y = np.hstack((1", "The threshold used in the binary and multi-label cases. If ``None``, it is", "pint import numpy as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY =", "frozen_kwargs : misc Fixed keyword arguments. Returns ------- method : callable Wrapped method.", "return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial application of", "\"\"\"Return a multiplicative factor to convert a measured quantity from old to new", "for binary problems. \"\"\" # pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using", "method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical", "threshold used in the binary and multi-label cases. If ``None``, it is assumed", "If ``None``, it is assumed to be half way between ``neg_label`` and ``pos_label``.", "if len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) ==", "from old to new units. Parameters ---------- old_units : str Current units in", "2: y = super().inverse_transform(Y[:, 1], threshold) else: y = super().inverse_transform(Y, threshold) return y", "class LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label", "Fixed keyword arguments. 
Returns ------- method : callable Wrapped method. \"\"\" @functools.wraps(func) def", "arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method : callable Wrapped", "Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns -------", "import functools import pint import numpy as np from sklearn.preprocessing import LabelBinarizer as", "func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical features using", "as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to", "half way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of", ": float A factor to convert quantities between units. \"\"\" try: # pint", "back to class labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples,", "ValueError(error.__class__.__name__ + \": \" + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap", "old_units : str Current units in SI format. new_units : str Target units", "shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples,", "Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method :", "wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments.", "**kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot scheme. Unlike", "factor to convert quantities between units. \"\"\" try: # pint exceptions are wrapped", "frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns", "misc Fixed keyword arguments. Returns ------- method : callable Wrapped method. \"\"\" @functools.wraps(func)", "be encoded using ``n_classes`` numbers even for binary problems. 
\"\"\" # pylint: disable=invalid-name", "method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + \": \"", "shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) == 1: y = super().inverse_transform(1 -", "threshold=None): \"\"\"Transform one-hot encoded labels back to class labels. Parameters ---------- Y :", "= UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + \": \" + str(error))", "quantity from old to new units. Parameters ---------- old_units : str Current units", "transform(self, y): \"\"\"Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray", "LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will", "try: # pint exceptions are wrapped with ValueError exceptions because they don't implement", "== 1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y", "2: Y = np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y, threshold=None):", "keyword arguments. Parameters ---------- func : callable A method to wrap. frozen_args :", "of shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y = super().transform(y) if len(self.classes_)", "if len(self.classes_) == 2: Y = np.hstack((1 - Y, Y)) return Y def", "if len(self.classes_) == 1: Y = 1 - Y if len(self.classes_) == 2:", "Class labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot", "binary problems. \"\"\" # pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using one-hot", "used in the binary and multi-label cases. If ``None``, it is assumed to", "method : callable Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\"", "to convert quantities between units. 
\"\"\" try: # pint exceptions are wrapped with", "given positional and keyword arguments. Parameters ---------- func : callable A method to", "``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]``", ": 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float,", "------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_)", "positional and keyword arguments. Parameters ---------- func : callable A method to wrap.", "pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to convert a measured quantity", "a method with partial application of given positional and keyword arguments. Parameters ----------", "with partial application of given positional and keyword arguments. Parameters ---------- func :", "Parameters ---------- func : callable A method to wrap. frozen_args : misc Fixed", "labels. \"\"\" if len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold) elif", ": 1-D ndarray of shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) == 1:", "LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to convert", "positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method : callable", "``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class", "it is assumed to be half way between ``neg_label`` and ``pos_label``. Returns -------", "to class labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]``", "Y, Y)) return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels back", "Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold :", "Y, threshold=None): \"\"\"Transform one-hot encoded labels back to class labels. 
Parameters ---------- Y", "str Target units in SI format. Returns ------- factor : float A factor", "way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of shape", "\"\"\"Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be", "and keyword arguments. Parameters ---------- func : callable A method to wrap. frozen_args", "disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using one-hot encoding. Parameters ---------- y :", "import numpy as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry()", "don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__", "one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even", "\"\"\"Transform one-hot encoded labels back to class labels. Parameters ---------- Y : 2-D", "sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a", "super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold)", "class labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot", "len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else: y = super().inverse_transform(Y, threshold)", "to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword", "multiplicative factor to convert a measured quantity from old to new units. Parameters", "units in SI format. new_units : str Target units in SI format. Returns", "Y = 1 - Y if len(self.classes_) == 2: Y = np.hstack((1 -", "``None``, it is assumed to be half way between ``neg_label`` and ``pos_label``. Returns", "method to wrap. 
frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed", "= np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot", "except Exception as error: raise ValueError(error.__class__.__name__ + \": \" + str(error)) return factor", "format. Returns ------- factor : float A factor to convert quantities between units.", "convert quantities between units. \"\"\" try: # pint exceptions are wrapped with ValueError", "scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even for", "arguments. Returns ------- method : callable Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args,", "encoded using ``n_classes`` numbers even for binary problems. \"\"\" # pylint: disable=invalid-name def", "Returns ------- factor : float A factor to convert quantities between units. \"\"\"", "len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2:", "\"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs,", "misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method", "old to new units. Parameters ---------- old_units : str Current units in SI", "``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) == 1: y = super().inverse_transform(1 - Y,", "UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to convert a", "Y = np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform", "will be encoded using ``n_classes`` numbers even for binary problems. \"\"\" # pylint:", "one-hot encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels.", "and multi-label cases. 
If ``None``, it is assumed to be half way between", "A factor to convert quantities between units. \"\"\" try: # pint exceptions are", "cases. If ``None``, it is assumed to be half way between ``neg_label`` and", "\" + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with", "__repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + \":", "ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of", "and ``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels.", "ndarray of shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) == 1: y =", "Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels back to class labels.", "\"\"\"Wrap a method with partial application of given positional and keyword arguments. Parameters", "\"\"\"Miscellaneous ECG Batch utils.\"\"\" import functools import pint import numpy as np from", "a measured quantity from old to new units. Parameters ---------- old_units : str", "== 2: y = super().inverse_transform(Y[:, 1], threshold) else: y = super().inverse_transform(Y, threshold) return", "units. \"\"\" try: # pint exceptions are wrapped with ValueError exceptions because they", "A method to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs : misc", ": str Current units in SI format. new_units : str Target units in", "the binary and multi-label cases. If ``None``, it is assumed to be half", "str Current units in SI format. new_units : str Target units in SI", "quantities between units. \"\"\" try: # pint exceptions are wrapped with ValueError exceptions", "y : 1-D ndarray of shape ``[n_samples,]`` Class labels. 
Returns ------- Y :", "return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels back to class", "return method class LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``,", "Class labels. \"\"\" if len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold)", "features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using", "labels back to class labels. Parameters ---------- Y : 2-D ndarray of shape", "factor : float A factor to convert quantities between units. \"\"\" try: #", "of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The threshold", "of shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) == 1: y = super().inverse_transform(1", "\"\"\" if len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_)", "Parameters ---------- old_units : str Current units in SI format. new_units : str", "using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes``", "partial application of given positional and keyword arguments. Parameters ---------- func : callable", "to new units. Parameters ---------- old_units : str Current units in SI format.", "Y = super().transform(y) if len(self.classes_) == 1: Y = 1 - Y if", "y : 1-D ndarray of shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) ==", "Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args,", "a multiplicative factor to convert a measured quantity from old to new units.", "convert a measured quantity from old to new units. Parameters ---------- old_units :", "encoded labels back to class labels. 
Parameters ---------- Y : 2-D ndarray of", "np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units):", "method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args,", "error: raise ValueError(error.__class__.__name__ + \": \" + str(error)) return factor def partialmethod(func, *frozen_args,", "multi-label cases. If ``None``, it is assumed to be half way between ``neg_label``", "method class LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each", "len(self.classes_) == 2: Y = np.hstack((1 - Y, Y)) return Y def inverse_transform(self,", "labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded", "callable A method to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs :", "Y if len(self.classes_) == 2: Y = np.hstack((1 - Y, Y)) return Y", "optional The threshold used in the binary and multi-label cases. If ``None``, it", "---------- func : callable A method to wrap. frozen_args : misc Fixed positional", "Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. \"\"\" if", "with ValueError exceptions because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except", "------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\"", "new_units : str Target units in SI format. Returns ------- factor : float", "= super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1],", "\"\"\" try: # pint exceptions are wrapped with ValueError exceptions because they don't", "of given positional and keyword arguments. Parameters ---------- func : callable A method", "One-hot encoded labels. 
\"\"\" Y = super().transform(y) if len(self.classes_) == 1: Y =", "between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]``", "wrapped with ValueError exceptions because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude", "# pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__", "\"\"\"Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray of shape", "labels. \"\"\" Y = super().transform(y) if len(self.classes_) == 1: Y = 1 -", "---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y", "**frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot scheme.", "Y)) return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels back to", "1 - Y if len(self.classes_) == 2: Y = np.hstack((1 - Y, Y))", "def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return", "== 2: Y = np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y,", "because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error:", "using ``n_classes`` numbers even for binary problems. \"\"\" # pylint: disable=invalid-name def transform(self,", "to convert a measured quantity from old to new units. Parameters ---------- old_units", "Returns ------- method : callable Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs):", "``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The threshold used in", "n_classes]`` One-hot encoded labels. \"\"\" Y = super().transform(y) if len(self.classes_) == 1: Y", "a one-hot scheme. 
Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers", "def transform(self, y): \"\"\"Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D", "get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to convert a measured quantity from old", "new_units): \"\"\"Return a multiplicative factor to convert a measured quantity from old to", "threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else: y =", "Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else: y", "functools import pint import numpy as np from sklearn.preprocessing import LabelBinarizer as LB", ": callable A method to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs", "even for binary problems. \"\"\" # pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y``", "= super().transform(y) if len(self.classes_) == 1: Y = 1 - Y if len(self.classes_)", "- Y if len(self.classes_) == 2: Y = np.hstack((1 - Y, Y)) return", "in the binary and multi-label cases. If ``None``, it is assumed to be", "units in SI format. Returns ------- factor : float A factor to convert", "+ str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial", "using one-hot encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class", "ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y = super().transform(y) if", "application of given positional and keyword arguments. Parameters ---------- func : callable A", "2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional", "binary and multi-label cases. If ``None``, it is assumed to be half way", "pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using one-hot encoding. 
Parameters ---------- y", ": callable Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return", "pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method", "Batch utils.\"\"\" import functools import pint import numpy as np from sklearn.preprocessing import", "Exception as error: raise ValueError(error.__class__.__name__ + \": \" + str(error)) return factor def", "factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial application of given", "*args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class", "threshold : float, optional The threshold used in the binary and multi-label cases.", "**frozen_kwargs): \"\"\"Wrap a method with partial application of given positional and keyword arguments.", "units. Parameters ---------- old_units : str Current units in SI format. new_units :", "of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of shape", "labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded", "``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. \"\"\"", "arguments. Parameters ---------- func : callable A method to wrap. frozen_args : misc", "------- factor : float A factor to convert quantities between units. \"\"\" try:", "def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to convert a measured quantity from", "factor to convert a measured quantity from old to new units. 
Parameters ----------", "as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units,", "implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ +", "Current units in SI format. new_units : str Target units in SI format.", "shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The threshold used", "method with partial application of given positional and keyword arguments. Parameters ---------- func", "each label will be encoded using ``n_classes`` numbers even for binary problems. \"\"\"", "elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else: y = super().inverse_transform(Y,", "return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical features", "assumed to be half way between ``neg_label`` and ``pos_label``. Returns ------- y :", "labels. threshold : float, optional The threshold used in the binary and multi-label", "1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y =", "1-D ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray", "\": \" + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method", "*frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical features using a", "``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]``", "2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y = super().transform(y)", "- Y, Y)) return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels", "one-hot encoded labels back to class labels. Parameters ---------- Y : 2-D ndarray", "problems. 
\"\"\" # pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using one-hot encoding.", "\"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode", "1-D ndarray of shape ``[n_samples,]`` Class labels. \"\"\" if len(self.classes_) == 1: y", "partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a method with partial application of given positional and", "# pylint: disable=invalid-name def transform(self, y): \"\"\"Transform ``y`` using one-hot encoding. Parameters ----------", "super().transform(y) if len(self.classes_) == 1: Y = 1 - Y if len(self.classes_) ==", "**kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB):", "encoded labels. \"\"\" Y = super().transform(y) if len(self.classes_) == 1: Y = 1", "``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y = super().transform(y) if len(self.classes_) == 1:", "in SI format. new_units : str Target units in SI format. Returns -------", "Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y", "ValueError exceptions because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception", "measured quantity from old to new units. Parameters ---------- old_units : str Current", "len(self.classes_) == 1: Y = 1 - Y if len(self.classes_) == 2: Y", "------- method : callable Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped", "*args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): \"\"\"Encode categorical features using a one-hot", "+ \": \" + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): \"\"\"Wrap a", "---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. 
threshold", "are wrapped with ValueError exceptions because they don't implement __repr__ method factor =", "= 1 - Y if len(self.classes_) == 2: Y = np.hstack((1 - Y,", "``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even for binary problems.", "raise ValueError(error.__class__.__name__ + \": \" + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs):", "to be half way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D", "y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:,", "keyword arguments. Returns ------- method : callable Wrapped method. \"\"\" @functools.wraps(func) def method(self,", "y): \"\"\"Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray of", "ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The", ": float, optional The threshold used in the binary and multi-label cases. If", "between units. \"\"\" try: # pint exceptions are wrapped with ValueError exceptions because", "be half way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray", "factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + \": \" +", ": misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns -------", "encoded labels. threshold : float, optional The threshold used in the binary and", "\"\"\" Y = super().transform(y) if len(self.classes_) == 1: Y = 1 - Y", "One-hot encoded labels. threshold : float, optional The threshold used in the binary", "n_classes]`` One-hot encoded labels. 
threshold : float, optional The threshold used in the", "np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded", "Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even for binary", "as error: raise ValueError(error.__class__.__name__ + \": \" + str(error)) return factor def partialmethod(func,", "Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels.", "float, optional The threshold used in the binary and multi-label cases. If ``None``,", ": misc Fixed keyword arguments. Returns ------- method : callable Wrapped method. \"\"\"", "numpy as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def", "func : callable A method to wrap. frozen_args : misc Fixed positional arguments.", "Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels.", "callable Wrapped method. \"\"\" @functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self,", "utils.\"\"\" import functools import pint import numpy as np from sklearn.preprocessing import LabelBinarizer", "exceptions because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as", "categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded", "in SI format. 
Returns ------- factor : float A factor to convert quantities", "import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative", "method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method", "exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method factor", "- Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else:", "format. new_units : str Target units in SI format. Returns ------- factor :", "@functools.wraps(func) def method(self, *args, **kwargs): \"\"\"Wrapped method.\"\"\" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs)", "float A factor to convert quantities between units. \"\"\" try: # pint exceptions", "LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor", "inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels back to class labels. Parameters ----------", "shape ``[n_samples, n_classes]`` One-hot encoded labels. \"\"\" Y = super().transform(y) if len(self.classes_) ==", "SI format. Returns ------- factor : float A factor to convert quantities between", "def inverse_transform(self, Y, threshold=None): \"\"\"Transform one-hot encoded labels back to class labels. Parameters", "= pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): \"\"\"Return a multiplicative factor to convert a measured" ]
[ "[[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322,", "[[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser =", "[0, 0, 0]], [[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05,", "\"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] # ---", "test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0,", "= \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] #", "0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data", "Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser =", "parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] # --- Creation Failure Tests --- #def", "parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert", "0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser", "0.0, 6.0]]] # --- Creation Failure Tests --- #def test_create_none(): # pass #TODO", "# #def test_create_bad_input_too_many_values(): # pass #TODO # #def test_create_bad_input_too_few_values(): # pass #TODO #", "pass #TODO # #def test_create_empty(): # pass #TODO # #def test_create_bad_input_too_many_values(): # pass", "--- Creation Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' 
parser = Parser(data) assert", "= Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser", "5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0],", "Tests --- #def test_create_none(): # pass #TODO # #def test_create_empty(): # pass #TODO", "from models.parser import Parser def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser =", "= '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0,", "2.0, 0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0,", "Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456, -0.789, 0.111], [0,", "\"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data =", "import pytest from models.parser import Parser def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;'", "pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None # --- Creation", "test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None # ---", "-0.123, 5.0], [0, 0, 0]], [[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535,", "--- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123,", "assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] # --- Creation Failure Tests ---", "# --- Creation Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data)", "[7.109485333219216e-05, -0.00014685449655495343, 
0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028,", "Creation Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[", "0, 0]], [[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343,", "0, 0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0,", "Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123,", "#TODO # #def test_create_bad_input_too_few_values(): # pass #TODO # #def test_create_bad_input_delimiters(): # pass #TODO", "#data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None # --- Creation Tests ---", "5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser", "#def test_create_bad_input_too_many_values(): # pass #TODO # #def test_create_bad_input_too_few_values(): # pass #TODO # #def", "assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456, -0.789, 0.111], [0, 0,", "def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0,", "Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] # --- Creation Failure Tests", "5.0], [0, 0, 0]], [[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089],", "0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data", "Failure Tests --- #def test_create_none(): # pass #TODO # #def 
test_create_empty(): # pass", "data = \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]]", "def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129,", "# --- Creation Failure Tests --- #def test_create_none(): # pass #TODO # #def", "Parser def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None", "-1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data)", "data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0,", "-0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s():", "'0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None # --- Creation Tests --- def test_create_combined_data():", "[0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data =", "test_create_none(): # pass #TODO # #def test_create_empty(): # pass #TODO # #def test_create_bad_input_too_many_values():", "= Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] # --- Creation Failure", "--- #def test_create_none(): # pass #TODO # #def test_create_empty(): # pass #TODO #", "= '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]],", "-5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]]", "# pass #TODO # #def test_create_bad_input_too_many_values(): # pass #TODO # #def 
test_create_bad_input_too_few_values(): #", "3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0],", "[4.0, 0.0, 6.0]]] # --- Creation Failure Tests --- #def test_create_none(): # pass", "def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0],", "= Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123,", "#def test_create_empty(): # pass #TODO # #def test_create_bad_input_too_many_values(): # pass #TODO # #def", "#def test_create_none(): # pass #TODO # #def test_create_empty(): # pass #TODO # #def", "pass #TODO # #def test_create_bad_input_too_many_values(): # pass #TODO # #def test_create_bad_input_too_few_values(): # pass", "'0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07,", "'0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456,", "= Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456, -0.789, 0.111],", "test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0,", "0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert", "assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]],", "= \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data", "# pass #TODO # #def 
test_create_empty(): # pass #TODO # #def test_create_bad_input_too_many_values(): #", "data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]],", "-0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0,", "pass #TODO # #def test_create_bad_input_too_few_values(): # pass #TODO # #def test_create_bad_input_delimiters(): # pass", "2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data) assert", "[0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1,", "parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\"", "Parser(data) #assert parser.parsed_data==None # --- Creation Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;'", "#TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None # --- Creation Tests", "= Parser(data) #assert parser.parsed_data==None # --- Creation Tests --- def test_create_combined_data(): data =", "Creation Failure Tests --- #def test_create_none(): # pass #TODO # #def test_create_empty(): #", "0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data)", "6.0]]] # --- Creation Failure Tests --- #def test_create_none(): # pass #TODO #", "[0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data =", "parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456, -0.789, 0.111], [0, 0, 0]],", "#TODO # #def test_create_empty(): # 
pass #TODO # #def test_create_bad_input_too_many_values(): # pass #TODO", "-0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\"", "0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0,", "assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data)", "[[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser =", "test_create_bad_input_too_many_values(): # pass #TODO # #def test_create_bad_input_too_few_values(): # pass #TODO # #def test_create_bad_input_delimiters():", "parser.parsed_data==None # --- Creation Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser =", "models.parser import Parser def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data)", "#assert parser.parsed_data==None # --- Creation Tests --- def test_create_combined_data(): data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' parser", "pytest from models.parser import Parser def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser", "#parser = Parser(data) #assert parser.parsed_data==None # --- Creation Tests --- def test_create_combined_data(): data", "[0.1, -0.9, 3.0]]] def test_create_string_values_parses_to_0s(): data = \"1,2,foo;\" parser = Parser(data) assert parser.parsed_data==[[[1.0,", "data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]]", "-0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9,", "Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, 
-0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947,", "= '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None # --- Creation Tests --- def", "# pass #TODO # #def test_create_bad_input_too_few_values(): # pass #TODO # #def test_create_bad_input_delimiters(): #", "def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert parser.parsed_data==None #", "test_create_empty(): # pass #TODO # #def test_create_bad_input_too_many_values(): # pass #TODO # #def test_create_bad_input_too_few_values():", "import Parser def test_new(): pass #TODO #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;' #parser = Parser(data) #assert", "#TODO # #def test_create_bad_input_too_many_values(): # pass #TODO # #def test_create_bad_input_too_few_values(): # pass #TODO", "test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945,", "-0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072,", "2.0, 0.0], [4.0, 0.0, 6.0]]] # --- Creation Failure Tests --- #def test_create_none():", "parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2,", "--- Creation Failure Tests --- #def test_create_none(): # pass #TODO # #def test_create_empty():", "# #def test_create_empty(): # pass #TODO # #def test_create_bad_input_too_many_values(): # pass #TODO #", "-0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data():", "0.0005374874573911294]]] def 
test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;' parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0],", "[0, 0, 0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0],", "0]]] data = \"1,2,foo|4,bar,6;\" parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0,", "0]], [[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]]", "0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def test_create_separated_data(): data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;'", "0.0], [4.0, 0.0, 6.0]]] # --- Creation Failure Tests --- #def test_create_none(): #", "parser = Parser(data) assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]] # --- Creation", "[[0.456, -0.789, 0.111], [0, 0, 0]], [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]] def", "parser = Parser(data) assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06],", "-0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0,", "[[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]] def", "parser = Parser(data) assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]], [[0.456, -0.789," ]
[ "= type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend def", "self.fixed_dividend = 0 self.par_value = 0 def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol", "= 0 self.fixed_dividend = 0 self.par_value = 0 def set_stock_symbol(self, symbol_of_stock: str) ->", "= dividend def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def", "__init__(self): self.stock_symbol = None self.stock_type = None self.last_dividend = 0 self.fixed_dividend = 0", "0 def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return", "set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def", "= dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value = new_par_value def", "get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value = new_par_value def get_par_value(self): return self.par_value", "self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock", "get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self): return self.last_dividend", "def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return", "dividend): self.last_dividend = dividend def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend =", "0 self.fixed_dividend = 0 self.par_value = 0 def set_stock_symbol(self, 
symbol_of_stock: str) -> None:", "= None self.stock_type = None self.last_dividend = 0 self.fixed_dividend = 0 self.par_value =", "None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type =", "get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type", "= symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def", "symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self,", "None self.stock_type = None self.last_dividend = 0 self.fixed_dividend = 0 self.par_value = 0", "= None self.last_dividend = 0 self.fixed_dividend = 0 self.par_value = 0 def set_stock_symbol(self,", "def __init__(self): self.stock_symbol = None self.stock_type = None self.last_dividend = 0 self.fixed_dividend =", "StockModel: def __init__(self): self.stock_symbol = None self.stock_type = None self.last_dividend = 0 self.fixed_dividend", "-> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type", "set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend", "def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value = new_par_value def get_par_value(self): return", "self.stock_type = None self.last_dividend = 0 self.fixed_dividend = 0 self.par_value = 0 def", "self.last_dividend = dividend def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix", "get_last_dividend(self): return 
self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend", "self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self,", "= 0 def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self):", "def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value):", "str) -> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock):", "dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value =", "self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self,", "symbol_of_stock def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def get_stock_type(self):", "type_of_stock): self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend =", "def set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self, dividend_fix):", "self.stock_symbol = None self.stock_type = None self.last_dividend = 0 self.fixed_dividend = 0 self.par_value", "self.last_dividend = 0 self.fixed_dividend = 0 self.par_value = 0 def set_stock_symbol(self, symbol_of_stock: str)", "def get_stock_symbol(self): return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def 
get_stock_type(self): return", "None self.last_dividend = 0 self.fixed_dividend = 0 self.par_value = 0 def set_stock_symbol(self, symbol_of_stock:", "def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend):", "self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value = new_par_value", "return self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def", "= 0 self.par_value = 0 def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol =", "def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self): return", "self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend", "class StockModel: def __init__(self): self.stock_symbol = None self.stock_type = None self.last_dividend = 0", "return self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type def", "self.stock_symbol def set_stock_type(self, type_of_stock): self.stock_type = type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self,", "self.par_value = 0 def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock def", "return self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self): return self.last_dividend def", "0 self.par_value = 0 def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock", "set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self): return 
self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend", "dividend def get_last_dividend(self): return self.last_dividend def set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self):", "dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value = new_par_value def get_par_value(self):", "set_fixed_dividend(self, dividend_fix): self.fixed_dividend = dividend_fix def get_fixed_dividend(self): return self.fixed_dividend def set_par_value(self, new_par_value): self.par_value", "type_of_stock def get_stock_type(self): return self.stock_type def set_last_dividend(self, dividend): self.last_dividend = dividend def get_last_dividend(self):", "def set_stock_symbol(self, symbol_of_stock: str) -> None: self.stock_symbol = symbol_of_stock def get_stock_symbol(self): return self.stock_symbol" ]
[ ":param profile: :param contacts: list of [jid ] :return: \"\"\" stackBuilder = YowStackBuilder()", "\"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build()", "yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts):", "[jid ] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\", "YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param profile: :param contacts: list of [jid", "list of [jid ] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder \\", "from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param profile:", "def __init__(self, profile, contacts): \"\"\" :param profile: :param contacts: list of [jid ]", "YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE,", "= stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile)", "\\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val): self._stack.setProp(key, val)", "yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object):", "YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network 
import YowNetworkLayer", "from .layer import SyncLayer from yowsup.stacks import YowStackBuilder from yowsup.layers import YowLayerEvent from", "SyncLayer from yowsup.stacks import YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer", "import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def", "from yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class", "import YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import", "YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param", "\"\"\" :param profile: :param contacts: list of [jid ] :return: \"\"\" stackBuilder =", "import YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param profile: :param contacts:", "contacts: list of [jid ] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder", ".layer import SyncLayer from yowsup.stacks import YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth", "stackBuilder = YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS,", "stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def", "yowsup.stacks import YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network", 
".pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key,", "YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def __init__(self,", "self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True)", "= YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts)", "] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer)", "class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param profile: :param contacts: list of", ".build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val): self._stack.setProp(key, val) def", ".push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val): self._stack.setProp(key,", "yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param profile: :param", "self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val): self._stack.setProp(key, val) def start(self):", "from yowsup.layers.auth import YowAuthenticationProtocolLayer from yowsup.layers.network import 
YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile,", "__init__(self, profile, contacts): \"\"\" :param profile: :param contacts: list of [jid ] :return:", ":param contacts: list of [jid ] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack =", "from yowsup.stacks import YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth import YowAuthenticationProtocolLayer from", "self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val): self._stack.setProp(key, val) def start(self): self._stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT)) self._stack.loop()", "profile: :param contacts: list of [jid ] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack", ":return: \"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers() \\ .push(SyncLayer) \\", "profile, contacts): \"\"\" :param profile: :param contacts: list of [jid ] :return: \"\"\"", "YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\" :param profile: :param contacts: list", "\\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val):", "contacts): \"\"\" :param profile: :param contacts: list of [jid ] :return: \"\"\" stackBuilder", "\\ .pushDefaultLayers() \\ .push(SyncLayer) \\ .build() self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self,", "contacts) self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True) self._stack.setProfile(profile) def set_prop(self, key, val): self._stack.setProp(key, val) def start(self): self._stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))", 
"import SyncLayer from yowsup.stacks import YowStackBuilder from yowsup.layers import YowLayerEvent from yowsup.layers.auth import", "of [jid ] :return: \"\"\" stackBuilder = YowStackBuilder() self._stack = stackBuilder \\ .pushDefaultLayers()", "import YowAuthenticationProtocolLayer from yowsup.layers.network import YowNetworkLayer class YowsupSyncStack(object): def __init__(self, profile, contacts): \"\"\"" ]
[ "= input().lower() if unit_input == 'mm': amount = amount / 1000 elif unit_input", "'cm': amount = amount * 100 elif unit_output == 'mi': amount = amount", "/ 0.000621371192 elif unit_input == 'in': amount = amount / 39.3700787 elif unit_input", "amount = amount / 39.3700787 elif unit_input == 'km': amount = amount /", "единици # Да се напише програма, която преобразува разстояние между следните 8 мерни", "amount = amount / 1.0936133 if unit_output == 'mm': amount = amount *", "unit_input = input().lower() # unit_output = input().lower() # # dict = {'m':1, 'mm':1000,", "== 'in': amount = amount * 39.3700787 elif unit_output == 'km': amount =", "elif unit_input == 'mi': amount = amount / 0.000621371192 elif unit_input == 'in':", "'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount = amount * dict[unit_output] / dict[unit_input]", "print(amount) # # Other method # amount = float(input()) # # unit_input =", "'km': amount = amount * 0.001 elif unit_output == 'ft': amount = amount", "# # dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} #", "input().lower() if unit_input == 'mm': amount = amount / 1000 elif unit_input ==", "/ 100 elif unit_input == 'mi': amount = amount / 0.000621371192 elif unit_input", "unit_output == 'ft': amount = amount * 3.2808399 elif unit_output == 'yd': amount", "input().lower() # unit_output = input().lower() # # dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192,", "if unit_input == 'mm': amount = amount / 1000 elif unit_input == 'cm':", "1000 elif unit_output == 'cm': amount = amount * 100 elif unit_output ==", "amount = amount * 100 elif unit_output == 'mi': amount = amount *", "'ft':3.2808399, 'yd':1.0936133} # # amount = amount * dict[unit_output] / dict[unit_input] # #", "= amount * 3.2808399 elif unit_output == 'yd': amount = amount * 1.0936133", "/ 1.0936133 if unit_output == 'mm': amount = amount * 1000 elif unit_output", "'km':0.001, 
'ft':3.2808399, 'yd':1.0936133} # # amount = amount * dict[unit_output] / dict[unit_input] #", "= input().lower() unit_output = input().lower() if unit_input == 'mm': amount = amount /", "# unit_output = input().lower() # # dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787,", "amount = amount * 3.2808399 elif unit_output == 'yd': amount = amount *", "elif unit_input == 'km': amount = amount / 0.001 elif unit_input == 'ft':", "amount = amount / 100 elif unit_input == 'mi': amount = amount /", "amount = float(input()) # # unit_input = input().lower() # unit_output = input().lower() #", "amount * 100 elif unit_output == 'mi': amount = amount * 0.000621371192 elif", "# # unit_input = input().lower() # unit_output = input().lower() # # dict =", "'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount = amount * dict[unit_output]", "mm, cm, mi, in, km, ft, yd. Използвайте съответствията от таблицата по-долу: amount", "== 'yd': amount = amount * 1.0936133 print(amount) # # Other method #", "съответствията от таблицата по-долу: amount = float(input()) unit_input = input().lower() unit_output = input().lower()", "amount * 1.0936133 print(amount) # # Other method # amount = float(input()) #", "amount = amount * 39.3700787 elif unit_output == 'km': amount = amount *", "{'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount = amount", "= input().lower() # unit_output = input().lower() # # dict = {'m':1, 'mm':1000, 'cm':100,", "unit_output == 'in': amount = amount * 39.3700787 elif unit_output == 'km': amount", "== 'in': amount = amount / 39.3700787 elif unit_input == 'km': amount =", "km, ft, yd. 
Използвайте съответствията от таблицата по-долу: amount = float(input()) unit_input =", "100 elif unit_input == 'mi': amount = amount / 0.000621371192 elif unit_input ==", "= input().lower() # # dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399,", "== 'mi': amount = amount * 0.000621371192 elif unit_output == 'in': amount =", "* 0.001 elif unit_output == 'ft': amount = amount * 3.2808399 elif unit_output", "# unit_input = input().lower() # unit_output = input().lower() # # dict = {'m':1,", "'yd': amount = amount / 1.0936133 if unit_output == 'mm': amount = amount", "# конвертор за мерни единици # Да се напише програма, която преобразува разстояние", "преобразува разстояние между следните 8 мерни единици: m, mm, cm, mi, in, km,", "unit_input = input().lower() unit_output = input().lower() if unit_input == 'mm': amount = amount", "float(input()) # # unit_input = input().lower() # unit_output = input().lower() # # dict", "input().lower() unit_output = input().lower() if unit_input == 'mm': amount = amount / 1000", "= float(input()) # # unit_input = input().lower() # unit_output = input().lower() # #", "= amount / 1.0936133 if unit_output == 'mm': amount = amount * 1000", "amount = amount / 0.001 elif unit_input == 'ft': amount = amount /", "elif unit_input == 'yd': amount = amount / 1.0936133 if unit_output == 'mm':", "unit_input == 'mm': amount = amount / 1000 elif unit_input == 'cm': amount", "== 'ft': amount = amount / 3.2808399 elif unit_input == 'yd': amount =", "1000 elif unit_input == 'cm': amount = amount / 100 elif unit_input ==", "elif unit_output == 'ft': amount = amount * 3.2808399 elif unit_output == 'yd':", "amount = amount * 0.001 elif unit_output == 'ft': amount = amount *", "mi, in, km, ft, yd. 
Използвайте съответствията от таблицата по-долу: amount = float(input())", "== 'mi': amount = amount / 0.000621371192 elif unit_input == 'in': amount =", "unit_output == 'mi': amount = amount * 0.000621371192 elif unit_output == 'in': amount", "39.3700787 elif unit_input == 'km': amount = amount / 0.001 elif unit_input ==", "'ft': amount = amount / 3.2808399 elif unit_input == 'yd': amount = amount", "amount * 1000 elif unit_output == 'cm': amount = amount * 100 elif", "amount / 0.000621371192 elif unit_input == 'in': amount = amount / 39.3700787 elif", "== 'mm': amount = amount * 1000 elif unit_output == 'cm': amount =", "/ 39.3700787 elif unit_input == 'km': amount = amount / 0.001 elif unit_input", "amount = amount / 1000 elif unit_input == 'cm': amount = amount /", "Използвайте съответствията от таблицата по-долу: amount = float(input()) unit_input = input().lower() unit_output =", "по-долу: amount = float(input()) unit_input = input().lower() unit_output = input().lower() if unit_input ==", "= amount * 1.0936133 print(amount) # # Other method # amount = float(input())", "amount = float(input()) unit_input = input().lower() unit_output = input().lower() if unit_input == 'mm':", "'mm': amount = amount / 1000 elif unit_input == 'cm': amount = amount", "= amount / 100 elif unit_input == 'mi': amount = amount / 0.000621371192", "100 elif unit_output == 'mi': amount = amount * 0.000621371192 elif unit_output ==", "unit_output == 'km': amount = amount * 0.001 elif unit_output == 'ft': amount", "разстояние между следните 8 мерни единици: m, mm, cm, mi, in, km, ft,", "= amount * 39.3700787 elif unit_output == 'km': amount = amount * 0.001", "* 1.0936133 print(amount) # # Other method # amount = float(input()) # #", "* 1000 elif unit_output == 'cm': amount = amount * 100 elif unit_output", "1.0936133 if unit_output == 'mm': amount = amount * 1000 elif unit_output ==", "unit_input == 'yd': amount = amount / 1.0936133 if unit_output == 'mm': amount", "за мерни единици # 
Да се напише програма, която преобразува разстояние между следните", "amount / 0.001 elif unit_input == 'ft': amount = amount / 3.2808399 elif", "float(input()) unit_input = input().lower() unit_output = input().lower() if unit_input == 'mm': amount =", "= amount * 1000 elif unit_output == 'cm': amount = amount * 100", "= amount * 100 elif unit_output == 'mi': amount = amount * 0.000621371192", "cm, mi, in, km, ft, yd. Използвайте съответствията от таблицата по-долу: amount =", "elif unit_output == 'mi': amount = amount * 0.000621371192 elif unit_output == 'in':", "amount * 3.2808399 elif unit_output == 'yd': amount = amount * 1.0936133 print(amount)", "== 'mm': amount = amount / 1000 elif unit_input == 'cm': amount =", "elif unit_output == 'cm': amount = amount * 100 elif unit_output == 'mi':", "m, mm, cm, mi, in, km, ft, yd. Използвайте съответствията от таблицата по-долу:", "Other method # amount = float(input()) # # unit_input = input().lower() # unit_output", "3.2808399 elif unit_input == 'yd': amount = amount / 1.0936133 if unit_output ==", "method # amount = float(input()) # # unit_input = input().lower() # unit_output =", "unit_output == 'yd': amount = amount * 1.0936133 print(amount) # # Other method", "Да се напише програма, която преобразува разстояние между следните 8 мерни единици: m,", "unit_input == 'ft': amount = amount / 3.2808399 elif unit_input == 'yd': amount", "# dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # #", "= amount * 0.001 elif unit_output == 'ft': amount = amount * 3.2808399", "# amount = float(input()) # # unit_input = input().lower() # unit_output = input().lower()", "'km': amount = amount / 0.001 elif unit_input == 'ft': amount = amount", "elif unit_output == 'yd': amount = amount * 1.0936133 print(amount) # # Other", "= amount / 3.2808399 elif unit_input == 'yd': amount = amount / 1.0936133", "единици: m, mm, cm, mi, in, km, ft, yd. 
Използвайте съответствията от таблицата", "unit_output = input().lower() if unit_input == 'mm': amount = amount / 1000 elif", "input().lower() # # dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133}", "= float(input()) unit_input = input().lower() unit_output = input().lower() if unit_input == 'mm': amount", "'in': amount = amount * 39.3700787 elif unit_output == 'km': amount = amount", "= amount / 0.000621371192 elif unit_input == 'in': amount = amount / 39.3700787", "amount * 0.000621371192 elif unit_output == 'in': amount = amount * 39.3700787 elif", "unit_output = input().lower() # # dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001,", "програма, която преобразува разстояние между следните 8 мерни единици: m, mm, cm, mi,", "== 'cm': amount = amount / 100 elif unit_input == 'mi': amount =", "39.3700787 elif unit_output == 'km': amount = amount * 0.001 elif unit_output ==", "0.000621371192 elif unit_input == 'in': amount = amount / 39.3700787 elif unit_input ==", "elif unit_output == 'km': amount = amount * 0.001 elif unit_output == 'ft':", "= amount / 0.001 elif unit_input == 'ft': amount = amount / 3.2808399", "= amount / 39.3700787 elif unit_input == 'km': amount = amount / 0.001", "unit_input == 'km': amount = amount / 0.001 elif unit_input == 'ft': amount", "== 'ft': amount = amount * 3.2808399 elif unit_output == 'yd': amount =", "* 3.2808399 elif unit_output == 'yd': amount = amount * 1.0936133 print(amount) #", "1.0936133 print(amount) # # Other method # amount = float(input()) # # unit_input", "= amount / 1000 elif unit_input == 'cm': amount = amount / 100", "if unit_output == 'mm': amount = amount * 1000 elif unit_output == 'cm':", "3.2808399 elif unit_output == 'yd': amount = amount * 1.0936133 print(amount) # #", "unit_input == 'cm': amount = amount / 100 elif unit_input == 'mi': amount", "която преобразува разстояние между следните 8 мерни единици: m, 
mm, cm, mi, in,", "elif unit_input == 'ft': amount = amount / 3.2808399 elif unit_input == 'yd':", "* 100 elif unit_output == 'mi': amount = amount * 0.000621371192 elif unit_output", "unit_output == 'cm': amount = amount * 100 elif unit_output == 'mi': amount", "# # Other method # amount = float(input()) # # unit_input = input().lower()", "мерни единици: m, mm, cm, mi, in, km, ft, yd. Използвайте съответствията от", "unit_input == 'mi': amount = amount / 0.000621371192 elif unit_input == 'in': amount", "amount = amount * 1000 elif unit_output == 'cm': amount = amount *", "== 'cm': amount = amount * 100 elif unit_output == 'mi': amount =", "# Да се напише програма, която преобразува разстояние между следните 8 мерни единици:", "'yd': amount = amount * 1.0936133 print(amount) # # Other method # amount", "= {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount =", "'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount = amount * dict[unit_output] /", "конвертор за мерни единици # Да се напише програма, която преобразува разстояние между", "мерни единици # Да се напише програма, която преобразува разстояние между следните 8", "0.001 elif unit_input == 'ft': amount = amount / 3.2808399 elif unit_input ==", "== 'yd': amount = amount / 1.0936133 if unit_output == 'mm': amount =", "таблицата по-долу: amount = float(input()) unit_input = input().lower() unit_output = input().lower() if unit_input", "* 39.3700787 elif unit_output == 'km': amount = amount * 0.001 elif unit_output", "amount / 1000 elif unit_input == 'cm': amount = amount / 100 elif", "8 мерни единици: m, mm, cm, mi, in, km, ft, yd. 
Използвайте съответствията", "се напише програма, която преобразува разстояние между следните 8 мерни единици: m, mm,", "unit_output == 'mm': amount = amount * 1000 elif unit_output == 'cm': amount", "elif unit_input == 'cm': amount = amount / 100 elif unit_input == 'mi':", "amount = amount * 0.000621371192 elif unit_output == 'in': amount = amount *", "0.000621371192 elif unit_output == 'in': amount = amount * 39.3700787 elif unit_output ==", "elif unit_input == 'in': amount = amount / 39.3700787 elif unit_input == 'km':", "elif unit_output == 'in': amount = amount * 39.3700787 elif unit_output == 'km':", "напише програма, която преобразува разстояние между следните 8 мерни единици: m, mm, cm,", "amount / 1.0936133 if unit_output == 'mm': amount = amount * 1000 elif", "in, km, ft, yd. Използвайте съответствията от таблицата по-долу: amount = float(input()) unit_input", "'mi': amount = amount * 0.000621371192 elif unit_output == 'in': amount = amount", "'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount = amount *", "amount / 100 elif unit_input == 'mi': amount = amount / 0.000621371192 elif", "между следните 8 мерни единици: m, mm, cm, mi, in, km, ft, yd.", "amount = amount / 3.2808399 elif unit_input == 'yd': amount = amount /", "amount * 0.001 elif unit_output == 'ft': amount = amount * 3.2808399 elif", "amount = amount / 0.000621371192 elif unit_input == 'in': amount = amount /", "= amount * 0.000621371192 elif unit_output == 'in': amount = amount * 39.3700787", "'mm': amount = amount * 1000 elif unit_output == 'cm': amount = amount", "# Other method # amount = float(input()) # # unit_input = input().lower() #", "== 'km': amount = amount / 0.001 elif unit_input == 'ft': amount =", "amount * 39.3700787 elif unit_output == 'km': amount = amount * 0.001 elif", "0.001 elif unit_output == 'ft': amount = amount * 3.2808399 elif unit_output ==", "'in': amount = amount / 39.3700787 elif unit_input == 'km': 
amount = amount", "/ 3.2808399 elif unit_input == 'yd': amount = amount / 1.0936133 if unit_output", "dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133} # # amount", "/ 1000 elif unit_input == 'cm': amount = amount / 100 elif unit_input", "* 0.000621371192 elif unit_output == 'in': amount = amount * 39.3700787 elif unit_output", "unit_input == 'in': amount = amount / 39.3700787 elif unit_input == 'km': amount", "следните 8 мерни единици: m, mm, cm, mi, in, km, ft, yd. Използвайте", "ft, yd. Използвайте съответствията от таблицата по-долу: amount = float(input()) unit_input = input().lower()", "'mi': amount = amount / 0.000621371192 elif unit_input == 'in': amount = amount", "yd. Използвайте съответствията от таблицата по-долу: amount = float(input()) unit_input = input().lower() unit_output", "'cm': amount = amount / 100 elif unit_input == 'mi': amount = amount", "== 'km': amount = amount * 0.001 elif unit_output == 'ft': amount =", "'ft': amount = amount * 3.2808399 elif unit_output == 'yd': amount = amount", "<reponame>karolinanikolova/SoftUni-Software-Engineering # конвертор за мерни единици # Да се напише програма, която преобразува", "amount / 39.3700787 elif unit_input == 'km': amount = amount / 0.001 elif", "'yd':1.0936133} # # amount = amount * dict[unit_output] / dict[unit_input] # # print(amount)", "amount = amount * 1.0936133 print(amount) # # Other method # amount =", "от таблицата по-долу: amount = float(input()) unit_input = input().lower() unit_output = input().lower() if", "/ 0.001 elif unit_input == 'ft': amount = amount / 3.2808399 elif unit_input", "amount / 3.2808399 elif unit_input == 'yd': amount = amount / 1.0936133 if" ]
[ "import * class StaffModel(BaseModel): def __init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT *", "class StaffModel(BaseModel): def __init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT * FROM staff", "= f\"SELECT * FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0] except", "* class StaffModel(BaseModel): def __init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT * FROM", ".BaseModel import * class StaffModel(BaseModel): def __init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT", "super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT * FROM staff WHERE email='{usr}' AND pass='{passwd}'\"", "StaffModel(BaseModel): def __init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT * FROM staff WHERE", "query = f\"SELECT * FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0]", "from .BaseModel import * class StaffModel(BaseModel): def __init__(self): super().__init__() def validStaff(self,usr,passwd): query =", "staff WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0] except Exception as e: return", "WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0] except Exception as e: return False", "__init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT * FROM staff WHERE email='{usr}' AND", "def validStaff(self,usr,passwd): query = f\"SELECT * FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try:", "f\"SELECT * FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0] except Exception", "* FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0] except Exception as", "validStaff(self,usr,passwd): query = f\"SELECT * 
FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try: return", "FROM staff WHERE email='{usr}' AND pass='{passwd}'\" try: return self.database.fetchall(query)[0][0] except Exception as e:", "def __init__(self): super().__init__() def validStaff(self,usr,passwd): query = f\"SELECT * FROM staff WHERE email='{usr}'" ]
[ "License', classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: Other/Proprietary", "long_description = f.read() except Exception: long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables", "url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience", "Other/Proprietary License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming", "Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language", "'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language ::", "os try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description = f.read() except Exception: long_description", "'Intended Audience :: Developers', 'Natural Language :: English', 'License :: Other/Proprietary License', 'Operating", "'License :: Other/Proprietary License', 'Operating System :: POSIX :: Linux', 'Programming Language ::", "packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English', 'License", "computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ],", "long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of the' 'directories", "as f: long_description = f.read() except Exception: long_description = '' setup( name='seaborn-file', version='1.1.1',", 
"author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended", "name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of the' 'directories of a computer within", ":: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python ::", ":: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming", "the' 'directories of a computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File'", "f.read() except Exception: long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation", "'Natural Language :: English', 'License :: Other/Proprietary License', 'Operating System :: POSIX ::", "System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python", "except Exception: long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of", "keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience :: Developers', 'Natural", "version='1.1.1', description='Seaborn-File enables the manipulation of the' 'directories of a computer within a", "Exception: long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of the'", "enables the manipulation of the' 'directories of a computer within a program.', long_description='',", "with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description = f.read() except Exception: long_description = ''", "Python', 'Programming 
Language :: Python :: 2.7', 'Programming Language :: Python :: 3.6'],", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.6'], )", "install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience :: Developers', 'Natural Language", "Language :: English', 'License :: Other/Proprietary License', 'Operating System :: POSIX :: Linux',", "long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License',", "'README.md')) as f: long_description = f.read() except Exception: long_description = '' setup( name='seaborn-file',", "from setuptools import setup import os try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description", "setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of the' 'directories of a computer", "Developers', 'Natural Language :: English', 'License :: Other/Proprietary License', 'Operating System :: POSIX", "a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'],", "author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[", "setup import os try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description = f.read() except", "import setup import os try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) 
as f: long_description = f.read()", "program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT", "download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience ::", "'directories of a computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download',", ":: Developers', 'Natural Language :: English', 'License :: Other/Proprietary License', 'Operating System ::", "manipulation of the' 'directories of a computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>',", "English', 'License :: Other/Proprietary License', 'Operating System :: POSIX :: Linux', 'Programming Language", "f: long_description = f.read() except Exception: long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File", "classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: Other/Proprietary License',", "= '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of the' 'directories of", "of a computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'],", ":: English', 'License :: Other/Proprietary License', 'Operating System :: POSIX :: Linux', 'Programming", "POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7',", "extras_require={}, 
packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English',", "the manipulation of the' 'directories of a computer within a program.', long_description='', author='<NAME>',", ":: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python ::", "open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description = f.read() except Exception: long_description = '' setup(", "of the' 'directories of a computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File',", "Audience :: Developers', 'Natural Language :: English', 'License :: Other/Proprietary License', 'Operating System", "a computer within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' '/tarball/download', keywords=['os'], install_requires=[", "License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language", "try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description = f.read() except Exception: long_description =", "license='MIT License', classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English', 'License ::", "description='Seaborn-File enables the manipulation of the' 'directories of a computer within a program.',", "= f.read() except Exception: long_description = '' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the", "'/tarball/download', keywords=['os'], install_requires=[ ], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience :: Developers',", "within a program.', long_description='', author='<NAME>', author_email='<EMAIL>', url='https://github.com/SeabornGames/File', download_url='https://github.com/SeabornGames/File' 
'/tarball/download', keywords=['os'], install_requires=[ ], extras_require={},", "], extras_require={}, packages=['seaborn_file'], license='MIT License', classifiers=[ 'Intended Audience :: Developers', 'Natural Language ::", ":: Other/Proprietary License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python',", "Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python", "'' setup( name='seaborn-file', version='1.1.1', description='Seaborn-File enables the manipulation of the' 'directories of a", "import os try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description = f.read() except Exception:", "setuptools import setup import os try: with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: long_description =" ]
[ "time import bluetooth def main(): ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light", "= light.value() i = 0 while True: # Write every second, notify every", "1) % 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density) light_density = light.value()", "notify=i == 0) print(\"Light Lux:\", light_density) light_density = light.value() time.sleep_ms(1000) if __name__ ==", "import BLELightSensor from lightsensor import LightSensor import time import bluetooth def main(): ble", "bluetooth def main(): ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36)", "second, notify every 10 seconds. i = (i + 1) % 10 ble_light.set_light(light_density,", "ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36) light_density = light.value() i = 0", "% 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density) light_density = light.value() time.sleep_ms(1000)", "BLELightSensor(ble) light = LightSensor(36) light_density = light.value() i = 0 while True: #", "every 10 seconds. i = (i + 1) % 10 ble_light.set_light(light_density, notify=i ==", "BLELightSensor from lightsensor import LightSensor import time import bluetooth def main(): ble =", "ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36) light_density = light.value()", "= BLELightSensor(ble) light = LightSensor(36) light_density = light.value() i = 0 while True:", "10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density) light_density = light.value() time.sleep_ms(1000) if", "import bluetooth def main(): ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light =", "notify every 10 seconds. 
i = (i + 1) % 10 ble_light.set_light(light_density, notify=i", "import time import bluetooth def main(): ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble)", "every second, notify every 10 seconds. i = (i + 1) % 10", "10 seconds. i = (i + 1) % 10 ble_light.set_light(light_density, notify=i == 0)", "0) print(\"Light Lux:\", light_density) light_density = light.value() time.sleep_ms(1000) if __name__ == \"__main__\": main()", "i = (i + 1) % 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\",", "0 while True: # Write every second, notify every 10 seconds. i =", "ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density) light_density = light.value() time.sleep_ms(1000) if __name__", "lightsensor import LightSensor import time import bluetooth def main(): ble = bluetooth.BLE() ble.active(True)", "= (i + 1) % 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density)", "+ 1) % 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density) light_density =", "bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36) light_density = light.value() i =", "def main(): ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36) light_density", "= bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36) light_density = light.value() i", "== 0) print(\"Light Lux:\", light_density) light_density = light.value() time.sleep_ms(1000) if __name__ == \"__main__\":", "i = 0 while True: # Write every second, notify every 10 seconds.", "ble_lightsensor import BLELightSensor from lightsensor import LightSensor import time import bluetooth def main():", "light_density = light.value() i = 0 while True: # Write every second, notify", "LightSensor import time import bluetooth def main(): ble = bluetooth.BLE() ble.active(True) ble_light =", "import LightSensor import 
time import bluetooth def main(): ble = bluetooth.BLE() ble.active(True) ble_light", "from lightsensor import LightSensor import time import bluetooth def main(): ble = bluetooth.BLE()", "(i + 1) % 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light Lux:\", light_density) light_density", "= LightSensor(36) light_density = light.value() i = 0 while True: # Write every", "while True: # Write every second, notify every 10 seconds. i = (i", "ble_light = BLELightSensor(ble) light = LightSensor(36) light_density = light.value() i = 0 while", "from ble_lightsensor import BLELightSensor from lightsensor import LightSensor import time import bluetooth def", "light = LightSensor(36) light_density = light.value() i = 0 while True: # Write", "main(): ble = bluetooth.BLE() ble.active(True) ble_light = BLELightSensor(ble) light = LightSensor(36) light_density =", "LightSensor(36) light_density = light.value() i = 0 while True: # Write every second,", "light.value() i = 0 while True: # Write every second, notify every 10", "# Write every second, notify every 10 seconds. i = (i + 1)", "seconds. i = (i + 1) % 10 ble_light.set_light(light_density, notify=i == 0) print(\"Light", "True: # Write every second, notify every 10 seconds. i = (i +", "= 0 while True: # Write every second, notify every 10 seconds. i", "Write every second, notify every 10 seconds. i = (i + 1) %" ]
[ "w in words_in_test_sentences: fs = vw2fs[w] fs_sum += fs # collect fs_sum_total +=", "zorro import configs from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency'", "= {w: np.array([fs[k] for k in column_names]) for w, fs in f_df.iterrows()} stop_words", "+= fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/", "in f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used in test", "column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w:", "sentences cn2f = {cn: 0 for cn in column_names} fs_sum_total = 0 for", "\"\"\" How often do words in test sentences occur in each target corpus?", "f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used in test sentences", "# collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names, fs_sum):", "<filename>scripts/summarize_word_frequency_by_corpus.py \"\"\" How often do words in test sentences occur in each target", "column_names} fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set()", "np.zeros(len(column_names)) for w in words_in_test_sentences: fs = vw2fs[w] fs_sum += fs # collect", "for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in paradigm_path.read_text().split():", "fs = vw2fs[w] fs_sum += fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name)", "for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn]", "target corpus? 
\"\"\" import numpy as np from zorro import configs from zorro.vocab", "{w: np.array([fs[k] for k in column_names]) for w, fs in f_df.iterrows()} stop_words =", "= vw2fs[w] fs_sum += fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for", "in column_names} fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences =", "# collect types used in test sentences cn2f = {cn: 0 for cn", "{cn: 0 for cn in column_names} fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences", "collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32}", "do words in test sentences occur in each target corpus? \"\"\" import numpy", "fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}')", "cn in column_names} fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences", "paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in", "print(paradigm_path.name) for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect", "often do words in test sentences occur in each target corpus? 
\"\"\" import", "\"\"\" import numpy as np from zorro import configs from zorro.vocab import load_vocab_df", "# summary print() print('Summary') for cn, f in cn2f.items(): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum_total:.3f}')", "# collect cn2f[cn] += f # summary print() print('Summary') for cn, f in", "fs_sum += fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f", "as np from zorro import configs from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True)", "fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names,", "import numpy as np from zorro import configs from zorro.vocab import load_vocab_df vocab_df", "in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k in column_names])", "from zorro import configs from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names =", "[f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k] for", "= [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k]", "\"stopwords.txt\").open().read().split()) # collect types used in test sentences cn2f = {cn: 0 for", "used in test sentences cn2f = {cn: 0 for cn in column_names} fs_sum_total", "in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences: fs = vw2fs[w]", "words_in_test_sentences = set() for w in paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower())", "corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k in", "vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k in column_names]) for w, fs in", "w not in stop_words: 
words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences: fs", "{f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += f # summary print() print('Summary') for", "occur in each target corpus? \"\"\" import numpy as np from zorro import", "cn2f[cn] += f # summary print() print('Summary') for cn, f in cn2f.items(): print(f'{cn:.<32}", "proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += f # summary print() print('Summary') for cn,", "test sentences occur in each target corpus? \"\"\" import numpy as np from", "np from zorro import configs from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names", "cn2f = {cn: 0 for cn in column_names} fs_sum_total = 0 for paradigm_path", "not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences: fs =", "for cn in column_names} fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'):", "configs from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name", "in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += f #", "w in paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for", "/ \"stopwords.txt\").open().read().split()) # collect types used in test sentences cn2f = {cn: 0", "import configs from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for", "fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,}", "vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in 
configs.Data.corpus_names] f_df = vocab_df[column_names]", "fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for", "k in column_names]) for w, fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split())", "np.array([fs[k] for k in column_names]) for w, fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words", "(configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in paradigm_path.read_text().split(): if w not", "f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += f", "in test sentences occur in each target corpus? \"\"\" import numpy as np", "corpus? \"\"\" import numpy as np from zorro import configs from zorro.vocab import", "each target corpus? \"\"\" import numpy as np from zorro import configs from", "if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences:", "for corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k", "load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df =", "fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used in", "w, fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used", "fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences: fs = vw2fs[w] fs_sum += fs", "in each target corpus? \"\"\" import numpy as np from zorro import configs", "How often do words in test sentences occur in each target corpus? 
\"\"\"", "stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences: fs = vw2fs[w] fs_sum", "= vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k in column_names]) for w, fs", "fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += f # summary print()", "collect cn2f[cn] += f # summary print() print('Summary') for cn, f in cn2f.items():", "print(fs_sum_total) print(paradigm_path.name) for cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') #", "configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k in column_names]) for", "= set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used in test sentences cn2f =", "/ 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in paradigm_path.read_text().split(): if w not in", "for k in column_names]) for w, fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words /", "= np.zeros(len(column_names)) for w in words_in_test_sentences: fs = vw2fs[w] fs_sum += fs #", "= 0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w", "collect types used in test sentences cn2f = {cn: 0 for cn in", "words_in_test_sentences: fs = vw2fs[w] fs_sum += fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total)", "zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names]", "f_df = vocab_df[column_names] vw2fs = {w: np.array([fs[k] for k in column_names]) for w,", "paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in paradigm_path.read_text().split(): if", "zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += 
f # summary", "import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df", "cn, f in zip(column_names, fs_sum): print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] +=", "types used in test sentences cn2f = {cn: 0 for cn in column_names}", "test sentences cn2f = {cn: 0 for cn in column_names} fs_sum_total = 0", "= set() for w in paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum", "words in test sentences occur in each target corpus? \"\"\" import numpy as", "vw2fs = {w: np.array([fs[k] for k in column_names]) for w, fs in f_df.iterrows()}", "stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used in test sentences cn2f", "in paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w", "in column_names]) for w, fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) #", "for w in words_in_test_sentences: fs = vw2fs[w] fs_sum += fs # collect fs_sum_total", "set() for w in paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum =", "'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in paradigm_path.read_text().split(): if w not in stop_words:", "vw2fs[w] fs_sum += fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn,", "= load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs", "f # summary print() print('Summary') for cn, f in cn2f.items(): print(f'{cn:.<32} {f:>12,} proportion={f/", "column_names]) for w, fs in f_df.iterrows()} stop_words = 
set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect", "in test sentences cn2f = {cn: 0 for cn in column_names} fs_sum_total =", "for w, fs in f_df.iterrows()} stop_words = set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types", "load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names] f_df = vocab_df[column_names] vw2fs =", "0 for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in", "words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names)) for w in words_in_test_sentences: fs = vw2fs[w] fs_sum +=", "sentences occur in each target corpus? \"\"\" import numpy as np from zorro", "= {cn: 0 for cn in column_names} fs_sum_total = 0 for paradigm_path in", "from zorro.vocab import load_vocab_df vocab_df = load_vocab_df(return_excluded_words=True) column_names = [f'{corpus_name}-frequency' for corpus_name in", "in (configs.Dirs.sentences / 'babyberta').glob('*.txt'): words_in_test_sentences = set() for w in paradigm_path.read_text().split(): if w", "print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}') # collect cn2f[cn] += f # summary print() print('Summary')", "fs_sum.sum():.2f}') # collect cn2f[cn] += f # summary print() print('Summary') for cn, f", "in words_in_test_sentences: fs = vw2fs[w] fs_sum += fs # collect fs_sum_total += fs_sum.sum()", "0 for cn in column_names} fs_sum_total = 0 for paradigm_path in (configs.Dirs.sentences /", "numpy as np from zorro import configs from zorro.vocab import load_vocab_df vocab_df =", "+= fs # collect fs_sum_total += fs_sum.sum() print(fs_sum_total) print(paradigm_path.name) for cn, f in", "+= f # summary print() print('Summary') for cn, f in cn2f.items(): print(f'{cn:.<32} {f:>12,}", "set((configs.Dirs.external_words / \"stopwords.txt\").open().read().split()) # collect types used in test sentences 
cn2f = {cn:", "for w in paradigm_path.read_text().split(): if w not in stop_words: words_in_test_sentences.add(w.lower()) fs_sum = np.zeros(len(column_names))" ]
[ "'', 'continent': '', 'country': '', 'hostname': '', 'ip': '', 'isp': '', 'latitude': 0.0,", "% ip_address else: raise Exception('describe_ip currently only supports queries to nekudo') # TODO", "link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source ==", "as err: from labpack.handlers.requests import handle_requests from requests import Request request_object = Request(method='GET',", "('longitude', 'latitude'): if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if", "geoip module and c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request", "else: raise Exception('describe_ip currently only supports queries to nekudo') # TODO incorporate geoip", "''' a method to get the details associated with an ip address '''", "ip address ''' # determine url if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s'", "field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] =", "get the details associated with an ip address ''' # determine url if", "= Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) # extract response if source", "ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo': response_details = response.json()", "ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] =", "response = requests.get(url=source_url) except Exception as err: from labpack.handlers.requests import handle_requests 
from requests", "key not in ip_details.keys() and key != 'location': ip_details[key] = response_details[key] else: response_details", "= response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details", "re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search:", "response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius']", "in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0]", "'', 'hostname': '', 'ip': '', 'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization': '',", "ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] =", "'assignment'): if field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search:", "if __name__ == '__main__': from pprint import pprint ip_address = get_ip() ip_details =", "== '__main__': from pprint import pprint ip_address = get_ip() ip_details = describe_ip(ip_address) pprint(ip_details)", "0.0, 'longitude': 0.0, 'organization': '', 'postal_code': '', 'region': '', 'timezone': '', 'type': ''", "for field in ('longitude', 'latitude'): if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search", "'Assignment', 'Continent', 
'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for field in field_list:", "'assignment': '', 'city': '', 'continent': '', 'country': '', 'hostname': '', 'ip': '', 'isp':", "''' a method to get current public ip address of machine ''' if", "'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports queries to aws') import requests try:", "c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details = {", "def describe_ip(ip_address, source='whatismyip'): ''' a method to get the details associated with an", "= response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for key in", "del ip_details['state/region'] elif source == 'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude']", "and c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details =", "of machine ''' if source == 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip", "= requests.get(url=source_url) except Exception as err: from labpack.handlers.requests import handle_requests from requests import", "send request ip_details = { 'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city': '',", "TODO incorporate geoip module and c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html #", "if field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field]", "if key not in ip_details.keys() and key != 'location': ip_details[key] = response_details[key] else:", "country_search: ip_details['country'] = country_search[0][0] for field in ('type', 'assignment'): if field in ip_details.keys():", "in 
('longitude', 'latitude'): if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field])", "request ip_details = { 'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city': '', 'continent':", "with an ip address ''' # determine url if source == 'nekudo': source_url", "ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return", "response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip']", "''' if source == 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only", "response if source == 'whatismyip': import re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle',", "link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if 'state/region' in ip_details.keys(): ip_details['region']", "link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region']", "ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details if __name__ == '__main__': from", "'', 'type': '' } import requests try: response = requests.get(url=source_url) except Exception as", "= response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] =", 
"ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name']", "= 'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address", "= '2017.06' __licence__ = 'MIT' def get_ip(source='aws'): ''' a method to get current", "only supports queries to nekudo') # TODO incorporate geoip module and c dependencies", "source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip': source_url", "response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for", "= 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip currently only supports queries to nekudo')", "with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details = { 'accuracy_radius': 0,", "Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip()", "field_search[0] for field in ('longitude', 'latitude'): if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+')", "'', 'assignment': '', 'city': '', 'continent': '', 'country': '', 'hostname': '', 'ip': '',", "'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city': '', 'continent': '', 'country': '', 'hostname':", "the details associated with an ip address ''' # determine url if source", "in ('type', 'assignment'): if field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field])", "response_details[key] else: response_details = response.json() for field in ('city', 'ip', 'latitude', 
'longitude'): ip_details[field]", "return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method to get the details associated", "= field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field in ('longitude', 'latitude'):", "'continent': '', 'country': '', 'hostname': '', 'ip': '', 'isp': '', 'latitude': 0.0, 'longitude':", "'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' %", "= [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City',", "country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for field", "'country': '', 'hostname': '', 'ip': '', 'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization':", "from labpack.handlers.requests import handle_requests from requests import Request request_object = Request(method='GET', url=source_url) request_details", "address ''' # determine url if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' %", "if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip':", "import handle_requests from requests import Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object)", "{ 'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city': '', 'continent': '', 'country': '',", "response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details if __name__ == '__main__': from pprint import", "an ip address ''' # determine url if source == 'nekudo': source_url =", "'rcj1492' __created__ = '2017.06' __licence__ = 'MIT' def get_ip(source='aws'): ''' a method to", "= response_details[key] else: 
response_details = response.json() for field in ('city', 'ip', 'latitude', 'longitude'):", "get current public ip address of machine ''' if source == 'aws': source_url", "0, 'asn': '', 'assignment': '', 'city': '', 'continent': '', 'country': '', 'hostname': '',", "extract response if source == 'whatismyip': import re response_text = response.content.decode() table_regex =", "'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip currently only", "== 'whatismyip': import re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search =", "= float(coord_search[0]) if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if", "table_search: table_text = table_search[0] field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN',", "source == 'whatismyip': import re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search", "country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for field in ('type', 'assignment'): if field", "in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0]", "re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys():", "0.0, 'organization': '', 'postal_code': '', 'region': '', 'timezone': '', 'type': '' } import", "Exception as err: from labpack.handlers.requests import handle_requests from requests import Request request_object =", "handle_requests from requests import Request request_object = Request(method='GET', 
url=source_url) request_details = handle_requests(request_object) raise", "ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if", "'city': '', 'continent': '', 'country': '', 'hostname': '', 'ip': '', 'isp': '', 'latitude':", "= ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo': response_details = response.json() ip_details['country'] =", "raise Exception('describe_ip currently only supports queries to nekudo') # TODO incorporate geoip module", "'MIT' def get_ip(source='aws'): ''' a method to get current public ip address of", "'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>'", "ip_details.keys() and key != 'location': ip_details[key] = response_details[key] else: response_details = response.json() for", "re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for field in ('type',", "__licence__ = 'MIT' def get_ip(source='aws'): ''' a method to get current public ip", "request_details = handle_requests(request_object) raise Exception(request_details['error']) # extract response if source == 'whatismyip': import", "'' } import requests try: response = requests.get(url=source_url) except Exception as err: from", "if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for key in response_details.keys(): if", "if country_search: ip_details['country'] = country_search[0][0] for field in ('type', 'assignment'): if field in", "''' # determine url if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address", "'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = 
country_regex.findall(ip_details['country']) if country_search: ip_details['country'] =", "coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if 'country'", "re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0]", "re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field in", "err: from labpack.handlers.requests import handle_requests from requests import Request request_object = Request(method='GET', url=source_url)", "ip address of machine ''' if source == 'aws': source_url = 'http://checkip.amazonaws.com/' else:", "if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field]", "in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text) if field_search:", "= response.content.decode() current_ip = current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method", "= 'MIT' def get_ip(source='aws'): ''' a method to get current public ip address", "= response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] =", "in ip_details.keys() and key != 'location': ip_details[key] = response_details[key] else: response_details = response.json()", "= field_search[0] for field in ('longitude', 'latitude'): if field in ip_details.keys(): coord_regex =", "raise Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip() return 
current_ip def describe_ip(ip_address, source='whatismyip'):", "'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for field in field_list: field_regex =", "= coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys(): country_regex =", "re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list = [", "url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) # extract response if source == 'whatismyip':", "if coord_search: ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search", "'City', 'Latitude', 'Longitude', 'Postal Code'] for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' %", "http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip currently only supports queries", "else: response_details = response.json() for field in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] =", "'asn': '', 'assignment': '', 'city': '', 'continent': '', 'country': '', 'hostname': '', 'ip':", "source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip currently only supports queries to", "currently only supports queries to aws') import requests try: response = requests.get(url=source_url) except", "= re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] =", "link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if 'state/region'", "[ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 
'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude',", "table_search[0] field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country',", "% ip_address elif source == 'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address elif source", "incorporate geoip module and c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send", "country_search[0][0] for field in ('type', 'assignment'): if field in ip_details.keys(): link_regex = re.compile('>(.*?)<')", "Exception('describe_ip currently only supports queries to nekudo') # TODO incorporate geoip module and", "if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country']", "== 'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] =", "'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo': response_details", "ip_address else: raise Exception('describe_ip currently only supports queries to nekudo') # TODO incorporate", "re.S) table_search = table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list = [ 'IP',", "not in ip_details.keys() and key != 'location': ip_details[key] = response_details[key] else: response_details =", "method to get current public ip address of machine ''' if source ==", "currently only supports queries to nekudo') # TODO incorporate geoip module and c", "if link_search: ip_details[field] = link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del", "table_text = table_search[0] field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 
'Type', 'ASN', 'Assignment',", "try: response = requests.get(url=source_url) except Exception as err: from labpack.handlers.requests import handle_requests from", "= current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method to get the", "source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip': source_url = 'https://freegeoip.net/json/%s' %", "response_details['city'] ip_details['ip'] = response_details['ip'] for key in response_details.keys(): if key not in ip_details.keys()", "details associated with an ip address ''' # determine url if source ==", "ip_details['timezone'] = response_details['time_zone'] return ip_details if __name__ == '__main__': from pprint import pprint", "'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for field in", "('type', 'assignment'): if field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if", "current_ip = current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method to get", "response.content.decode() current_ip = current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method to", "request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip() return current_ip", "= response_details['time_zone'] return ip_details if __name__ == '__main__': from pprint import pprint ip_address", "'longitude': 0.0, 'organization': '', 'postal_code': '', 'region': '', 'timezone': '', 'type': '' }", "requests import Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip", "source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports queries to 
aws') import", "aws') import requests try: response = requests.get(url=source_url) except Exception as err: from labpack.handlers.requests", "Code'] for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search =", "Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode()", "supports queries to nekudo') # TODO incorporate geoip module and c dependencies with", "queries to aws') import requests try: response = requests.get(url=source_url) except Exception as err:", "ip_details = { 'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city': '', 'continent': '',", "response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone']", "url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip() return", "ip_details[field.lower().replace(' ','_')] = field_search[0] for field in ('longitude', 'latitude'): if field in ip_details.keys():", "and key != 'location': ip_details[key] = response_details[key] else: response_details = response.json() for field", "source_url = 'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url =", "('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name']", "'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address elif", "'organization': '', 'postal_code': '', 'region': '', 'timezone': '', 
'type': '' } import requests", "handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip() return current_ip def describe_ip(ip_address,", "current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method to get the details", "'hostname': '', 'ip': '', 'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization': '', 'postal_code':", "in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0])", "dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details = { 'accuracy_radius':", "response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search: table_text", "'Longitude', 'Postal Code'] for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S)", "= response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city']", "ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for", "ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for key in response_details.keys(): if key not", "= 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports queries to aws') import requests", "determine url if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source", "elif source == 'geoip': 
source_url = 'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip':", "source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip", "ip_details[key] = response_details[key] else: response_details = response.json() for field in ('city', 'ip', 'latitude',", "'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip': source_url = 'https://freegeoip.net/json/%s'", "# send request ip_details = { 'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city':", "def get_ip(source='aws'): ''' a method to get current public ip address of machine", "Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): '''", "response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details if", "response.json() for field in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] =", "'Postal Code'] for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search", "requests import Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) #", "= 'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s'", "'', 'ip': '', 'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization': '', 'postal_code': '',", "field in ('type', 'assignment'): if field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search =", "supports queries to aws') import requests try: response = 
requests.get(url=source_url) except Exception as", "source == 'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude']", "link_search: ip_details[field] = link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region']", "= handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip = current_ip.strip() return current_ip def", "machine ''' if source == 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently", "field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')]", "elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise", "field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field in ('longitude',", "} import requests try: response = requests.get(url=source_url) except Exception as err: from labpack.handlers.requests", "__created__ = '2017.06' __licence__ = 'MIT' def get_ip(source='aws'): ''' a method to get", "raise Exception(request_details['error']) # extract response if source == 'whatismyip': import re response_text =", "'Latitude', 'Longitude', 'Postal Code'] for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field,", "'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for field in field_list: field_regex", "describe_ip(ip_address, source='whatismyip'): ''' a method to get the details associated with an ip", "key in response_details.keys(): if key not in ip_details.keys() and key != 'location': 
ip_details[key]", "to nekudo') # TODO incorporate geoip module and c dependencies with local database", "'2017.06' __licence__ = 'MIT' def get_ip(source='aws'): ''' a method to get current public", "'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url", "table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list = [ 'IP', 'Hostname', 'ISP', 'Organization',", "field, re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field", "= response.json() for field in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country']", "= { 'accuracy_radius': 0, 'asn': '', 'assignment': '', 'city': '', 'continent': '', 'country':", "in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo': response_details =", "= Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip =", "# http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip currently only supports", "= response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search: table_text =", "in response_details.keys(): if key not in ip_details.keys() and key != 'location': ip_details[key] =", "to get current public ip address of machine ''' if source == 'aws':", "module and c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details", "Request request_object = Request(method='GET', url=source_url) request_details = 
handle_requests(request_object) raise Exception(request_details['error']) # extract response", "a method to get the details associated with an ip address ''' #", "raise Exception('get_ip currently only supports queries to aws') import requests try: response =", "field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field in ('longitude', 'latitude'): if", "'', 'timezone': '', 'type': '' } import requests try: response = requests.get(url=source_url) except", "method to get the details associated with an ip address ''' # determine", "= response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip']", "<gh_stars>1-10 __author__ = 'rcj1492' __created__ = '2017.06' __licence__ = 'MIT' def get_ip(source='aws'): '''", "ip_details['ip'] = response_details['ip'] for key in response_details.keys(): if key not in ip_details.keys() and", "field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace('", "'region': '', 'timezone': '', 'type': '' } import requests try: response = requests.get(url=source_url)", "request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip = response.content.decode() current_ip", "re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if 'state/region' in ip_details.keys():", "'', 'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization': '', 'postal_code': '', 'region': '',", "for key in response_details.keys(): if key not in ip_details.keys() and key != 'location':", "# http://tech.marksblogg.com/ip-address-lookups-in-python.html 
# send request ip_details = { 'accuracy_radius': 0, 'asn': '', 'assignment':", "field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field in ('longitude', 'latitude'): if field in", "current public ip address of machine ''' if source == 'aws': source_url =", "table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list", "if table_search: table_text = table_search[0] field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type',", "== 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports queries to", "a method to get current public ip address of machine ''' if source", "queries to nekudo') # TODO incorporate geoip module and c dependencies with local", "requests try: response = requests.get(url=source_url) except Exception as err: from labpack.handlers.requests import handle_requests", "'location': ip_details[key] = response_details[key] else: response_details = response.json() for field in ('city', 'ip',", "'whatismyip': import re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text)", "= link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] =", "response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for key in response_details.keys(): if key", "get_ip(source='aws'): ''' a method to get current public ip address of machine '''", "response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if 
response_details['city']: ip_details['city']", "# TODO incorporate geoip module and c dependencies with local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html", "response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search: table_text = table_search[0]", "== 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source == 'geoip': source_url =", "for field in ('type', 'assignment'): if field in ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search", "ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo': response_details = response.json() ip_details['country']", "'__main__': from pprint import pprint ip_address = get_ip() ip_details = describe_ip(ip_address) pprint(ip_details) pprint(describe_ip(ip_address,", "response_details.keys(): if key not in ip_details.keys() and key != 'location': ip_details[key] = response_details[key]", "'timezone': '', 'type': '' } import requests try: response = requests.get(url=source_url) except Exception", "# extract response if source == 'whatismyip': import re response_text = response.content.decode() table_regex", "else: raise Exception('get_ip currently only supports queries to aws') import requests try: response", "== 'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup", "!= 'location': ip_details[key] = response_details[key] else: response_details = response.json() for field in ('city',", "'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] =", "== 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else: raise 
Exception('describe_ip currently", "if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source == 'nekudo':", "response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius']", "for field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text)", "Exception('get_ip currently only supports queries to aws') import requests try: response = requests.get(url=source_url)", "request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) # extract response if", "= table_search[0] field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent',", "to aws') import requests try: response = requests.get(url=source_url) except Exception as err: from", "source='whatismyip'): ''' a method to get the details associated with an ip address", "coord_search: ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search =", "key != 'location': ip_details[key] = response_details[key] else: response_details = response.json() for field in", "ip_details['country'] = country_search[0][0] for field in ('type', 'assignment'): if field in ip_details.keys(): link_regex", "% field, re.S) field_search = field_regex.findall(table_text) if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for", "= link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif source", "'', 'latitude': 0.0, 'longitude': 0.0, 'organization': '', 
'postal_code': '', 'region': '', 'timezone': '',", "public ip address of machine ''' if source == 'aws': source_url = 'http://checkip.amazonaws.com/'", "'', 'postal_code': '', 'region': '', 'timezone': '', 'type': '' } import requests try:", "'', 'region': '', 'timezone': '', 'type': '' } import requests try: response =", "coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)')", "= handle_requests(request_object) raise Exception(request_details['error']) # extract response if source == 'whatismyip': import re", "ip_details.keys(): link_regex = re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if", "'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude']", "field in field_list: field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S) field_search = field_regex.findall(table_text) if", "= re.compile('>(.*?)<') link_search = link_regex.findall(ip_details[field]) if link_search: ip_details[field] = link_search[0] if 'state/region' in", "http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details = { 'accuracy_radius': 0, 'asn': '', 'assignment': '',", "'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for field", "elif source == 'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude']", "= country_search[0][0] for field in ('type', 'assignment'): if field in ip_details.keys(): link_regex =", "= response_details['zip_code'] ip_details['timezone'] = 
response_details['time_zone'] return ip_details if __name__ == '__main__': from pprint", "response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for key in response_details.keys():", "'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports queries to aws')", "from pprint import pprint ip_address = get_ip() ip_details = describe_ip(ip_address) pprint(ip_details) pprint(describe_ip(ip_address, 'nekudo'))", "response_details['time_zone'] return ip_details if __name__ == '__main__': from pprint import pprint ip_address =", "ip_address elif source == 'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address elif source ==", "'type': '' } import requests try: response = requests.get(url=source_url) except Exception as err:", "field in ('longitude', 'latitude'): if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search =", "source == 'geoip': source_url = 'https://freegeoip.net/json/%s' % ip_address elif source == 'whatismyip': #", "from requests import Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error'])", "= response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']:", "'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code'] for", "url if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif source ==", "'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude',", 
"__name__ == '__main__': from pprint import pprint ip_address = get_ip() ip_details = describe_ip(ip_address)", "except Exception as err: from labpack.handlers.requests import handle_requests from requests import Request request_object", "= 'rcj1492' __created__ = '2017.06' __licence__ = 'MIT' def get_ip(source='aws'): ''' a method", "'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal", "import Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) current_ip =", "Exception(request_details['error']) # extract response if source == 'whatismyip': import re response_text = response.content.decode()", "for field in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name']", "import pprint ip_address = get_ip() ip_details = describe_ip(ip_address) pprint(ip_details) pprint(describe_ip(ip_address, 'nekudo')) pprint(describe_ip(ip_address, 'geoip'))", "ip_details['country'] = response_details['country']['name'] ip_details['latitude'] = response_details['location']['latitude'] ip_details['longitude'] = response_details['location']['longitude'] ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if", "ip_details if __name__ == '__main__': from pprint import pprint ip_address = get_ip() ip_details", "'latitude': 0.0, 'longitude': 0.0, 'organization': '', 'postal_code': '', 'region': '', 'timezone': '', 'type':", "'latitude'): if field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search:", "current_ip = response.content.decode() current_ip = current_ip.strip() return current_ip def describe_ip(ip_address, source='whatismyip'): ''' a", "'', 'country': '', 'hostname': '', 'ip': '', 'isp': '', 
'latitude': 0.0, 'longitude': 0.0,", "ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone']", "# determine url if source == 'nekudo': source_url = 'https://geoip.nekudo.com/api/%s' % ip_address elif", "ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address else:", "nekudo') # TODO incorporate geoip module and c dependencies with local database #", "import re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if", "field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region',", "ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country'])", "'postal_code': '', 'region': '', 'timezone': '', 'type': '' } import requests try: response", "= response_details['city'] ip_details['ip'] = response_details['ip'] for key in response_details.keys(): if key not in", "handle_requests(request_object) raise Exception(request_details['error']) # extract response if source == 'whatismyip': import re response_text", "if source == 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports", "pprint import pprint ip_address = get_ip() ip_details = describe_ip(ip_address) pprint(ip_details) pprint(describe_ip(ip_address, 'nekudo')) pprint(describe_ip(ip_address,", "'', 'city': '', 'continent': '', 'country': '', 'hostname': '', 'ip': '', 'isp': '',", "return ip_details if __name__ == '__main__': from pprint import pprint ip_address = get_ip()", "= 
re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if 'country' in", "field in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region']", "to get the details associated with an ip address ''' # determine url", "field in ip_details.keys(): coord_regex = re.compile('\\-?\\d+\\.\\d+') coord_search = coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] =", "response_details = response.json() for field in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field]", "database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details = { 'accuracy_radius': 0, 'asn': '',", "country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for field in ('type', 'assignment'):", "import Request request_object = Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) # extract", "labpack.handlers.requests import handle_requests from requests import Request request_object = Request(method='GET', url=source_url) request_details =", "local database # http://tech.marksblogg.com/ip-address-lookups-in-python.html # send request ip_details = { 'accuracy_radius': 0, 'asn':", "ip_details['accuracy_radius'] = response_details['location']['accuracy_radius'] if response_details['city']: ip_details['city'] = response_details['city'] ip_details['ip'] = response_details['ip'] for key", "= response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details if __name__ ==", "','_')] = field_search[0] for field in ('longitude', 'latitude'): if field in ip_details.keys(): coord_regex", "coord_search 
= coord_regex.findall(ip_details[field]) if coord_search: ip_details[field] = float(coord_search[0]) if 'country' in ip_details.keys(): country_regex", "Request(method='GET', url=source_url) request_details = handle_requests(request_object) raise Exception(request_details['error']) # extract response if source ==", "'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code']", "'https://whatismyipaddress.com/ip/%s' % ip_address else: raise Exception('describe_ip currently only supports queries to nekudo') #", "__author__ = 'rcj1492' __created__ = '2017.06' __licence__ = 'MIT' def get_ip(source='aws'): ''' a", "current_ip def describe_ip(ip_address, source='whatismyip'): ''' a method to get the details associated with", "if source == 'whatismyip': import re response_text = response.content.decode() table_regex = re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S)", "associated with an ip address ''' # determine url if source == 'nekudo':", "float(coord_search[0]) if 'country' in ip_details.keys(): country_regex = re.compile('([\\w\\s]+?)($|\\s<img)') country_search = country_regex.findall(ip_details['country']) if country_search:", "= response_details['ip'] for key in response_details.keys(): if key not in ip_details.keys() and key", "'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization': '', 'postal_code': '', 'region': '', 'timezone':", "= re.compile('<table>\\n<tr><th>IP.*?</table>\\n<span\\sstyle', re.S) table_search = table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list =", "= table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list = [ 'IP', 'Hostname', 'ISP',", "response_details['ip'] for key in response_details.keys(): if key not in ip_details.keys() and key !=", "= re.compile('([\\w\\s]+?)($|\\s<img)') 
country_search = country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for field in", "import requests try: response = requests.get(url=source_url) except Exception as err: from labpack.handlers.requests import", "ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details if __name__", "= country_regex.findall(ip_details['country']) if country_search: ip_details['country'] = country_search[0][0] for field in ('type', 'assignment'): if", "ip_details['state/region'] elif source == 'nekudo': response_details = response.json() ip_details['country'] = response_details['country']['name'] ip_details['latitude'] =", "ip_details[field] = link_search[0] if 'state/region' in ip_details.keys(): ip_details['region'] = ip_details['state/region'] del ip_details['state/region'] elif", "in ('city', 'ip', 'latitude', 'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] =", "only supports queries to aws') import requests try: response = requests.get(url=source_url) except Exception", "'longitude'): ip_details[field] = response_details[field] ip_details['country'] = response_details['country_name'] ip_details['region'] = response_details['region_name'] ip_details['postal_code'] = response_details['zip_code']", "'ip': '', 'isp': '', 'latitude': 0.0, 'longitude': 0.0, 'organization': '', 'postal_code': '', 'region':", "source == 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise Exception('get_ip currently only supports queries", "table_search = table_regex.findall(response_text) if table_search: table_text = table_search[0] field_list = [ 'IP', 'Hostname',", "'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code']", "address of 
machine ''' if source == 'aws': source_url = 'http://checkip.amazonaws.com/' else: raise", "% ip_address elif source == 'whatismyip': # http://whatismyipaddress.com/ip-lookup source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address", "if field_search: ip_details[field.lower().replace(' ','_')] = field_search[0] for field in ('longitude', 'latitude'): if field", "requests.get(url=source_url) except Exception as err: from labpack.handlers.requests import handle_requests from requests import Request", "response_details['region_name'] ip_details['postal_code'] = response_details['zip_code'] ip_details['timezone'] = response_details['time_zone'] return ip_details if __name__ == '__main__':" ]
[ "model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9, null=True,", "on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ),", "Django 3.1.13 on 2021-09-08 03:58 from django.db import migrations, models import django.db.models.deletion class", "field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree',", "class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare',", "model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True,", "[ ('cars', '0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL,", "by Django 3.1.13 on 2021-09-08 03:58 from django.db import migrations, models import django.db.models.deletion", "related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, 
on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField(", "), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two',", "('cars', '0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone',", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ] operations = [", "[ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three',", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ] operations = [ migrations.AlterField(", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars',", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ]", "'0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'),", "field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9, null=True, 
on_delete=django.db.models.deletion.SET_NULL, related_name='cartwo',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'),", "] operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ),", "dependencies = [ ('cars', '0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4,", "<filename>autobuyfast/cars/migrations/0045_auto_20210908_0458.py # Generated by Django 3.1.13 on 2021-09-08 03:58 from django.db import migrations,", "migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9,", "name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9, null=True, on_delete=django.db.models.deletion.SET_NULL,", "= [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare',", "3.1.13 on 2021-09-08 03:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "2021-09-08 03:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "to='cars.autosearch'), ), 
migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare',", "operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField(", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ] operations", "= [ ('cars', '0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True,", "03:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ] operations =", "migrations.AlterField( model_name='carcompare', name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3,", "on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cartwo', to='cars.autosearch'), ),", "related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cartwo', to='cars.autosearch'), ), ]", "# Generated by 
Django 3.1.13 on 2021-09-08 03:58 from django.db import migrations, models", "on 2021-09-08 03:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "Migration(migrations.Migration): dependencies = [ ('cars', '0044_carcompare'), ] operations = [ migrations.AlterField( model_name='carcompare', name='car_one',", "null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_two', field=models.ForeignKey(default=9, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cartwo', to='cars.autosearch'),", "null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'),", "Generated by Django 3.1.13 on 2021-09-08 03:58 from django.db import migrations, models import", "name='car_one', field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'), ), migrations.AlterField( model_name='carcompare', name='car_three', field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL," ]
[ "lru_cache.get(5) == 5 def test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack() for data,", "x in range(i)] + [x for x in range(len(list_orig) - 1, i -", "test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1,", "list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists = [", "list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError as e: assert", "Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in lists: list_orig.print()", "list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head() == 0 except IndexError as e:", "assert list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy assert", "str(list_3) == \"1 ─> 2 ─> 3 ─> None\" assert equal_list(list_0.to_array(), []) assert", "equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] + [x for x in", "list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert list_1_copy", "range(len(list_orig) - 1, i - 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(),", "SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1,", "list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"),", "list_3 == list_0 assert str(list_0) == \"None\" assert str(list_1) == \"1 ─> None\"", "3 
lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5, value=5) # 5 3 4", "4 1 lru_cache.put(key=5, value=5) # 5 3 4 (no 1) assert lru_cache.get(1) is", "list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(), [x for x in range(start)] + [x for", "range(3): assert len(stack) == 3 - i assert stack.peek() == 2 - i", "SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\")", "in range(end + 1, len(list_orig))] ) def test_queue(): queue = Queue() for i", "next_name=\"n\"), ] for list_orig in lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy =", "assert len(queue) == 3 - i assert queue.peek() == i assert queue.pop() ==", "len(list_orig) // 2 if sublist_length > 0 and i <= len(list_orig) - sublist_length:", "Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1,", "queue.pop() == i def test_stack(): stack = Stack() for i in range(3): assert", "SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\",", "test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack() for data, min_data, max_data in zip([2,", "SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse", "for min_data, max_data in zip([1, 1, 1, 2], [5, 3, 2, 2]): min_stack.pop()", "Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4,", "list_1_copy == 
list_1 assert list_2_copy == list_2 assert list_3_copy == list_3 assert list_0.copy()", "list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy", "list_2 assert not list_2 == list_3 assert not list_3 == list_0 assert str(list_0)", "SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))),", "list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError as e:", "i in range(3): assert len(queue) == i queue.push(i) assert queue.peek() == 0 for", "MonotonicQueue from ezcode.list.lru_cache import LRUCache from fixture.utils import equal_list class Node: def __init__(self,", "== list_3_copy assert not list_0 == list_1 assert not list_1 == list_2 assert", "min_data assert max_stack.get_max() == max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark", "Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache from fixture.utils import equal_list class Node: def", "\"Peek head at an empty SinglyLinkedList\" else: assert False list_1.peek_head() == 1 list_2.peek_head()", "i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2,", "list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head))", "import Stack, MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache", "== 3 - i assert stack.peek() == 2 - i assert stack.pop() ==", "Node(3))), 
data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy ==", "stack.peek() == 2 - i assert stack.pop() == 2 - i def test_lru_cache():", "len(list_orig) - sublist_length: start, end = i, i + sublist_length - 1 list_orig_copy", "== min_data assert max_stack.get_max() == max_data for min_data, max_data in zip([1, 1, 1,", "(no 2) assert lru_cache.get(2) is None assert lru_cache.get(4) == 4 # 4 1", "else: assert False list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head()", "queue = Queue() for i in range(3): assert len(queue) == i queue.push(i) assert", "max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3, 1,", "= list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(), [x for x in range(start)] +", "lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 # 1 3 2 lru_cache.put(key=4,", "list_3 assert list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy", "assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3,", "ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache import", "Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in lists:", "i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in", "Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)),", "\"1 ─> 2 ─> 3 ─> None\" assert equal_list(list_0.to_array(), []) assert 
equal_list(list_1.to_array(), [1])", "max_data in zip([2, 1, 3, 5, 4], [2, 1, 1, 1, 1], [2,", "2 if sublist_length > 0 and i <= len(list_orig) - sublist_length: start, end", "assert len(stack) == i stack.push(i) assert stack.peek() == i for i in range(3):", "list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x", "1], [2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data assert", "list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(), [x for x in range(start)]", "assert lru_cache.get(1) is None assert lru_cache.get(3) == 33 assert lru_cache.get(5) == 5 def", "list_2_copy == list_2 assert list_3_copy == list_3 assert list_0.copy() == list_0_copy assert list_1.copy()", "assert list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy assert not list_0 == list_1", "list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head()", "next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy =", "SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)),", "== 2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() ==", "1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i,", "+ [x for x in range(len(list_orig) - 1, i - 1, -1)]) list_orig_copy", "def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), 
SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "[1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy()", "in range(3): assert len(queue) == 3 - i assert queue.peek() == i assert", "list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try:", "[ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"),", "(no 1) assert lru_cache.get(1) is None assert lru_cache.get(3) == 33 assert lru_cache.get(5) ==", "__init__(self, v=None, n=None): self.v = v self.n = n def __repr__(self): return f\"Node({self.v})\"", "data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1", "in zip([5, 3, 1, 2, 4], [5, 3, 1, 1, 1]): mq.push(data) assert", "== max_data for min_data, max_data in zip([1, 1, 1, 2], [5, 3, 2,", "2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy", "for x in range(i)] + [x for x in range(len(list_orig) - 1, i", "range(start)] + [x for x in range(end, start - 1, -1)] + [x", "SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\")", "= SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse =", "1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, 
end_index=end) assert equal_list( list_orig_copy.to_array(), [x for x in", "list_2 == list_3 assert not list_3 == list_0 assert str(list_0) == \"None\" assert", "SinglyLinkedList\" else: assert False list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head() == 3", "= LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert", "data, min_data, max_data in zip([2, 1, 3, 5, 4], [2, 1, 1, 1,", "1, 1], [2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data", "[2, 1, 1, 1, 1], [2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert", "max_stack = MaxStack() for data, min_data, max_data in zip([2, 1, 3, 5, 4],", "== list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\",", "[5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data assert max_stack.get_max() ==", "not list_3 == list_0 assert str(list_0) == \"None\" assert str(list_1) == \"1 ─>", "value=4) # 4 1 3 (no 2) assert lru_cache.get(2) is None assert lru_cache.get(4)", "MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache from fixture.utils", "assert list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse try:", "next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy", "for data, benchmark in zip([5, 3, 1, 2, 4], [5, 3, 1, 1,", "for data, min_data, max_data in zip([2, 1, 3, 5, 4], [2, 1, 1,", "1 lru_cache.put(key=5, value=5) # 5 3 4 (no 1) assert lru_cache.get(1) is None", "= MaxStack() for data, min_data, max_data in zip([2, 1, 3, 5, 4], [2,", "= MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3, 1, 
2, 4], [5, 3,", "= list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head =", "list_0 == list_1 assert not list_1 == list_2 assert not list_2 == list_3", "Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"),", "stack.push(i) assert stack.peek() == i for i in range(3): assert len(stack) == 3", "n=None): self.v = v self.n = n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics():", "SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3,", "3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy()", "next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\")", "and i <= len(list_orig) - sublist_length: start, end = i, i + sublist_length", "assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data for min_data, max_data in zip([1,", "== list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError as e: assert e.args[0]", "list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1),", "assert list_2_copy == list_2 assert list_3_copy == list_3 
assert list_0.copy() == list_0_copy assert", "from an empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2]))", "- i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1)", "list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head =", "2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy", "== 0 except IndexError as e: assert e.args[0] == \"Peek head at an", "an empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) ==", "[x for x in range(start)] + [x for x in range(end, start -", "False list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy", "end_index=end) assert equal_list( list_orig_copy.to_array(), [x for x in range(start)] + [x for x", "1, i - 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for", "assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack() for", "list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse", "Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in lists: list_orig.print() for", "- i assert 
queue.peek() == i assert queue.pop() == i def test_stack(): stack", "[1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy()", "- i assert stack.pop() == 2 - i def test_lru_cache(): lru_cache = LRUCache(capacity=3)", "list_0.peek_head() == 0 except IndexError as e: assert e.args[0] == \"Peek head at", "== i assert queue.pop() == i def test_stack(): stack = Stack() for i", "next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "1, 1, 1], [2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() ==", "SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig", "\"1 ─> 2 ─> None\" assert str(list_3) == \"1 ─> 2 ─> 3", "ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache from fixture.utils import equal_list class", "2])) == list_0 def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)),", "== 2 - i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None", "ezcode.list.linked_list import SinglyLinkedList from ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue import Queue,", "x in range(i, -1, -1)] + [x for x in range(i + 1,", "max_stack.push(data) assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data for min_data, max_data in", "1, 1, 1, 1], [2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min()", "= list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = 
list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))),", "x in range(i + 1, len(list_orig))]) sublist_length = len(list_orig) // 2 if sublist_length", "list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert list_2_copy", "list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy", "assert list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy assert", "import Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache from fixture.utils import equal_list class Node:", "== list_3_reverse try: list_0.peek_head() == 0 except IndexError as e: assert e.args[0] ==", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1,", "self.n = n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\",", "range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] +", "SinglyLinkedList from ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue from", "= v self.n = n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 =", 
"assert len(queue) == i queue.push(i) assert queue.peek() == 0 for i in range(3):", "== 2 - i assert stack.pop() == 2 - i def test_lru_cache(): lru_cache", "= SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 =", "== 33 assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack = MinStack() max_stack =", "list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] + [x", "assert not list_3 == list_0 assert str(list_0) == \"None\" assert str(list_1) == \"1", "queue.push(i) assert queue.peek() == 0 for i in range(3): assert len(queue) == 3", "Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3,", "== list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy() ==", "from ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache", "lru_cache.put(key=4, value=4) # 4 1 3 (no 2) assert lru_cache.get(2) is None assert", "in range(i)] + [x for x in range(len(list_orig) - 1, i - 1,", "4 1 3 lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5, value=5) # 5", "as e: assert e.args[0] == \"Peek head at an empty SinglyLinkedList\" else: assert", "list_3_copy == list_3_reverse try: list_0.peek_head() == 0 except IndexError as e: assert e.args[0]", "for i in range(3): assert len(queue) == 3 - i assert queue.peek() ==", "stack.peek() == i for i in range(3): assert len(stack) == 3 - i", "3 ─> None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2])", "list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() 
list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse()", "empty SinglyLinkedList\" else: assert False list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head() ==", "min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data def test_monontonic_queue(): mq", "assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy =", "x in range(len(list_orig) - 1, i - 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i)", "= SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\",", "== list_0 assert list_1_copy == list_1 assert list_2_copy == list_2 assert list_3_copy ==", "MaxStack from ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache from fixture.utils import", "assert max_stack.get_max() == max_data for min_data, max_data in zip([1, 1, 1, 2], [5,", "e.args[0] == \"Pop head from an empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3]))", "== list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head,", "- 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in", "test_stack(): stack = Stack() for i in range(3): assert len(stack) == i stack.push(i)", "sublist_length > 0 and i <= len(list_orig) - sublist_length: start, end = i,", "SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2,", "i 
assert queue.peek() == i assert queue.pop() == i def test_stack(): stack =", "[5, 3, 1, 1, 1]): mq.push(data) assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False)", "assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] + [x for x", "== list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head() == 0 except IndexError as", "== 5 def test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack() for data, min_data,", "self.v = v self.n = n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0", "= SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\")", "assert queue.pop() == i def test_stack(): stack = Stack() for i in range(3):", "list_3_copy assert not list_0 == list_1 assert not list_1 == list_2 assert not", "SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "== list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy ==", "Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in lists: list_orig.print() for i in range(len(list_orig)):", "head from an empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1,", "Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"),", "list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None,", "assert e.args[0] == \"Peek head at an empty 
SinglyLinkedList\" else: assert False list_1.peek_head()", "list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy assert not list_0 ==", "list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy ==", "max_data for min_data, max_data in zip([1, 1, 1, 2], [5, 3, 2, 2]):", "== \"Pop head from an empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) ==", "Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1,", "next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert", "list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy ==", "start - 1, -1)] + [x for x in range(end + 1, len(list_orig))]", "for i in range(3): assert len(queue) == i queue.push(i) assert queue.peek() == 0", "Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5,", "list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\",", "assert list_0_copy == list_0 assert list_1_copy == list_1 assert list_2_copy == list_2 assert", "== list_3 assert not list_3 == list_0 assert str(list_0) == \"None\" assert str(list_1)", 
"list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse()", "def test_queue(): queue = Queue() for i in range(3): assert len(queue) == i", "1) assert lru_cache.get(1) is None assert lru_cache.get(3) == 33 assert lru_cache.get(5) == 5", "list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for x in", "head at an empty SinglyLinkedList\" else: assert False list_1.peek_head() == 1 list_2.peek_head() ==", "equal_list( list_orig_copy.to_array(), [x for x in range(start)] + [x for x in range(end,", "= MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3, 1, 2, 4], [5, 5,", "2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data def test_monontonic_queue():", "5 def test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack() for data, min_data, max_data", "zip([5, 3, 1, 2, 4], [5, 5, 5, 5, 5]): mq.push(data) assert mq.peek()", "list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy assert not list_0 == list_1 assert", "3 - i assert stack.peek() == 2 - i assert stack.pop() == 2", "None\" assert str(list_3) == \"1 ─> 2 ─> 3 ─> None\" assert equal_list(list_0.to_array(),", "== list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError as", "queue.peek() == 0 for i in range(3): assert len(queue) == 3 - i", "[2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data assert max_stack.get_max()", "for data, benchmark in zip([5, 3, 1, 2, 4], [5, 5, 5, 5,", "lru_cache.get(2) is None assert lru_cache.get(4) == 4 # 4 1 3 lru_cache.put(key=3, value=33)", "list_0_copy try: list_0.pop_head() except IndexError as e: assert e.args[0] == 
\"Pop head from", "an empty SinglyLinkedList\" else: assert False list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head()", "v self.n = n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None,", "3 2 lru_cache.put(key=4, value=4) # 4 1 3 (no 2) assert lru_cache.get(2) is", "== i queue.push(i) assert queue.peek() == 0 for i in range(3): assert len(queue)", "None assert lru_cache.get(4) == 4 # 4 1 3 lru_cache.put(key=3, value=33) # 3", "assert str(list_1) == \"1 ─> None\" assert str(list_2) == \"1 ─> 2 ─>", "Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in lists: list_orig.print() for i", "== list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head,", "def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2)", "Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"),", "lru_cache.get(1) == 1 # 1 3 2 lru_cache.put(key=4, value=4) # 4 1 3", "SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy == list_1", "> 0 and i <= len(list_orig) - sublist_length: start, end = i, i", "except IndexError as e: assert e.args[0] == \"Pop head from an empty SinglyLinkedList\"", "2 - i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1,", "range(end + 1, len(list_orig))] ) def test_queue(): queue = Queue() for i in", "zip([5, 3, 1, 2, 4], 
[5, 3, 1, 1, 1]): mq.push(data) assert mq.peek()", "== benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3, 1, 2,", "Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5,", "Queue() for i in range(3): assert len(queue) == i queue.push(i) assert queue.peek() ==", "i - 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x", "assert queue.peek() == 0 for i in range(3): assert len(queue) == 3 -", "list_orig.print() for i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for", "in range(i + 1, len(list_orig))]) sublist_length = len(list_orig) // 2 if sublist_length >", "assert list_3_copy == list_3 assert list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy assert", "lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5, value=5) # 5 3 4 (no", "= list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] +", "list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy", "<reponame>zheng-gao/ez_code from ezcode.list.linked_list import SinglyLinkedList from ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue", "e: assert e.args[0] == \"Peek head at an empty SinglyLinkedList\" else: assert False", "== list_0_copy try: list_0.pop_head() except IndexError as e: assert e.args[0] == \"Pop head", "in range(start)] + [x for x in range(end, start - 1, -1)] +", "3 (no 2) assert lru_cache.get(2) is None assert lru_cache.get(4) == 4 # 4", "== \"1 ─> 2 ─> 3 ─> None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(),", "i stack.push(i) assert stack.peek() == i for i in range(3): assert 
len(stack) ==", "equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for x in range(len(list_orig) -", "= SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\",", "== 1 list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() ==", "= list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert list_1_copy ==", "i in range(3): assert len(queue) == 3 - i assert queue.peek() == i", "next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse =", "str(list_0) == \"None\" assert str(list_1) == \"1 ─> None\" assert str(list_2) == \"1", "SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1,", "next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for", "list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head() == 0 except IndexError", "equal_list class Node: def __init__(self, v=None, n=None): self.v = v self.n = n", "assert str(list_0) == \"None\" assert str(list_1) == \"1 ─> None\" assert str(list_2) ==", "list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy", "- sublist_length: start, end = i, i + sublist_length - 1 list_orig_copy =", 
"next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4,", "range(end, start - 1, -1)] + [x for x in range(end + 1,", "4 (no 1) assert lru_cache.get(1) is None assert lru_cache.get(3) == 33 assert lru_cache.get(5)", "Stack, MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue from ezcode.list.lru_cache import LRUCache from", "list_0_copy == list_0 assert list_1_copy == list_1 assert list_2_copy == list_2 assert list_3_copy", "list_0 def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"),", "3, 5, 4], [2, 1, 1, 1, 1], [2, 2, 3, 5, 5]):", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1,", "= Stack() for i in range(3): assert len(stack) == i stack.push(i) assert stack.peek()", "value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 # 1 3 2", "assert list_0_copy == list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert", "MinStack() max_stack = MaxStack() for data, min_data, max_data in zip([2, 1, 3, 5,", "data, benchmark in zip([5, 3, 1, 2, 4], [5, 5, 5, 5, 5]):", "assert list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head", "False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists =", "len(list_orig))]) sublist_length = len(list_orig) // 2 if sublist_length > 0 and i <=", "x in range(end + 1, len(list_orig))] ) def test_queue(): queue = Queue() for", "== list_1 assert not list_1 == 
list_2 assert not list_2 == list_3 assert", "list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy =", "list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse", "3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data", "1 3 2 lru_cache.put(key=4, value=4) # 4 1 3 (no 2) assert lru_cache.get(2)", "Node: def __init__(self, v=None, n=None): self.v = v self.n = n def __repr__(self):", "3 4 (no 1) assert lru_cache.get(1) is None assert lru_cache.get(3) == 33 assert", "assert str(list_3) == \"1 ─> 2 ─> 3 ─> None\" assert equal_list(list_0.to_array(), [])", "sublist_length = len(list_orig) // 2 if sublist_length > 0 and i <= len(list_orig)", "assert lru_cache.get(4) == 4 # 4 1 3 lru_cache.put(key=3, value=33) # 3 4", "assert list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head", "0 and i <= len(list_orig) - sublist_length: start, end = i, i +", "equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy()", "value=5) # 5 3 4 (no 1) assert lru_cache.get(1) is None assert lru_cache.get(3)", "x in range(start)] + [x for x in range(end, start - 1, -1)]", "Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\",", "in lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(),", "+ [x for x in range(end + 1, 
len(list_orig))] ) def test_queue(): queue", "assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy", "== 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head()", "= list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for", "1, len(list_orig))] ) def test_queue(): queue = Queue() for i in range(3): assert", "ezcode.list.lru_cache import LRUCache from fixture.utils import equal_list class Node: def __init__(self, v=None, n=None):", "list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert", "benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3, 1, 2, 4],", "= [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\",", "Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6,", "= Queue() for i in range(3): assert len(queue) == i queue.push(i) assert queue.peek()", "None assert lru_cache.get(3) == 33 assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack =", "─> None\" assert str(list_2) == \"1 ─> 2 ─> None\" assert str(list_3) ==", "Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, 
Node(7)))))))),", "equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy =", "4], [2, 1, 1, 1, 1], [2, 2, 3, 5, 5]): min_stack.push(data) max_stack.push(data)", "+ sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(), [x", "2 ─> None\" assert str(list_3) == \"1 ─> 2 ─> 3 ─> None\"", "list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy ==", "list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError", "benchmark in zip([5, 3, 1, 2, 4], [5, 3, 1, 1, 1]): mq.push(data)", "1, 1]): mq.push(data) assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark", "- i assert stack.peek() == 2 - i assert stack.pop() == 2 -", "assert str(list_2) == \"1 ─> 2 ─> None\" assert str(list_3) == \"1 ─>", "== \"Peek head at an empty SinglyLinkedList\" else: assert False list_1.peek_head() == 1", "data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0", "= list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy", "in range(3): assert len(stack) == 3 - i assert stack.peek() == 2 -", "assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, 
list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head", "in zip([1, 1, 1, 2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min()", "lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 # 1 3", "list_1 assert list_2_copy == list_2 assert list_3_copy == list_3 assert list_0.copy() == list_0_copy", "assert list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head() == 0 except", "1, 1, 2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data", "in range(3): assert len(queue) == i queue.push(i) assert queue.peek() == 0 for i", "[]) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3])", "class Node: def __init__(self, v=None, n=None): self.v = v self.n = n def", "data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\",", "1 list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy", "MaxStack() for data, min_data, max_data in zip([2, 1, 3, 5, 4], [2, 1,", "1, 1, 1]): mq.push(data) assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for data,", "Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))),", "min_data assert max_stack.get_max() == max_data for min_data, max_data in zip([1, 1, 1, 2],", "data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy", "range(3): assert len(queue) == i queue.push(i) assert queue.peek() == 0 for i in", "list_3_reverse try: 
list_0.peek_head() == 0 except IndexError as e: assert e.args[0] == \"Peek", "== list_2 assert not list_2 == list_3 assert not list_3 == list_0 assert", "list_0.pop_head() except IndexError as e: assert e.args[0] == \"Pop head from an empty", "[x for x in range(i)] + [x for x in range(len(list_orig) - 1,", "3 4 1 lru_cache.put(key=5, value=5) # 5 3 4 (no 1) assert lru_cache.get(1)", "5, 4], [2, 1, 1, 1, 1], [2, 2, 3, 5, 5]): min_stack.push(data)", "n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy", "data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2", "list_3_copy == list_3 assert list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy()", "3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data", "list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\",", "range(i, -1, -1)] + [x for x in range(i + 1, len(list_orig))]) sublist_length", "assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy", "list_orig_copy.to_array(), [x for x in range(start)] + [x for x in range(end, start", "== \"1 ─> None\" assert str(list_2) == \"1 ─> 2 ─> None\" assert", "list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3", "for x in range(start)] + [x for x in range(end, start - 1,", "// 2 if sublist_length > 0 and i <= len(list_orig) - sublist_length: start,", 
"SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) == list_0 def", "queue.peek() == i assert queue.pop() == i def test_stack(): stack = Stack() for", "list_3 assert not list_3 == list_0 assert str(list_0) == \"None\" assert str(list_1) ==", "i for i in range(3): assert len(stack) == 3 - i assert stack.peek()", "range(i)] + [x for x in range(len(list_orig) - 1, i - 1, -1)])", "list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] + [x for", "list_1 assert not list_1 == list_2 assert not list_2 == list_3 assert not", "data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse", "SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy =", "list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "i <= len(list_orig) - sublist_length: start, end = i, i + sublist_length -", "3, 1, 2, 4], [5, 5, 5, 5, 5]): mq.push(data) assert mq.peek() ==", "data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\",", "list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)]", "None\" assert str(list_2) == \"1 ─> 2 ─> None\" assert str(list_3) == \"1", "next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy == list_1 assert list_2_copy == list_2", "+ [x for x in range(end, start - 1, -1)] + [x for", "from ezcode.list.lru_cache import LRUCache from fixture.utils import 
equal_list class Node: def __init__(self, v=None,", "end = i, i + sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end)", "== i for i in range(3): assert len(stack) == 3 - i assert", "1 # 1 3 2 lru_cache.put(key=4, value=4) # 4 1 3 (no 2)", "SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4,", "try: list_0.peek_head() == 0 except IndexError as e: assert e.args[0] == \"Peek head", "list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head()", "v=None, n=None): self.v = v self.n = n def __repr__(self): return f\"Node({self.v})\" def", "next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\",", "-1, -1)] + [x for x in range(i + 1, len(list_orig))]) sublist_length =", "3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head() == list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except", "list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head))", "in range(end, start - 1, -1)] + [x for x in range(end +", "2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data assert max_stack.get_max()", "1, 3, 5, 4], [2, 1, 1, 1, 1], [2, 2, 3, 5,", "def __init__(self, v=None, n=None): self.v = v self.n = n def __repr__(self): return", "<= len(list_orig) 
- sublist_length: start, end = i, i + sublist_length - 1", "# 1 3 2 lru_cache.put(key=4, value=4) # 4 1 3 (no 2) assert", "1, 2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() == min_data assert", "2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy =", "list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy ==", "zip([1, 1, 1, 2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() ==", "mq.push(data) assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark in zip([5,", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2,", "data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))),", "= SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy ==", "assert not list_2 == list_3 assert not list_3 == list_0 assert str(list_0) ==", "equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2, 3]) list_0_reverse_copy =", "Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "== i def test_stack(): stack = Stack() for i in range(3): assert len(stack)", "# 3 4 1 lru_cache.put(key=5, value=5) # 5 3 4 (no 1) 
assert", "4 # 4 1 3 lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5, value=5)", "max_stack.pop() assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data def test_monontonic_queue(): mq =", "next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\",", "max_stack.get_max() == max_data for min_data, max_data in zip([1, 1, 1, 2], [5, 3,", "for i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x", "== list_3 assert list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy() ==", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3,", "test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse =", "1, len(list_orig))]) sublist_length = len(list_orig) // 2 if sublist_length > 0 and i", "i assert stack.peek() == 2 - i assert stack.pop() == 2 - i", "Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2,", "assert not list_1 == list_2 assert not list_2 == list_3 assert not list_3", "i, i + sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list(", "2, 3, 5, 5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data assert max_stack.get_max() ==", "lru_cache.put(key=5, value=5) # 5 3 4 (no 1) assert lru_cache.get(1) is None assert", "from ezcode.list.queue import Queue, MonotonicQueue from 
ezcode.list.lru_cache import LRUCache from fixture.utils import equal_list", "1 3 lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5, value=5) # 5 3", "stack.pop() == 2 - i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is", "list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for x", "lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1", "assert list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse assert", "assert not list_0 == list_1 assert not list_1 == list_2 assert not list_2", "IndexError as e: assert e.args[0] == \"Pop head from an empty SinglyLinkedList\" else:", "min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data for min_data, max_data", "== 3 - i assert queue.peek() == i assert queue.pop() == i def", "test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3,", "- 1, -1)] + [x for x in range(end + 1, len(list_orig))] )", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"),", "i def test_stack(): stack = Stack() for i in range(3): assert len(stack) ==", "min_stack.get_min() == min_data assert max_stack.get_max() == max_data for min_data, max_data in zip([1, 1,", "3 - i assert queue.peek() == i assert queue.pop() == i def test_stack():", "─> None\" assert str(list_3) == \"1 ─> 2 ─> 3 ─> None\" assert", "─> 2 ─> 3 ─> None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert", "str(list_1) == \"1 ─> None\" assert str(list_2) == \"1 ─> 2 ─> 
None\"", "4], [5, 3, 1, 1, 1]): mq.push(data) assert mq.peek() == benchmark mq =", "list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\")", "== list_0 assert str(list_0) == \"None\" assert str(list_1) == \"1 ─> None\" assert", "next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 =", "in range(len(list_orig) - 1, i - 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert", "Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy", "def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy =", "value=33) # 3 4 1 lru_cache.put(key=5, value=5) # 5 3 4 (no 1)", "== \"None\" assert str(list_1) == \"1 ─> None\" assert str(list_2) == \"1 ─>", "assert stack.pop() == 2 - i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert lru_cache.get(1)", "2 ─> 3 ─> None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(),", "list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert", "not list_1 == list_2 assert not list_2 == list_3 assert not list_3 ==", "test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3, 1, 2, 4],", "data, benchmark in zip([5, 3, 1, 2, 4], [5, 3, 1, 1, 1]):", "next_name=\"n\") list_3 = 
SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))),", "list_2_copy assert list_3.copy() == list_3_copy assert not list_0 == list_1 assert not list_1", "SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\")", "[x for x in range(end, start - 1, -1)] + [x for x", "== min_data assert max_stack.get_max() == max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data,", "list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head =", "mq = MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3, 1, 2, 4], [5,", "next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))),", "= i, i + sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert", "import SinglyLinkedList from ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue import Queue, MonotonicQueue", "def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3, 1, 2,", "try: list_0.pop_head() except IndexError as e: assert e.args[0] == \"Pop head from an", "i in range(3): assert len(stack) == 3 - i assert stack.peek() == 2", "list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) 
list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head))", "5 3 4 (no 1) assert lru_cache.get(1) is None assert lru_cache.get(3) == 33", "list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert list_2_copy ==", "for i in range(3): assert len(stack) == i stack.push(i) assert stack.peek() == i", "list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError as e: assert e.args[0] == \"Pop", "max_data in zip([1, 1, 1, 2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop() assert", "assert queue.peek() == i assert queue.pop() == i def test_stack(): stack = Stack()", "def test_stack(): stack = Stack() for i in range(3): assert len(stack) == i", "2 lru_cache.put(key=4, value=4) # 4 1 3 (no 2) assert lru_cache.get(2) is None", "0 for i in range(3): assert len(queue) == 3 - i assert queue.peek()", "assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1,", "list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse", "\"None\" assert str(list_1) == \"1 ─> None\" assert str(list_2) == \"1 ─> 2", "list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse", "2) assert lru_cache.get(2) is None assert lru_cache.get(4) == 4 # 4 1 3", "= SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy", "= 
list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy", "len(list_orig))] ) def test_queue(): queue = Queue() for i in range(3): assert len(queue)", "assert max_stack.get_max() == max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark in", "i queue.push(i) assert queue.peek() == 0 for i in range(3): assert len(queue) ==", "== list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy ==", "assert False list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head() ==", "import equal_list class Node: def __init__(self, v=None, n=None): self.v = v self.n =", "list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head", "lru_cache.get(3) == 33 assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack = MinStack() max_stack", "== list_1 assert list_2_copy == list_2 assert list_3_copy == list_3 assert list_0.copy() ==", "None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 # 1", "in range(i, -1, -1)] + [x for x in range(i + 1, len(list_orig))])", "next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2,", 
"list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1),", "next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2,", "Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\",", "min_data, max_data in zip([2, 1, 3, 5, 4], [2, 1, 1, 1, 1],", "= SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\")", "assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for x in range(len(list_orig)", "from fixture.utils import equal_list class Node: def __init__(self, v=None, n=None): self.v = v", "assert stack.peek() == i for i in range(3): assert len(stack) == 3 -", "list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy()", ") def test_queue(): queue = Queue() for i in range(3): assert len(queue) ==", "data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\")", "data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ]", "0 except IndexError as e: assert e.args[0] == \"Peek head at an empty", "assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True)", "5, 5]): min_stack.push(data) max_stack.push(data) assert 
min_stack.get_min() == min_data assert max_stack.get_max() == max_data for", "\"1 ─> None\" assert str(list_2) == \"1 ─> 2 ─> None\" assert str(list_3)", "list_1_copy list_1_copy.pop_head() == list_0_copy try: list_0.pop_head() except IndexError as e: assert e.args[0] ==", "assert lru_cache.get(3) == 33 assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack = MinStack()", "Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in", "for x in range(end, start - 1, -1)] + [x for x in", "sublist_length: start, end = i, i + sublist_length - 1 list_orig_copy = list_orig.copy()", "range(3): assert len(stack) == i stack.push(i) assert stack.peek() == i for i in", "assert stack.peek() == 2 - i assert stack.pop() == 2 - i def", "= list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse()", "mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3, 1,", "assert lru_cache.get(2) is None assert lru_cache.get(4) == 4 # 4 1 3 lru_cache.put(key=3,", "data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy == list_1 assert list_2_copy ==", "Node(6, Node(7)))))))), data_name=\"v\", next_name=\"n\"), ] for list_orig in lists: list_orig.print() for i in", "+ 1, len(list_orig))]) sublist_length = len(list_orig) // 2 if sublist_length > 0 and", "def test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack() for data, min_data, max_data in", "def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse", "as e: assert e.args[0] == \"Pop head from an empty SinglyLinkedList\" else: assert", "len(queue) == i queue.push(i) assert queue.peek() == 0 for i in 
range(3): assert", "max_stack.get_max() == max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark in zip([5,", "5]): min_stack.push(data) max_stack.push(data) assert min_stack.get_min() == min_data assert max_stack.get_max() == max_data for min_data,", "data_name=\"v\", next_name=\"n\") list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2,", "equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(), [1, 2,", "= len(list_orig) // 2 if sublist_length > 0 and i <= len(list_orig) -", "list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(), [x for x in range(start)] + [x", "-1)] + [x for x in range(end + 1, len(list_orig))] ) def test_queue():", "+ 1, len(list_orig))] ) def test_queue(): queue = Queue() for i in range(3):", "list_0 assert list_1_copy == list_1 assert list_2_copy == list_2 assert list_3_copy == list_3", "list_2 assert list_3_copy == list_3 assert list_0.copy() == list_0_copy assert list_1.copy() == list_1_copy", "== \"1 ─> 2 ─> None\" assert str(list_3) == \"1 ─> 2 ─>", "4 1 3 (no 2) assert lru_cache.get(2) is None assert lru_cache.get(4) == 4", "- 1, i - 1, -1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x", "MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3, 1, 2, 4], [5, 3, 1,", "assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists", "== list_2_copy assert list_3.copy() == list_3_copy assert not list_0 == list_1 assert not", "= SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = 
SinglyLinkedList(head=Node(1), data_name=\"v\",", "list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head))", "== list_0 def test_reverse_sublist(): lists = [ SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\",", "list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head =", "stack = Stack() for i in range(3): assert len(stack) == i stack.push(i) assert", "assert list_3_copy == list_3_reverse try: list_0.peek_head() == 0 except IndexError as e: assert", "i + sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(),", "list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy", "assert list_3.copy() == list_3_copy assert not list_0 == list_1 assert not list_1 ==", "len(queue) == 3 - i assert queue.peek() == i assert queue.pop() == i", "for i in range(3): assert len(stack) == 3 - i assert stack.peek() ==", "is None assert lru_cache.get(3) == 33 assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack", "== 4 # 4 1 3 lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5,", "= MinStack() max_stack = MaxStack() for data, min_data, max_data in zip([2, 1, 3,", "next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3,", "if sublist_length > 0 and i <= len(list_orig) - sublist_length: start, end =", "3, 1, 1, 1]): mq.push(data) assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for", "lists = [ 
SinglyLinkedList(head=Node(0), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1)), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2))),", "mq = MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3, 1, 2, 4], [5,", "SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"),", "Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1,", "== i stack.push(i) assert stack.peek() == i for i in range(3): assert len(stack)", "value=3) assert lru_cache.get(1) == 1 # 1 3 2 lru_cache.put(key=4, value=4) # 4", "= SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\",", "== list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head() ==", "list_3.copy() == list_3_copy assert not list_0 == list_1 assert not list_1 == list_2", "list_1 == list_2 assert not list_2 == list_3 assert not list_3 == list_0", "for x in range(end + 1, len(list_orig))] ) def test_queue(): queue = Queue()", "data_name=\"v\", next_name=\"n\"), ] for list_orig in lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy", "LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1)", "value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 # 1 3 2 lru_cache.put(key=4, value=4)", "list_0 assert str(list_0) == \"None\" assert str(list_1) == \"1 ─> None\" assert str(list_2)", "Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, 
Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2,", "assert list_1_copy == list_1 assert list_2_copy == list_2 assert list_3_copy == list_3 assert", "None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert equal_list(list_3.to_array(),", "list_1.copy() == list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy assert not", "3, 1, 2, 4], [5, 3, 1, 1, 1]): mq.push(data) assert mq.peek() ==", "== 0 for i in range(3): assert len(queue) == 3 - i assert", "is None assert lru_cache.get(4) == 4 # 4 1 3 lru_cache.put(key=3, value=33) #", "1]): mq.push(data) assert mq.peek() == benchmark mq = MonotonicQueue(is_increasing=False) for data, benchmark in", "Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name=\"v\",", "SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))),", "# 5 3 4 (no 1) assert lru_cache.get(1) is None assert lru_cache.get(3) ==", "Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse =", "for x in range(i + 1, len(list_orig))]) sublist_length = len(list_orig) // 2 if", "list_1.peek_head() == 1 list_2.peek_head() == 2 list_3.peek_head() == 3 list_3_copy.pop_head() == list_2_copy list_2_copy.pop_head()", "list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert", "list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() 
list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert list_1_copy == list_1_reverse_copy", "-1)]) list_orig_copy = list_orig.copy() list_orig_copy.reverse(end_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1,", "list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse()", "[x for x in range(end + 1, len(list_orig))] ) def test_queue(): queue =", "[x for x in range(i + 1, len(list_orig))]) sublist_length = len(list_orig) // 2", "f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\")", "in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x for x in range(i)]", "Stack() for i in range(3): assert len(stack) == i stack.push(i) assert stack.peek() ==", "e.args[0] == \"Peek head at an empty SinglyLinkedList\" else: assert False list_1.peek_head() ==", "[1, 2, 3]) list_0_reverse_copy = list_0_reverse.copy() list_1_reverse_copy = list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy", "is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 #", "== list_1_copy assert list_2.copy() == list_2_copy assert list_3.copy() == list_3_copy assert not list_0", "assert lru_cache.get(1) == 1 # 1 3 2 lru_cache.put(key=4, value=4) # 4 1", "lru_cache = LRUCache(capacity=3) assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3)", "assert e.args[0] == \"Pop head from an empty SinglyLinkedList\" else: assert False list_3.delete(set([2,", 
"data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2,", "Node(5, Node(6))))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name=\"v\",", "Node(3))), data_name=\"v\", next_name=\"n\") list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3,", "33 assert lru_cache.get(5) == 5 def test_min_max_stack(): min_stack = MinStack() max_stack = MaxStack()", "fixture.utils import equal_list class Node: def __init__(self, v=None, n=None): self.v = v self.n", "len(stack) == i stack.push(i) assert stack.peek() == i for i in range(3): assert", "list_orig in lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert", "= SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)),", "LRUCache from fixture.utils import equal_list class Node: def __init__(self, v=None, n=None): self.v =", "SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name=\"v\", next_name=\"n\") list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert", "─> 2 ─> None\" assert str(list_3) == \"1 ─> 2 ─> 3 ─>", "] for list_orig in lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy = list_orig.copy()", "== list_2 assert list_3_copy == list_3 assert list_0.copy() == list_0_copy assert list_1.copy() ==", "range(i + 1, len(list_orig))]) sublist_length = len(list_orig) // 2 if sublist_length > 0", "- 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( 
list_orig_copy.to_array(), [x for x", "range(3): assert len(queue) == 3 - i assert queue.peek() == i assert queue.pop()", "min_stack = MinStack() max_stack = MaxStack() for data, min_data, max_data in zip([2, 1,", "list_1_reverse assert list_2_copy == list_2_reverse assert list_3_copy == list_3_reverse try: list_0.peek_head() == 0", "for list_orig in lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i)", "list_0_copy == list_0_reverse_copy assert list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy assert list_3_copy", "i in range(3): assert len(stack) == i stack.push(i) assert stack.peek() == i for", "[x for x in range(len(list_orig) - 1, i - 1, -1)]) list_orig_copy =", "min_data, max_data in zip([1, 1, 1, 2], [5, 3, 2, 2]): min_stack.pop() max_stack.pop()", "lru_cache.get(1) is None assert lru_cache.get(3) == 33 assert lru_cache.get(5) == 5 def test_min_max_stack():", "2 - i assert stack.pop() == 2 - i def test_lru_cache(): lru_cache =", "start, end = i, i + sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start,", "len(stack) == 3 - i assert stack.peek() == 2 - i assert stack.pop()", "─> 3 ─> None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1,", "1 3 (no 2) assert lru_cache.get(2) is None assert lru_cache.get(4) == 4 #", "sublist_length - 1 list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=start, end_index=end) assert equal_list( list_orig_copy.to_array(), [x for", "== max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for data, benchmark in zip([5, 3,", "list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse assert list_2_copy == list_2_reverse", "list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy assert 
list_1_copy == list_1_reverse_copy assert list_2_copy == list_2_reverse_copy", "IndexError as e: assert e.args[0] == \"Peek head at an empty SinglyLinkedList\" else:", "for x in range(i, -1, -1)] + [x for x in range(i +", "Node(2, Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy == list_1 assert", "list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1,", "MonotonicQueue(is_increasing=False) for data, benchmark in zip([5, 3, 1, 2, 4], [5, 5, 5,", "not list_2 == list_3 assert not list_3 == list_0 assert str(list_0) == \"None\"", "else: assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist():", "empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) == list_1 list_2.delete(set([1, 2])) == list_0", "x in range(end, start - 1, -1)] + [x for x in range(end", "return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\",", "lists: list_orig.print() for i in range(len(list_orig)): list_orig_copy = list_orig.copy() list_orig_copy.reverse(start_index=i) assert equal_list(list_orig_copy.to_array(), [x", "list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head,", "2, 4], [5, 3, 1, 1, 1]): mq.push(data) assert mq.peek() == benchmark mq", "2, 2]): min_stack.pop() max_stack.pop() assert min_stack.get_min() 
== min_data assert max_stack.get_max() == max_data def", "1, 2, 4], [5, 5, 5, 5, 5]): mq.push(data) assert mq.peek() == benchmark", "at an empty SinglyLinkedList\" else: assert False list_1.peek_head() == 1 list_2.peek_head() == 2", "import LRUCache from fixture.utils import equal_list class Node: def __init__(self, v=None, n=None): self.v", "lru_cache.put(key=3, value=3) assert lru_cache.get(1) == 1 # 1 3 2 lru_cache.put(key=4, value=4) #", "assert equal_list( list_orig_copy.to_array(), [x for x in range(start)] + [x for x in", "i assert queue.pop() == i def test_stack(): stack = Stack() for i in", "# 4 1 3 lru_cache.put(key=3, value=33) # 3 4 1 lru_cache.put(key=5, value=5) #", "benchmark in zip([5, 3, 1, 2, 4], [5, 5, 5, 5, 5]): mq.push(data)", "lru_cache.get(4) == 4 # 4 1 3 lru_cache.put(key=3, value=33) # 3 4 1", "str(list_2) == \"1 ─> 2 ─> None\" assert str(list_3) == \"1 ─> 2", "list_0_reverse = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_1 = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1),", "next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name=\"v\", next_name=\"n\")", "not list_0 == list_1 assert not list_1 == list_2 assert not list_2 ==", "i assert stack.pop() == 2 - i def test_lru_cache(): lru_cache = LRUCache(capacity=3) assert", "in zip([5, 3, 1, 2, 4], [5, 5, 5, 5, 5]): mq.push(data) assert", "next_name=\"n\") list_1_copy = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_1_reverse = SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 =", "\"Pop head from an empty SinglyLinkedList\" else: assert False list_3.delete(set([2, 3])) == list_1", "== 1 # 1 3 2 lru_cache.put(key=4, value=4) # 4 1 3 (no", "3])) == list_1 list_2.delete(set([1, 2])) == list_0 def test_reverse_sublist(): lists = [ 
SinglyLinkedList(head=Node(0),", "in zip([2, 1, 3, 5, 4], [2, 1, 1, 1, 1], [2, 2,", "list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head)) list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head)) list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert", "─> None\" assert equal_list(list_0.to_array(), []) assert equal_list(list_1.to_array(), [1]) assert equal_list(list_2.to_array(), [1, 2]) assert", "from ezcode.list.linked_list import SinglyLinkedList from ezcode.list.stack import Stack, MinStack, MaxStack from ezcode.list.queue import", "test_queue(): queue = Queue() for i in range(3): assert len(queue) == i queue.push(i)", "for x in range(len(list_orig) - 1, i - 1, -1)]) list_orig_copy = list_orig.copy()", "[x for x in range(i, -1, -1)] + [x for x in range(i", "= SinglyLinkedList(head=Node(1), data_name=\"v\", next_name=\"n\") list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name=\"v\", next_name=\"n\") list_2_copy = SinglyLinkedList(head=Node(1,", "== list_2_reverse_copy assert list_3_copy == list_3_reverse_copy list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head)) list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head,", "Node(1, Node(2))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2,", "__repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None,", "Node(1))), data_name=\"v\", next_name=\"n\") assert list_0_copy == list_0 assert list_1_copy == list_1 assert list_2_copy", 
"+ [x for x in range(i + 1, len(list_orig))]) sublist_length = len(list_orig) //", "in range(3): assert len(stack) == i stack.push(i) assert stack.peek() == i for i", "assert len(stack) == 3 - i assert stack.peek() == 2 - i assert", "= n def __repr__(self): return f\"Node({self.v})\" def test_singly_linked_list_basics(): list_0 = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\")", "= list_1_reverse.copy() list_2_reverse_copy = list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert", "= list_2_reverse.copy() list_3_reverse_copy = list_3_reverse.copy() list_0_reverse_copy.reverse() list_1_reverse_copy.reverse() list_2_reverse_copy.reverse() list_3_reverse_copy.reverse() assert list_0_copy == list_0_reverse_copy", "e: assert e.args[0] == \"Pop head from an empty SinglyLinkedList\" else: assert False", "zip([2, 1, 3, 5, 4], [2, 1, 1, 1, 1], [2, 2, 3,", "except IndexError as e: assert e.args[0] == \"Peek head at an empty SinglyLinkedList\"", "1, 2, 4], [5, 3, 1, 1, 1]): mq.push(data) assert mq.peek() == benchmark", "Node(1, Node(2, Node(3)))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name=\"v\", next_name=\"n\"), SinglyLinkedList(head=Node(0,", "assert lru_cache.get(1) is None lru_cache.put(key=1, value=1) lru_cache.put(key=2, value=2) lru_cache.put(key=3, value=3) assert lru_cache.get(1) ==", "list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head)) assert list_0_copy == list_0_reverse assert list_1_copy == list_1_reverse assert", "1, -1)] + [x for x in range(end + 1, len(list_orig))] ) def", "SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_copy = SinglyLinkedList(head=None, data_name=\"v\", next_name=\"n\") list_0_reverse = SinglyLinkedList(head=None, 
data_name=\"v\", next_name=\"n\")", "# 4 1 3 (no 2) assert lru_cache.get(2) is None assert lru_cache.get(4) ==", "min_stack.get_min() == min_data assert max_stack.get_max() == max_data def test_monontonic_queue(): mq = MonotonicQueue(is_increasing=True) for", "-1)] + [x for x in range(i + 1, len(list_orig))]) sublist_length = len(list_orig)" ]
[ "ttk: int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO,", "( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"),", "ttk if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset =", ".encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\",", "raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, )", "= os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not", "from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from", "DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, )", "Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar", "wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int = 30", "from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files as 
update_infantry_weapons_data_files, ) from", "typing import List, Optional import altair from ps2_census.enums import PlayerState from ps2_analysis.enums import", "and {control_time}ms control time simulation\" ): ttk: int timed_out_ratio: float ttk, timed_out_ratio =", "update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO)", "\"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode(", "ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files as", "as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID:", "0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10 ): with", "InfantryWeapon = next(x for x in infantry_weapons if x.item_id == 43) fm: FireMode", "= fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int = 30 burst_length: int for", "timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\",", "else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset)", "if x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks:", "control_time: int for control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length *", "from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon 
logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\"", "order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK", "), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by burst length and control time", "range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10 ):", "scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by burst length and", "control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time,", "ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate", "= next(x for x in infantry_weapons if x.item_id == 43) fm: FireMode =", "from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files", "list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon = next(x for x", "import os from typing import List, Optional import altair from ps2_census.enums import PlayerState", "infantry_weapons if x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING]", "import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str 
= \"../datafiles\" if not", ") from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] =", "List[dict] = [] distance: int = 30 burst_length: int for burst_length in range(0,", "control time simulation\" ): ttk: int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance,", "\"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio < 0.20 else", "float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append(", "next(x for x in infantry_weapons if x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1]", "generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str =", "sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name}", "timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\":", "ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY:", "= \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID 
envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,", "logging import os from typing import List, Optional import altair from ps2_census.enums import", "FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int", "with CodeTimer( f\"{burst_length} length and {control_time}ms control time simulation\" ): ttk: int timed_out_ratio:", "= wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int =", "in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time: int for control_time in", "\"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, }", "by burst length and control time at {distance}m\", height=900, width=900, ) .interactive() )", "from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files", "DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files(", "altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\"", "recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\":", "\"ttk\": ttk if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset", "color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by 
burst", "\"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, )", "import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import", "): ttk: int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length,", "from ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from", "cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10 ): with CodeTimer(", "altair from ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire", "from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\")", "tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by burst length and control time at", "ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int = 30 burst_length: int", "int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True,", "\"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties(", "import ( update_data_files as 
update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import", "import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import", "os from typing import List, Optional import altair from ps2_census.enums import PlayerState from", "CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons", "= [] distance: int = 30 burst_length: int for burst_length in range(0, int(round(fm.max_consecutive_shots", "CodeTimer( f\"{burst_length} length and {control_time}ms control time simulation\" ): ttk: int timed_out_ratio: float", "envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon]", "update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer from", "= altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"),", "logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID: raise", "control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio < 0.20 else -1,", "SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,", 
"directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) )", "range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time: int for control_time in range(", "burst length and control time at {distance}m\", height=900, width=900, ) .interactive() ) chart.save(\"bursts_ttk_simulation.html\")", "+ 10, 10 ): with CodeTimer( f\"{burst_length} length and {control_time}ms control time simulation\"", "} ) dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y(", "found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list(", "update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry", "1): control_time: int for control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length", "< 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart =", "* burst_length * 2) + 10, 10 ): with CodeTimer( f\"{burst_length} length and", "= 30 burst_length: int for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1,", "f\"{burst_length} length and {control_time}ms control time simulation\" ): ttk: int timed_out_ratio: float ttk,", "int = 30 burst_length: int for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) +", "import FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( 
update_data_files as update_infantry_weapons_data_files,", "int for control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2)", "= ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\",", "ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files", "+ cof.bloom * burst_length * 2) + 10, 10 ): with CodeTimer( f\"{burst_length}", "time simulation\" ): ttk: int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500,", "in infantry_weapons if x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire =", "+ fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\":", "/ 4)) + 1, 1): control_time: int for control_time in range( 0, cof.recover_time(cof.min_cof_angle()", "timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart", "as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files", "\"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio <", "str = \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY,", "+ 1, 1): control_time: int for control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom", "\"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} 
TTK by burst length", "TTK by burst length and control time at {distance}m\", height=900, width=900, ) .interactive()", "List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon = next(x", "2) + 10, 10 ): with CodeTimer( f\"{burst_length} length and {control_time}ms control time", "int for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time: int", "43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = []", "fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int = 30 burst_length: int for burst_length", ") rttks.append( { \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk", "title=f\"{wp.name} TTK by burst length and control time at {distance}m\", height=900, width=900, )", "aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length,", "): with CodeTimer( f\"{burst_length} length and {control_time}ms control time simulation\" ): ttk: int", "( update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer", "SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID", "1, 1): control_time: int for control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom *", "update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: 
List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY)", "auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\":", "from ps2_analysis.weapons.infantry.data_files import ( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from", "from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, ) from", "4)) + 1, 1): control_time: int for control_time in range( 0, cof.recover_time(cof.min_cof_angle() +", "import logging import os from typing import List, Optional import altair from ps2_census.enums", "ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as", "sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by burst length and control", "ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( {", "ps2_analysis.weapons.infantry.data_files import ( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon", "update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import", "weapons\") wp: InfantryWeapon = next(x for x in infantry_weapons if x.item_id == 43)", "{len(infantry_weapons)} infantry weapons\") wp: 
InfantryWeapon = next(x for x in infantry_weapons if x.item_id", "not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY,", "-1, \"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect()", "), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by", "cof.bloom * burst_length * 2) + 10, 10 ): with CodeTimer( f\"{burst_length} length", "from typing import List, Optional import altair from ps2_census.enums import PlayerState from ps2_analysis.enums", ") from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import (", "in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10", "ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode", "timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, )", "ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode", "y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"], )", "if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\") 
update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files(", "PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import (", "x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ), tooltip=[\"ttk:Q\", \"timed_out_ratio:Q\"],", "distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\": control_time", "import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files,", "simulation\" ): ttk: int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time,", "for x in infantry_weapons if x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof:", "import PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire from ps2_analysis.fire_groups.data_files import", "distance: int = 30 burst_length: int for burst_length in range(0, int(round(fm.max_consecutive_shots / 4))", ") print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon = next(x for x in infantry_weapons", "int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time: int for control_time in range( 0,", "burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time: int for control_time", ".mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", 
order=\"descending\"), ), color=altair.Color( \"ttk:Q\", scale=altair.Scale(scheme=\"plasma\"), sort=\"descending\" ),", "length and {control_time}ms control time simulation\" ): ttk: int timed_out_ratio: float ttk, timed_out_ratio", "for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time: int for", "import ( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import", "10, 10 ): with CodeTimer( f\"{burst_length} length and {control_time}ms control time simulation\" ):", "chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ), color=altair.Color(", "distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio < 0.20", ") dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\",", "infantry weapons\") wp: InfantryWeapon = next(x for x in infantry_weapons if x.item_id ==", "( update_data_files as update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon", "altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\", order=\"descending\"), ),", "fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio,", "== 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] =", "burst_length: int for 
burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1): control_time:", ") .properties( title=f\"{wp.name} TTK by burst length and control time at {distance}m\", height=900,", "dataset = altair.Data(values=rttks) chart = ( altair.Chart(dataset) .mark_rect() .encode( x=\"burst_length:O\", y=altair.Y( \"control_time:O\", sort=altair.EncodingSortField(\"control_time\",", "rttks.append( { \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if", "\"timed_out_ratio:Q\"], ) .properties( title=f\"{wp.name} TTK by burst length and control time at {distance}m\",", "= list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon = next(x for", ") infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon", "10 ): with CodeTimer( f\"{burst_length} length and {control_time}ms control time simulation\" ): ttk:", "{ \"distance\": distance, \"control_time\": control_time + fm.fire_timing.refire_time, \"burst_length\": burst_length, \"ttk\": ttk if timed_out_ratio", "ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if", "not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] =", "30 burst_length: int for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1):", "import List, Optional import altair from ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation", "wp: 
InfantryWeapon = next(x for x in infantry_weapons if x.item_id == 43) fm:", "print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon = next(x for x in infantry_weapons if", "= fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance,", "fm.real_time_to_kill( distance=distance, runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\":", ") update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)}", "cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance: int = 30 burst_length:", "os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID: raise ValueError(\"CENSUS_SERVICE_ID envvar not found\")", "for control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) +", "runs=500, control_time=control_time, auto_burst_length=burst_length, aim_location=DamageLocation.TORSO, recoil_compensation=True, ) rttks.append( { \"distance\": distance, \"control_time\": control_time +", "x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict]", "service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp:", "rttks: List[dict] = [] distance: int = 30 burst_length: int for burst_length in", "burst_length * 2) + 10, 10 ): 
with CodeTimer( f\"{burst_length} length and {control_time}ms", ".properties( title=f\"{wp.name} TTK by burst length and control time at {distance}m\", height=900, width=900,", "import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str", "[] distance: int = 30 burst_length: int for burst_length in range(0, int(round(fm.max_consecutive_shots /", "List, Optional import altair from ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation from", "0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks) chart = (", "burst_length, \"ttk\": ttk if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } )", "if timed_out_ratio < 0.20 else -1, \"timed_out_ratio\": timed_out_ratio, } ) dataset = altair.Data(values=rttks)", "InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str] = os.environ.get(\"CENSUS_SERVICE_ID\") DATAFILES_DIRECTORY: str = \"../datafiles\" if not SERVICE_ID:", "infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon =", "ValueError(\"CENSUS_SERVICE_ID envvar not found\") update_fire_groups_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons:", "generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\") wp: InfantryWeapon = next(x for x in", "fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING] rttks: List[dict] = [] distance:", "Optional import altair from 
ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire", "import altair from ps2_census.enums import PlayerState from ps2_analysis.enums import DamageLocation from ps2_analysis.fire_groups.cone_of_fire import", "x in infantry_weapons if x.item_id == 43) fm: FireMode = wp.fire_groups[0].fire_modes[1] cof: ConeOfFire", "* 2) + 10, 10 ): with CodeTimer( f\"{burst_length} length and {control_time}ms control", "ps2_analysis.fire_groups.data_files import ( update_data_files as update_fire_groups_data_files, ) from ps2_analysis.fire_groups.fire_mode import FireMode from ps2_analysis.utils", "FireMode from ps2_analysis.utils import CodeTimer from ps2_analysis.weapons.infantry.data_files import ( update_data_files as update_infantry_weapons_data_files, )", "directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated {len(infantry_weapons)} infantry weapons\")", "control_time in range( 0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10,", "{control_time}ms control time simulation\" ): ttk: int timed_out_ratio: float ttk, timed_out_ratio = fm.real_time_to_kill(", "service_id=SERVICE_ID, ) update_infantry_weapons_data_files( directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID, ) infantry_weapons: List[InfantryWeapon] = list( generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY) ) print(f\"Generated", "update_infantry_weapons_data_files, ) from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon logging.basicConfig(level=logging.INFO) SERVICE_ID: Optional[str]" ]
[ "DRF from rest_framework.viewsets import ModelViewSet from rest_framework import permissions from rest_framework.permissions import IsAuthenticated", "rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer # Model from trello.cards.models", "rest_framework import permissions from rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer", "rest_framework.viewsets import ModelViewSet from rest_framework import permissions from rest_framework.permissions import IsAuthenticated # Serializer", "# Serializer from trello.cards.serializers import CardSerializer # Model from trello.cards.models import Card class", "permissions from rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer # Model", "from rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer # Model from", "CardSerializer # Model from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer queryset", "# Model from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer queryset =", "import IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer # Model from trello.cards.models import", "from rest_framework.viewsets import ModelViewSet from rest_framework import permissions from rest_framework.permissions import IsAuthenticated #", "Model from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer queryset = Car.objects.filter(list)", "IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer # Model from trello.cards.models import Card", "from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer queryset = Car.objects.filter(list) permission_classes", 
"import permissions from rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers import CardSerializer #", "ModelViewSet from rest_framework import permissions from rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers", "import CardSerializer # Model from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer", "import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer queryset = Car.objects.filter(list) permission_classes = [permissions.IsAuthenticated]", "from rest_framework import permissions from rest_framework.permissions import IsAuthenticated # Serializer from trello.cards.serializers import", "Serializer from trello.cards.serializers import CardSerializer # Model from trello.cards.models import Card class CardViewSet(ModelViewSet):", "from trello.cards.serializers import CardSerializer # Model from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class", "import ModelViewSet from rest_framework import permissions from rest_framework.permissions import IsAuthenticated # Serializer from", "# DRF from rest_framework.viewsets import ModelViewSet from rest_framework import permissions from rest_framework.permissions import", "trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class = CardSerializer queryset = Car.objects.filter(list) permission_classes =", "trello.cards.serializers import CardSerializer # Model from trello.cards.models import Card class CardViewSet(ModelViewSet): serializer_class =" ]
[ "%s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return", "% (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r)", "', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s' %", "args, input, output = self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params))", "node.options.items() if k not in ('subName', 'minCount', 'maxCount')] if 'subName' in node.options: buff.append('", "self.indent(opts)) else: if args == 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage:", "% self.indent(opts)) else: if args == 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else:", "'minCount', 'maxCount')] if 'subName' in node.options: buff.append(' ' + node.options['subName']) class MiniDumper(TreeDumper): def", "% self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n' + self.get_doc()", "args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return", "r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args))", "r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if", "in s.splitlines(): r.append(' ' + l) return '\\n'.join(r) def profile_usage(self, prof): opt =", "k, v in node.options.items() if k not in ('subName', 'minCount', 'maxCount')] if 'subName'", "'subName' in node.options: buff.append(' ' + node.options['subName']) class MiniDumper(TreeDumper): def _visit_root(self, node): return", "get_usage(self): return self.get_type_info() + 
'Description:\\n' + self.get_doc() def get_doc(self): return self.pc.doc @property def", "p in self.pc.profiles: opts, args, input, output = self.profile_usage(p) type_vars = ', '.join(map(lambda", "input, output = self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if", "type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s'", "v) for k, v in node.options.items() if k not in ('subName', 'minCount', 'maxCount')]", "self.pc.profiles: opts, args, input, output = self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' %", "TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg,", "get_doc(self): return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r =", "+ l) return '\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema)", "%s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s'", "s.splitlines(): r.append(' ' + l) return '\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema)", "% self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info() +", "self.get_doc() def get_doc(self): return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s):", "out = MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from caty.core.casm.cursor.dump import TreeDumper class", "s): r = [] for l in s.splitlines(): r.append(' ' + l) return", "opt, arg, inp, out from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def 
_process_option(self, node,", "def __init__(self, profile_container): self.pc = profile_container def get_type_info(self): r = [] for p", "opts, args, input, output = self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name,", "self.pc.doc.splitlines()[0].strip() def indent(self, s): r = [] for l in s.splitlines(): r.append(' '", "out from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options:", "'.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name,", "= [(k, v) for k, v in node.options.items() if k not in ('subName',", "get_type_info(self): r = [] for p in self.pc.profiles: opts, args, input, output =", "+ 'Description:\\n' + self.get_doc() def get_doc(self): return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip()", "r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output))", "def _process_option(self, node, buff): if node.options: items = [(k, v) for k, v", "node, buff): if node.options: items = [(k, v) for k, v in node.options.items()", "% (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args == 'null': r.append('Usage:", "== 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name,", "= profile_container def get_type_info(self): r = [] for p in self.pc.profiles: opts, args,", "r = [] for p in self.pc.profiles: opts, args, input, output = self.profile_usage(p)", "u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n' + self.get_doc() def get_doc(self): return self.pc.doc", "self.get_type_info() + 'Description:\\n' + self.get_doc() def get_doc(self): return self.pc.doc @property def title(self): return", "not in ('subName', 'minCount', 'maxCount')] if 'subName' 
in node.options: buff.append(' ' + node.options['subName'])", "= TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt,", "def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out", "'\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema)", "k not in ('subName', 'minCount', 'maxCount')] if 'subName' in node.options: buff.append(' ' +", "self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s", "' + l) return '\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg =", "items = [(k, v) for k, v in node.options.items() if k not in", "if opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts))", "in self.pc.profiles: opts, args, input, output = self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>'", "TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options: items = [(k, v)", "[] for l in s.splitlines(): r.append(' ' + l) return '\\n'.join(r) def profile_usage(self,", "= [] for l in s.splitlines(): r.append(' ' + l) return '\\n'.join(r) def", "= ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s'", "(self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args == 'null': r.append('Usage: %s%s'", "= MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper):", "__init__(self, profile_container): self.pc = profile_container def get_type_info(self): r = [] for 
p in", "profile_container): self.pc = profile_container def get_type_info(self): r = [] for p in self.pc.profiles:", "%s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args == 'null':", "arg, inp, out from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff):", "profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out =", "args == 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' %", "r = [] for l in s.splitlines(): r.append(' ' + l) return '\\n'.join(r)", "[(k, v) for k, v in node.options.items() if k not in ('subName', 'minCount',", "self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' %", "CommandUsage(object): def __init__(self, profile_container): self.pc = profile_container def get_type_info(self): r = [] for", "args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args == 'null': r.append('Usage: %s%s' % (self.pc.name,", "= [] for p in self.pc.profiles: opts, args, input, output = self.profile_usage(p) type_vars", "self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r = [] for", "else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' %", "def indent(self, s): r = [] for l in s.splitlines(): r.append(' ' +", "type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self):", "for l in s.splitlines(): r.append(' ' + l) return '\\n'.join(r) def profile_usage(self, prof):", "@property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r = [] for l", "inp, out from 
caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff): if", "utf-8 class CommandUsage(object): def __init__(self, profile_container): self.pc = profile_container def get_type_info(self): r =", "import TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options: items = [(k,", "prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema)", "#coding: utf-8 class CommandUsage(object): def __init__(self, profile_container): self.pc = profile_container def get_type_info(self): r", "if node.options: items = [(k, v) for k, v in node.options.items() if k", "def get_doc(self): return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r", "buff): if node.options: items = [(k, v) for k, v in node.options.items() if", "MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from caty.core.casm.cursor.dump import TreeDumper", "self.pc = profile_container def get_type_info(self): r = [] for p in self.pc.profiles: opts,", "arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg, inp,", "_process_option(self, node, buff): if node.options: items = [(k, v) for k, v in", "'maxCount')] if 'subName' in node.options: buff.append(' ' + node.options['subName']) class MiniDumper(TreeDumper): def _visit_root(self,", "opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else:", "type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args == 'null': r.append('Usage: %s%s' %", "r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n' + self.get_doc() def get_doc(self):", 
"ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from", "(self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input))", "for k, v in node.options.items() if k not in ('subName', 'minCount', 'maxCount')] if", "in ('subName', 'minCount', 'maxCount')] if 'subName' in node.options: buff.append(' ' + node.options['subName']) class", "v in node.options.items() if k not in ('subName', 'minCount', 'maxCount')] if 'subName' in", "%s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n')", "inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from caty.core.casm.cursor.dump", "x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s'", "profile_container def get_type_info(self): r = [] for p in self.pc.profiles: opts, args, input,", "def get_type_info(self): r = [] for p in self.pc.profiles: opts, args, input, output", "return u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n' + self.get_doc() def get_doc(self): return", "self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n'", "from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options: items", "title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r = [] for l in s.splitlines():", "MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def", "return self.get_type_info() + 'Description:\\n' + 
self.get_doc() def get_doc(self): return self.pc.doc @property def title(self):", "return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r = []", "r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info()", "l) return '\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp", "opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return", "in node.options: buff.append(' ' + node.options['subName']) class MiniDumper(TreeDumper): def _visit_root(self, node): return node.name", "for p in self.pc.profiles: opts, args, input, output = self.profile_usage(p) type_vars = ',", "x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars,", "r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n' +", "if args == 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s'", "(self.pc.name, type_vars, args)) r.append('Input:\\n%s' % self.indent(input)) r.append('Output:\\n%s' % self.indent(output)) r.append('\\n') return u'\\n'.join(r) def", "output = self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts:", "r.append('Option:\\n%s' % self.indent(opts)) else: if args == 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars))", "class ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options: items = [(k, v) for", "= ArgDumper().visit(prof.args_schema) inp = MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg, inp, out", 
"'Description:\\n' + self.get_doc() def get_doc(self): return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def", "= self.profile_usage(p) type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage:", "node.options: items = [(k, v) for k, v in node.options.items() if k not", "return '\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg = ArgDumper().visit(prof.args_schema) inp =", "self.indent(output)) r.append('\\n') return u'\\n'.join(r) def get_usage(self): return self.get_type_info() + 'Description:\\n' + self.get_doc() def", "r.append(' ' + l) return '\\n'.join(r) def profile_usage(self, prof): opt = TreeDumper().visit(prof.opts_schema) arg", "l in s.splitlines(): r.append(' ' + l) return '\\n'.join(r) def profile_usage(self, prof): opt", "if k not in ('subName', 'minCount', 'maxCount')] if 'subName' in node.options: buff.append(' '", "def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self, s): r = [] for l in", "= MiniDumper().visit(prof.in_schema) out = MiniDumper().visit(prof.out_schema) return opt, arg, inp, out from caty.core.casm.cursor.dump import", "%s%s OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args", "ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options: items = [(k, v) for k,", "return self.pc.doc.splitlines()[0].strip() def indent(self, s): r = [] for l in s.splitlines(): r.append('", "type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION", "return opt, arg, inp, out from caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def _process_option(self,", "'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars,", "class CommandUsage(object): def __init__(self, profile_container): self.pc = 
profile_container def get_type_info(self): r = []", "caty.core.casm.cursor.dump import TreeDumper class ArgDumper(TreeDumper): def _process_option(self, node, buff): if node.options: items =", "in node.options.items() if k not in ('subName', 'minCount', 'maxCount')] if 'subName' in node.options:", "% x.var_name, self.pc.type_params)) if opts: r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args))", "% (self.pc.name, type_vars)) else: r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args)) r.append('Input:\\n%s' %", "indent(self, s): r = [] for l in s.splitlines(): r.append(' ' + l)", "OPTION %s' % (self.pc.name, type_vars, args)) r.append('Option:\\n%s' % self.indent(opts)) else: if args ==", "[] for p in self.pc.profiles: opts, args, input, output = self.profile_usage(p) type_vars =", "+ self.get_doc() def get_doc(self): return self.pc.doc @property def title(self): return self.pc.doc.splitlines()[0].strip() def indent(self,", "if 'subName' in node.options: buff.append(' ' + node.options['subName']) class MiniDumper(TreeDumper): def _visit_root(self, node):", "else: if args == 'null': r.append('Usage: %s%s' % (self.pc.name, type_vars)) else: r.append('Usage: %s%s", "('subName', 'minCount', 'maxCount')] if 'subName' in node.options: buff.append(' ' + node.options['subName']) class MiniDumper(TreeDumper):", "def get_usage(self): return self.get_type_info() + 'Description:\\n' + self.get_doc() def get_doc(self): return self.pc.doc @property" ]
[ "TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage", "missing keys. self.assertIsNone(self.storage.get('key2')) # We should be able to write a 2nd key,", "still receive None for missing keys. self.assertIsNone(self.storage.get('key2')) # We should be able to", "existing key, self.storage.set('key2', 'some-new-value') # see the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value')", "write a 2nd key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still", "# and still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # We should be", "# When the file is empty, we should receive None for any key.", "key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier", "should be able to write a 2nd key, self.storage.set('key2', 'value2') # retrieve it,", "def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the file is empty, we should", "import NamedTemporaryFile from unittest import TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def", "'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key2')) # We should", "self.assertEqual(self.storage.get('key1'), 'value1') # and we should still receive None for missing keys. self.assertIsNone(self.storage.get('key3'))", "any key. self.assertIsNone(self.storage.get('key1')) # We should be able to write a key/value, self.storage.set('key1',", "still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # If we re-init the storage", "be able to update an existing key, self.storage.set('key2', 'some-new-value') # see the value", "and we should still receive None for missing keys. 
self.assertIsNone(self.storage.get('key3')) # If we", "receive None for missing keys. self.assertIsNone(self.storage.get('key2')) # We should be able to write", "earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing", "'some-new-value') # the other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we", "None for missing keys. self.assertIsNone(self.storage.get('key2')) # We should be able to write a", "retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'),", "mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename)", "to write a 2nd key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') #", "self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for", "# still retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still", "we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key3'))", "when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1')", "import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def", "for missing keys. 
self.assertIsNone(self.storage.get('key3')) # If we re-init the storage object with the", "storage object with the same file, self.storage = FileStorage(self.test_filename) # all keys should", "it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1')", "class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename)", "# We should be able to update an existing key, self.storage.set('key2', 'some-new-value') #", "NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the file", "retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') #", "keys. self.assertIsNone(self.storage.get('key3')) # If we re-init the storage object with the same file,", "be able to write a key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1')", "able to write a key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') #", "def test_storage(self): # When the file is empty, we should receive None for", "self.assertIsNone(self.storage.get('key2')) # We should be able to write a 2nd key, self.storage.set('key2', 'value2')", "empty, we should receive None for any key. self.assertIsNone(self.storage.get('key1')) # We should be", "retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys. 
self.assertIsNone(self.storage.get('key2'))", "self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier key", "import os from tempfile import NamedTemporaryFile from unittest import TestCase from mbq.client.storage import", "self.storage.set('key2', 'some-new-value') # see the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the", "tempfile import NamedTemporaryFile from unittest import TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase):", "self.assertIsNone(self.storage.get('key1')) # We should be able to write a key/value, self.storage.set('key1', 'value1') #", "# retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier key we wrote,", "self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # We", "still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # We should be able to", "# see the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values", "be able to write a 2nd key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'),", "key. self.assertIsNone(self.storage.get('key1')) # We should be able to write a key/value, self.storage.set('key1', 'value1')", "see the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should", "should still receive None for missing keys. 
self.assertIsNone(self.storage.get('key3')) # If we re-init the", "test_storage(self): # When the file is empty, we should receive None for any", "FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the file is empty, we", "object with the same file, self.storage = FileStorage(self.test_filename) # all keys should be", "# and still receive None for missing keys. self.assertIsNone(self.storage.get('key2')) # We should be", "and still receive None for missing keys. self.assertIsNone(self.storage.get('key2')) # We should be able", "able to write a 2nd key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2')", "change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should remain unchanged, self.assertEqual(self.storage.get('key1'),", "the same file, self.storage = FileStorage(self.test_filename) # all keys should be persisted. self.assertEqual(self.storage.get('key2'),", "os from tempfile import NamedTemporaryFile from unittest import TestCase from mbq.client.storage import FileStorage", "should be able to update an existing key, self.storage.set('key2', 'some-new-value') # see the", "'value1') # and we should still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) #", "self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When", "it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key2')) #", "key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None", "# and we should still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # If", "for missing keys. 
self.assertIsNone(self.storage.get('key3')) # We should be able to update an existing", "FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def", "self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key2')) # We", "from tempfile import NamedTemporaryFile from unittest import TestCase from mbq.client.storage import FileStorage class", "a key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive", "for any key. self.assertIsNone(self.storage.get('key1')) # We should be able to write a key/value,", "'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # We should", "= NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the", "receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # We should be able to update", "None for any key. self.assertIsNone(self.storage.get('key1')) # We should be able to write a", "receive None for any key. self.assertIsNone(self.storage.get('key1')) # We should be able to write", "the other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should still", "missing keys. 
self.assertIsNone(self.storage.get('key3')) # We should be able to update an existing key,", "# If we re-init the storage object with the same file, self.storage =", "self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and", "unittest import TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename =", "self.assertIsNone(self.storage.get('key3')) # If we re-init the storage object with the same file, self.storage", "We should be able to update an existing key, self.storage.set('key2', 'some-new-value') # see", "key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys.", "file is empty, we should receive None for any key. self.assertIsNone(self.storage.get('key1')) # We", "with the same file, self.storage = FileStorage(self.test_filename) # all keys should be persisted.", "key, self.storage.set('key2', 'some-new-value') # see the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') #", "self.assertIsNone(self.storage.get('key3')) # We should be able to update an existing key, self.storage.set('key2', 'some-new-value')", "still retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive", "unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should still receive None for missing keys.", "# the other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should", "None for missing keys. self.assertIsNone(self.storage.get('key3')) # We should be able to update an", "an existing key, self.storage.set('key2', 'some-new-value') # see the value change when retrieving, self.assertEqual(self.storage.get('key2'),", "= FileStorage(self.test_filename) # all keys should be persisted. 
self.assertEqual(self.storage.get('key2'), 'some-new-value') self.assertEqual(self.storage.get('key1'), 'value1') self.assertIsNone(self.storage.get('key3'))", "'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier key we", "should receive None for any key. self.assertIsNone(self.storage.get('key1')) # We should be able to", "tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the file is empty, we should receive", "a 2nd key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve", "'value2') # still retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and", "'some-new-value') # see the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other", "the file is empty, we should receive None for any key. self.assertIsNone(self.storage.get('key1')) #", "values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should still receive None", "missing keys. self.assertIsNone(self.storage.get('key3')) # If we re-init the storage object with the same", "should be able to write a key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'),", "FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self):", "If we re-init the storage object with the same file, self.storage = FileStorage(self.test_filename)", "2nd key, self.storage.set('key2', 'value2') # retrieve it, self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the", "wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) #", "and still receive None for missing keys. 
self.assertIsNone(self.storage.get('key3')) # We should be able", "value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should remain unchanged,", "self.storage = FileStorage(self.test_filename) # all keys should be persisted. self.assertEqual(self.storage.get('key2'), 'some-new-value') self.assertEqual(self.storage.get('key1'), 'value1')", "file, self.storage = FileStorage(self.test_filename) # all keys should be persisted. self.assertEqual(self.storage.get('key2'), 'some-new-value') self.assertEqual(self.storage.get('key1'),", "we should receive None for any key. self.assertIsNone(self.storage.get('key1')) # We should be able", "import TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name", "able to update an existing key, self.storage.set('key2', 'some-new-value') # see the value change", "self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the file is", "to write a key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and", "we should still receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # If we re-init", "re-init the storage object with the same file, self.storage = FileStorage(self.test_filename) # all", "None for missing keys. 
self.assertIsNone(self.storage.get('key3')) # If we re-init the storage object with", "# We should be able to write a key/value, self.storage.set('key1', 'value1') # retrieve", "def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self):", "write a key/value, self.storage.set('key1', 'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still", "the value change when retrieving, self.assertEqual(self.storage.get('key2'), 'some-new-value') # the other values should remain", "receive None for missing keys. self.assertIsNone(self.storage.get('key3')) # If we re-init the storage object", "We should be able to write a key/value, self.storage.set('key1', 'value1') # retrieve it,", "to update an existing key, self.storage.set('key2', 'some-new-value') # see the value change when", "# retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing keys.", "keys. self.assertIsNone(self.storage.get('key2')) # We should be able to write a 2nd key, self.storage.set('key2',", "remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should still receive None for missing", "from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage =", "is empty, we should receive None for any key. self.assertIsNone(self.storage.get('key1')) # We should", "for missing keys. 
self.assertIsNone(self.storage.get('key2')) # We should be able to write a 2nd", "the storage object with the same file, self.storage = FileStorage(self.test_filename) # all keys", "we re-init the storage object with the same file, self.storage = FileStorage(self.test_filename) #", "os.remove(self.test_filename) def test_storage(self): # When the file is empty, we should receive None", "'value1') # retrieve it, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for missing", "NamedTemporaryFile from unittest import TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self):", "= FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): # When the file is empty,", "We should be able to write a 2nd key, self.storage.set('key2', 'value2') # retrieve", "retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None", "other values should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should still receive", "When the file is empty, we should receive None for any key. self.assertIsNone(self.storage.get('key1'))", "from unittest import TestCase from mbq.client.storage import FileStorage class FileStorageTestCase(TestCase): def setUp(self): self.test_filename", "setUp(self): self.test_filename = NamedTemporaryFile(delete=False).name self.storage = FileStorage(self.test_filename) def tearDown(self): os.remove(self.test_filename) def test_storage(self): #", "same file, self.storage = FileStorage(self.test_filename) # all keys should be persisted. 
self.assertEqual(self.storage.get('key2'), 'some-new-value')", "update an existing key, self.storage.set('key2', 'some-new-value') # see the value change when retrieving,", "the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') # and still receive None for", "should remain unchanged, self.assertEqual(self.storage.get('key1'), 'value1') # and we should still receive None for", "keys. self.assertIsNone(self.storage.get('key3')) # We should be able to update an existing key, self.storage.set('key2',", "self.assertEqual(self.storage.get('key2'), 'value2') # still retrieve the earlier key we wrote, self.assertEqual(self.storage.get('key1'), 'value1') #", "# We should be able to write a 2nd key, self.storage.set('key2', 'value2') #" ]
[ "self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static(", "= conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf = conf", "web from prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import", "await runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay =", "name=\"site\") if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by random delay of", "req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache", "webserver on %s:%d\", self.host, self.port) runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner,", "-> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self): LOG.info(\"starting", "return web.FileResponse(path) async def call_client(self) -> None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result", "async def run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner = web.AppRunner(self.app) await", "from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE", "<p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req)", "logging import os import random from dataclasses import dataclass from typing import Dict,", "aio.web.server_stats), 
web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] )", "= os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def call_client(self) -> None: \"\"\"Run the rpki-client", "\"\"\"Run the rpki-client wrapper again.\"\"\" self.result = await self.client.run() async def json_result(self, req)", "wrapper again.\"\"\" self.result = await self.client.run() async def json_result(self, req) -> web.Response: if", "runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay = random.uniform(0,", "href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async", "prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG", "default to the interval for jitter value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\",", "LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None conf:", "show_index=True, ), ] ) async def index(self, req) -> web.Response: return web.Response( text=\"\"\"<html>", "app: web.Application host: str port: int interval: int jitter: int def __init__(self, conf:", "self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf = conf self.client =", "LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner = web.AppRunner(self.app) await runner.setup() site =", "self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf =", "int jitter: int 
def __init__(self, conf: Dict) -> None: self.app = web.Application() self.interval", "Optional[ExecutionResult] = None conf: Dict app: web.Application host: str port: int interval: int", "rpki-client wrapper again.\"\"\" self.result = await self.client.run() async def json_result(self, req) -> web.Response:", "self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\",", "status=500) async def run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner = web.AppRunner(self.app)", "<p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", )", "rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE =", "web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True,", "web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def call_client(self) -> None: \"\"\"Run", "RpkiClient from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb:", "def __init__(self, conf: Dict) -> None: self.app = web.Application() self.interval = conf.pop(\"interval\") #", "ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class", "-> web.Response: return web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse: path = 
os.path.join(os.path.abspath(self.conf[\"output_dir\"]),", "web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]),", "\"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async def index(self, req) -> web.Response:", "req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def call_client(self) ->", "web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay", "host: str port: int interval: int jitter: int def __init__(self, conf: Dict) ->", "asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by random delay", "= web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter:", "index(self, req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a", "interval: int jitter: int def __init__(self, conf: Dict) -> None: self.app = web.Application()", "from prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat", "wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p>", "href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req) -> 
web.Response: return web.json_response(self.conf)", "\"json\") return web.FileResponse(path) async def call_client(self) -> None: \"\"\"Run the rpki-client wrapper again.\"\"\"", "os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def call_client(self) -> None: \"\"\"Run the rpki-client wrapper", "self.port) runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\")", "jitter: int def __init__(self, conf: Dict) -> None: self.app = web.Application() self.interval =", "web.Application() self.interval = conf.pop(\"interval\") # default to the interval for jitter value self.jitter", "= random.uniform(0, self.jitter) LOG.info( \"delaying by random delay of [0, %d] seconds of", "= web.Application() self.interval = conf.pop(\"interval\") # default to the interval for jitter value", "import json import logging import os import random from dataclasses import dataclass from", "to the interval for jitter value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\")", "import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG =", "= conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf = conf self.client = RpkiClient(**self.conf)", "= conf.pop(\"port\", 8080) self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index),", "aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__)", "OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict app: web.Application", "directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a 
href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\",", "None conf: Dict app: web.Application host: str port: int interval: int jitter: int", "conf: Dict app: web.Application host: str port: int interval: int jitter: int def", "= logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict", "by random delay of [0, %d] seconds of %f seconds\", self.jitter, jitter_delay, )", "import random from dataclasses import dataclass from typing import Dict, List, Optional from", "8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict app: web.Application host: str", "web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying", "import web from prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from rpkiclientweb.util", "import ExecutionResult, RpkiClient from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608", "int def __init__(self, conf: Dict) -> None: self.app = web.Application() self.interval = conf.pop(\"interval\")", "jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by random delay of [0, %d] seconds", "web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async def index(self,", "-> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p>", "href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req) ->", "None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result 
= await self.client.run() async def json_result(self,", "self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by random", "self.jitter) LOG.info( \"delaying by random delay of [0, %d] seconds of %f seconds\",", "<gh_stars>0 import asyncio import dataclasses import json import logging import os import random", "if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by random delay of [0,", "self.interval = conf.pop(\"interval\") # default to the interval for jitter value self.jitter =", "seconds of %f seconds\", self.jitter, jitter_delay, ) await asyncio.sleep(jitter_delay) return await repeat(self.interval, self.call_client)", "from aiohttp import web from prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient", "call_client(self) -> None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result = await self.client.run() async", "\"delaying by random delay of [0, %d] seconds of %f seconds\", self.jitter, jitter_delay,", "the interval for jitter value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port", "RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects),", "asyncio import dataclasses import json import logging import os import random from dataclasses", "req) -> web.Response: return web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse: path =", "self.app = web.Application() self.interval = conf.pop(\"interval\") # default to the interval for jitter", "import os import random from dataclasses import dataclass from typing import Dict, List,", "self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", 
aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False,", "objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req) -> web.Response:", "return web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return", "async def validated_objects(self, req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async", "web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async", "req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self):", "import logging import os import random from dataclasses import dataclass from typing import", "text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p>", "import dataclass from typing import Dict, List, Optional from aiohttp import web from", "async def call_client(self) -> None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result = await", "conf.pop(\"interval\") # default to the interval for jitter value self.jitter = conf.pop(\"jitter\") self.host", "follow_symlinks=False, show_index=True, ), ] ) async def index(self, req) -> web.Response: return web.Response(", "os import random from dataclasses import dataclass from typing import Dict, List, Optional", "random from dataclasses import dataclass from 
typing import Dict, List, Optional from aiohttp", "[ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\",", "), ] ) async def index(self, req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client", "aiohttp import web from prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient from", "__init__(self, conf: Dict) -> None: self.app = web.Application() self.interval = conf.pop(\"interval\") # default", "conf: Dict) -> None: self.app = web.Application() self.interval = conf.pop(\"interval\") # default to", "web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a", "<p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req) -> web.Response: return", "= conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats),", "self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by", "result: Optional[ExecutionResult] = None conf: Dict app: web.Application host: str port: int interval:", "jitter value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080)", "web.Response: return web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\")", "conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf = 
conf self.client", "json_result(self, req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def", "web.FileResponse(path) async def call_client(self) -> None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result =", "random.uniform(0, self.jitter) LOG.info( \"delaying by random delay of [0, %d] seconds of %f", ") async def index(self, req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body>", ") async def config_response(self, req) -> web.Response: return web.json_response(self.conf) async def validated_objects(self, req)", "Dict app: web.Application host: str port: int interval: int jitter: int def __init__(self,", "= RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\",", "web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path)", "web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a", "delay of [0, %d] seconds of %f seconds\", self.jitter, jitter_delay, ) await asyncio.sleep(jitter_delay)", "= None conf: Dict app: web.Application host: str port: int interval: int jitter:", "\"localhost\") self.port = conf.pop(\"port\", 8080) self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes( [", "Dict, List, Optional from aiohttp import web from prometheus_async import aio from rpkiclientweb.rpki_client", "path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def 
call_client(self) -> None: \"\"\"Run the", "web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port)", "value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf", "web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner =", "<head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a", "web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver", "self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver on %s:%d\",", "RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict app: web.Application host: str port: int", "href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self,", "repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None", "= await self.client.run() async def json_result(self, req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result))", "port: int interval: int jitter: int def __init__(self, conf: Dict) -> None: self.app", "str port: int interval: int jitter: int def __init__(self, conf: Dict) -> None:", "def validated_objects(self, req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), 
\"json\") return web.FileResponse(path) async def", "validated_objects(self, req) -> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def call_client(self)", "-> web.FileResponse: path = os.path.join(os.path.abspath(self.conf[\"output_dir\"]), \"json\") return web.FileResponse(path) async def call_client(self) -> None:", "Optional from aiohttp import web from prometheus_async import aio from rpkiclientweb.rpki_client import ExecutionResult,", "LOG.info( \"delaying by random delay of [0, %d] seconds of %f seconds\", self.jitter,", "json import logging import os import random from dataclasses import dataclass from typing", "] ) async def index(self, req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head>", "from typing import Dict, List, Optional from aiohttp import web from prometheus_async import", "async def json_result(self, req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500)", "web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async def index(self, req) ->", "</html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req) -> web.Response: return web.json_response(self.conf) async def", "def call_client(self) -> None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result = await self.client.run()", "[0, %d] seconds of %f seconds\", self.jitter, jitter_delay, ) await asyncio.sleep(jitter_delay) return await", "from dataclasses import dataclass from typing import Dict, List, Optional from aiohttp import", "= web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info(", "web.Application host: str port: int interval: int jitter: int def __init__(self, conf: Dict)", "if 
self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver on", "dataclasses import json import logging import os import random from dataclasses import dataclass", "return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p>", "of [0, %d] seconds of %f seconds\", self.jitter, jitter_delay, ) await asyncio.sleep(jitter_delay) return", "async def index(self, req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client", "%d] seconds of %f seconds\", self.jitter, jitter_delay, ) await asyncio.sleep(jitter_delay) return await repeat(self.interval,", "List, Optional from aiohttp import web from prometheus_async import aio from rpkiclientweb.rpki_client import", "typing import Dict, List, Optional from aiohttp import web from prometheus_async import aio", "await self.client.run() async def json_result(self, req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return", "self.client.run() async def json_result(self, req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None,", "interval for jitter value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port =", "class RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict app: web.Application host: str port:", "self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result),", "site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if self.jitter: 
jitter_delay = random.uniform(0, self.jitter)", "def json_result(self, req) -> web.Response: if self.result: return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async", "wrapper</title></head> <body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated", "<h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a", "self.host, self.port) runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(),", "<p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body>", "self.port = conf.pop(\"port\", 8080) self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\",", "Dict) -> None: self.app = web.Application() self.interval = conf.pop(\"interval\") # default to the", "<body> <h1>rpki-client wrapper</h1> <p><a href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p>", "return web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner", "rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult]", "the rpki-client wrapper again.\"\"\" self.result = await self.client.run() async def json_result(self, req) ->", "= conf.pop(\"interval\") # default to 
the interval for jitter value self.jitter = conf.pop(\"jitter\")", "conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\", 8080) self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes(", "logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict app:", "dataclass from typing import Dict, List, Optional from aiohttp import web from prometheus_async", "import asyncio import dataclasses import json import logging import os import random from", "self.result = await self.client.run() async def json_result(self, req) -> web.Response: if self.result: return", "def index(self, req) -> web.Response: return web.Response( text=\"\"\"<html> <head><title>rpki-client wrapper</title></head> <body> <h1>rpki-client wrapper</h1>", "self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ),", "8080) self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response),", "os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async def index(self, req) -> web.Response: return", "runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host, self.port) asyncio.create_task(site.start(), name=\"site\") if", "</body> </html>\"\"\", content_type=\"text/html\", ) async def config_response(self, req) -> web.Response: return web.json_response(self.conf) async", "run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner = web.AppRunner(self.app) await runner.setup() site", "-> None: self.app = web.Application() self.interval = conf.pop(\"interval\") # default to the interval", "async def config_response(self, req) -> 
web.Response: return web.json_response(self.conf) async def validated_objects(self, req) ->", "def run(self): LOG.info(\"starting webserver on %s:%d\", self.host, self.port) runner = web.AppRunner(self.app) await runner.setup()", "# default to the interval for jitter value self.jitter = conf.pop(\"jitter\") self.host =", "int interval: int jitter: int def __init__(self, conf: Dict) -> None: self.app =", "conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\", self.config_response), web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\",", "self.jitter: jitter_delay = random.uniform(0, self.jitter) LOG.info( \"delaying by random delay of [0, %d]", "again.\"\"\" self.result = await self.client.run() async def json_result(self, req) -> web.Response: if self.result:", "config_response(self, req) -> web.Response: return web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse: path", "return web.json_response(dataclasses.asdict(self.result)) return web.json_response(None, status=500) async def run(self): LOG.info(\"starting webserver on %s:%d\", self.host,", "def config_response(self, req) -> web.Response: return web.json_response(self.conf) async def validated_objects(self, req) -> web.FileResponse:", "dataclasses import dataclass from typing import Dict, List, Optional from aiohttp import web", "conf.pop(\"port\", 8080) self.conf = conf self.client = RpkiClient(**self.conf) self.app.add_routes( [ web.get(\"/\", self.index), web.get(\"/config\",", "content_type=\"text/html\", ) async def config_response(self, req) -> web.Response: return web.json_response(self.conf) async def validated_objects(self,", "-> None: \"\"\"Run the rpki-client wrapper again.\"\"\" self.result = await self.client.run() async def", "%s:%d\", self.host, self.port) runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host, self.port)", "on %s:%d\", self.host, 
self.port) runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, self.host,", "from rpkiclientweb.util import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result:", "self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async def index(self, req)", "import dataclasses import json import logging import os import random from dataclasses import", "None: self.app = web.Application() self.interval = conf.pop(\"interval\") # default to the interval for", "for jitter value self.jitter = conf.pop(\"jitter\") self.host = conf.pop(\"host\", \"localhost\") self.port = conf.pop(\"port\",", "href=\"/cache\">Cache directory</a></p> <p><a href=\"/config\">Configuration</a></p> <p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\",", "import Dict, List, Optional from aiohttp import web from prometheus_async import aio from", "import repeat LOG = logging.getLogger(__name__) OUTPUT_BUFFER_SIZE = 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] =", "self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ] ) async def", "<p><a href=\"/metrics\">Metrics</a></p> <p><a href=\"/objects/validated\">Validated objects</a></p> <p><a href=\"/result\">Result</a></p> </body> </html>\"\"\", content_type=\"text/html\", ) async def", "= 8_388_608 class RpkiClientWeb: result: Optional[ExecutionResult] = None conf: Dict app: web.Application host:", "random delay of [0, %d] seconds of %f seconds\", self.jitter, jitter_delay, ) await", "web.get(\"/metrics\", aio.web.server_stats), web.get(\"/result\", self.json_result), web.get(\"/objects/validated\", self.validated_objects), web.static( \"/cache\", 
os.path.abspath(conf[\"cache_dir\"]), follow_symlinks=False, show_index=True, ), ]" ]
[ "-*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import", "django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [", "('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID',", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations", "django.conf import settings class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations =", "import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('users',", "unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies", "[ ('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False,", "('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image', default=None)), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)), ], ),", "models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image', default=None)), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)), ], ), ]", "models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'),", "coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from", "= [ ('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True,", "= [ migrations.CreateModel( 
name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image',", "[ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image', default=None)),", "import settings class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [", "import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration):", "Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[", "'0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),", "from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations", "settings class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.CreateModel(", "operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True,", "class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile',", "from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings", "migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image', default=None)), ('user',", 
"utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf", "] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image',", "fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image', default=None)), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)), ],", "migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ]", "__future__ import unicode_literals from django.db import models, migrations from django.conf import settings class", "from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies =", "<gh_stars>0 # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import", "name='UserProfile', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('image', models.ImageField(null=True, upload_to='image', default=None)), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),", "dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id',", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models," ]
[ "plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f,", "# list of columns needed from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for", "up the figure fig = plt.figure() # set up the axes ax =", "england 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017')", "one more game def animate(i): xd = [i for i in range(20)] y", "goals goal_diff = row.FTHG - row.FTAG # update the league table based on", "df.iterrows(): # initialize the current league table to be the same as the", "final table') # set axis limits, 461 most games in an individual season", "thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home goals - away", "ax.plot([], [],'o',linestyle='None') # add title, legend, etc. plt.title('\\'99-\\'00 Premier League points distribution over", "plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close()", "for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each frame draws a", "# each row corresponds to the league table after that number of games", "in an individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on", "% tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() #", "points') # draw the background def init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3)", "plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate", "# france 16-17 xd = [i for i in range(20)] 
plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1", "set axis limits, 461 most games in an individual season axes = plt.gca()", "limits, 461 most games in an individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png')", "-*- coding: utf-8 -*- \"\"\" Created on Wed Mar 7 08:38:14 2018 @author:", "plt.ylabel('Proportion of total points') # draw the background def init(): line.set_data([],[]) plt.bar([i for", "= tables[1:,:] # compute the probability distribution for the final league table p", "range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla()", "anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the animation anim.save('basic_animation.mp4',", "c=%5.3f' % tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close()", "animate(i): xd = [i for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y)", "from matplotlib import animation from scipy.stats import entropy from scipy.optimize import curve_fit import", "np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games = 1 # loop through the season", "b=%5.3f, c=%5.3f' % tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla()", "example by <NAME>: # email: <EMAIL> # website: http://jakevdp.github.com # license: BSD #", "['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load the season data df = pd.read_csv(file,index_col='Date',encoding", "plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve", "for that season teams = list(df.HomeTeam.unique()) # set up array for league tables", "# animate anim = animation.FuncAnimation(fig, 
animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the", "(entropy(p, r) + entropy(q, r)) # the data files have already been acquired", "tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 # increment the", "needed from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: #", "League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany", "np import glob import matplotlib.pyplot as plt from matplotlib import animation from scipy.stats", "2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17", "# plot the individual JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add", "final league tables finals = np.zeros((len(filenames),25)) # initialize a season counter season =", "cleaned # see get_football-data_data.py # build a list of filenames filenames = glob.glob('data/*.csv')", "have already been acquired and cleaned # see get_football-data_data.py # build a list", "distributions # spain 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La", "def animate(i): xd = [i for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd,", "italy 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017')", "tables = tables[1:,:] # compute the probability distribution for the final league table", "# set up the axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, =", "a legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs, f(xs, *popt), 'r-',", "the best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))", "in 
range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each frame draws a distribution after", "distribution # and then compute the JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:])", "legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute examples of final probability", "plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17 xd = [i for", "germany 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'')", "xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked", "in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf()", "+= 1 # increment the game counter num_games += 1 # delete first", "current league table to be the same as the last tables[num_games,:] = tables[num_games-1,:]", "teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17 xd = [i", "finals = np.zeros((len(filenames),25)) # initialize a season counter season = 0 # list", "return 0.5 * (entropy(p, r) + entropy(q, r)) # the data files have", "# compute home goals - away goals goal_diff = row.FTHG - row.FTAG #", "[i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point", "team # for each of the running league tables, convert to a distribution", "one season jsds = np.zeros((len(filenames),500)) # initialize an array to hold final league", "df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names for that", "tables[num_games,:] = 
tables[num_games-1,:] # get indices for the teams involved in thisgame home_idx", "time') plt.xlabel('Number of games played') plt.ylabel('JSD with final table') # set axis limits,", "to a distribution # and then compute the JSD for i in range(len(tables[:,0])):", "season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the first 100", "# function to compute Jensen-Shannon divergence def JSD(p, q): r = 0.5 *", "compute home goals - away goals goal_diff = row.FTHG - row.FTAG # update", "p for idx,team in enumerate(p): finals[season,idx] = team # for each of the", "plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close()", "based on an example by <NAME>: # email: <EMAIL> # website: http://jakevdp.github.com #", "total points') # draw the background def init(): line.set_data([],[]) plt.bar([i for i in", "elif goal_diff < 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] +=", "init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the animation anim.save('basic_animation.mp4', fps=50, extra_args=['-vcodec', 'libx264']) plt.show()", "distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points') # draw the", "# plot the best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f'", "a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf()", "plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation # code below based on an", "quickly soccer league tables converge to the final distribution \"\"\" import pandas as", "for idx,team in enumerate(p): finals[season,idx] = team # for each of the running", "plt.clf() plt.cla() plt.close() # germany 16-17 xd = [i for i in 
range(18)]", "distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17 xd = [i for i", "i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each frame draws a distribution", "a distribution # and then compute the JSD for i in range(len(tables[:,0])): #if", "league tables over time') plt.xlabel('Number of games played') plt.ylabel('JSD with final table') #", "the JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:])", "# zoom in on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out", "for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png')", "teams involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home", "= [i for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line,", "counter season = 0 # list of columns needed from the data files", "= tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in enumerate(p): finals[season,idx] = team #", "import curve_fit import seaborn as sns sns.set() # function to compute Jensen-Shannon divergence", "convert to a distribution # and then compute the JSD for i in", "figure fig = plt.figure() # set up the axes ax = plt.axes(xlim=(-1, 20),", "the teams involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute", "row of the table tables = tables[1:,:] # compute the probability distribution for", "spain 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017')", "best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) #", "animation # code below based on an 
example by <NAME>: # email: <EMAIL>", "# code below based on an example by <NAME>: # email: <EMAIL> #", "* (p + q) return 0.5 * (entropy(p, r) + entropy(q, r)) #", "france 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017')", "average curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend() plt.savefig('convwithavg.png') # plot the", "plt.ylabel('JSD with final table') # set axis limits, 461 most games in an", "in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf()", "# each row will contain the JSD curve data for one season jsds", "season teams = list(df.HomeTeam.unique()) # set up array for league tables # each", "of x values for the games xs = np.array([i for i in range(len(avg))])", "compute the average JSD curve avg = np.sum(jsds,axis=0)/110 # array of x values", "axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') # add", "# loop through the season data game by game for idx,row in df.iterrows():", "# store p for idx,team in enumerate(p): finals[season,idx] = team # for each", "# initialize game counter num_games = 1 # loop through the season data", "see get_football-data_data.py # build a list of filenames filenames = glob.glob('data/*.csv') # initialize", "a * np.exp(-b * x) + c # perform the curve fit popt,", "from scipy.stats import entropy from scipy.optimize import curve_fit import seaborn as sns sns.set()", "game counter num_games += 1 # delete first row of the table tables", "and then compute the JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]):", "0 # list of columns needed from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG']", "16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') 
plt.xticks([],'')", "last tables[num_games,:] = tables[num_games-1,:] # get indices for the teams involved in thisgame", "glob import matplotlib.pyplot as plt from matplotlib import animation from scipy.stats import entropy", "season += 1 # compute the average JSD curve avg = np.sum(jsds,axis=0)/110 #", "final probability distributions # spain 16-17 xd = [i for i in range(18)]", "plt.clf() plt.cla() plt.close() # compute examples of final probability distributions # spain 16-17", "unique team names for that season teams = list(df.HomeTeam.unique()) # set up array", "over time') plt.xlabel('Number of games played') plt.ylabel('JSD with final table') # set axis", "plt.show() plt.clf() plt.cla() plt.close() # compute examples of final probability distributions # spain", "JSD(p,q) # increment the season counter season += 1 # compute the average", "np.zeros((len(filenames),500)) # initialize an array to hold final league tables finals = np.zeros((len(filenames),25))", "# perform the curve fit popt, pcov = curve_fit(f, xs, avg) # plot", "each row corresponds to the league table after that number of games tables", "converge to the final distribution \"\"\" import pandas as pd import numpy as", "+= 1 # delete first row of the table tables = tables[1:,:] #", "away_idx = teams.index(row['AwayTeam']) # compute home goals - away goals goal_diff = row.FTHG", "np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340,", "final distribution \"\"\" import pandas as pd import numpy as np import glob", "soccer league tables converge to the final distribution \"\"\" import pandas as pd", "= tables[num_games-1,:] # get indices for the teams involved in thisgame home_idx =", "plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute examples of final probability distributions", "* x) + c # perform the curve fit popt, pcov = curve_fit(f,", "hold JSD 
values # each row will contain the JSD curve data for", "xs, avg) # plot the individual JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray')", "league table based on the result if goal_diff > 0: tables[num_games,home_idx] += 3", "names for that season teams = list(df.HomeTeam.unique()) # set up array for league", "on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) #", "r) + entropy(q, r)) # the data files have already been acquired and", "for the teams involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) #", "return line, # animation function, each frame draws a distribution after one more", "the current league table to be the same as the last tables[num_games,:] =", "row will contain the JSD curve data for one season jsds = np.zeros((len(filenames),500))", "plt.cla() plt.close() # germany 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18]))", "plt.cla() plt.close() # compute examples of final probability distributions # spain 16-17 xd", "# define function for curve-fitting def f(x, a, b, c): return a *", "ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') # add title,", "that number of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games =", "# germany 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017')", "of league tables over time') plt.xlabel('Number of games played') plt.ylabel('JSD with final table')", "from scipy.optimize import curve_fit import seaborn as sns sns.set() # function to compute", "for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution')", "plt.cla() plt.close() # generate animation # code below based on an example by", "plt.bar(xd,np.sort(finals[93,:18])) 
plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() #", "# add title, legend, etc. plt.title('\\'99-\\'00 Premier League points distribution over time') plt.xticks([],'')", "for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution')", "range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla()", "# email: <EMAIL> # website: http://jakevdp.github.com # license: BSD # set up the", "curve_fit import seaborn as sns sns.set() # function to compute Jensen-Shannon divergence def", "animation from scipy.stats import entropy from scipy.optimize import curve_fit import seaborn as sns", "below based on an example by <NAME>: # email: <EMAIL> # website: http://jakevdp.github.com", "plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() #", "0.5 * (p + q) return 0.5 * (entropy(p, r) + entropy(q, r))", "plt.title('\\'99-\\'00 Premier League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total", "line.set_data(xd, y) return line, # animate anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20,", "table p = tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in enumerate(p): finals[season,idx] =", "league tables finals = np.zeros((len(filenames),25)) # initialize a season counter season = 0", "played') plt.ylabel('JSD with final table') # set axis limits, 461 most games in", "3 elif goal_diff < 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx]", "plt.xlabel('Ranked 
teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17 xd =", "len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the season counter season", "league table to be the same as the last tables[num_games,:] = tables[num_games-1,:] #", "<NAME>: # email: <EMAIL> # website: http://jakevdp.github.com # license: BSD # set up", "+ entropy(q, r)) # the data files have already been acquired and cleaned", "tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute", "store p for idx,team in enumerate(p): finals[season,idx] = team # for each of", "indices for the teams involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam'])", "3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 # increment the game counter", "loop through the season data game by game for idx,row in df.iterrows(): #", "initialize game counter num_games = 1 # loop through the season data game", "= team # for each of the running league tables, convert to a", "np.array([i for i in range(len(avg))]) # define function for curve-fitting def f(x, a,", "in df.iterrows(): # initialize the current league table to be the same as", "season jsds = np.zeros((len(filenames),500)) # initialize an array to hold final league tables", "data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names for", "2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17", "# each column corresponds to a team # each row corresponds to the", "array to hold final league tables finals = np.zeros((len(filenames),25)) # initialize a season", "plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') 
plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17 xd", "import pandas as pd import numpy as np import glob import matplotlib.pyplot as", "plt.clf() plt.cla() plt.close() # generate animation # code below based on an example", "initialize an array to hold final league tables finals = np.zeros((len(filenames),25)) # initialize", "columns needed from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames:", "final league table p = tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in enumerate(p):", "examples of final probability distributions # spain 16-17 xd = [i for i", "plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close()", "plt.title('Convergence of league tables over time') plt.xlabel('Number of games played') plt.ylabel('JSD with final", "season = 0 # list of columns needed from the data files cols", "data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load the season", "# initialize an array to hold final league tables finals = np.zeros((len(filenames),25)) #", "set up array for league tables # each column corresponds to a team", "= list(df.HomeTeam.unique()) # set up array for league tables # each column corresponds", "y) return line, # animate anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000)", "probability distributions # spain 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18]))", "7 08:38:14 2018 @author: <NAME> compute how quickly soccer league tables converge to", "league table after that number of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game", "league table p = tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in enumerate(p): finals[season,idx]", "0.5 * (entropy(p, r) + entropy(q, r)) # 
the data files have already", "to the league table after that number of games tables = np.zeros((df.shape[0]+1,len(teams))) #", "the JSD curve data for one season jsds = np.zeros((len(filenames),500)) # initialize an", "# compute the probability distribution for the final league table p = tables[-1,:]/np.sum(tables[-1,:])", "= 0.5 * (p + q) return 0.5 * (entropy(p, r) + entropy(q,", "range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate anim = animation.FuncAnimation(fig,", "else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 # increment the game counter num_games", "plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png')", "axis labels plt.title('Convergence of league tables over time') plt.xlabel('Number of games played') plt.ylabel('JSD", "from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load", "plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17 xd = [i for", "goal_diff > 0: tables[num_games,home_idx] += 3 elif goal_diff < 0: tables[num_games,away_idx] += 3", "define function for curve-fitting def f(x, a, b, c): return a * np.exp(-b", "plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17 xd = [i for i in", "xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked", "i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png')", "for curve-fitting def f(x, a, b, c): return a * np.exp(-b * x)", "# and then compute the JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) ==", "range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') 
plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close()", "plt.cla() plt.close() # england 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20]))", "home goals - away goals goal_diff = row.FTHG - row.FTAG # update the", "* np.exp(-b * x) + c # perform the curve fit popt, pcov", "# update the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute examples", "distribution after one more game def animate(i): xd = [i for i in", "pandas as pd import numpy as np import glob import matplotlib.pyplot as plt", "row.FTHG - row.FTAG # update the league table based on the result if", "1 # loop through the season data game by game for idx,row in", "plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation # code", "function, each frame draws a distribution after one more game def animate(i): xd", "etc. 
plt.title('\\'99-\\'00 Premier League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of", "x values for the games xs = np.array([i for i in range(len(avg))]) #", "[i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point", "the games xs = np.array([i for i in range(len(avg))]) # define function for", "entropy from scipy.optimize import curve_fit import seaborn as sns sns.set() # function to", "first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) # plot the", "# add a legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs, f(xs,", "plt.figure() # set up the axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line,", "each of the running league tables, convert to a distribution # and then", "= np.array([i for i in range(len(avg))]) # define function for curve-fitting def f(x,", "as sns sns.set() # function to compute Jensen-Shannon divergence def JSD(p, q): r", "seaborn as sns sns.set() # function to compute Jensen-Shannon divergence def JSD(p, q):", "plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17 xd =", "plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() #", "plt.xlabel('Number of games played') plt.ylabel('JSD with final table') # set axis limits, 461", "filenames = glob.glob('data/*.csv') # initialize an array to hold JSD values # each", "1 # compute the average JSD curve avg = np.sum(jsds,axis=0)/110 # array of", "set up the figure fig = plt.figure() # set up the axes ax", "for i in range(len(avg))]) # define function for curve-fitting def f(x, a, b,", "#if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = 
tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the", "plt.close() # generate animation # code below based on an example by <NAME>:", "def JSD(p, q): r = 0.5 * (p + q) return 0.5 *", "import matplotlib.pyplot as plt from matplotlib import animation from scipy.stats import entropy from", "data files have already been acquired and cleaned # see get_football-data_data.py # build", "# italy 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A", "teams = list(df.HomeTeam.unique()) # set up array for league tables # each column", "the background def init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, #", "r = 0.5 * (p + q) return 0.5 * (entropy(p, r) +", "for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] =", "= [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams')", "= glob.glob('data/*.csv') # initialize an array to hold JSD values # each row", "an example by <NAME>: # email: <EMAIL> # website: http://jakevdp.github.com # license: BSD", "label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png') plt.show()", "idx,row in df.iterrows(): # initialize the current league table to be the same", "# the data files have already been acquired and cleaned # see get_football-data_data.py", "xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked", "q) return 0.5 * (entropy(p, r) + entropy(q, r)) # the data files", "the league table after that number of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize", "0: tables[num_games,home_idx] += 3 elif goal_diff < 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx]", "table after 
that number of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter", "the axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') #", "2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation", "individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the first", "over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points') # draw the background", "data game by game for idx,row in df.iterrows(): # initialize the current league", "# animation function, each frame draws a distribution after one more game def", "1 # increment the game counter num_games += 1 # delete first row", "season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names", "Mar 7 08:38:14 2018 @author: <NAME> compute how quickly soccer league tables converge", "first row of the table tables = tables[1:,:] # compute the probability distribution", "how quickly soccer league tables converge to the final distribution \"\"\" import pandas", "if goal_diff > 0: tables[num_games,home_idx] += 3 elif goal_diff < 0: tables[num_games,away_idx] +=", "to a team # each row corresponds to the league table after that", "set up the axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([],", "[i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution')", "get the unique team names for that season teams = list(df.HomeTeam.unique()) # set", "corresponds to a team # each row corresponds to the league table after", "distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation # code below based on", "up array for league tables # each 
column corresponds to a team #", "average JSD curve avg = np.sum(jsds,axis=0)/110 # array of x values for the", "to be the same as the last tables[num_games,:] = tables[num_games-1,:] # get indices", "# generate animation # code below based on an example by <NAME>: #", "row corresponds to the league table after that number of games tables =", "= tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the season counter season += 1", "compute Jensen-Shannon divergence def JSD(p, q): r = 0.5 * (p + q)", "plt from matplotlib import animation from scipy.stats import entropy from scipy.optimize import curve_fit", "filenames filenames = glob.glob('data/*.csv') # initialize an array to hold JSD values #", "[i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point", "the season counter season += 1 # compute the average JSD curve avg", "background def init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation", "in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) #", "'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update the legend plt.legend() plt.savefig('conv.png')", "enumerate(p): finals[season,idx] = team # for each of the running league tables, convert", "plt.close() # france 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue", "= np.zeros((len(filenames),25)) # initialize a season counter season = 0 # list of", "after one more game def animate(i): xd = [i for i in range(20)]", "distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17 xd = [i for i", "= teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home goals - away goals goal_diff", "each column corresponds to a team # each 
row corresponds to the league", "most games in an individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom", "generate animation # code below based on an example by <NAME>: # email:", "pd import numpy as np import glob import matplotlib.pyplot as plt from matplotlib", "scipy.optimize import curve_fit import seaborn as sns sns.set() # function to compute Jensen-Shannon", "teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17 xd = [i", "16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked", "of total points') # draw the background def init(): line.set_data([],[]) plt.bar([i for i", "be the same as the last tables[num_games,:] = tables[num_games-1,:] # get indices for", "code below based on an example by <NAME>: # email: <EMAIL> # website:", "number of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games = 1", "JSD(p, q): r = 0.5 * (p + q) return 0.5 * (entropy(p,", "16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'')", "line, # animation function, each frame draws a distribution after one more game", "table to be the same as the last tables[num_games,:] = tables[num_games-1,:] # get", "tables finals = np.zeros((len(filenames),25)) # initialize a season counter season = 0 #", "fig = plt.figure() # set up the axes ax = plt.axes(xlim=(-1, 20), ylim=(0,", "initialize the current league table to be the same as the last tables[num_games,:]", "frame draws a distribution after one more game def animate(i): xd = [i", "get indices for the teams involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx =", "plot the best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' %", "*popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update 
the legend plt.legend()", "plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update the", "Jensen-Shannon divergence def JSD(p, q): r = 0.5 * (p + q) return", "range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla()", "table tables = tables[1:,:] # compute the probability distribution for the final league", "# load the season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the", "hold final league tables finals = np.zeros((len(filenames),25)) # initialize a season counter season", "# for each of the running league tables, convert to a distribution #", "contain the JSD curve data for one season jsds = np.zeros((len(filenames),500)) # initialize", "the final distribution \"\"\" import pandas as pd import numpy as np import", "# increment the game counter num_games += 1 # delete first row of", "= teams.index(row['AwayTeam']) # compute home goals - away goals goal_diff = row.FTHG -", "JSD curve data for one season jsds = np.zeros((len(filenames),500)) # initialize an array", "numpy as np import glob import matplotlib.pyplot as plt from matplotlib import animation", "plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points') # draw the background def init():", "teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17 xd = [i", "plt.savefig('allseasons.png') # zoom in on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom", "# england 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League", "entropy(q, r)) # the data files have already been acquired and cleaned #", "tables # each column corresponds to a team # each row corresponds to", "in range(20)] y = 
np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate anim =", "<EMAIL> # website: http://jakevdp.github.com # license: BSD # set up the figure fig", "i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q)", "divergence def JSD(p, q): r = 0.5 * (p + q) return 0.5", "game for idx,row in df.iterrows(): # initialize the current league table to be", "probability distribution for the final league table p = tables[-1,:]/np.sum(tables[-1,:]) # store p", "for one season jsds = np.zeros((len(filenames),500)) # initialize an array to hold final", "filenames: # load the season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get", "# get the unique team names for that season teams = list(df.HomeTeam.unique()) #", "same as the last tables[num_games,:] = tables[num_games-1,:] # get indices for the teams", "for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels plt.title('Convergence of", "function for curve-fitting def f(x, a, b, c): return a * np.exp(-b *", "JSD values # each row will contain the JSD curve data for one", "init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each", "the same as the last tables[num_games,:] = tables[num_games-1,:] # get indices for the", "already been acquired and cleaned # see get_football-data_data.py # build a list of", "distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17 xd = [i for i", "plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17 xd", "= [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked 
teams') plt.ylabel('Point", "curve fit popt, pcov = curve_fit(f, xs, avg) # plot the individual JSD", "tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 # increment the game counter num_games +=", "axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average", "scipy.stats import entropy from scipy.optimize import curve_fit import seaborn as sns sns.set() #", "a season counter season = 0 # list of columns needed from the", "plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17 xd", "tables, convert to a distribution # and then compute the JSD for i", "f(x, a, b, c): return a * np.exp(-b * x) + c #", "= np.sum(jsds,axis=0)/110 # array of x values for the games xs = np.array([i", "plt.close() # compute examples of final probability distributions # spain 16-17 xd =", "the league table based on the result if goal_diff > 0: tables[num_games,home_idx] +=", "# initialize a season counter season = 0 # list of columns needed", "increment the season counter season += 1 # compute the average JSD curve", "in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf()", "game def animate(i): xd = [i for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:]))", "plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17 xd = [i for", "= 0 # list of columns needed from the data files cols =", "ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') # add title, legend, etc. 
plt.title('\\'99-\\'00 Premier", "teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation # code below", "glob.glob('data/*.csv') # initialize an array to hold JSD values # each row will", "1 tables[num_games,away_idx] += 1 # increment the game counter num_games += 1 #", "of the running league tables, convert to a distribution # and then compute", "plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute examples of final probability distributions #", "games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games = 1 # loop", "season data game by game for idx,row in df.iterrows(): # initialize the current", "JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i]", "league tables # each column corresponds to a team # each row corresponds", "animation function, each frame draws a distribution after one more game def animate(i):", "<NAME> compute how quickly soccer league tables converge to the final distribution \"\"\"", "# license: BSD # set up the figure fig = plt.figure() # set", "update the league table based on the result if goal_diff > 0: tables[num_games,home_idx]", "to hold final league tables finals = np.zeros((len(filenames),25)) # initialize a season counter", "as plt from matplotlib import animation from scipy.stats import entropy from scipy.optimize import", "of columns needed from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in", "= [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams')", "games played') plt.ylabel('JSD with final table') # set axis limits, 461 most games", "on Wed Mar 7 08:38:14 2018 @author: <NAME> compute how quickly soccer league", "num_games = 1 # loop through the season data game by game for", "update the legend 
plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute examples of", "in on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380])", "plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation #", "16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'')", "20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') # add title, legend, etc. plt.title('\\'99-\\'00", "import seaborn as sns sns.set() # function to compute Jensen-Shannon divergence def JSD(p,", "array of x values for the games xs = np.array([i for i in", "animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the animation anim.save('basic_animation.mp4', fps=50, extra_args=['-vcodec', 'libx264'])", "League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points') #", "row.FTAG # update the league table based on the result if goal_diff >", "for league tables # each column corresponds to a team # each row", "in thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home goals -", "of filenames filenames = glob.glob('data/*.csv') # initialize an array to hold JSD values", "table based on the result if goal_diff > 0: tables[num_games,home_idx] += 3 elif", "goals - away goals goal_diff = row.FTHG - row.FTAG # update the league", "= [i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams')", "# get indices for the teams involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx", "# compute the average JSD curve avg = np.sum(jsds,axis=0)/110 # array of x", "q): r = 0.5 * (p + q) return 0.5 * (entropy(p, r)", "# delete first row of the 
table tables = tables[1:,:] # compute the", "xd = [i for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return", "line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each frame", "plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each frame draws", "coding: utf-8 -*- \"\"\" Created on Wed Mar 7 08:38:14 2018 @author: <NAME>", "the game counter num_games += 1 # delete first row of the table", "zoom in on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again", "r)) # the data files have already been acquired and cleaned # see", "on an example by <NAME>: # email: <EMAIL> # website: http://jakevdp.github.com # license:", "plt.close() # england 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier", "teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17 xd = [i", "a list of filenames filenames = glob.glob('data/*.csv') # initialize an array to hold", "after that number of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games", "2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17", "plt.close() # italy 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie", "# initialize the current league table to be the same as the last", "to the final distribution \"\"\" import pandas as pd import numpy as np", "num_games += 1 # delete first row of the table tables = tables[1:,:]", "plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points') # draw the background def init(): line.set_data([],[])", "add title and axis labels plt.title('Convergence of league tables over time') 
plt.xlabel('Number of", "= [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams')", "array for league tables # each column corresponds to a team # each", "file in filenames: # load the season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any')", "files have already been acquired and cleaned # see get_football-data_data.py # build a", "acquired and cleaned # see get_football-data_data.py # build a list of filenames filenames", "+= 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 # increment the game", "title and axis labels plt.title('Convergence of league tables over time') plt.xlabel('Number of games", "plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') # add title, legend, etc.", "games in an individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in", "points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points') # draw", "= pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names for that season", "compute the probability distribution for the final league table p = tables[-1,:]/np.sum(tables[-1,:]) #", "JSD') # add a legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs,", "[],'o',linestyle='None') # add title, legend, etc. 
plt.title('\\'99-\\'00 Premier League points distribution over time')", "the probability distribution for the final league table p = tables[-1,:]/np.sum(tables[-1,:]) # store", "plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17 xd = [i for i in", "# website: http://jakevdp.github.com # license: BSD # set up the figure fig =", "to hold JSD values # each row will contain the JSD curve data", "through the season data game by game for idx,row in df.iterrows(): # initialize", "files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load the season data", "import numpy as np import glob import matplotlib.pyplot as plt from matplotlib import", "# draw the background def init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return", "build a list of filenames filenames = glob.glob('data/*.csv') # initialize an array to", "BSD # set up the figure fig = plt.figure() # set up the", "to compute Jensen-Shannon divergence def JSD(p, q): r = 0.5 * (p +", "tables[1:,:] # compute the probability distribution for the final league table p =", "initialize a season counter season = 0 # list of columns needed from", "plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() #", "= JSD(p,q) # increment the season counter season += 1 # compute the", "counter num_games += 1 # delete first row of the table tables =", "import glob import matplotlib.pyplot as plt from matplotlib import animation from scipy.stats import", "distribution for the final league table p = tables[-1,:]/np.sum(tables[-1,:]) # store p for", "axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') #", "in range(len(avg))]) # define function for curve-fitting def f(x, a, b, c): return", "the average JSD curve avg = 
np.sum(jsds,axis=0)/110 # array of x values for", "a distribution after one more game def animate(i): xd = [i for i", "xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked", "plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17 xd = [i for i in", "range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels plt.title('Convergence of league tables over", "the curve fit popt, pcov = curve_fit(f, xs, avg) # plot the individual", "tables[num_games,away_idx] += 1 # increment the game counter num_games += 1 # delete", "plt.cla() plt.close() # italy 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[27,:20]))", "curve_fit(f, xs, avg) # plot the individual JSD curves for i in range(jsds.shape[0]):", "i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf()", "compute how quickly soccer league tables converge to the final distribution \"\"\" import", "on the result if goal_diff > 0: tables[num_games,home_idx] += 3 elif goal_diff <", "< 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 #", "with final table') # set axis limits, 461 most games in an individual", "i in range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png')", "c # perform the curve fit popt, pcov = curve_fit(f, xs, avg) #", "result if goal_diff > 0: tables[num_games,home_idx] += 3 elif goal_diff < 0: tables[num_games,away_idx]", "= np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate anim = animation.FuncAnimation(fig, animate, init_func=init,", "by game for idx,row in df.iterrows(): # initialize the current league 
table to", "+= 1 # compute the average JSD curve avg = np.sum(jsds,axis=0)/110 # array", "for each of the running league tables, convert to a distribution # and", "cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load the season data df", "delete first row of the table tables = tables[1:,:] # compute the probability", "- away goals goal_diff = row.FTHG - row.FTAG # update the league table", "i in range(len(avg))]) # define function for curve-fitting def f(x, a, b, c):", "= 1 # loop through the season data game by game for idx,row", "draws a distribution after one more game def animate(i): xd = [i for", "team names for that season teams = list(df.HomeTeam.unique()) # set up array for", "array to hold JSD values # each row will contain the JSD curve", "corresponds to the league table after that number of games tables = np.zeros((df.shape[0]+1,len(teams)))", "the individual JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and", "tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the season counter season += 1 #", "the running league tables, convert to a distribution # and then compute the", "tables[num_games,home_idx] += 3 elif goal_diff < 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] +=", "idx,team in enumerate(p): finals[season,idx] = team # for each of the running league", "16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'')", "title, legend, etc. plt.title('\\'99-\\'00 Premier League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams')", "# spain 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga", "legend, etc. 
plt.title('\\'99-\\'00 Premier League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion", "+ c # perform the curve fit popt, pcov = curve_fit(f, xs, avg)", "# set axis limits, 461 most games in an individual season axes =", "Premier League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Proportion of total points')", "jsds[season,i] = JSD(p,q) # increment the season counter season += 1 # compute", "pcov = curve_fit(f, xs, avg) # plot the individual JSD curves for i", "y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate anim = animation.FuncAnimation(fig, animate,", "the first 100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) # plot", "website: http://jakevdp.github.com # license: BSD # set up the figure fig = plt.figure()", "range(20)] plt.bar(xd,np.sort(finals[49,:20])) plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla()", "c): return a * np.exp(-b * x) + c # perform the curve", "curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update", "sns.set() # function to compute Jensen-Shannon divergence def JSD(p, q): r = 0.5", "each frame draws a distribution after one more game def animate(i): xd =", "get_football-data_data.py # build a list of filenames filenames = glob.glob('data/*.csv') # initialize an", "the average curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend() plt.savefig('convwithavg.png') # plot", "[i for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, #", "increment the game counter num_games += 1 # delete first row of the", "1 # delete first row of the table tables = tables[1:,:] # compute", "# initialize an array to hold JSD values # each row will 
contain", "add a legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs, f(xs, *popt),", "the season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team", "xs = np.array([i for i in range(len(avg))]) # define function for curve-fitting def", "game counter num_games = 1 # loop through the season data game by", "plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels plt.title('Convergence of league tables over time')", "= plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the first 100 games axes.set_xlim([0,100])", "and axis labels plt.title('Convergence of league tables over time') plt.xlabel('Number of games played')", "of games played') plt.ylabel('JSD with final table') # set axis limits, 461 most", "of final probability distributions # spain 16-17 xd = [i for i in", "table') # set axis limits, 461 most games in an individual season axes", "goal_diff < 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1", "animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the animation anim.save('basic_animation.mp4', fps=50, extra_args=['-vcodec',", "for the final league table p = tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team", "curve data for one season jsds = np.zeros((len(filenames),500)) # initialize an array to", "as the last tables[num_games,:] = tables[num_games-1,:] # get indices for the teams involved", "distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17 xd = [i for i", "plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close()", "add title, legend, etc. 
plt.title('\\'99-\\'00 Premier League points distribution over time') plt.xticks([],'') plt.xlabel('Ranked", "by <NAME>: # email: <EMAIL> # website: http://jakevdp.github.com # license: BSD # set", "-*- \"\"\" Created on Wed Mar 7 08:38:14 2018 @author: <NAME> compute how", "return line, # animate anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) #", "season counter season = 0 # list of columns needed from the data", "plt.clf() plt.cla() plt.close() # france 16-17 xd = [i for i in range(20)]", "then compute the JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q", "JSD curve avg = np.sum(jsds,axis=0)/110 # array of x values for the games", "that season teams = list(df.HomeTeam.unique()) # set up array for league tables #", "line, # animate anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save", "> 0: tables[num_games,home_idx] += 3 elif goal_diff < 0: tables[num_games,away_idx] += 3 else:", "league tables converge to the final distribution \"\"\" import pandas as pd import", "np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the season", "the unique team names for that season teams = list(df.HomeTeam.unique()) # set up", "\"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names for that season teams = list(df.HomeTeam.unique())", "matplotlib import animation from scipy.stats import entropy from scipy.optimize import curve_fit import seaborn", "tables[num_games-1,:] # get indices for the teams involved in thisgame home_idx = teams.index(row['HomeTeam'])", "# compute examples of final probability distributions # spain 16-17 xd = [i", "1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # 
england", "xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams')", "Created on Wed Mar 7 08:38:14 2018 @author: <NAME> compute how quickly soccer", "\"\"\" import pandas as pd import numpy as np import glob import matplotlib.pyplot", "as np import glob import matplotlib.pyplot as plt from matplotlib import animation from", "axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend()", "pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names for that season teams", "away goals goal_diff = row.FTHG - row.FTAG # update the league table based", "tables over time') plt.xlabel('Number of games played') plt.ylabel('JSD with final table') # set", "# array of x values for the games xs = np.array([i for i", "again axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend", "tables converge to the final distribution \"\"\" import pandas as pd import numpy", "= \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique team names for that season teams =", "# plot the average curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend() plt.savefig('convwithavg.png')", "jsds = np.zeros((len(filenames),500)) # initialize an array to hold final league tables finals", "\"\"\" Created on Wed Mar 7 08:38:14 2018 @author: <NAME> compute how quickly", "axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the first 100 games", "= np.zeros((len(filenames),500)) # initialize an array to hold final league tables finals =", "= plt.figure() # set up the axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12))", "in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels plt.title('Convergence of league tables", "matplotlib.pyplot as plt from 
matplotlib import animation from scipy.stats import entropy from scipy.optimize", "- row.FTAG # update the league table based on the result if goal_diff", "plt.title('Ligue 1 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() #", "(p + q) return 0.5 * (entropy(p, r) + entropy(q, r)) # the", "+= 1 tables[num_games,away_idx] += 1 # increment the game counter num_games += 1", "2018 @author: <NAME> compute how quickly soccer league tables converge to the final", "plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average JSD')", "# build a list of filenames filenames = glob.glob('data/*.csv') # initialize an array", "and cleaned # see get_football-data_data.py # build a list of filenames filenames =", "0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1 tables[num_games,away_idx] += 1 # increment", "= plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None') # add title, legend,", "= np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games = 1 # loop through the", "# see get_football-data_data.py # build a list of filenames filenames = glob.glob('data/*.csv') #", "plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla() plt.close() # generate animation # code below based", "range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment", "list of columns needed from the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file", "for file in filenames: # load the season data df = pd.read_csv(file,index_col='Date',encoding =", "initialize an array to hold JSD values # each row will contain the", "the figure fig = plt.figure() # set up the axes ax = plt.axes(xlim=(-1,", "time') plt.xticks([],'') plt.xlabel('Ranked 
teams') plt.ylabel('Proportion of total points') # draw the background def", "plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17 xd = [i for i in", "the season data game by game for idx,row in df.iterrows(): # initialize the", "league tables, convert to a distribution # and then compute the JSD for", "teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home goals - away goals goal_diff =", "2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france 16-17", "as pd import numpy as np import glob import matplotlib.pyplot as plt from", "for idx,row in df.iterrows(): # initialize the current league table to be the", "distribution \"\"\" import pandas as pd import numpy as np import glob import", "+ q) return 0.5 * (entropy(p, r) + entropy(q, r)) # the data", "the last tables[num_games,:] = tables[num_games-1,:] # get indices for the teams involved in", "draw the background def init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line,", "JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels", "plt.clf() plt.cla() plt.close() # italy 16-17 xd = [i for i in range(20)]", "utf-8 -*- \"\"\" Created on Wed Mar 7 08:38:14 2018 @author: <NAME> compute", "a team # each row corresponds to the league table after that number", "team # each row corresponds to the league table after that number of", "for i in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution')", "plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png') plt.clf() plt.cla() plt.close() # germany 16-17 xd =", "out again axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average JSD') # add a", 
"license: BSD # set up the figure fig = plt.figure() # set up", "plt.close() # germany 16-17 xd = [i for i in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga", "return a * np.exp(-b * x) + c # perform the curve fit", "teams') plt.ylabel('Proportion of total points') # draw the background def init(): line.set_data([],[]) plt.bar([i", "# update the league table based on the result if goal_diff > 0:", "the data files cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load the", "the legend plt.legend() plt.savefig('conv.png') plt.show() plt.clf() plt.cla() plt.close() # compute examples of final", "counter num_games = 1 # loop through the season data game by game", "column corresponds to a team # each row corresponds to the league table", "= row.FTHG - row.FTAG # update the league table based on the result", "a, b, c): return a * np.exp(-b * x) + c # perform", "plt.ylabel('Point distribution') plt.savefig('fr1617.png') plt.clf() plt.cla() plt.close() # england 16-17 xd = [i for", "import entropy from scipy.optimize import curve_fit import seaborn as sns sns.set() # function", "x) + c # perform the curve fit popt, pcov = curve_fit(f, xs,", "popt, pcov = curve_fit(f, xs, avg) # plot the individual JSD curves for", "Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy", "load the season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') # get the unique", "of games tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games = 1 #", "compute the JSD for i in range(len(tables[:,0])): #if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]): q =", "i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('en1617.png')", 
"08:38:14 2018 @author: <NAME> compute how quickly soccer league tables converge to the", "values # each row will contain the JSD curve data for one season", "games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) # plot the average curve", "running league tables, convert to a distribution # and then compute the JSD", "home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home goals - away goals", "the table tables = tables[1:,:] # compute the probability distribution for the final", "axis limits, 461 most games in an individual season axes = plt.gca() axes.set_xlim([0,461])", "sns sns.set() # function to compute Jensen-Shannon divergence def JSD(p, q): r =", "# zoom out again axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average JSD') #", "list(df.HomeTeam.unique()) # set up array for league tables # each column corresponds to", "plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17 xd", "i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels plt.title('Convergence of league", "compute examples of final probability distributions # spain 16-17 xd = [i for", "http://jakevdp.github.com # license: BSD # set up the figure fig = plt.figure() #", "avg) # plot the individual JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') #", "curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis labels plt.title('Convergence", "will contain the JSD curve data for one season jsds = np.zeros((len(filenames),500)) #", "plot the individual JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title", "the final league table p = tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in", "teams.index(row['AwayTeam']) # compute home 
goals - away goals goal_diff = row.FTHG - row.FTAG", "plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f,", "b, c): return a * np.exp(-b * x) + c # perform the", "== len(tables[idx,:]): q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the season counter", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Mar 7 08:38:14 2018", "each row will contain the JSD curve data for one season jsds =", "function to compute Jensen-Shannon divergence def JSD(p, q): r = 0.5 * (p", "the data files have already been acquired and cleaned # see get_football-data_data.py #", "import animation from scipy.stats import entropy from scipy.optimize import curve_fit import seaborn as", "goal_diff = row.FTHG - row.FTAG # update the league table based on the", "in filenames: # load the season data df = pd.read_csv(file,index_col='Date',encoding = \"ISO-8859-1\",usecols=cols).dropna(axis=0,how='any') #", "p = tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in enumerate(p): finals[season,idx] = team", "games xs = np.array([i for i in range(len(avg))]) # define function for curve-fitting", "email: <EMAIL> # website: http://jakevdp.github.com # license: BSD # set up the figure", "100 games axes.set_xlim([0,100]) plt.savefig('convbegin.png') # zoom out again axes.set_xlim([0,380]) # plot the average", "data for one season jsds = np.zeros((len(filenames),500)) # initialize an array to hold", "q = tables[i,:]/np.sum(tables[i,:]) jsds[season,i] = JSD(p,q) # increment the season counter season +=", "+= 3 elif goal_diff < 0: tables[num_games,away_idx] += 3 else: tables[num_games,home_idx] += 1", "curve avg = np.sum(jsds,axis=0)/110 # array of x values for the games xs", "curve-fitting def f(x, a, b, c): return a * np.exp(-b * x) +", "line, = ax.plot([], [],'o',linestyle='None') # add title, legend, etc. 
plt.title('\\'99-\\'00 Premier League points", "animate anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the animation", "legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit curve plt.plot(xs, f(xs, *popt), 'r-', label='fit:", "counter season += 1 # compute the average JSD curve avg = np.sum(jsds,axis=0)/110", "an array to hold JSD values # each row will contain the JSD", "tables[-1,:]/np.sum(tables[-1,:]) # store p for idx,team in enumerate(p): finals[season,idx] = team # for", "np.exp(-b * x) + c # perform the curve fit popt, pcov =", "i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate anim", "* (entropy(p, r) + entropy(q, r)) # the data files have already been", "labels plt.title('Convergence of league tables over time') plt.xlabel('Number of games played') plt.ylabel('JSD with", "# set up array for league tables # each column corresponds to a", "fit popt, pcov = curve_fit(f, xs, avg) # plot the individual JSD curves", "an individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') # zoom in on the", "list of filenames filenames = glob.glob('data/*.csv') # initialize an array to hold JSD", "= curve_fit(f, xs, avg) # plot the individual JSD curves for i in", "461 most games in an individual season axes = plt.gca() axes.set_xlim([0,461]) plt.savefig('allseasons.png') #", "# set up the figure fig = plt.figure() # set up the axes", "an array to hold final league tables finals = np.zeros((len(filenames),25)) # initialize a", "individual JSD curves for i in range(jsds.shape[0]): plt.plot(jsds[i,:],alpha=.3,color='gray') # add title and axis", "been acquired and cleaned # see get_football-data_data.py # build a list of filenames", "= animation.FuncAnimation(fig, animate, init_func=init, frames=340, interval=20, blit=True,repeat_delay=1000) # save the animation anim.save('basic_animation.mp4', 
fps=50,", "up the axes ax = plt.axes(xlim=(-1, 20), ylim=(0, .12)) line, = ax.plot([], [],'o',linestyle='None')", "values for the games xs = np.array([i for i in range(len(avg))]) # define", "= ['Date','HomeTeam','AwayTeam','FTHG','FTAG'] for file in filenames: # load the season data df =", "A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf() plt.cla() plt.close() # france", "based on the result if goal_diff > 0: tables[num_games,home_idx] += 3 elif goal_diff", "for i in range(20)] y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:])) line.set_data(xd, y) return line, # animate", "# increment the season counter season += 1 # compute the average JSD", "zoom out again axes.set_xlim([0,380]) # plot the average curve plt.plot(xs,avg,'b-',label='average JSD') # add", "the result if goal_diff > 0: tables[num_games,home_idx] += 3 elif goal_diff < 0:", "for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution')", "plt.cla() plt.close() # france 16-17 xd = [i for i in range(20)] plt.bar(xd,np.sort(finals[49,:20]))", "Wed Mar 7 08:38:14 2018 @author: <NAME> compute how quickly soccer league tables", "plot the average curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend() plt.savefig('convwithavg.png') #", "avg = np.sum(jsds,axis=0)/110 # array of x values for the games xs =", ".12)) line, = ax.plot([], [],'o',linestyle='None') # add title, legend, etc. 
plt.title('\\'99-\\'00 Premier League", "def init(): line.set_data([],[]) plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function,", "season counter season += 1 # compute the average JSD curve avg =", "curve plt.plot(xs,avg,'b-',label='average JSD') # add a legend plt.legend() plt.savefig('convwithavg.png') # plot the best-fit", "i in range(18)] plt.bar(xd,np.sort(finals[5,:18])) plt.title('La Liga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png')", "in range(20)] plt.bar(xd,np.sort(finals[27,:20])) plt.title('Serie A 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('it1617.png') plt.clf()", "[i for i in range(20)] plt.bar(xd,np.sort(finals[71,:20])) plt.title('Premier League 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point", "plt.clf() plt.cla() plt.close() # england 16-17 xd = [i for i in range(20)]", "for the games xs = np.array([i for i in range(len(avg))]) # define function", "finals[season,idx] = team # for each of the running league tables, convert to", "more game def animate(i): xd = [i for i in range(20)] y =", "of the table tables = tables[1:,:] # compute the probability distribution for the", "np.sum(jsds,axis=0)/110 # array of x values for the games xs = np.array([i for", "# add title and axis labels plt.title('Convergence of league tables over time') plt.xlabel('Number", "plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('sp1617.png') plt.clf() plt.cla() plt.close() # italy 16-17 xd =", "@author: <NAME> compute how quickly soccer league tables converge to the final distribution", "in enumerate(p): finals[season,idx] = team # for each of the running league tables,", "game by game for idx,row in df.iterrows(): # initialize the current league table", "np.zeros((len(filenames),25)) # initialize a season counter season = 0 # list of 
columns", "f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) # update the legend", "in range(18)] plt.bar(xd,np.sort(finals[93,:18])) plt.title('Bundesliga 2016-2017') plt.xticks([],'') plt.xlabel('Ranked teams') plt.ylabel('Point distribution') plt.savefig('ge1617.png') plt.clf() plt.cla()", "range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3) return line, # animation function, each frame draws a distribution after one", "tables = np.zeros((df.shape[0]+1,len(teams))) # initialize game counter num_games = 1 # loop through", "range(len(avg))]) # define function for curve-fitting def f(x, a, b, c): return a", "def f(x, a, b, c): return a * np.exp(-b * x) + c", "perform the curve fit popt, pcov = curve_fit(f, xs, avg) # plot the", "involved in thisgame home_idx = teams.index(row['HomeTeam']) away_idx = teams.index(row['AwayTeam']) # compute home goals", "= ax.plot([], [],'o',linestyle='None') # add title, legend, etc. plt.title('\\'99-\\'00 Premier League points distribution" ]
[]
[ "Address from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat", "lng = PointField(source='point.x', read_only=True) class Meta: model = Address fields = (\"street\", \"street_cp\",", "= PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class Meta:", "\"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model = Exploitation fields = (\"address\", \"pictures\", \"creator\")", "lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class", "import Exploitation, Address from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y',", "Exploitation, Address from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True)", "= Address fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class", "(\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model", "\"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model = Exploitation fields =", "fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class", "PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class Meta: model", "core.models import Exploitation, 
Address from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat =", "PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng", "\"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model = Exploitation", "Meta: model = Address fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\",", "\"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model =", "read_only=True) lng = PointField(source='point.x', read_only=True) class Meta: model = Address fields = (\"street\",", "from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat =", "\"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model = Exploitation fields", "\"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model = Exploitation fields = (\"address\", \"pictures\",", "# lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True)", "drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y',", "import PointField class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True)", "import serializers from core.models import Exploitation, Address from 
drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer):", "model = Address fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\")", "AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x',", "lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class Meta: model = Address", "from core.models import Exploitation, Address from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): # lat", "= PointField(source='point.x', read_only=True) class Meta: model = Address fields = (\"street\", \"street_cp\", \"city\",", "PointField(source='point.x', read_only=True) class Meta: model = Address fields = (\"street\", \"street_cp\", \"city\", \"province\",", "= (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta:", "from rest_framework import serializers from core.models import Exploitation, Address from drf_extra_fields.geo_fields import PointField", "\"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer): class Meta: model = Exploitation fields = (\"address\",", "rest_framework import serializers from core.models import Exploitation, Address from drf_extra_fields.geo_fields import PointField class", "= PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class Meta: model = Address fields", "PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class Meta: model = Address fields =", "read_only=True) class Meta: model = Address fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\",", "class Meta: model 
= Address fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\",", "class AddressSerializer(serializers.ModelSerializer): # lat = PointSerializer(source='point.y', read_only=True) lat = PointField(source='point.y', read_only=True) lng =", "read_only=True) lat = PointField(source='point.y', read_only=True) lng = PointField(source='point.x', read_only=True) class Meta: model =", "serializers from core.models import Exploitation, Address from drf_extra_fields.geo_fields import PointField class AddressSerializer(serializers.ModelSerializer): #", "<gh_stars>0 from rest_framework import serializers from core.models import Exploitation, Address from drf_extra_fields.geo_fields import", "Address fields = (\"street\", \"street_cp\", \"city\", \"province\", \"postal_code\", \"country\", \"lat\", \"lng\") class ExploitationSerializer(serializers.ModelSerializer):" ]
[ "float :return: La multiplicación de los dos números. :rtype: float \"\"\" return a", "La suma de los dos números. :rtype: float \"\"\" return a + b", "\"\"\" return a - b def multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica", "dos números. :rtype: float \"\"\" return a + b def resta(a: float, b:", "suma(a: float, b: float) -> float: \"\"\"Suma dos números. :param a: Primer número.", "del repositorio del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) ->", "ZeroDivisionError: Si el segundo número es cero. :return: La división de los dos", "dos números. :rtype: float \"\"\" return a - b def multiplicacion(a: float, b:", "return a / b except ZeroDivisionError: return \"No se puede dividir entre cero\"", "float, b: float) -> float: \"\"\"Suma dos números. :param a: Primer número. :a", "Segundo número. :b type: float :return: La multiplicación de los dos números. :rtype:", ":a type: float :param b: Segundo número. :b type: float :raises ZeroDivisionError: Si", "float :param b: Segundo número. :b type: float :return: La suma de los", "-> float: \"\"\"Resta dos números. :param a: Primer número. :a type: float :param", "float :param b: Segundo número. :b type: float :return: La resta de los", "-> float: \"\"\"Suma dos números. :param a: Primer número. :a type: float :param", "\"\"\" return a + b def resta(a: float, b: float) -> float: \"\"\"Resta", "float \"\"\" return a + b def resta(a: float, b: float) -> float:", "def division(a: float, b: float) -> float: \"\"\"Divide dos números. :param a: Primer", ":a type: float :param b: Segundo número. :b type: float :return: La suma", "\"\"\"Suma dos números. :param a: Primer número. :a type: float :param b: Segundo", "b: Segundo número. :b type: float :raises ZeroDivisionError: Si el segundo número es", ":rtype: float \"\"\" return a - b def multiplicacion(a: float, b: float) ->", "La división de los dos números. 
:rtype: float \"\"\" try: return a /", "Segundo número. :b type: float :return: La suma de los dos números. :rtype:", "a * b def division(a: float, b: float) -> float: \"\"\"Divide dos números.", ":b type: float :return: La resta de los dos números. :rtype: float \"\"\"", "\"\"\"Multiplica dos números. :param a: Primer número. :a type: float :param b: Segundo", "float, b: float) -> float: \"\"\"Divide dos números. :param a: Primer número. :a", "def multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica dos números. :param a: Primer", "float, b: float) -> float: \"\"\"Multiplica dos números. :param a: Primer número. :a", ":param b: Segundo número. :b type: float :return: La suma de los dos", "float, b: float) -> float: \"\"\"Resta dos números. :param a: Primer número. :a", ":a type: float :param b: Segundo número. :b type: float :return: La multiplicación", "b: float) -> float: \"\"\"Multiplica dos números. :param a: Primer número. :a type:", "float :return: La suma de los dos números. :rtype: float \"\"\" return a", "repositorio del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) -> float:", "float) -> float: \"\"\"Multiplica dos números. :param a: Primer número. :a type: float", "número. :a type: float :param b: Segundo número. :b type: float :return: La", "return a * b def division(a: float, b: float) -> float: \"\"\"Divide dos", "Si el segundo número es cero. :return: La división de los dos números.", "b def resta(a: float, b: float) -> float: \"\"\"Resta dos números. :param a:", ":rtype: float \"\"\" return a * b def division(a: float, b: float) ->", ":b type: float :return: La suma de los dos números. :rtype: float \"\"\"", "float: \"\"\"Resta dos números. :param a: Primer número. :a type: float :param b:", "https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) -> float: \"\"\"Suma dos números. 
:param a:", "La multiplicación de los dos números. :rtype: float \"\"\" return a * b", ":return: La multiplicación de los dos números. :rtype: float \"\"\" return a *", "type: float :raises ZeroDivisionError: Si el segundo número es cero. :return: La división", "\"\"\" try: return a / b except ZeroDivisionError: return \"No se puede dividir", "b: float) -> float: \"\"\"Resta dos números. :param a: Primer número. :a type:", "float :raises ZeroDivisionError: Si el segundo número es cero. :return: La división de", "float) -> float: \"\"\"Divide dos números. :param a: Primer número. :a type: float", "-> float: \"\"\"Multiplica dos números. :param a: Primer número. :a type: float :param", "b: Segundo número. :b type: float :return: La multiplicación de los dos números.", "dos números. :rtype: float \"\"\" try: return a / b except ZeroDivisionError: return", "de los dos números. :rtype: float \"\"\" return a - b def multiplicacion(a:", "Segundo número. :b type: float :raises ZeroDivisionError: Si el segundo número es cero.", "* b def division(a: float, b: float) -> float: \"\"\"Divide dos números. :param", "multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica dos números. :param a: Primer número.", "Primer número. :a type: float :param b: Segundo número. :b type: float :raises", "de los dos números. :rtype: float \"\"\" return a + b def resta(a:", "a: Primer número. :a type: float :param b: Segundo número. :b type: float", "b def multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica dos números. :param a:", "# https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) -> float: \"\"\"Suma dos números. :param", ":return: La división de los dos números. :rtype: float \"\"\" try: return a", "b def division(a: float, b: float) -> float: \"\"\"Divide dos números. :param a:", ":return: La resta de los dos números. 
:rtype: float \"\"\" return a -", "\"\"\"AyudaEnPython: https://www.facebook.com/groups/ayudapython \"\"\" # del repositorio del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a:", "resta(a: float, b: float) -> float: \"\"\"Resta dos números. :param a: Primer número.", "float \"\"\" return a - b def multiplicacion(a: float, b: float) -> float:", ":param b: Segundo número. :b type: float :return: La resta de los dos", "float) -> float: \"\"\"Suma dos números. :param a: Primer número. :a type: float", "float) -> float: \"\"\"Resta dos números. :param a: Primer número. :a type: float", ":param b: Segundo número. :b type: float :return: La multiplicación de los dos", ":a type: float :param b: Segundo número. :b type: float :return: La resta", "type: float :param b: Segundo número. :b type: float :return: La suma de", "es cero. :return: La división de los dos números. :rtype: float \"\"\" try:", "número. :b type: float :return: La suma de los dos números. :rtype: float", "Segundo número. :b type: float :return: La resta de los dos números. :rtype:", "def suma(a: float, b: float) -> float: \"\"\"Suma dos números. :param a: Primer", "division(a: float, b: float) -> float: \"\"\"Divide dos números. :param a: Primer número.", "float :param b: Segundo número. :b type: float :raises ZeroDivisionError: Si el segundo", "los dos números. :rtype: float \"\"\" return a - b def multiplicacion(a: float,", "float \"\"\" try: return a / b except ZeroDivisionError: return \"No se puede", "type: float :return: La resta de los dos números. :rtype: float \"\"\" return", ":raises ZeroDivisionError: Si el segundo número es cero. :return: La división de los", "números. :rtype: float \"\"\" try: return a / b except ZeroDivisionError: return \"No", "número. :b type: float :return: La multiplicación de los dos números. :rtype: float", "+ b def resta(a: float, b: float) -> float: \"\"\"Resta dos números. 
:param", "- b def multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica dos números. :param", "float :param b: Segundo número. :b type: float :return: La multiplicación de los", "número. :b type: float :raises ZeroDivisionError: Si el segundo número es cero. :return:", "los dos números. :rtype: float \"\"\" return a * b def division(a: float,", "b: Segundo número. :b type: float :return: La resta de los dos números.", "Primer número. :a type: float :param b: Segundo número. :b type: float :return:", "try: return a / b except ZeroDivisionError: return \"No se puede dividir entre", "de los dos números. :rtype: float \"\"\" return a * b def division(a:", "dos números. :param a: Primer número. :a type: float :param b: Segundo número.", "números. :rtype: float \"\"\" return a + b def resta(a: float, b: float)", "de los dos números. :rtype: float \"\"\" try: return a / b except", ":rtype: float \"\"\" return a + b def resta(a: float, b: float) ->", "https://www.facebook.com/groups/ayudapython \"\"\" # del repositorio del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float,", "grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) -> float: \"\"\"Suma dos", "# del repositorio del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float)", "números. :rtype: float \"\"\" return a - b def multiplicacion(a: float, b: float)", "\"\"\" # del repositorio del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b:", "números. :rtype: float \"\"\" return a * b def division(a: float, b: float)", "los dos números. :rtype: float \"\"\" try: return a / b except ZeroDivisionError:", "float: \"\"\"Suma dos números. :param a: Primer número. 
:a type: float :param b:", ":param a: Primer número. :a type: float :param b: Segundo número. :b type:", "dos números. :rtype: float \"\"\" return a * b def division(a: float, b:", "float: \"\"\"Divide dos números. :param a: Primer número. :a type: float :param b:", "\"\"\"Resta dos números. :param a: Primer número. :a type: float :param b: Segundo", "del grupo AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) -> float: \"\"\"Suma", ":b type: float :return: La multiplicación de los dos números. :rtype: float \"\"\"", "float \"\"\" return a * b def division(a: float, b: float) -> float:", "float: \"\"\"Multiplica dos números. :param a: Primer número. :a type: float :param b:", "número es cero. :return: La división de los dos números. :rtype: float \"\"\"", "resta de los dos números. :rtype: float \"\"\" return a - b def", "a - b def multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica dos números.", "type: float :param b: Segundo número. :b type: float :return: La resta de", "-> float: \"\"\"Divide dos números. :param a: Primer número. :a type: float :param", "a + b def resta(a: float, b: float) -> float: \"\"\"Resta dos números.", "número. :b type: float :return: La resta de los dos números. :rtype: float", "b: float) -> float: \"\"\"Suma dos números. :param a: Primer número. :a type:", "números. :param a: Primer número. :a type: float :param b: Segundo número. :b", "type: float :return: La suma de los dos números. :rtype: float \"\"\" return", ":rtype: float \"\"\" try: return a / b except ZeroDivisionError: return \"No se", "return a - b def multiplicacion(a: float, b: float) -> float: \"\"\"Multiplica dos", "b: float) -> float: \"\"\"Divide dos números. :param a: Primer número. :a type:", "\"\"\"Divide dos números. :param a: Primer número. :a type: float :param b: Segundo", ":return: La suma de los dos números. 
:rtype: float \"\"\" return a +", "multiplicación de los dos números. :rtype: float \"\"\" return a * b def", ":param b: Segundo número. :b type: float :raises ZeroDivisionError: Si el segundo número", "suma de los dos números. :rtype: float \"\"\" return a + b def", "el segundo número es cero. :return: La división de los dos números. :rtype:", "AyudaEnPython: # https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py def suma(a: float, b: float) -> float: \"\"\"Suma dos números.", "return a + b def resta(a: float, b: float) -> float: \"\"\"Resta dos", "type: float :param b: Segundo número. :b type: float :return: La multiplicación de", "type: float :param b: Segundo número. :b type: float :raises ZeroDivisionError: Si el", "división de los dos números. :rtype: float \"\"\" try: return a / b", "float :return: La resta de los dos números. :rtype: float \"\"\" return a", "type: float :return: La multiplicación de los dos números. :rtype: float \"\"\" return", "\"\"\" return a * b def division(a: float, b: float) -> float: \"\"\"Divide", "def resta(a: float, b: float) -> float: \"\"\"Resta dos números. :param a: Primer", "cero. :return: La división de los dos números. :rtype: float \"\"\" try: return", "número. :a type: float :param b: Segundo número. :b type: float :raises ZeroDivisionError:", ":b type: float :raises ZeroDivisionError: Si el segundo número es cero. :return: La", "los dos números. :rtype: float \"\"\" return a + b def resta(a: float,", "segundo número es cero. :return: La división de los dos números. :rtype: float", "La resta de los dos números. :rtype: float \"\"\" return a - b", "b: Segundo número. :b type: float :return: La suma de los dos números." ]
[ "\"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\":", "import CatsArmature __cats_bone_name_to_human_bone_name = { # Order by priority # Required bones \"Hips\":", "HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\":", "typing import Dict import bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature", "HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE,", "HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def", "shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\":", "<reponame>iCyP/VRM_IMPORTER_for_Blender2.8 import traceback from typing import Dict import bpy from ..common.human_bone import HumanBoneName", "toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: 
bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature", "\"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER,", ".cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { # Order by", "HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL,", "cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not", "as e: traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\") mapping = {} cats_name_to_original_name", "..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name =", "\"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, }", "HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL,", "\"IndexFinger3_L\": 
HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\":", "\"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\":", "\"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right", "wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\":", "HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL,", "\"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\":", "HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature =", "\"Thumb1_R\": 
HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\":", "HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT,", "for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name: continue mapping[original_name]", "ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional", "cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human Bone Name", "\"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left", "try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\")", "knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right", "e: traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\") mapping = {} cats_name_to_original_name =", "bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\":", "cats_armature.cats_name_to_original_name() for cats_name, human_name in 
__cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name: continue", "{} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if", "\"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left", "\"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\":", "# Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD,", "CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human Bone Name Auto Detection:", "\"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\":", "\"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\":", "HumanBoneName.RIGHT_THUMB_PROXIMAL, 
\"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL,", "import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = {", "from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name", "\"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\":", "{e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name", "\"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left", "HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL,", "from typing import Dict import bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import", "HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": 
HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES,", "\"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]:", "Bone Name Auto Detection: {e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name,", "print(f\"Human Bone Name Auto Detection: {e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for", "\"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES,", "HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE,", "\"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\":", "Auto Detection: {e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in", "Name Auto Detection: {e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name", "\"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: 
cats_armature = CatsArmature.create(armature)", "\"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\":", "HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL,", "\"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right", "HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG,", "HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL,", "Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right", "= { # Order by priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE,", "HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND,", "\"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left 
ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE,", "priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\":", "# Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER,", "traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name()", "import bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import", "__cats_bone_name_to_human_bone_name = { # Order by priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\":", "HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM,", "mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name =", "HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL,", "Order by priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\":", "\"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND,", "HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, 
\"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones", "\"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) ->", "HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left", "HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\":", "arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\":", "HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE,", "HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE,", "leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE,", "HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\":", 
"HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG,", "HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT,", "\"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\":", "elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\":", "Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\":", "= {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name)", "\"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature:", "HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\":", "\"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left", "\"Left 
wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left", "cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name: continue mapping[original_name] =", "\"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\":", "leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\":", "HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL,", "\"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\":", "} def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except", "FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { # Order by priority #", "\"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": 
HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right", "= cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name:", "\"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\":", "bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e:", "knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\":", "HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE,", "def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception", "\"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM,", "create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except 
Exception as", "Dict import bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature", "HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL,", "-> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc()", "\"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right", "\"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\":", "Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human", "\"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE,", "\"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": 
HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\":", "\"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\":", "HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE,", "HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL,", "= CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human Bone Name Auto", "HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE,", "HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try: 
FixArmature.create_cats_bone_name_mapping(cats_armature)", "HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM,", "# Order by priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST,", "in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name: continue mapping[original_name] = human_name return", "shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL, \"Thumb1_R\": HumanBoneName.RIGHT_THUMB_INTERMEDIATE, \"Thumb2_R\": HumanBoneName.RIGHT_THUMB_DISTAL, \"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL,", "HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature)", "by priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK,", "HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\":", "HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\":", "HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": 
HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL,", "HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE,", "from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { # Order by priority # Required", "HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { #", "HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG,", "\"Right ankle\": HumanBoneName.RIGHT_FOOT, \"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, #", "\"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\":", "HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG,", "\"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": 
HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\":", "HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM,", "\"Thumb0_L\": HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\":", "import Dict import bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from", "elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\": HumanBoneName.RIGHT_LOWER_LEG, \"Right ankle\":", "bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": HumanBoneName.RIGHT_SHOULDER, \"Left shoulder\": HumanBoneName.LEFT_SHOULDER, \"Thumb0_R\": HumanBoneName.RIGHT_THUMB_PROXIMAL,", "\"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\":", "HumanBoneName.LEFT_THUMB_PROXIMAL, \"Thumb1_L\": HumanBoneName.LEFT_THUMB_INTERMEDIATE, \"Thumb2_L\": HumanBoneName.LEFT_THUMB_DISTAL, \"IndexFinger1_R\": HumanBoneName.RIGHT_INDEX_PROXIMAL, \"IndexFinger2_R\": HumanBoneName.RIGHT_INDEX_INTERMEDIATE, \"IndexFinger3_R\": 
HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL,", "arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\": HumanBoneName.RIGHT_UPPER_LEG, \"Right knee\":", "\"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\":", "{ # Order by priority # Required bones \"Hips\": HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\":", "\"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\":", "\"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\":", "FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\") mapping", "human_name in __cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name: continue mapping[original_name] = human_name", "\"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right elbow\": HumanBoneName.RIGHT_LOWER_ARM, \"Right", "ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\": HumanBoneName.RIGHT_EYE, \"Eye_L\": HumanBoneName.LEFT_EYE, \"Right shoulder\": 
HumanBoneName.RIGHT_SHOULDER, \"Left", "Detection: {e}\") mapping = {} cats_name_to_original_name = cats_armature.cats_name_to_original_name() for cats_name, human_name in __cats_bone_name_to_human_bone_name.items():", "HumanBoneName]: cats_armature = CatsArmature.create(armature) try: FixArmature.create_cats_bone_name_mapping(cats_armature) except Exception as e: traceback.print_exc() print(f\"Human Bone", "CatsArmature __cats_bone_name_to_human_bone_name = { # Order by priority # Required bones \"Hips\": HumanBoneName.HIPS,", "\"Left leg\": HumanBoneName.LEFT_UPPER_LEG, \"Left knee\": HumanBoneName.LEFT_LOWER_LEG, \"Left ankle\": HumanBoneName.LEFT_FOOT, # Optional bones \"Eye_R\":", "from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { # Order", "HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left", "\"IndexFinger3_R\": HumanBoneName.RIGHT_INDEX_DISTAL, \"IndexFinger1_L\": HumanBoneName.LEFT_INDEX_PROXIMAL, \"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\":", "\"IndexFinger2_L\": HumanBoneName.LEFT_INDEX_INTERMEDIATE, \"IndexFinger3_L\": HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\":", "\"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": 
HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\":", "HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\": HumanBoneName.LEFT_LITTLE_DISTAL,", "\"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE, \"LittleFinger3_L\":", "traceback from typing import Dict import bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature", "HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\": HumanBoneName.LEFT_LITTLE_PROXIMAL, \"LittleFinger2_L\": HumanBoneName.LEFT_LITTLE_INTERMEDIATE,", "wrist\": HumanBoneName.RIGHT_HAND, \"Left arm\": HumanBoneName.LEFT_UPPER_ARM, \"Left elbow\": HumanBoneName.LEFT_LOWER_ARM, \"Left wrist\": HumanBoneName.LEFT_HAND, \"Right leg\":", "HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, 
\"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL,", ".cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { # Order by priority # Required bones", "HumanBoneName.LEFT_LITTLE_DISTAL, \"Right toe\": HumanBoneName.RIGHT_TOES, \"Left toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str,", "import traceback from typing import Dict import bpy from ..common.human_bone import HumanBoneName from", "HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL,", "except Exception as e: traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\") mapping =", "HumanBoneName.HIPS, \"Spine\": HumanBoneName.SPINE, \"Chest\": HumanBoneName.CHEST, \"Neck\": HumanBoneName.NECK, \"Head\": HumanBoneName.HEAD, \"Right arm\": HumanBoneName.RIGHT_UPPER_ARM, \"Right", "import FixArmature from .cats_blender_plugin_armature import CatsArmature __cats_bone_name_to_human_bone_name = { # Order by priority", "toe\": HumanBoneName.LEFT_TOES, } def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]: cats_armature = CatsArmature.create(armature) try:", "HumanBoneName.LEFT_INDEX_DISTAL, \"MiddleFinger1_R\": HumanBoneName.RIGHT_MIDDLE_PROXIMAL, \"MiddleFinger2_R\": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_R\": HumanBoneName.RIGHT_MIDDLE_DISTAL, \"MiddleFinger1_L\": HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL,", "\"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL, \"RingFinger2_L\": 
HumanBoneName.LEFT_RING_INTERMEDIATE, \"RingFinger3_L\": HumanBoneName.LEFT_RING_DISTAL, \"LittleFinger1_R\": HumanBoneName.RIGHT_LITTLE_PROXIMAL, \"LittleFinger2_R\": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE, \"LittleFinger3_R\": HumanBoneName.RIGHT_LITTLE_DISTAL, \"LittleFinger1_L\":", "Exception as e: traceback.print_exc() print(f\"Human Bone Name Auto Detection: {e}\") mapping = {}", "__cats_bone_name_to_human_bone_name.items(): original_name = cats_name_to_original_name.get(cats_name) if not original_name: continue mapping[original_name] = human_name return mapping", "bpy from ..common.human_bone import HumanBoneName from .cats_blender_plugin.tools.armature import FixArmature from .cats_blender_plugin_armature import CatsArmature", "HumanBoneName.LEFT_MIDDLE_PROXIMAL, \"MiddleFinger2_L\": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE, \"MiddleFinger3_L\": HumanBoneName.LEFT_MIDDLE_DISTAL, \"RingFinger1_R\": HumanBoneName.RIGHT_RING_PROXIMAL, \"RingFinger2_R\": HumanBoneName.RIGHT_RING_INTERMEDIATE, \"RingFinger3_R\": HumanBoneName.RIGHT_RING_DISTAL, \"RingFinger1_L\": HumanBoneName.LEFT_RING_PROXIMAL," ]
[ "20 if event.key == pygame.K_DOWN: Player2.y += 20 if event.type == pygame.KEYUP: if", "pygame.K_UP: Player2.y -= 20 if event.key == pygame.K_DOWN: Player2.y += 20 if event.type", "== pygame.QUIT: running = False if event.type == pygame.KEYDOWN: if event.key == pygame.K_w:", "data import paddle from data import ball pygame.init() winsize = [900, 550] win", "running = True Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball =", "Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy if Ball.x > 486 or Ball.x", "from data import ball pygame.init() winsize = [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong')", "20 if event.key == pygame.K_UP: Player2.y -= 20 if event.key == pygame.K_DOWN: Player2.y", "Player1.y += 20 if event.key == pygame.K_UP: Player2.y -= 20 if event.key ==", "Player2.y += 20 if event.type == pygame.KEYUP: if event.key == pygame.K_w: Player1.y -=", "*= -1 for event in pygame.event.get(): if event.type == pygame.QUIT: running = False", "Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy if Ball.x > 486 or", "from data import paddle from data import ball pygame.init() winsize = [900, 550]", "running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy if Ball.x", "if event.key == pygame.K_s: Player1.y += 20 if event.key == pygame.K_UP: Player2.y -=", "True Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270, 400)", "pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball", "pygame from data import paddle from data import ball pygame.init() winsize = [900,", "Ball.y > 836 or Ball.y < 0: ballspeedy *= -1 for event in", "Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy if Ball.x > 486", "if event.key == pygame.K_UP: Player2.y -= 20 if event.key == pygame.K_DOWN: 
Player2.y +=", "paddle from data import ball pygame.init() winsize = [900, 550] win = pygame.display.set_mode(winsize)", "= paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx =", "ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy = 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win)", "paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy = 0.5 while", "while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy if", "if event.type == pygame.QUIT: running = False if event.type == pygame.KEYDOWN: if event.key", "0: ballspeedy *= -1 for event in pygame.event.get(): if event.type == pygame.QUIT: running", "if Ball.y > 836 or Ball.y < 0: ballspeedy *= -1 for event", "if Ball.x > 486 or Ball.x < 0: ballspeedx *= -1 if Ball.y", "225) Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy", "= [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running =", "if event.type == pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -= 20 if event.key", "Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy =", "Ball.x > 486 or Ball.x < 0: ballspeedx *= -1 if Ball.y >", "Ball.x < 0: ballspeedx *= -1 if Ball.y > 836 or Ball.y <", "event.type == pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -= 20 if event.key ==", "pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70, 225)", "ballspeedy *= -1 for event in pygame.event.get(): if event.type == pygame.QUIT: running =", "225) Ball = ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy = 0.5 while running:", "[900, 550] win = pygame.display.set_mode(winsize) 
pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True", "win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 =", "running = False if event.type == pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -=", "+= 20 if event.key == pygame.K_UP: Player2.y -= 20 if event.key == pygame.K_DOWN:", "Ball = ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy = 0.5 while running: win.fill(0)", "event.key == pygame.K_s: Player1.y += 20 if event.key == pygame.K_UP: Player2.y -= 20", "or Ball.y < 0: ballspeedy *= -1 for event in pygame.event.get(): if event.type", "-1 for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if", "== pygame.K_UP: Player2.y -= 20 if event.key == pygame.K_DOWN: Player2.y += 20 if", "+= ballspeedy if Ball.x > 486 or Ball.x < 0: ballspeedx *= -1", "if event.key == pygame.K_DOWN: Player2.y += 20 if event.type == pygame.KEYUP: if event.key", "icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70, 225) Player2 =", "+= 20 if event.type == pygame.KEYUP: if event.key == pygame.K_w: Player1.y -= 20", "event.key == pygame.K_w: Player1.y -= 20 if event.key == pygame.K_s: Player1.y += 20", "> 836 or Ball.y < 0: ballspeedy *= -1 for event in pygame.event.get():", "ballspeedy = 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y", "+= ballspeedx Ball.y += ballspeedy if Ball.x > 486 or Ball.x < 0:", "836 or Ball.y < 0: ballspeedy *= -1 for event in pygame.event.get(): if", "event.type == pygame.QUIT: running = False if event.type == pygame.KEYDOWN: if event.key ==", "== pygame.K_DOWN: Player2.y += 20 if event.type == pygame.KEYUP: if event.key == pygame.K_w:", "== pygame.K_s: Player1.y += 20 if event.key == pygame.K_UP: Player2.y -= 20 if", 
"win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy if Ball.x >", "pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -= 20 if event.key == pygame.K_s: Player1.y", "= False if event.type == pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -= 20", "Ball.y < 0: ballspeedy *= -1 for event in pygame.event.get(): if event.type ==", "< 0: ballspeedx *= -1 if Ball.y > 836 or Ball.y < 0:", "== pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -= 20 if event.key == pygame.K_s:", "if event.key == pygame.K_w: Player1.y -= 20 if event.key == pygame.K_s: Player1.y +=", "< 0: ballspeedy *= -1 for event in pygame.event.get(): if event.type == pygame.QUIT:", "event.key == pygame.K_DOWN: Player2.y += 20 if event.type == pygame.KEYUP: if event.key ==", "400) ballspeedx = 0.5 ballspeedy = 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win)", "0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y += ballspeedy", "-1 if Ball.y > 836 or Ball.y < 0: ballspeedy *= -1 for", "Player2.y -= 20 if event.key == pygame.K_DOWN: Player2.y += 20 if event.type ==", "0: ballspeedx *= -1 if Ball.y > 836 or Ball.y < 0: ballspeedy", "pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70, 225) Player2", "-= 20 if event.key == pygame.K_DOWN: Player2.y += 20 if event.type == pygame.KEYUP:", "20 if event.key == pygame.K_s: Player1.y += 20 if event.key == pygame.K_UP: Player2.y", "Ball.y += ballspeedy if Ball.x > 486 or Ball.x < 0: ballspeedx *=", "event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type ==", "data import ball pygame.init() winsize = [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon", "import pygame from data import paddle from data import ball pygame.init() 
winsize =", "= 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx Ball.y +=", "ballspeedx *= -1 if Ball.y > 836 or Ball.y < 0: ballspeedy *=", "Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx", "paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx = 0.5", "pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225)", "ballspeedy if Ball.x > 486 or Ball.x < 0: ballspeedx *= -1 if", "== pygame.K_w: Player1.y -= 20 if event.key == pygame.K_s: Player1.y += 20 if", "Player1.y -= 20 if event.key == pygame.K_s: Player1.y += 20 if event.key ==", "pygame.init() winsize = [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon)", "for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type", "-= 20 if event.key == pygame.K_s: Player1.y += 20 if event.key == pygame.K_UP:", "pygame.K_s: Player1.y += 20 if event.key == pygame.K_UP: Player2.y -= 20 if event.key", "Ball.x += ballspeedx Ball.y += ballspeedy if Ball.x > 486 or Ball.x <", "0.5 ballspeedy = 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x += ballspeedx", "486 or Ball.x < 0: ballspeedx *= -1 if Ball.y > 836 or", "in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == pygame.KEYDOWN:", "= paddle.Paddle(800, 225) Ball = ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy = 0.5", "ball pygame.init() winsize = [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png')", "event.key == pygame.K_UP: Player2.y -= 20 if event.key == pygame.K_DOWN: Player2.y += 20", "20 if event.type == pygame.KEYUP: if event.key == 
pygame.K_w: Player1.y -= 20 pygame.display.flip()", "= pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800,", "pygame.QUIT: running = False if event.type == pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y", "*= -1 if Ball.y > 836 or Ball.y < 0: ballspeedy *= -1", "= pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1 = paddle.Paddle(70,", "import paddle from data import ball pygame.init() winsize = [900, 550] win =", "= True Player1 = paddle.Paddle(70, 225) Player2 = paddle.Paddle(800, 225) Ball = ball.Ball(270,", "= ball.Ball(270, 400) ballspeedx = 0.5 ballspeedy = 0.5 while running: win.fill(0) Player1.render(win)", "pygame.K_DOWN: Player2.y += 20 if event.type == pygame.KEYUP: if event.key == pygame.K_w: Player1.y", "<gh_stars>0 import pygame from data import paddle from data import ball pygame.init() winsize", "winsize = [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running", "= 0.5 ballspeedy = 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x +=", "False if event.type == pygame.KEYDOWN: if event.key == pygame.K_w: Player1.y -= 20 if", "550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon = pygame.image.load('gfx/icon.png') pygame.display.set_icon(icon) running = True Player1", "pygame.K_w: Player1.y -= 20 if event.key == pygame.K_s: Player1.y += 20 if event.key", "> 486 or Ball.x < 0: ballspeedx *= -1 if Ball.y > 836", "ballspeedx = 0.5 ballspeedy = 0.5 while running: win.fill(0) Player1.render(win) Player2.render(win) Ball.render(win) Ball.x", "ballspeedx Ball.y += ballspeedy if Ball.x > 486 or Ball.x < 0: ballspeedx", "pygame.event.get(): if event.type == pygame.QUIT: 
running = False if event.type == pygame.KEYDOWN: if", "import ball pygame.init() winsize = [900, 550] win = pygame.display.set_mode(winsize) pygame.display.set_caption('Pong') icon =", "or Ball.x < 0: ballspeedx *= -1 if Ball.y > 836 or Ball.y" ]
[ "3, 6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\":", "{ \"a\": 1, \"b\": [2, 3, 6], \"d\": False } big_dict = {", "\"d\": False } big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\":", "[2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1,", "\"a\": 1, \"b\": [2, 3, 6], \"d\": False } big_dict = { \"aa\":", "1, \"b\": [2, 6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\":", "<filename>py/scrap_heroes/test/test_utils.py<gh_stars>0 from __future__ import unicode_literals from django.test import TestCase from ..utils import partial_dict_equals", "test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\": [2, 6], \"d\": False } big_dict", "import TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = {", "from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1,", "\"b\": [2, 6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\": 2,", "import partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\": [2,", "\"a\": 1, \"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict =", "TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = { \"a\":", "= { \"a\": 1, \"b\": [2, 6], \"d\": False } big_dict = {", "big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3],", "123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def", "def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\": [2, 6], \"d\": False }", "1, \"b\": [2, 6, 3], } 
self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = {", "self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\": [2, 6], \"d\":", "small_dict = { \"a\": 1, \"b\": [2, 3, 6], \"d\": False } big_dict", "False } big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2,", "\"a\": 1, \"b\": [2, 6], \"d\": False } big_dict = { \"aa\": 123,", "[2, 6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\":", "from django.test import TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict", "small_dict = { \"a\": 1, \"b\": [2, 6], \"d\": False } big_dict =", "{ \"a\": 1, \"b\": [2, 6], \"d\": False } big_dict = { \"aa\":", "} big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6,", "partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\": [2, 3,", "test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\": [2, 3, 6], \"d\": False }", "6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\": 2, \"a\": 1,", "3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\": [2,", "} self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\": [2, 6],", "\"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], } self.assertFalse(partial_dict_equals(small_dict, big_dict))", "from __future__ import unicode_literals from django.test import TestCase from ..utils import partial_dict_equals class", "= { \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], }", "django.test import TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def 
test_partial_dict_equals__regular_case(self): small_dict =", "= { \"a\": 1, \"b\": [2, 3, 6], \"d\": False } big_dict =", "import unicode_literals from django.test import TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def", "\"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self):", "2, \"a\": 1, \"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict", "\"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\":", "big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\": [2, 6], \"d\": False", "\"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict))", "\"b\": [2, 3, 6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\":", "def test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\": [2, 3, 6], \"d\": False", "[2, 3, 6], \"d\": False } big_dict = { \"aa\": 123, \"fdsg\": 2,", "{ \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], } self.assertFalse(partial_dict_equals(small_dict,", "{ \"aa\": 123, \"fdsg\": 2, \"a\": 1, \"b\": [2, 6, 3], } self.assertTrue(partial_dict_equals(small_dict,", "6, 3], } self.assertTrue(partial_dict_equals(small_dict, big_dict)) def test_partial_dict_equals__irregular_case(self): small_dict = { \"a\": 1, \"b\":", "__future__ import unicode_literals from django.test import TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase):", "DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\": [2, 3, 6], \"d\":", "1, \"b\": [2, 3, 6], \"d\": False } big_dict = { \"aa\": 123,", "..utils import partial_dict_equals class DictUtilsTest(TestCase): def 
test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\":", "unicode_literals from django.test import TestCase from ..utils import partial_dict_equals class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self):", "class DictUtilsTest(TestCase): def test_partial_dict_equals__regular_case(self): small_dict = { \"a\": 1, \"b\": [2, 3, 6]," ]
[ "from rdflib.namespace import RDF, OWL from client.model import Klass def test_basic_rdf(): r1 =", "import RDF, OWL from client.model import Klass def test_basic_rdf(): r1 = Klass() rdf", "rdflib.namespace import RDF, OWL from client.model import Klass def test_basic_rdf(): r1 = Klass()", "<filename>tests/test_klass.py from rdflib.namespace import RDF, OWL from client.model import Klass def test_basic_rdf(): r1", "client.model import Klass def test_basic_rdf(): r1 = Klass() rdf = r1.to_graph() assert (None,", "import Klass def test_basic_rdf(): r1 = Klass() rdf = r1.to_graph() assert (None, RDF.type,", "RDF, OWL from client.model import Klass def test_basic_rdf(): r1 = Klass() rdf =", "Klass def test_basic_rdf(): r1 = Klass() rdf = r1.to_graph() assert (None, RDF.type, OWL.Class)", "test_basic_rdf(): r1 = Klass() rdf = r1.to_graph() assert (None, RDF.type, OWL.Class) in rdf", "from client.model import Klass def test_basic_rdf(): r1 = Klass() rdf = r1.to_graph() assert", "def test_basic_rdf(): r1 = Klass() rdf = r1.to_graph() assert (None, RDF.type, OWL.Class) in", "OWL from client.model import Klass def test_basic_rdf(): r1 = Klass() rdf = r1.to_graph()" ]
[ "ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0)", "flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args,", "0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) ->", "= get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node)", "visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def", "import ast from flake8_plugin_utils import Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors import", "FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def", "Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call,", "flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, )", "args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert)", "if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self, node: ast.Call) -> None: if is_fail_call(node): self._check_fail_call(node)", "if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) -> None:", 
"is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if", "_check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument =", "for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self, node: ast.Call) -> None: if", "import Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils", "flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node:", "node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg',", "AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]):", "from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self,", "ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self, node:", "ast from flake8_plugin_utils import Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse,", "get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def", "<reponame>kianmeng/flake8-pytest-style<gh_stars>100-1000 import ast from flake8_plugin_utils import Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors", "from 
flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import (", "( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) ->", "def _check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument", "or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\"", "class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args =", "from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant,", "node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self,", "for PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not msg_argument or", "import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call)", "= args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node:", "msg_argument = args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self,", "self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test):", "-> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) 
def visit_Call(self, node: ast.Call)", "get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) -> None:", "flake8_plugin_utils import Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from", "\"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self, node: ast.Call) -> None:", "PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self, node: ast.Call) -> None: if is_fail_call(node):", "import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string, is_fail_call, is_falsy_constant, ) class", "is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\"", "\"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not msg_argument", "args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage,", "not msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks", "msg_argument or is_empty_string(msg_argument): self.error_from_node(FailWithoutMessage, node) def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for", "from flake8_plugin_utils import Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage", "is_empty_string, is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks", ") class FailVisitor(Visitor[Config]): def 
_check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args", "PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not msg_argument or is_empty_string(msg_argument):", "-> None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if", "None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node) def visit_Call(self, node: ast.Call) ->", "Visitor from flake8_pytest_style.config import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import", "None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node) msg_argument = args.get_argument('msg', 0) if not", "def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse, node)", "node) def visit_Assert(self, node: ast.Assert) -> None: \"\"\"Checks for PT015.\"\"\" if is_falsy_constant(node.test): self.error_from_node(AssertAlwaysFalse,", "is_fail_call, is_falsy_constant, ) class FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for", "import Config from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage from flake8_pytest_style.utils import ( get_simple_call_args, is_empty_string,", "FailVisitor(Visitor[Config]): def _check_fail_call(self, node: ast.Call) -> None: \"\"\"Checks for PT016.\"\"\" args = get_simple_call_args(node)" ]
[ "chats print('winding up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2,", "User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>')", "class Command(BaseCommand): def _create(self): # creating user and super-user User.objects.all().delete() print('Creating admin..') admin", "print('winding up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3,", "u2, u3, g) # add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) c3.messages.add(m3) print('done.')", "chats Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 =", "users to chats print('winding up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1, g)", "other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random',", "u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g) # add msgs", "Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz')", "BaseCommand from django.contrib.auth.models import User from django.utils.crypto import get_random_string from chat_app.models import *", "u1, u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g) # add", "User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating chats..')", "User from 
django.utils.crypto import get_random_string from chat_app.models import * class Command(BaseCommand): def _create(self):", "add users to chats print('winding up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1,", "* class Command(BaseCommand): def _create(self): # creating user and super-user User.objects.all().delete() print('Creating admin..')", "add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) c3.messages.add(m3) print('done.') def handle(self, *args, **kwargs):", "= User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1',", "# add users to chats print('winding up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin,", "create msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin,", "description='Chat Room 3') print('done.') # create msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin,", "= User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>') print('Creating Ghost user..') g", "print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..') u1 =", "print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3", "u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete()", "m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 = 
Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add", "from django.contrib.auth.models import User from django.utils.crypto import get_random_string from chat_app.models import * class", "c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room", "print('done.') # create msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2", "creating user and super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.')", "description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') # create", "import get_random_string from chat_app.models import * class Command(BaseCommand): def _create(self): # creating user", "u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g) # add msgs in", "<reponame>PS-Division-BITS/Chat from django.core.management.base import BaseCommand from django.contrib.auth.models import User from django.utils.crypto import get_random_string", "messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 =", "up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g)", "c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') # create msgs Message.objects.all().delete() print('Creating", "user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8))", "password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>') print('Creating Ghost 
user..') g = User.objects.create_user(username='Ghost',", "content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users to chats", "Command(BaseCommand): def _create(self): # creating user and super-user User.objects.all().delete() print('Creating admin..') admin =", "chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat", "Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3", "= Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') # create msgs Message.objects.all().delete() print('Creating messages..')", "print('done.') print('Creating other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3", "= Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users to chats print('winding up..') c1.participants.add(admin,", "g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g) # add msgs in chats", "Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2',", "c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g) # add msgs in chats c1.messages.add(m1,", "Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users to", "Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') # create msgs Message.objects.all().delete() print('Creating messages..') m1", "3') print('done.') # create msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello 
World!')", "import * class Command(BaseCommand): def _create(self): # creating user and super-user User.objects.all().delete() print('Creating", "from django.core.management.base import BaseCommand from django.contrib.auth.models import User from django.utils.crypto import get_random_string from", "User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main',", "m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users to chats print('winding up..')", "= User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') #", "Room 3') print('done.') # create msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello", "# create msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2 =", "from django.utils.crypto import get_random_string from chat_app.models import * class Command(BaseCommand): def _create(self): #", "Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..') u1 = User.objects.create_user(username='Ritik',", "print('done.') # add users to chats print('winding up..') c1.participants.add(admin, u1, u2, u3, g)", "user and super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username:", "= Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3')", "import User from 
django.utils.crypto import get_random_string from chat_app.models import * class Command(BaseCommand): def", "import BaseCommand from django.contrib.auth.models import User from django.utils.crypto import get_random_string from chat_app.models import", "_create(self): # creating user and super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>')", "u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.')", "Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar')", "admin password: <PASSWORD>') print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other", "content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.')", "msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) c3.messages.add(m3) print('done.') def handle(self, *args, **kwargs): self._create()", "c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin, u2, u3, g) #", "name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 =", "users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8))", "and super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', 
password='<PASSWORD>') print('admin created.') print('username: admin", "Buzz') print('done.') # add users to chats print('winding up..') c1.participants.add(admin, u1, u2, u3,", "= Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz", "print('Creating other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 =", "def _create(self): # creating user and super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin',", "msgs Message.objects.all().delete() print('Creating messages..') m1 = Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo", "print('done.') # create chats Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat", "password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main", "c3.participants.add(admin, u2, u3, g) # add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) c3.messages.add(m3)", "password=get_random_string(8),) print('done.') print('Creating other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8))", "World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') #", "Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.')", "# add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) 
c3.messages.add(m3) print('done.') def handle(self, *args,", "django.utils.crypto import get_random_string from chat_app.models import * class Command(BaseCommand): def _create(self): # creating", "print('admin created.') print('username: admin password: <PASSWORD>') print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),)", "Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') # create msgs", "g) # add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) c3.messages.add(m3) print('done.') def handle(self,", "admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>') print('Creating Ghost user..')", "super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password:", "chat_app.models import * class Command(BaseCommand): def _create(self): # creating user and super-user User.objects.all().delete()", "# create chats Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room')", "<PASSWORD>') print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..') u1", "Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users to chats print('winding", "created.') print('username: admin password: <PASSWORD>') print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.')", "u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating chats..') c1 =", "User.objects.create_user(username='Ritik', 
password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create", "m1 = Message.objects.create(sender=admin, content='Hello World!') m2 = Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin,", "Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3',", "User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>') print('Creating Ghost user..') g =", "django.core.management.base import BaseCommand from django.contrib.auth.models import User from django.utils.crypto import get_random_string from chat_app.models", "from chat_app.models import * class Command(BaseCommand): def _create(self): # creating user and super-user", "print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2',", "u3, g) # add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2) c3.messages.add(m3) print('done.') def", "description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3',", "admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>') print('Creating Ghost", "password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats", "u1, g) c3.participants.add(admin, u2, u3, g) # add msgs in chats c1.messages.add(m1, m2)", "= User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating 
other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 =", "g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2", "Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users to chats print('winding up..') c1.participants.add(admin, u1,", "password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating chats..') c1", "= User.objects.create_user(username='Chetan', password=get_random_string(8)) u3 = User.objects.create_user(username='Random', password=get_random_string(8)) print('done.') # create chats Chat.objects.all().delete() print('Creating", "content='Wazz Buzz') print('done.') # add users to chats print('winding up..') c1.participants.add(admin, u1, u2,", "g) c3.participants.add(admin, u2, u3, g) # add msgs in chats c1.messages.add(m1, m2) c2.messages.add(m2)", "print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin created.') print('username: admin password: <PASSWORD>') print('Creating", "c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room", "name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') #", "# creating user and super-user User.objects.all().delete() print('Creating admin..') admin = User.objects.create_superuser(username='admin', password='<PASSWORD>') print('admin", "print('username: admin password: <PASSWORD>') print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating", "create chats 
Chat.objects.all().delete() print('Creating chats..') c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2", "password: <PASSWORD>') print('Creating Ghost user..') g = User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..')", "name='chat3', description='Chat Room 3') print('done.') # create msgs Message.objects.all().delete() print('Creating messages..') m1 =", "= Chat.objects.create(uri='1', name='main', description='Main Chat Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2')", "get_random_string from chat_app.models import * class Command(BaseCommand): def _create(self): # creating user and", "to chats print('winding up..') c1.participants.add(admin, u1, u2, u3, g) c2.participants.add(admin, u1, g) c3.participants.add(admin,", "django.contrib.auth.models import User from django.utils.crypto import get_random_string from chat_app.models import * class Command(BaseCommand):", "= Message.objects.create(sender=admin, content='Foo Bar') m3 = Message.objects.create(sender=admin, content='Wazz Buzz') print('done.') # add users", "Room') c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat", "2') c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3') print('done.') # create msgs Message.objects.all().delete()", "User.objects.create_user(username='Ghost', password=get_random_string(8),) print('done.') print('Creating other users..') u1 = User.objects.create_user(username='Ritik', password=get_random_string(8)) u2 = User.objects.create_user(username='Chetan'," ]
[ "uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response(", "{}]), True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'),", "__future__ import absolute_import, unicode_literals import responses from altapay import API, UpdateOrder from .test_cases", "[{}, {}]), True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response(", "10000001) self.assertEqual(uo.error_message, \"Number of original order lines \" \"and updated ones does not", "status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api)", "responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def", "UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api)", "False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order lines \" \"and updated ones", "= UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must", "2 elements\"): uo.update(\"payment id\", []) @responses.activate def 
test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST,", "self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self):", "def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment", "self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"):", "must \" \"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self): uo =", "= UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]),", "[]) @responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200,", "self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order lines", "self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'),", "\"order_lines must \" \"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self): uo", "import absolute_import, unicode_literals import responses from 
altapay import API, UpdateOrder from .test_cases import", "\" \"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api)", "True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200,", "\"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add(", "self.assertEqual(uo.error_message, \"Number of original order lines \" \"and updated ones does not match.\")", "@responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml')", "body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"): uo.update(\"payment", "import responses from altapay import API, UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase):", "class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo =", "id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order lines \"", "import API, UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api =", "uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", 
[{},", "API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'),", "body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number", "responses from altapay import API, UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase): def", "[{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order lines \" \"and", "responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain", "content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate", "@responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml')", "setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST,", "from .test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate", "with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"): uo.update(\"payment id\", []) @responses.activate def", 
"content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add(", ".test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def", "test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception,", "{}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order lines \" \"and updated", "@responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml')", "def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add(", "responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2", "UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True)", "responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001)", "from 
altapay import API, UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self):", "responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code,", "UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False)", "import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self):", "= UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]),", "id\", []) @responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'),", "from __future__ import absolute_import, unicode_literals import responses from altapay import API, UpdateOrder from", "content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order", "auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200,", "<reponame>AltaPay/python-client-library from __future__ import absolute_import, unicode_literals import responses from altapay import API, UpdateOrder", "def test_update_order_invalid_order_line(self): uo = 
UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with", "status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"): uo.update(\"payment id\", [])", "id\", [{}, {}]), True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'),", "self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message,", "uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{},", "test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\",", "'200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \" \"contain 2 elements\"): uo.update(\"payment id\",", "test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\",", "= API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo = UpdateOrder(api=self.api) responses.add( 
responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response(", "unicode_literals import responses from altapay import API, UpdateOrder from .test_cases import TestCase class", "status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original", "responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate", "self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of original order lines \" \"and updated ones does", "self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST,", "self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def test_update_order_error(self):", "UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines must \"", "'200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def test_update_order_error(self): uo =", "uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') with self.assertRaisesRegexp(Exception, \"order_lines", "elements\"): 
uo.update(\"payment id\", []) @responses.activate def test_update_order_success(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'),", "altapay import API, UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api", "UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False)", "TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test', auto_login=False) @responses.activate def test_update_order_invalid_order_line(self): uo", "def test_update_order_error(self): uo = UpdateOrder(api=self.api) responses.add( responses.POST, self.get_api_url('API/updateOrder'), body=self.load_xml_response( '200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment", "'200_update_order_error.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), False) self.assertEqual(uo.error_code, 10000001) self.assertEqual(uo.error_message, \"Number of", "API, UpdateOrder from .test_cases import TestCase class UpdateOrderTest(TestCase): def setUp(self): self.api = API(mode='test',", "absolute_import, unicode_literals import responses from altapay import API, UpdateOrder from .test_cases import TestCase", "body=self.load_xml_response( '200_update_order_success.xml'), status=200, content_type='application/xml') self.assertEqual(uo.update(\"payment id\", [{}, {}]), True) @responses.activate def test_update_order_error(self): uo" ]
[ "random_search: n_estimators = [int(x) for x in np.linspace(start=10, stop=500, num=10)] max_features = ['auto',", "make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth': 70, 'max_features':", "param_strategy='init', logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return voting def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle')", "pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return", "0.1, 0.5, 0.8, 1, 1.2, 2, 5, 10]} grids['logreg'] = {'logisticregression__C': [0.6, 0.75,", "pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth':", "grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return logreg def cross_validate_forest(X_train, y_train,", "0.01, 0.1, 1] grids['svc'] = {'svc__C': C, 'svc__gamma': gamma} svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train,", "pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']),", "learning models.\"\"\" import os import time import pprint import random as rn import", "C = [0.001, 0.01, 0.1, 1, 10] gamma = [0.001, 
0.01, 0.1, 1]", "<filename>src/titanic/scripts/cross_validate_models.py \"\"\"Cross-validate machine learning models.\"\"\" import os import time import pprint import random", "0.8, 0.85, 0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds},", "X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle') return forest def cross_validate_svc(X_train, y_train, pipes,", "cross_validate_forest(X_train, y_train, pipes, grids, kfolds) cross_validate_svc(X_train, y_train, pipes, grids, kfolds) cross_validate_voting(X_train, y_train, pipes,", "kfolds): \"\"\"Cross-validate VotingClassifier.\"\"\" estimators = [('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting =", "'sqrt'] max_depth = [int(x) for x in np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split =", "from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, VotingClassifier", "DataFrameDummifier(), SVC(kernel='linear', C=0.1, probability=False)) C = [0.001, 0.01, 0.1, 1, 10] gamma =", "'svc__gamma': gamma} svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return", "3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return logreg def cross_validate_forest(X_train, y_train, pipes, grids,", "time:', finish - start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init',", "random_search=False): \"\"\"Cross-validate RandomForestClassifier 
pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'],", "[0.001, 0.01, 0.1, 1, 10] gamma = [0.001, 0.01, 0.1, 1] grids['svc'] =", "4] bootstrap = [True, False] random_grid = {'randomforestclassifier__n_estimators': n_estimators, 'randomforestclassifier__max_features': max_features, 'randomforestclassifier__max_depth': max_depth,", "import OrdinalEncoder import titanic.tools as tools from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from", "mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C': [0.01, 0.1, 0.5, 0.8, 1, 1.2, 2,", "in np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1,", "'randomforestclassifier__max_features': max_features, 'randomforestclassifier__max_depth': max_depth, 'randomforestclassifier__min_samples_split': min_samples_split, 'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch =", "import ExtendedClassifier from titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED'] = '0' rn.seed(RANDOM_SEED) def cross_validate_logreg(X_train,", "cross_validate_logreg(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate LogisticRegression pipeline.\"\"\" pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']),", "[0.6, 0.75, 0.8, 0.85, 0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3},", "serialize_to=r'models/forest.pickle') return forest def cross_validate_svc(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate SVC pipeline.\"\"\" pipes['svc']", "= 
make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), SVC(kernel='linear', C=0.1, probability=False)) C = [0.001, 0.01, 0.1,", "y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes = dict() grids = dict() kfolds = KFold(n_splits=5, shuffle=True,", "= {'randomforestclassifier__n_estimators': n_estimators, 'randomforestclassifier__max_features': max_features, 'randomforestclassifier__max_depth': max_depth, 'randomforestclassifier__min_samples_split': min_samples_split, 'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap}", "min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50, cv=3, verbose=0, random_state=42) start", "def cross_validate_logreg(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate LogisticRegression pipeline.\"\"\" pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'],", "{'logisticregression__C': [0.01, 0.1, 0.5, 0.8, 1, 1.2, 2, 5, 10]} grids['logreg'] = {'logisticregression__C':", "from category_encoders.ordinal import OrdinalEncoder import titanic.tools as tools from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier,", "'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x) for x in np.linspace(start=10, stop=500, num=10)]", "import LogisticRegression from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.svm import SVC from category_encoders.ordinal", "0.1, 1] grids['svc'] = {'svc__C': C, 'svc__gamma': gamma} svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train,", "random_state=42) start = time.time() randsearch.fit(X_train, y_train) finish = time.time() print('randsearch.fit execution time:', finish", "\"\"\"Cross-validate LogisticRegression pipeline.\"\"\" pipes['logreg'] = 
make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C':", "dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids, kfolds) cross_validate_forest(X_train, y_train,", "in np.linspace(start=10, stop=500, num=10)] max_features = ['auto', 'sqrt'] max_depth = [int(x) for x", "y_train, pipes, grids, kfolds) cross_validate_voting(X_train, y_train, pipes, grids, kfolds) if __name__ == '__main__':", "= dict() grids = dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes,", "y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle') return forest def cross_validate_svc(X_train, y_train, pipes, grids,", "import KFold from sklearn.pipeline import make_pipeline from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import", "{'svc__C': C, 'svc__gamma': gamma} svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc',", "gamma} svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return svc", "LogisticRegression from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.svm import SVC from category_encoders.ordinal import", "category_encoders.ordinal import OrdinalEncoder import titanic.tools as tools from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString", "0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, 
sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg',", "cross_validate_logreg(X_train, y_train, pipes, grids, kfolds) cross_validate_forest(X_train, y_train, pipes, grids, kfolds) cross_validate_svc(X_train, y_train, pipes,", "SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling import ExtendedClassifier from titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED']", "SVC pipeline.\"\"\" pipes['svc'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), SVC(kernel='linear', C=0.1, probability=False)) C =", "= [('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train,", "pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C': [0.01, 0.1, 0.5,", "models.\"\"\" import os import time import pprint import random as rn import numpy", "as tools from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling import ExtendedClassifier from", "sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return logreg def cross_validate_forest(X_train, y_train, pipes,", "kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle') return forest def cross_validate_svc(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate", "cv=3, verbose=0, random_state=42) start = time.time() randsearch.fit(X_train, y_train) finish = time.time() print('randsearch.fit execution", "grids, kfolds) cross_validate_voting(X_train, y_train, pipes, grids, 
kfolds) if __name__ == '__main__': os.chdir(r'../../../') main()", "grids, kfolds): \"\"\"Cross-validate SVC pipeline.\"\"\" pipes['svc'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), SVC(kernel='linear', C=0.1,", "{'logisticregression__C': [0.6, 0.75, 0.8, 0.85, 0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv':", "import make_pipeline from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression from sklearn.ensemble import", "tools from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling import ExtendedClassifier from titanic.config", "num=10)] max_features = ['auto', 'sqrt'] max_depth = [int(x) for x in np.linspace(10, 110,", "kfolds): \"\"\"Cross-validate LogisticRegression pipeline.\"\"\" pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] =", "numpy as np from sklearn.model_selection import KFold from sklearn.pipeline import make_pipeline from sklearn.model_selection", "sklearn.svm import SVC from category_encoders.ordinal import OrdinalEncoder import titanic.tools as tools from titanic.modelling", "from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling import ExtendedClassifier from titanic.config import", "svc def cross_validate_voting(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate VotingClassifier.\"\"\" estimators = [('logreg', pipes['logreg']),", "'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C': [0.01, 0.1, 0.5, 0.8, 1, 1.2,", "pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), 
def main():
    """Load processed training data and cross-validate every model.

    Side effects: each cross-validation run writes logs under ``logs/models/``
    and serializes the fitted classifier under ``models/`` (see the individual
    ``cross_validate_*`` functions).
    """
    X_train = tools.deserialize(r'data/processed/X_train.pickle')
    y_train = tools.deserialize(r'data/processed/y_train.pickle')
    pipes, grids = {}, {}
    kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED)
    # Order matters: cross_validate_voting reuses the pipelines that the
    # three model-specific runs register in `pipes`.
    for validate in (cross_validate_logreg,
                     cross_validate_forest,
                     cross_validate_svc,
                     cross_validate_voting):
        validate(X_train, y_train, pipes, grids, kfolds)
def cross_validate_voting(X_train, y_train, pipes, grids, kfolds):
    """Cross-validate a hard-voting ensemble of the three fitted pipelines.

    Expects `pipes` to already contain the 'logreg', 'forest' and 'svc'
    pipelines registered by the earlier cross_validate_* calls.
    """
    member_names = ('logreg', 'forest', 'svc')
    estimators = [(name, pipes[name]) for name in member_names]
    ensemble = VotingClassifier(estimators, voting='hard')
    # param_strategy='init': the ensemble is evaluated as constructed,
    # no hyper-parameter search is performed here.
    voting = ExtendedClassifier.cross_validate(ensemble,
                                               X_train, y_train,
                                               sklearn_cvs_kws={'cv': kfolds},
                                               param_strategy='init',
                                               logdir_path=r'logs/models/voting',
                                               serialize_to=r'models/voting.pickle')
    return voting
def cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False):
    """Cross-validate RandomForestClassifier pipeline.

    Registers the forest pipeline in `pipes`, optionally runs a randomized
    hyper-parameter search, then cross-validates with `kfolds`.

    NOTE(review): when `random_search` is True the best parameters found are
    only *printed* -- the pipeline keeps its hard-coded init parameters
    (`param_strategy='init'` below). Apply them manually if desired.

    Parameters
    ----------
    X_train, y_train : training features and labels.
    pipes, grids : dicts shared across cross_validate_* functions;
        this function adds the 'forest' pipeline to `pipes`.
    kfolds : cross-validation splitter passed to the scoring step.
    random_search : when True, run RandomizedSearchCV and print its results.

    Returns
    -------
    The ExtendedClassifier produced by cross-validation.
    """
    pipes['forest'] = make_pipeline(CategoricalToString(),
                                    SimpleDataFrameImputer(median_cols=['Age', 'Fare'],
                                                           mode_cols=['Embarked']),
                                    OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'],
                                                   handle_unknown='impute'),
                                    # Hyper-parameters chosen from an earlier search run.
                                    RandomForestClassifier(**{'bootstrap': True,
                                                              'max_depth': 70,
                                                              'max_features': 'auto',
                                                              'min_samples_leaf': 4,
                                                              'min_samples_split': 10,
                                                              'n_estimators': 64,
                                                              'random_state': RANDOM_SEED}))
    if random_search:
        n_estimators = [int(x) for x in np.linspace(start=10, stop=500, num=10)]
        max_features = ['auto', 'sqrt']
        max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
        max_depth.append(None)  # also try unbounded depth
        min_samples_split = [2, 5, 10]
        min_samples_leaf = [1, 2, 4]
        bootstrap = [True, False]
        random_grid = {'randomforestclassifier__n_estimators': n_estimators,
                       'randomforestclassifier__max_features': max_features,
                       'randomforestclassifier__max_depth': max_depth,
                       'randomforestclassifier__min_samples_split': min_samples_split,
                       'randomforestclassifier__min_samples_leaf': min_samples_leaf,
                       'randomforestclassifier__bootstrap': bootstrap}
        pprint.pprint(random_grid)
        randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50,
                                        cv=3, verbose=0, random_state=42)
        # FIX: use a monotonic clock for elapsed-time measurement;
        # time.time() is wall-clock and can jump (NTP sync, DST).
        start = time.perf_counter()
        randsearch.fit(X_train, y_train)
        finish = time.perf_counter()
        print('randsearch.fit execution time:', finish - start)
        pprint.pprint(randsearch.best_params_)
    forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train,
                                               sklearn_cvs_kws={'cv': kfolds},
                                               param_strategy='init',
                                               logdir_path=r'logs/models/forest',
                                               serialize_to=r'models/forest.pickle')
    return forest
def cross_validate_logreg(X_train, y_train, pipes, grids, kfolds):
    """Cross-validate LogisticRegression pipeline.

    Registers the pipeline in `pipes`, grid-searches the regularization
    strength C, and cross-validates the best estimator with `kfolds`.

    Returns the ExtendedClassifier produced by cross-validation.
    """
    pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'],
                                                           mode_cols=['Embarked']),
                                    DataFrameDummifier(),
                                    LogisticRegression(solver='liblinear'))
    # FIX: removed a dead assignment -- a coarse grid
    # ([0.01, 0.1, 0.5, 0.8, 1, 1.2, 2, 5, 10]) was assigned and immediately
    # overwritten by the refined grid below; only the refined grid is used.
    grids['logreg'] = {'logisticregression__C': [0.6, 0.75, 0.8, 0.85, 0.9]}
    logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train,
                                               grids['logreg'],
                                               sklearn_gscv_kws={'cv': 3},
                                               sklearn_cvs_kws={'cv': kfolds},
                                               param_strategy='best',
                                               logdir_path=r'logs/models/logreg',
                                               serialize_to=r'models/logreg.pickle')
    return logreg
cross_validate_voting(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate VotingClassifier.\"\"\" estimators", "start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle') return", "np from sklearn.model_selection import KFold from sklearn.pipeline import make_pipeline from sklearn.model_selection import RandomizedSearchCV", "RANDOM_SEED})) if random_search: n_estimators = [int(x) for x in np.linspace(start=10, stop=500, num=10)] max_features", "'randomforestclassifier__max_depth': max_depth, 'randomforestclassifier__min_samples_split': min_samples_split, 'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch = RandomizedSearchCV(pipes['forest'], random_grid,", "\"\"\"Cross-validate machine learning models.\"\"\" import os import time import pprint import random as", "sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.svm import SVC from category_encoders.ordinal import OrdinalEncoder import", "= ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return", "X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return svc def cross_validate_voting(X_train, y_train, pipes,", "handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth': 70, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 64,", "RandomForestClassifier, VotingClassifier from sklearn.svm import SVC from category_encoders.ordinal import OrdinalEncoder 
import titanic.tools as", "True, 'max_depth': 70, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED}))", "import RandomForestClassifier, VotingClassifier from sklearn.svm import SVC from category_encoders.ordinal import OrdinalEncoder import titanic.tools", "10] min_samples_leaf = [1, 2, 4] bootstrap = [True, False] random_grid = {'randomforestclassifier__n_estimators':", "n_estimators, 'randomforestclassifier__max_features': max_features, 'randomforestclassifier__max_depth': max_depth, 'randomforestclassifier__min_samples_split': min_samples_split, 'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch", "ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return voting def main():", "'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50, cv=3, verbose=0, random_state=42) start =", "0.01, 0.1, 1, 10] gamma = [0.001, 0.01, 0.1, 1] grids['svc'] = {'svc__C':", "y_train, pipes, grids, kfolds) cross_validate_forest(X_train, y_train, pipes, grids, kfolds) cross_validate_svc(X_train, y_train, pipes, grids,", "as rn import numpy as np from sklearn.model_selection import KFold from sklearn.pipeline import", "random_grid, n_iter=50, cv=3, verbose=0, random_state=42) start = time.time() randsearch.fit(X_train, y_train) finish = time.time()", "titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED'] = '0' rn.seed(RANDOM_SEED) def cross_validate_logreg(X_train, y_train, pipes, grids,", "5, 10] min_samples_leaf = [1, 2, 4] bootstrap = [True, False] random_grid =", "OrdinalEncoder import titanic.tools as tools from 
titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling", "pipes['forest']), ('svc', pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/voting',", "max_depth, 'randomforestclassifier__min_samples_split': min_samples_split, 'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50,", "dict() grids = dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids,", "y_train, pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'],", "10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x) for x in", "KFold from sklearn.pipeline import make_pipeline from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression", "= make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C': [0.01, 0.1, 0.5, 0.8,", "return voting def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes = dict()", "mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth': 70, 'max_features': 'auto', 'min_samples_leaf': 4,", "from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, 
VotingClassifier from sklearn.svm import SVC", "- start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle')", "y_train, pipes, grids, kfolds) cross_validate_svc(X_train, y_train, pipes, grids, kfolds) cross_validate_voting(X_train, y_train, pipes, grids,", "= '0' rn.seed(RANDOM_SEED) def cross_validate_logreg(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate LogisticRegression pipeline.\"\"\" pipes['logreg']", "C=0.1, probability=False)) C = [0.001, 0.01, 0.1, 1, 10] gamma = [0.001, 0.01,", "serialize_to=r'models/voting.pickle') return voting def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes =", "x in np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf =", "grids = dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids, kfolds)", "import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling import ExtendedClassifier from titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED)", "2, 4] bootstrap = [True, False] random_grid = {'randomforestclassifier__n_estimators': n_estimators, 'randomforestclassifier__max_features': max_features, 'randomforestclassifier__max_depth':", "import SVC from category_encoders.ordinal import OrdinalEncoder import titanic.tools as tools from titanic.modelling import", "def cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(),", "= [int(x) for x in np.linspace(start=10, stop=500, num=10)] max_features = ['auto', 'sqrt'] max_depth", "grids, kfolds): 
\"\"\"Cross-validate VotingClassifier.\"\"\" estimators = [('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting", "\"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'),", "y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return voting def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle')", "kfolds}, param_strategy='init', logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return voting def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train =", "cross_validate_svc(X_train, y_train, pipes, grids, kfolds) cross_validate_voting(X_train, y_train, pipes, grids, kfolds) if __name__ ==", "max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 4] bootstrap =", "'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap': bootstrap} pprint.pprint(random_grid) randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50, cv=3, verbose=0, random_state=42)", "'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators", "logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return voting def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes", "os.environ['PYTHONHASHSEED'] = '0' rn.seed(RANDOM_SEED) def cross_validate_logreg(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate LogisticRegression pipeline.\"\"\"", "def cross_validate_voting(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate 
VotingClassifier.\"\"\" estimators = [('logreg', pipes['logreg']), ('forest',", "'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x) for x in np.linspace(start=10,", "kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids, kfolds) cross_validate_forest(X_train, y_train, pipes,", "import time import pprint import random as rn import numpy as np from", "y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return svc def cross_validate_voting(X_train, y_train, pipes, grids,", "shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids, kfolds) cross_validate_forest(X_train, y_train, pipes, grids, kfolds) cross_validate_svc(X_train,", "= ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle') return forest def cross_validate_svc(X_train,", "from sklearn.pipeline import make_pipeline from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression from", "= make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth': 70,", "voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/voting', serialize_to=r'models/voting.pickle') return voting", "randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50, cv=3, verbose=0, random_state=42) start = time.time() randsearch.fit(X_train, y_train)", "0.85, 0.9]} logreg = 
ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best',", "pipeline.\"\"\" pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C': [0.01, 0.1,", "grids, kfolds): \"\"\"Cross-validate LogisticRegression pipeline.\"\"\" pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg']", "[('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train, sklearn_cvs_kws={'cv':", "\"\"\"Cross-validate VotingClassifier.\"\"\" estimators = [('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators,", "'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x) for x", "VotingClassifier from sklearn.svm import SVC from category_encoders.ordinal import OrdinalEncoder import titanic.tools as tools", "from titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED'] = '0' rn.seed(RANDOM_SEED) def cross_validate_logreg(X_train, y_train, pipes,", "mode_cols=['Embarked']), DataFrameDummifier(), SVC(kernel='linear', C=0.1, probability=False)) C = [0.001, 0.01, 0.1, 1, 10] gamma", "VotingClassifier.\"\"\" estimators = [('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'),", "grids, kfolds) cross_validate_svc(X_train, y_train, pipes, grids, kfolds) cross_validate_voting(X_train, 
y_train, pipes, grids, kfolds) if", "max_features = ['auto', 'sqrt'] max_depth = [int(x) for x in np.linspace(10, 110, num=11)]", "print('randsearch.fit execution time:', finish - start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv':", "rn import numpy as np from sklearn.model_selection import KFold from sklearn.pipeline import make_pipeline", "0.75, 0.8, 0.85, 0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv':", "pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])] voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'), X_train, y_train, sklearn_cvs_kws={'cv': kfolds},", "4, 'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x) for", "sklearn.pipeline import make_pipeline from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression from sklearn.ensemble", "os import time import pprint import random as rn import numpy as np", "['auto', 'sqrt'] max_depth = [int(x) for x in np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split", "time import pprint import random as rn import numpy as np from sklearn.model_selection", "forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest', serialize_to=r'models/forest.pickle') return forest def", "RandomForestClassifier pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap':", "ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train, 
sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return svc def cross_validate_voting(X_train, y_train,", "kfolds): \"\"\"Cross-validate SVC pipeline.\"\"\" pipes['svc'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), SVC(kernel='linear', C=0.1, probability=False))", "0.8, 1, 1.2, 2, 5, 10]} grids['logreg'] = {'logisticregression__C': [0.6, 0.75, 0.8, 0.85,", "def cross_validate_svc(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate SVC pipeline.\"\"\" pipes['svc'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'],", "y_train) finish = time.time() print('randsearch.fit execution time:', finish - start) pprint.pprint(randsearch.best_params_) forest =", "= time.time() randsearch.fit(X_train, y_train) finish = time.time() print('randsearch.fit execution time:', finish - start)", "64, 'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x) for x in np.linspace(start=10, stop=500,", "svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return svc def", "C, 'svc__gamma': gamma} svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle')", "70, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search:", "tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes = dict() grids = dict() kfolds = KFold(n_splits=5,", "max_depth = [int(x) for x in np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split = [2,", "0.5, 0.8, 1, 1.2, 2, 5, 10]} grids['logreg'] = 
{'logisticregression__C': [0.6, 0.75, 0.8,", "logreg def cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest'] =", "np.linspace(start=10, stop=500, num=10)] max_features = ['auto', 'sqrt'] max_depth = [int(x) for x in", "= tools.deserialize(r'data/processed/y_train.pickle') pipes = dict() grids = dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED)", "n_estimators = [int(x) for x in np.linspace(start=10, stop=500, num=10)] max_features = ['auto', 'sqrt']", "5, 10]} grids['logreg'] = {'logisticregression__C': [0.6, 0.75, 0.8, 0.85, 0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'],", "'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators =", "CategoricalToString from titanic.modelling import ExtendedClassifier from titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED'] = '0'", "from titanic.modelling import ExtendedClassifier from titanic.config import RANDOM_SEED np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED'] = '0' rn.seed(RANDOM_SEED)", "import random as rn import numpy as np from sklearn.model_selection import KFold from", "return logreg def cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest']", "cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate RandomForestClassifier pipeline.\"\"\" pipes['forest'] = make_pipeline(CategoricalToString(), SimpleDataFrameImputer(median_cols=['Age',", "random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids, kfolds) cross_validate_forest(X_train, y_train, pipes, grids, kfolds) cross_validate_svc(X_train, y_train,", "np.random.seed(RANDOM_SEED) os.environ['PYTHONHASHSEED'] = '0' 
rn.seed(RANDOM_SEED) def cross_validate_logreg(X_train, y_train, pipes, grids, kfolds): \"\"\"Cross-validate LogisticRegression", "verbose=0, random_state=42) start = time.time() randsearch.fit(X_train, y_train) finish = time.time() print('randsearch.fit execution time:',", "= [1, 2, 4] bootstrap = [True, False] random_grid = {'randomforestclassifier__n_estimators': n_estimators, 'randomforestclassifier__max_features':", "from sklearn.model_selection import KFold from sklearn.pipeline import make_pipeline from sklearn.model_selection import RandomizedSearchCV from", "110, num=11)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 4]", "DataFrameDummifier(), LogisticRegression(solver='liblinear')) grids['logreg'] = {'logisticregression__C': [0.01, 0.1, 0.5, 0.8, 1, 1.2, 2, 5,", "10]} grids['logreg'] = {'logisticregression__C': [0.6, 0.75, 0.8, 0.85, 0.9]} logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train,", "pipes, grids, kfolds) cross_validate_svc(X_train, y_train, pipes, grids, kfolds) cross_validate_voting(X_train, y_train, pipes, grids, kfolds)", "machine learning models.\"\"\" import os import time import pprint import random as rn", "[int(x) for x in np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split = [2, 5, 10]", "sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/svc', serialize_to=r'models/svc.pickle') return svc def cross_validate_voting(X_train, y_train, pipes, grids, kfolds):", "def main(): X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes = dict() grids =", "randsearch.fit(X_train, y_train) finish = time.time() print('randsearch.fit execution time:', finish - start) pprint.pprint(randsearch.best_params_) forest", "'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 64, 'random_state': RANDOM_SEED})) if random_search: n_estimators = [int(x)", "time.time() 
print('randsearch.fit execution time:', finish - start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train,", "[1, 2, 4] bootstrap = [True, False] random_grid = {'randomforestclassifier__n_estimators': n_estimators, 'randomforestclassifier__max_features': max_features,", "pipes = dict() grids = dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train,", "= dict() kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) cross_validate_logreg(X_train, y_train, pipes, grids, kfolds) cross_validate_forest(X_train,", "import titanic.tools as tools from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString from titanic.modelling import", "X_train = tools.deserialize(r'data/processed/X_train.pickle') y_train = tools.deserialize(r'data/processed/y_train.pickle') pipes = dict() grids = dict() kfolds", "np.linspace(10, 110, num=11)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2,", "gamma = [0.001, 0.01, 0.1, 1] grids['svc'] = {'svc__C': C, 'svc__gamma': gamma} svc", "n_iter=50, cv=3, verbose=0, random_state=42) start = time.time() randsearch.fit(X_train, y_train) finish = time.time() print('randsearch.fit", "'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth': 70, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators':", "ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return logreg", "random_grid = {'randomforestclassifier__n_estimators': n_estimators, 'randomforestclassifier__max_features': max_features, 'randomforestclassifier__max_depth': max_depth, 'randomforestclassifier__min_samples_split': 
min_samples_split, 'randomforestclassifier__min_samples_leaf': min_samples_leaf, 'randomforestclassifier__bootstrap':", "'Fare'], mode_cols=['Embarked']), OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'], handle_unknown='impute'), RandomForestClassifier(**{'bootstrap': True, 'max_depth': 70, 'max_features': 'auto', 'min_samples_leaf':", "logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'], sklearn_gscv_kws={'cv': 3}, sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle')", "make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'], mode_cols=['Embarked']), DataFrameDummifier(), SVC(kernel='linear', C=0.1, probability=False)) C = [0.001, 0.01, 0.1, 1,", "1, 1.2, 2, 5, 10]} grids['logreg'] = {'logisticregression__C': [0.6, 0.75, 0.8, 0.85, 0.9]}", "finish - start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds}, param_strategy='init', logdir_path=r'logs/models/forest',", "sklearn_cvs_kws={'cv': kfolds}, param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return logreg def cross_validate_forest(X_train, y_train, pipes, grids, kfolds,", "param_strategy='best', logdir_path=r'logs/models/logreg', serialize_to=r'models/logreg.pickle') return logreg def cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False): \"\"\"Cross-validate", "execution time:', finish - start) pprint.pprint(randsearch.best_params_) forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train, sklearn_cvs_kws={'cv': kfolds},", "pipes, grids, kfolds) cross_validate_forest(X_train, y_train, pipes, grids, kfolds) cross_validate_svc(X_train, y_train, pipes, grids, kfolds)", "RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50, cv=3, verbose=0, random_state=42) start = time.time() 
randsearch.fit(X_train, y_train) finish =" ]
[ "test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self):", "'00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with", "contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1)", "pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2,", "test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert len(result)", "'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with", "not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user", "2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2 =", "reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with", "!= '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase):", "signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): 
get_user_from_token('') # check wrong key", "\\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) ==", "settings from contests.models import User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError,", "name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User',", "get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user = get_user_from_token( jwt.encode({'id': 1},", "test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2,", "user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc", "django.core.exceptions import ValidationError from ugrade import settings from contests.models import User from contests.exceptions", "user not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self):", "with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user = get_user_from_token(", "import ValidationError from ugrade import settings from contests.models import User from contests.exceptions import", "get_all_users() assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2", "mixer.blend('contests.Contest', id=1, 
name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1)", "\\ sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert", "pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name',", "def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id():", "'Test 1' assert get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with", "assert user.id == 2 assert token is not None and token != ''", "mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self):", "test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>')", "'<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1)", "mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self):", "<gh_stars>10-100 import pytest import jwt import bcrypt from mixer.backend.django import mixer from django.test", "name='<NAME>', 
permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1') and", "Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert token is not None and", "user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User',", "bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError):", "'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest", "user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1',", "import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\", "assert user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2']", "user, token = sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2", "TestCase from django.core.exceptions import ValidationError from ugrade import settings from contests.models import User", "'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1", "test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self):", "verify=False) assert 
token_data['id'] == 2 user = User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db", "reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase):", "email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with", "user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def", "pytest import jwt import bcrypt from mixer.backend.django import mixer from django.test import TestCase", "jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id':", "password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with", "'00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some", "UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users, \\ get_user_by_id,", "id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1)", "@classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', 
password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>',", "pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError):", "2 user = User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def", "'<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6)", "get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1)", "test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1", "wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm with pytest.raises(AuthenticationError): get_user_from_token(", "with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not", "name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678')", "'<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert token is not None and token", "'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User',", 
"pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token", "user, token = sign_in(2, 'testtest') assert user.id == 2 assert token is not", "assert get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2,", "None and token != '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2", "contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert len(result) ==", "== ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1,", "range(1, 6)), email=(\"<EMAIL>\" % n for n in range(1, 6)), username=(\"user%d\" % n", "bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User',", "UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username,", "= mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User',", "'<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def", "'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1,", "n for n in range(1, 6)), 
password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def", "mixer.backend.django import mixer from django.test import TestCase from django.core.exceptions import ValidationError from ugrade", "get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result", "assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db", "password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def", "contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError):", "mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2',", "name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name", "with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with", "= User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self):", "with pytest.raises(NoSuchUserError): forgot_password(3) def 
test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user =", "bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError):", "from django.core.exceptions import ValidationError from ugrade import settings from contests.models import User from", "'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password',", "'testtest') assert user.id == 2 assert token is not None and token !=", "'<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user =", "User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError,", "# check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm with", "name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name ==", "username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin',", "def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert", "'pass') def test_havent_signed_up(self): with 
pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>')", "not None and token != '' token_data = jwt.decode(token, verify=False) assert token_data['id'] ==", "user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod", "'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\"", "sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions())", "= get_all_users() assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1')", "sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1,", "sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None def test_success(self): user,", "'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000')", "def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User',", "contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with 
pytest.raises(AuthenticationError): get_user_from_token('')", "with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY,", "@pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>',", "import settings from contests.models import User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\", "@pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest',", "mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def", "sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass',", "'12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert", "'Test 1' assert get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with", "class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1',", "with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>'", "pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class 
SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n", "and token != '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db", "token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 user = User.objects.get(pk=2) assert user.signup_otc", "= User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1)", "@pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1)", "'' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod", "get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for", "== 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError):", "pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none", "contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError", "is not None and token != '' token_data = jwt.decode(token, verify=False) assert token_data['id']", "n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with", "'<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = 
'<PASSWORD>' user.save() reset_password(1, '<PASSWORD>',", "is not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1)", "algorithm='HS256') ) def test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY, algorithm='HS256') ) assert", "1' assert get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError):", "mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name", "# invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user", "import pytest import jwt import bcrypt from mixer.backend.django import mixer from django.test import", "for n in range(1, 6)), username=(\"user%d\" % n for n in range(1, 6)),", "name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with", "Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>',", "setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for n in range(1, 6)), email=(\"<EMAIL>\" %", "mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>',", "forgot_password(2) def 
test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None def", "= User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1", "sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass',", "def test_success(self): user, token = sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id", "code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert", "username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError):", "2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name ==", "sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass',", "error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None def test_success(self):", "= User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert", "get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not exists with pytest.raises(AuthenticationError): get_user_from_token(", "'<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, 
'<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc", "setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>',", "def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1)", "id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'),", "permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2')", "is not None def test_success(self): user, token = sign_up(2, 'username', 'My Name', '<PASSWORD>',", "bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>')", "with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" %", "= User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert", "assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users)", "mixer from django.test import TestCase from django.core.exceptions import ValidationError from ugrade import settings", "def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, 
'<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc", "with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with", "\\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password, \\", "user.reset_password_otc is not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save()", "pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not exists with pytest.raises(AuthenticationError):", "== 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError):", "= mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert len(result) == 5", "\\ get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db", "== 'Test 1' assert get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent')", "@pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2')", ") # user not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') )", "'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token = sign_in(2,", "def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', 
username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678')", "= jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls):", "with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError): get_user_from_token(", "'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None def test_success(self): user, token", "'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1,", "pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256')", "with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>'", "settings.SECRET_KEY, algorithm='HS256') ) # user not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY,", "user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls):", "token = sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert", "get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in,", "import jwt import bcrypt from mixer.backend.django import mixer from django.test import TestCase from", "get_user_by_username(2, 'username2') 
with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1,", "in result: assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod", "assert error.value.message_dict['username'] is not None def test_success(self): user, token = sign_up(2, 'username', 'My", "user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with", "'<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>')", "get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up,", "pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token = sign_in(2, 'testtest') assert user.id ==", "@pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name", "get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token", "= mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1)", "name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test", "test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', 
name='Test", "ugrade import settings from contests.models import User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError,", "assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest',", "'<KEY>') # check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>')", "assert get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name == 'Test 2' with", "contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def", "assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>',", "payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not exists", "token != '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class", "test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test", "name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert", "1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self):", "# check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> 
with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') #", "range(1, 6)), username=(\"user%d\" % n for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User',", "assert token_data['id'] == 2 user = User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class", "def test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY, algorithm='HS256') ) assert user.id ==", "1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test", "\\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\", "def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError):", "'<PASSWORD>') assert error.value.message_dict['username'] is not None def test_success(self): user, token = sign_up(2, 'username',", "with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 =", "name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError):", "user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1)", "and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username():", "% n for n in range(1, 6)), username=(\"user%d\" % n for n 
in", "user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password,", "test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>')", "'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self):", "assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod def setUpTestData(cls):", "sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def", "pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name',", "from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\", "== 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) == 5", "user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc =", "1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3)", "len(result) == 5 for user in result: assert user.name == 'Name' with pytest.raises(NoSuchContestError):", 
"test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user", "\\ sign_in, \\ sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions():", "check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm with pytest.raises(AuthenticationError):", "\\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import", "token_data['id'] == 2 user = User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase):", "assert len(result) == 5 for user in result: assert user.name == 'Name' with", "id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1)", "'00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with", "6)), username=(\"user%d\" % n for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User", "class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1',", "in range(1, 6)), email=(\"<EMAIL>\" % n for n in range(1, 6)), username=(\"user%d\" %", "name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username',", "@classmethod def 
setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some", "is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8'))", "settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY, algorithm='HS256') )", "AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users,", "email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with", "name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test", "@pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>',", "ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User',", "def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for n in range(1, 6)), email=(\"<EMAIL>\"", "verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 =", "as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None def", "users = 
get_all_users() assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission',", "import mixer from django.test import TestCase from django.core.exceptions import ValidationError from ugrade import", "None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1',", "'<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError):", "signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self):", "bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None", "assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is", "@classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1',", "# user not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def", "pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name',", "get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def", "= mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, 
name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1',", "test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token = sign_in(2, 'testtest') assert", "with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db", "get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2')", "'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some", "import User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\", "= get_contest_users(1) assert len(result) == 5 for user in result: assert user.name ==", "test_success(self): user, token = sign_in(2, 'testtest') assert user.id == 2 assert token is", "contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name == 'Test 2'", "'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3,", "invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not", "email=(\"<EMAIL>\" % n for n in range(1, 6)), username=(\"user%d\" % n for n", "'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def", "user = 
User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1)", "mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert len(result) == 5 for user in", "5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User',", "mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert", "== 2 user = User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod", "from django.test import TestCase from django.core.exceptions import ValidationError from ugrade import settings from", "with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user,", "'<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) #", "def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def", "test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is", "'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10},", "def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with 
pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>',", "reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1)", "1' assert get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError):", "@pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2')", "test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY, algorithm='HS256') ) assert user.id == 1", "user1 = get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes", "n in range(1, 6)), email=(\"<EMAIL>\" % n for n in range(1, 6)), username=(\"user%d\"", "get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') #", "mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2',", "forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert", "mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1,", "get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2,", 
"b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with", "User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert", "assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest',", "from contests.models import User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\", "username=(\"user%d\" % n for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6',", "2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name ==", "result: assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): @classmethod def", "% n for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>')", "5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) == 5 @pytest.mark.django_db", "bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not", "% n for n in range(1, 6)), email=(\"<EMAIL>\" % n for n in", "def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError)", "email='<EMAIL>', 
username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with", "def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc,", "bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'),", "username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self):", "\\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\", "= mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw(", "def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token = sign_in(2, 'testtest')", "contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4,", "none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload", "@pytest.mark.django_db class SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User 
%d\" % n for n", "id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert len(result) == 5 for user", "bcrypt from mixer.backend.django import mixer from django.test import TestCase from django.core.exceptions import ValidationError", "1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test", "pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def", "6)), email=(\"<EMAIL>\" % n for n in range(1, 6)), username=(\"user%d\" % n for", "len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) ==", "'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000')", "with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') ) # user not exists with", "def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User',", "assert token is not None and token != '' token_data = jwt.decode(token, verify=False)", "with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check", "exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user =", "'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) 
as error: sign_up(2, 'u', 'name',", "GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>',", "user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest',", "username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name == 'Test", "pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest',", "with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 =", "user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod", "user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password,", "pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1)", "contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1',", "2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert 
get_user_by_email(1,", "sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2, 'u',", "'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result =", "pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save()", "def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc", "import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from", "test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None def test_success_and_use_old_otc(self): user", "get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError):", "'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1')", "User.objects.get(pk=1) assert user.reset_password_otc is not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc =", "signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2,", "pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with 
pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self):", "user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self):", "password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3,", "def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError):", "pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest',", "b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with", "jwt import bcrypt from mixer.backend.django import mixer from django.test import TestCase from django.core.exceptions", "for n in range(1, 6)), email=(\"<EMAIL>\" % n for n in range(1, 6)),", "'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self):", "'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError):", "== 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase): 
@classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User", "email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name',", "user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1)", "user = User.objects.get(pk=1) assert user.reset_password_otc is not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1)", "reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def", "mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>',", "pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>')", "id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User',", "get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2')", "test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2,", "key with 
pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>", "'<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'},", "email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>',", "\\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions, \\", "'<PASSWORD>') assert user.id == 2 assert token is not None and token !=", "from ugrade import settings from contests.models import User from contests.exceptions import NoSuchUserError, \\", "= sign_in(2, 'testtest') assert user.id == 2 assert token is not None and", "contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1' assert", "get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest", "result = get_contest_users(1) assert len(result) == 5 for user in result: assert user.name", "setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>',", "name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name ==", "user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', 
'<PASSWORD>') def test_success(self): user =", "with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError):", "@classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for n in range(1, 6)),", "2 assert token is not None and token != '' token_data = jwt.decode(token,", "get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id':", "6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6,", "'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1", "['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest", "name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name", ") def test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY, algorithm='HS256') ) assert user.id", "'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with", "n for n in range(1, 6)), username=(\"user%d\" % n for n in range(1,", "mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name', contest=contest1) result = get_contest_users(1) assert len(result) == 5 for", "def 
test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users =", "id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2,", "@pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name", "assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 =", "token = sign_in(2, 'testtest') assert user.id == 2 assert token is not None", "error.value.message_dict['username'] is not None def test_success(self): user, token = sign_up(2, 'username', 'My Name',", "== 'Test 1' assert get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent')", "1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self):", "setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def", "pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>')", "username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with 
pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass',", "not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user", "test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user =", "name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert", "sign_in, \\ sign_up, \\ forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission')", "def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(", "assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name", "\\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up, \\", "= mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1',", "with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token = sign_in(2, 'testtest') assert user.id", "'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert token is not None", "'' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 user = User.objects.get(pk=2) assert", "test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert", "algorithm='HS256') ) # user not 
exists with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256')", "check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid", "!= '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 user = User.objects.get(pk=2)", "with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username',", "\\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users, \\", "pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest',", "'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None def test_success(self): user, token = sign_up(2,", "None def test_success(self): user, token = sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert", "token != '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 user =", "pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None", "email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass')", "test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc =", "== '<NAME>' assert 
user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError):", "jwt.decode(token, verify=False) assert token_data['id'] == 2 user = User.objects.get(pk=2) assert user.signup_otc is None", "mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1)", "User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 =", "get_user_from_token( '<KEY>') # check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token(", "\\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db", "User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000'", "forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None def test_success_and_use_old_otc(self): user =", "def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username',", "and token != '' token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 user", "is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>',", "pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, 
name='Contest 1') mixer.blend('contests.Contest', id=2,", "User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 =", "'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678')", "'00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1,", "mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name == '<NAME>'", "== '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User',", "user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def", "def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def", "mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1,", "from .core import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\", "with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users(): contest1 = mixer.blend('contests.Contest', id=1) mixer.cycle(5).blend('contests.User', name='Name',", "SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = 
mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User',", "reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1,", "not None def test_success(self): user, token = sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>')", "import bcrypt from mixer.backend.django import mixer from django.test import TestCase from django.core.exceptions import", "user = User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls):", "1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2',", "def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user", "'<PASSWORD>') def test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1,", "None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class", "user.id == 2 assert token is not None and token != '' token_data", "User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some", "name='Name', contest=contest1) result = get_contest_users(1) assert len(result) == 5 for user in result:", "range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): 
sign_in(7,", "pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError):", "get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError):", "in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError):", "code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name == '<NAME>' assert", "with pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not", "= jwt.decode(token, verify=False) assert token_data['id'] == 2 user = User.objects.get(pk=2) assert user.signup_otc is", "'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self):", "mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1')", "pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token(", "'<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase):", "email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): 
get_user_from_token('') # check", "'<PASSWORD>') def test_success(self): user, token = sign_in(2, 'testtest') assert user.id == 2 assert", "= User.objects.get(pk=2) assert user.signup_otc is None @pytest.mark.django_db class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User',", "pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def", "= '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1)", "@pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users", "mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2)", "from mixer.backend.django import mixer from django.test import TestCase from django.core.exceptions import ValidationError from", "mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User',", "contest1 = mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username',", "sign_in(1, '<PASSWORD>') def test_success(self): user, token = sign_in(2, 'testtest') assert user.id == 2", "'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None 
def test_success(self): user, token =", "password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw(", "token_data = jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def", "mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4,", "test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is", "= User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1", "with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest',", "get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User')", "test_wrong_code(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>')", "= mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 =", 
"token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1)", "id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>',", "class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2)", "= mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name ==", "username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError):", "pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user = get_user_from_token( jwt.encode({'id':", "contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name == 'Test 2'", "user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls):", "user in result: assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class SignInTest(TestCase):", "jwt.encode({'id': 10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY,", "for user in result: assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db class", 
"test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError):", "'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self):", "contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong", "len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission',", "sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert token is", "get_contest_users(1) assert len(result) == 5 for user in result: assert user.name == 'Name'", "'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def", "'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1,", "mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self): with", "def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') # check wrong key with", "Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): 
with pytest.raises(NoSuchUserError):", "bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass') def test_havent_signed_up(self):", "test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as", "for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self):", "6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', name='User 6', email='<EMAIL>') def test_wrong_user_id(self): with pytest.raises(AuthenticationError): sign_in(7, 'pass')", "User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() with pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self): user", "NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core", "username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1, 'username1').name == 'Test 1'", "'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert token is not", "2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2', username='username2', contest=contest1) assert get_user_by_username(1,", "get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db def test_get_contest_users():", "User.objects.get(pk=1) 
user.reset_password_otc = '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>',", "mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token') with pytest.raises(AuthenticationError): get_user_from_token('') #", "user1.has_permission('perm2') assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1", "contest=contest1) result = get_contest_users(1) assert len(result) == 5 for user in result: assert", "assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest',", "get_user_from_token( '<KEY>') # invalid payload with pytest.raises(AuthenticationError): get_user_from_token( jwt.encode({'id': 'hehe'}, settings.SECRET_KEY, algorithm='HS256') )", "\\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users():", "with pytest.raises(AuthenticationError): sign_up(2, 'username', 'name', 'somepass', '00000000') def test_already_used_username(self): with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin',", "with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with pytest.raises(AuthenticationError): sign_up(2, 'username',", "signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with 
pytest.raises(NoSuchUserError):", "test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name',", "= '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class", "2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1')", "email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test 1' assert get_user_by_email(1, 'email2').name == 'Test", "sign_in(2, 'testtest') assert user.id == 2 assert token is not None and token", "django.test import TestCase from django.core.exceptions import ValidationError from ugrade import settings from contests.models", "%d\" % n for n in range(1, 6)), email=(\"<EMAIL>\" % n for n", "n for n in range(1, 6)), email=(\"<EMAIL>\" % n for n in range(1,", "== 5 for user in result: assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2)", "\\ UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\", "10}, settings.SECRET_KEY, algorithm='HS256') ) def test_success(self): user = get_user_from_token( jwt.encode({'id': 1}, settings.SECRET_KEY, algorithm='HS256')", "def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1)", "get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password,", "2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with 
pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1')", "pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1)", "test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5 @pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users()", "pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username'] is not None", "assert get_user_by_email(1, 'email2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2,", "'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest", "= get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes ==", "email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name == 'Test 1'", "token is not None and token != '' token_data = jwt.decode(token, verify=False) assert", "'username2').name == 'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with", "None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user =", "b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', 
email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>',", "def test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>') assert error.value.message_dict['username']", "forgot_password, \\ reset_password, \\ get_user_from_token @pytest.mark.django_db def test_get_all_permissions(): mixer.cycle(5).blend('contests.Permission') assert len(get_all_permissions()) == 5", "mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for n in range(1, 6)), email=(\"<EMAIL>\" % n", "not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db", "reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user", "mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) == 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 =", "pytest.raises(AuthenticationError): reset_password(1, '<PASSWORD>', '<PASSWORD>') def test_success(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save()", "Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def", "def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1,", "NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError, \\ UserAlreadySignedUpError, \\ UsernameAlreadyUsedError from .core import get_all_permissions,", 
"assert user.reset_password_otc is not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>'", "in range(1, 6)), username=(\"user%d\" % n for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8'))", "User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc", "email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def", "def test_success_and_create_new_otc(self): forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None def test_success_and_use_old_otc(self):", "'<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) def test_with_forgot_password(self): forgot_password(1) user", "'<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def", "perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1) assert user1.name", "user = User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user =", "get_user_by_username, \\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password, \\ reset_password,", "with pytest.raises(UsernameAlreadyUsedError): sign_up(2, 'jauhararifin', 'name', 'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as error:", 
"contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2)", "\\ get_user_by_email, \\ get_contest_users, \\ sign_in, \\ sign_up, \\ forgot_password, \\ reset_password, \\", "bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8')) @pytest.mark.django_db class GetUserFromTokenTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1)", "SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for n in range(1,", "algorithm with pytest.raises(AuthenticationError): get_user_from_token( '<KEY> with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # invalid payload with", "b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass',", "ResetPasswordTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>',", "@pytest.mark.django_db def test_get_all_users(): mixer.cycle(5).blend('contests.User') users = get_all_users() assert len(users) == 5 @pytest.mark.django_db def", "with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def", "def test_success(self): user, token = sign_in(2, 'testtest') assert user.id == 2 assert token", "def test_wrong_user_id(self): with pytest.raises(NoSuchUserError): reset_password(3, '<PASSWORD>', '<PASSWORD>') def 
test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>',", "get_user_from_token('') # check wrong key with pytest.raises(AuthenticationError): get_user_from_token( '<KEY>') # check none algorithm", "class ForgotPasswordTest(TestCase): @classmethod def setUpTestData(cls): mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'))", "id=1, name='Contest 1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User',", "'Test 2' with pytest.raises(NoSuchUserError): get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3,", "import TestCase from django.core.exceptions import ValidationError from ugrade import settings from contests.models import", "class SignInTest(TestCase): @classmethod def setUpTestData(cls): mixer.cycle(5).blend('contests.User', name=(\"User %d\" % n for n in", "get_user_by_username(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_username(2, 'username2') with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email():", "email='<EMAIL>', contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self):", "mixer.blend('contests.User', name='Test 1', email='email1', contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 'email1').name", "test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', 
code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2])", "def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2 = mixer.blend('contests.Contest', id=2) mixer.blend('contests.User', name='Some Name',", "perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1", "5 for user in result: assert user.name == 'Name' with pytest.raises(NoSuchContestError): get_contest_users(2) @pytest.mark.django_db", "contests.models import User from contests.exceptions import NoSuchUserError, \\ NoSuchContestError, \\ AuthenticationError, \\ UserHaventSignedUpError,", "UsernameAlreadyUsedError from .core import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email,", "ValidationError from ugrade import settings from contests.models import User from contests.exceptions import NoSuchUserError,", "forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db class ResetPasswordTest(TestCase): @classmethod def", "password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1, signup_otc='12345678') def test_authentication_error(self): with pytest.raises(AuthenticationError): get_user_from_token('some.invalid.token')", "= '<PASSWORD>' user.save() reset_password(1, '<PASSWORD>', '<PASSWORD>') user = User.objects.get(pk=1) assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8'))", ".core import get_all_permissions, \\ get_all_users, \\ get_user_by_id, \\ get_user_by_username, \\ get_user_by_email, \\ get_contest_users,", "== 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1 = mixer.blend('contests.Contest', id=1) contest2", 
"'somepass', '12345678') def test_invalid_input(self): with pytest.raises(ValidationError) as error: sign_up(2, 'u', 'name', 'password', '<PASSWORD>')", "forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc is not None reset_password(1, user.reset_password_otc, '<PASSWORD>') user", "with pytest.raises(NoSuchContestError): get_user_by_username(3, 'username1') @pytest.mark.django_db def test_get_user_by_email(): contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1')", "contest=contest2, signup_otc='12345678') mixer.blend('contests.User', name='<NAME>', email='<EMAIL>', username='jauhararifin', password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with", "= User.objects.get(pk=1) assert user.reset_password_otc is not None def test_success_and_use_old_otc(self): user = User.objects.get(pk=1) user.reset_password_otc", "email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): forgot_password(2) def", "mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest1,", "'<PASSWORD>', '<PASSWORD>') def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError): reset_password(2, '<PASSWORD>', '<PASSWORD>') def test_wrong_code(self): user =", "== 5 @pytest.mark.django_db def test_get_user_by_id(): perm1 = mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2')", "get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert user1.permission_codes == ['perm1',", "== 2 assert token is not None and token != '' token_data =", "= 
User.objects.get(pk=1) user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc ==", "mixer.blend('contests.User', name='Some Name', email='<EMAIL>', username='username', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'), contest=contest1) mixer.blend('contests.User', email='<EMAIL>', contest=contest2, signup_otc='12345678')", "'<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def test_wrong_otc(self): with", "jwt.decode(token, verify=False) assert token_data['id'] == 2 @pytest.mark.django_db class SignUpTest(TestCase): @classmethod def setUpTestData(cls): contest1", "contest1 = mixer.blend('contests.Contest', id=1) mixer.blend('contests.User', name='Some Name 1', email='<EMAIL>', username='username1', password=bcrypt.hashpw( b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'),", "1') mixer.blend('contests.Contest', id=2, name='Contest 2') mixer.blend('contests.User', name='Test 1', username='username1', contest=contest1) mixer.blend('contests.User', name='Test 2',", "assert user1.permission_codes == ['perm1', 'perm2'] with pytest.raises(NoSuchUserError): get_user_by_id(6) @pytest.mark.django_db def test_get_user_by_username(): contest1 =", "= sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id == 2 assert token", "name=(\"User %d\" % n for n in range(1, 6)), email=(\"<EMAIL>\" % n for", "with pytest.raises(NoSuchUserError): get_user_by_email(1, 'nonexistent') with pytest.raises(NoSuchUserError): get_user_by_email(2, 'email2') with pytest.raises(NoSuchContestError): get_user_by_email(3, 'email1') @pytest.mark.django_db", "n in range(1, 6)), username=(\"user%d\" % n for n in range(1, 6)), password=bcrypt.hashpw(b't<PASSWORD>',", "contest=contest1) mixer.blend('contests.User', name='Test 2', email='email2', contest=contest1) assert get_user_by_email(1, 
'email1').name == 'Test 1' assert", "with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000') with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass',", "perm2]) user1 = get_user_by_id(1) assert user1.name == '<NAME>' assert user1.has_permission('perm1') and user1.has_permission('perm2') assert", "mixer.blend('contests.Permission', code='perm1') perm2 = mixer.blend('contests.Permission', code='perm2') mixer.cycle(5).blend('contests.User', name='<NAME>', permissions=[perm1, perm2]) user1 = get_user_by_id(1)", "sign_in(6, 'pass') def test_wrong_password(self): with pytest.raises(AuthenticationError): sign_in(1, '<PASSWORD>') def test_success(self): user, token =", "'name', 'somepass', '<PASSWORD>') def test_already_signed_up(self): with pytest.raises(UserAlreadySignedUpError): sign_up(1, 'username', 'name', 'somepass', '00000000') def", "test_success(self): user, token = sign_up(2, 'username', 'My Name', '<PASSWORD>', '<PASSWORD>') assert user.id ==", "password=bcrypt.hashpw( b'userpass', bcrypt.gensalt()).decode('utf-8'), contest=contest2) def test_wrong_email(self): with pytest.raises(NoSuchUserError): sign_up(4, 'username', 'name', 'somepass', '00000000')", "bcrypt.gensalt()).decode('utf-8')) mixer.blend('contests.User', email='<EMAIL>', signup_otc='12345678') def test_wrong_email(self): with pytest.raises(NoSuchUserError): forgot_password(3) def test_havent_signed_up(self): with pytest.raises(UserHaventSignedUpError):", "assert get_user_by_username(1, 'username1').name == 'Test 1' assert get_user_by_username(1, 'username2').name == 'Test 2' with", "user.reset_password_otc = '<PASSWORD>' user.save() forgot_password(1) user = User.objects.get(pk=1) assert user.reset_password_otc == '00000000' @pytest.mark.django_db" ]
[ "cms.PSet( barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"),", "cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track = cms.InputTag(\"hiGeneralTracks\"), photons = cms.InputTag(\"cleanPhotons\")", "= cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco =", "= cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track =", "endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track", "cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"),", "FWCore.ParameterSet.Config as cms isolationInputParameters = cms.PSet( barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco", "barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco", "import FWCore.ParameterSet.Config as cms isolationInputParameters = cms.PSet( barrelBasicCluster = 
cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"),", "cms isolationInputParameters = cms.PSet( barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"),", "= cms.PSet( barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco =", "cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track = cms.InputTag(\"hiGeneralTracks\"),", "hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track = cms.InputTag(\"hiGeneralTracks\"), photons = cms.InputTag(\"cleanPhotons\") )", "= cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track = cms.InputTag(\"hiGeneralTracks\"), photons =", "isolationInputParameters = cms.PSet( barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco = cms.InputTag(\"horeco\"), hfreco", "horeco = cms.InputTag(\"horeco\"), hfreco = cms.InputTag(\"hfreco\"), hbhereco = cms.InputTag(\"hbhereco\"), track = cms.InputTag(\"hiGeneralTracks\"), photons", "as cms isolationInputParameters = cms.PSet( barrelBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandBarrelBasicClusters\"), endcapBasicCluster = cms.InputTag(\"islandBasicClusters\",\"islandEndcapBasicClusters\"), horeco =" ]
[ "#! /usr/bin/env python #coding=utf-8 import socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket", "HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\")", "while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) # 接收对方发送过来的数据,最大接收1024个字节 recvData = serverSocket.recv(BUFSIZ) print('接收到的数据为:',", "ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) # 接收对方发送过来的数据,最大接收1024个字节 recvData", "socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据", "#coding=utf-8 import socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1'", "serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据", "参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR)", "= socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data", "创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST,", "指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while", "serverSocket.connect(ADDR) while True: 
#提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) # 接收对方发送过来的数据,最大接收1024个字节 recvData = serverSocket.recv(BUFSIZ)", "/usr/bin/env python #coding=utf-8 import socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket =", "PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) # 接收对方发送过来的数据,最大接收1024个字节 recvData =", "socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data =", "指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True:", "BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) # 接收对方发送过来的数据,最大接收1024个字节", "python #coding=utf-8 import socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)", "PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\"))", "# 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT)", "import socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000", "#从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024 ADDR=(HOST, PORT) serverSocket.connect(ADDR) while True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) #", "True: #提示用户输入数据 send_data = input(\"请输入要发送的数据:\") serverSocket.send(send_data.encode(\"utf-8\")) # 接收对方发送过来的数据,最大接收1024个字节 
recvData = serverSocket.recv(BUFSIZ) print('接收到的数据为:', recvData.decode('utf-8'))", "<reponame>phpyii/workerman-test #! /usr/bin/env python #coding=utf-8 import socket # 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议", "# 创建socket对象 # 参数一 指定用ipv4版本,参数2 指定用udp协议 serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) HOST='127.0.0.1' PORT=17000 #从指定的端口,从任何发送者,接收UDP数据 BUFSIZ=1024" ]
[ "and message.content[0] != '!': return command = message.content.split()[0][1:] args = message.content.split()[1:] if command", "if command == 'request': await make_request(client, message, args) elif command == 'cancel': await", "!= '!': return command = message.content.split()[0][1:] args = message.content.split()[1:] if command == 'request':", "command == 'place': await get_place(client, message, args) elif command == 'close': await end_request(client,", "from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place from", "return if message.content and message.content[0] != '!': return command = message.content.split()[0][1:] args =", "@staticmethod async def run(client: discord.Client, message: discord.Message): if message.author.bot: return if message.content and", "get_place from src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod async def run(client: discord.Client, message:", "discord.Message): if message.author.bot: return if message.content and message.content[0] != '!': return command =", "disable, enable from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import", "if message.content and message.content[0] != '!': return command = message.content.split()[0][1:] args = message.content.split()[1:]", "get_place(client, message, args) elif command == 'close': await end_request(client, message) elif command ==", "'request': await make_request(client, message, args) elif command == 'cancel': await cancel_request(client, message, args)", "import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place from 
src.eventsHandler.on_message.commands.request import", "import discord from src.eventsHandler.on_message.commands.activate import disable, enable from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request", "== 'cancel': await cancel_request(client, message, args) elif command == 'place': await get_place(client, message,", "message.content.split()[0][1:] args = message.content.split()[1:] if command == 'request': await make_request(client, message, args) elif", "from src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod async def run(client: discord.Client, message: discord.Message):", "from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod async def", "cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request", "'close': await end_request(client, message) elif command == 'enable': await enable(client, message, args) elif", "args) elif command == 'place': await get_place(client, message, args) elif command == 'close':", "== 'place': await get_place(client, message, args) elif command == 'close': await end_request(client, message)", "message, args) elif command == 'close': await end_request(client, message) elif command == 'enable':", "== 'close': await end_request(client, message) elif command == 'enable': await enable(client, message, args)", "enable from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place", "from src.eventsHandler.on_message.commands.activate import disable, enable from 
src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request", "async def run(client: discord.Client, message: discord.Message): if message.author.bot: return if message.content and message.content[0]", "await end_request(client, message) elif command == 'enable': await enable(client, message, args) elif command", "= message.content.split()[0][1:] args = message.content.split()[1:] if command == 'request': await make_request(client, message, args)", "import disable, enable from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place", "== 'request': await make_request(client, message, args) elif command == 'cancel': await cancel_request(client, message,", "command == 'cancel': await cancel_request(client, message, args) elif command == 'place': await get_place(client,", "return command = message.content.split()[0][1:] args = message.content.split()[1:] if command == 'request': await make_request(client,", "elif command == 'enable': await enable(client, message, args) elif command == 'disable': await", "'cancel': await cancel_request(client, message, args) elif command == 'place': await get_place(client, message, args)", "import make_request class OnMessage: @staticmethod async def run(client: discord.Client, message: discord.Message): if message.author.bot:", "message, args) elif command == 'place': await get_place(client, message, args) elif command ==", "src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod async def run(client:", "cancel_request(client, message, args) elif command == 'place': await get_place(client, message, args) elif command", "import get_place from src.eventsHandler.on_message.commands.request import make_request class 
OnMessage: @staticmethod async def run(client: discord.Client,", "discord from src.eventsHandler.on_message.commands.activate import disable, enable from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import", "elif command == 'cancel': await cancel_request(client, message, args) elif command == 'place': await", "command = message.content.split()[0][1:] args = message.content.split()[1:] if command == 'request': await make_request(client, message,", "command == 'close': await end_request(client, message) elif command == 'enable': await enable(client, message,", "'!': return command = message.content.split()[0][1:] args = message.content.split()[1:] if command == 'request': await", "def run(client: discord.Client, message: discord.Message): if message.author.bot: return if message.content and message.content[0] !=", "src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request class OnMessage:", "end_request(client, message) elif command == 'enable': await enable(client, message, args) elif command ==", "message.content.split()[1:] if command == 'request': await make_request(client, message, args) elif command == 'cancel':", "from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request class", "command == 'request': await make_request(client, message, args) elif command == 'cancel': await cancel_request(client,", "import end_request from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod", "message, args) elif command == 'cancel': await cancel_request(client, message, args) elif command ==", 
"'enable': await enable(client, message, args) elif command == 'disable': await disable(client, message, args)", "= message.content.split()[1:] if command == 'request': await make_request(client, message, args) elif command ==", "command == 'enable': await enable(client, message, args) elif command == 'disable': await disable(client,", "await make_request(client, message, args) elif command == 'cancel': await cancel_request(client, message, args) elif", "'place': await get_place(client, message, args) elif command == 'close': await end_request(client, message) elif", "make_request class OnMessage: @staticmethod async def run(client: discord.Client, message: discord.Message): if message.author.bot: return", "message) elif command == 'enable': await enable(client, message, args) elif command == 'disable':", "await get_place(client, message, args) elif command == 'close': await end_request(client, message) elif command", "class OnMessage: @staticmethod async def run(client: discord.Client, message: discord.Message): if message.author.bot: return if", "args) elif command == 'cancel': await cancel_request(client, message, args) elif command == 'place':", "args) elif command == 'close': await end_request(client, message) elif command == 'enable': await", "end_request from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod async", "message.content and message.content[0] != '!': return command = message.content.split()[0][1:] args = message.content.split()[1:] if", "make_request(client, message, args) elif command == 'cancel': await cancel_request(client, message, args) elif command", "await cancel_request(client, message, args) elif command == 'place': await get_place(client, message, args) elif", "if message.author.bot: return if message.content and message.content[0] != '!': return command = message.content.split()[0][1:]", "OnMessage: @staticmethod async def 
run(client: discord.Client, message: discord.Message): if message.author.bot: return if message.content", "message.author.bot: return if message.content and message.content[0] != '!': return command = message.content.split()[0][1:] args", "src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from src.eventsHandler.on_message.commands.place import get_place from src.eventsHandler.on_message.commands.request", "== 'enable': await enable(client, message, args) elif command == 'disable': await disable(client, message,", "src.eventsHandler.on_message.commands.request import make_request class OnMessage: @staticmethod async def run(client: discord.Client, message: discord.Message): if", "discord.Client, message: discord.Message): if message.author.bot: return if message.content and message.content[0] != '!': return", "src.eventsHandler.on_message.commands.activate import disable, enable from src.eventsHandler.on_message.commands.cancel import cancel_request from src.eventsHandler.on_message.commands.end_request import end_request from", "run(client: discord.Client, message: discord.Message): if message.author.bot: return if message.content and message.content[0] != '!':", "elif command == 'close': await end_request(client, message) elif command == 'enable': await enable(client,", "args = message.content.split()[1:] if command == 'request': await make_request(client, message, args) elif command", "message.content[0] != '!': return command = message.content.split()[0][1:] args = message.content.split()[1:] if command ==", "message: discord.Message): if message.author.bot: return if message.content and message.content[0] != '!': return command", "elif command == 'place': await get_place(client, message, args) elif command == 'close': await" ]
[ "import url_for def send_reset_email(user): token = user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\",", "= f'''To reset your password, visit the following link: {url_for('user.reset_password', token=token, _external=True)}''' mail.send(msg)", "flask_mail import Message from flask import url_for def send_reset_email(user): token = user.generate_reset_token() msg", "Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your password, visit the following", "import Message from flask import url_for def send_reset_email(user): token = user.generate_reset_token() msg =", ".. import mail from flask_mail import Message from flask import url_for def send_reset_email(user):", "send_reset_email(user): token = user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body =", "url_for def send_reset_email(user): token = user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email])", "Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your password, visit the following link:", "def send_reset_email(user): token = user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body", "from flask_mail import Message from flask import url_for def send_reset_email(user): token = user.generate_reset_token()", "msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your password,", "sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your password, visit the following link: {url_for('user.reset_password',", "token = user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To", "flask import url_for def send_reset_email(user): token = user.generate_reset_token() msg = Message('Password 
Reset Request',", "Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your password, visit the", "mail from flask_mail import Message from flask import url_for def send_reset_email(user): token =", "user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your", "= user.generate_reset_token() msg = Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset", "recipients=[user.email]) msg.body = f'''To reset your password, visit the following link: {url_for('user.reset_password', token=token,", "from .. import mail from flask_mail import Message from flask import url_for def", "<gh_stars>1-10 from .. import mail from flask_mail import Message from flask import url_for", "import mail from flask_mail import Message from flask import url_for def send_reset_email(user): token", "Message from flask import url_for def send_reset_email(user): token = user.generate_reset_token() msg = Message('Password", "from flask import url_for def send_reset_email(user): token = user.generate_reset_token() msg = Message('Password Reset", "= Message('Password Reset Request', sender=\"<EMAIL>\", recipients=[user.email]) msg.body = f'''To reset your password, visit", "msg.body = f'''To reset your password, visit the following link: {url_for('user.reset_password', token=token, _external=True)}'''" ]
[ "\"\"\" \"\"\" THOUGHT PROCESS it is not sorted, so sorting would take n(log(n))", "n(log(n)) -> not worth sorting looping through and keeping track of max (brute", "max_i = None for ele in list_of_integers: if max_i is None or max_i", "sorted, so sorting would take n(log(n)) -> not worth sorting looping through and", "reducing to 1/2 run time -> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force", "find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\" max_i = None for ele in", "if max_i is None or max_i < ele: max_i = ele return max_i", "looping through and keeping track of max (brute force) -> O(n) possibly looping", "sorting looping through and keeping track of max (brute force) -> O(n) possibly", "ele in list_of_integers: if max_i is None or max_i < ele: max_i =", "so sorting would take n(log(n)) -> not worth sorting looping through and keeping", "not worth sorting looping through and keeping track of max (brute force) ->", "<filename>0x10-python-network_0/6-peak.py #!/usr/bin/python3 \"\"\"script for finding peak in list of ints, interview prep \"\"\"", "through and keeping track of max (brute force) -> O(n) possibly looping from", "and keeping track of max (brute force) -> O(n) possibly looping from each", "for question \"\"\" max_i = None for ele in list_of_integers: if max_i is", "\"\"\" max_i = None for ele in list_of_integers: if max_i is None or", "-> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\" max_i", "list_of_integers: if max_i is None or max_i < ele: max_i = ele return", "= None for ele in list_of_integers: if max_i is None or max_i <", "\"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\" max_i = None for", "still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\" max_i =", "PROCESS it is not sorted, so sorting would take n(log(n)) -> not worth", "ints, 
interview prep \"\"\" \"\"\" THOUGHT PROCESS it is not sorted, so sorting", "looping from each end reducing to 1/2 run time -> still O(n) \"\"\"", "peak in list of ints, interview prep \"\"\" \"\"\" THOUGHT PROCESS it is", "not sorted, so sorting would take n(log(n)) -> not worth sorting looping through", "None for ele in list_of_integers: if max_i is None or max_i < ele:", "keeping track of max (brute force) -> O(n) possibly looping from each end", "O(n) possibly looping from each end reducing to 1/2 run time -> still", "from each end reducing to 1/2 run time -> still O(n) \"\"\" def", "for ele in list_of_integers: if max_i is None or max_i < ele: max_i", "(brute force) -> O(n) possibly looping from each end reducing to 1/2 run", "-> O(n) possibly looping from each end reducing to 1/2 run time ->", "sorting would take n(log(n)) -> not worth sorting looping through and keeping track", "def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\" max_i = None for ele", "#!/usr/bin/python3 \"\"\"script for finding peak in list of ints, interview prep \"\"\" \"\"\"", "list of ints, interview prep \"\"\" \"\"\" THOUGHT PROCESS it is not sorted,", "in list of ints, interview prep \"\"\" \"\"\" THOUGHT PROCESS it is not", "is not sorted, so sorting would take n(log(n)) -> not worth sorting looping", "max (brute force) -> O(n) possibly looping from each end reducing to 1/2", "force implementation for question \"\"\" max_i = None for ele in list_of_integers: if", "question \"\"\" max_i = None for ele in list_of_integers: if max_i is None", "run time -> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question", "of max (brute force) -> O(n) possibly looping from each end reducing to", "implementation for question \"\"\" max_i = None for ele in list_of_integers: if max_i", "\"\"\"script for finding peak in list of ints, interview prep \"\"\" \"\"\" THOUGHT", "of ints, interview prep \"\"\" \"\"\" THOUGHT 
PROCESS it is not sorted, so", "THOUGHT PROCESS it is not sorted, so sorting would take n(log(n)) -> not", "prep \"\"\" \"\"\" THOUGHT PROCESS it is not sorted, so sorting would take", "it is not sorted, so sorting would take n(log(n)) -> not worth sorting", "\"\"\" THOUGHT PROCESS it is not sorted, so sorting would take n(log(n)) ->", "for finding peak in list of ints, interview prep \"\"\" \"\"\" THOUGHT PROCESS", "worth sorting looping through and keeping track of max (brute force) -> O(n)", "force) -> O(n) possibly looping from each end reducing to 1/2 run time", "each end reducing to 1/2 run time -> still O(n) \"\"\" def find_peak(list_of_integers):", "1/2 run time -> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for", "take n(log(n)) -> not worth sorting looping through and keeping track of max", "to 1/2 run time -> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation", "end reducing to 1/2 run time -> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE", "track of max (brute force) -> O(n) possibly looping from each end reducing", "would take n(log(n)) -> not worth sorting looping through and keeping track of", "time -> still O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\"", "possibly looping from each end reducing to 1/2 run time -> still O(n)", "\"\"\"BRUTE force implementation for question \"\"\" max_i = None for ele in list_of_integers:", "in list_of_integers: if max_i is None or max_i < ele: max_i = ele", "interview prep \"\"\" \"\"\" THOUGHT PROCESS it is not sorted, so sorting would", "-> not worth sorting looping through and keeping track of max (brute force)", "O(n) \"\"\" def find_peak(list_of_integers): \"\"\"BRUTE force implementation for question \"\"\" max_i = None", "finding peak in list of ints, interview prep \"\"\" \"\"\" THOUGHT PROCESS it" ]
[ "head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone)", "data['ques'] his = data['hist'] batch_size, rnd, max_his_length = his.size() cap = his[:, 0,", "his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len,", "= ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() -", "cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores = self.head(fuse_feat,", "= build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括 classification head, generation", "VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size,", "build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括 classification head, generation head", "his.size() cap = his[:, 0, :] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len", "his_embed = self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len)", "= his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len,", "= self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location", "1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat =", "his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, 
his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len,", "from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len,", "__init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model", "self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output,", "= self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded", "= data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his", "self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed,", "import torch.nn as nn from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class", "q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores = self.head(fuse_feat, data) return", "img = data['img_feat'] ques = data['ques'] his = data['hist'] batch_size, rnd, max_his_length =", "# 包括 classification head, generation head def forward(self, data): img = data['img_feat'] ques", "classification head, generation head def forward(self, data): img = data['img_feat'] ques = data['ques']", "data['hist'] batch_size, rnd, max_his_length = his.size() cap = his[:, 0, :] ques_len =", "cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1,", "word_embedding_size, encoder, backbone, head): super().__init__() 
self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder)", "torch.nn as nn from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module):", "def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0)", "c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat,", ":] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat =", "q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :]", "head, generation head def forward(self, data): img = data['img_feat'] ques = data['ques'] his", "cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded,", "= data['img_feat'] ques = data['ques'] his = data['hist'] batch_size, rnd, max_his_length = his.size()", "max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed,", "= hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length)", "his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location", ":] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed =", "VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, 
word_embedding_size, encoder, backbone,", "q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy()", "= self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores", "= data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb", "build_backbone(backbone) self.head = build_head(head) # 包括 classification head, generation head def forward(self, data):", "@VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model =", "import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder,", "cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :]", "ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat", "c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores = self.head(fuse_feat, data) return scores", "ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size),", "self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括 classification head, generation head def", "class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len,", "vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, 
padding_idx=0) self.encoder_model =", "generation head def forward(self, data): img = data['img_feat'] ques = data['ques'] his =", "encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone", "= data['ques'] his = data['hist'] batch_size, rnd, max_his_length = his.size() cap = his[:,", "ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size),", "super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head", "ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques)", "his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores = self.head(fuse_feat, data)", "= cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd,", "= nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head)", "hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed", "0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed =", "ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his)", "self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) 
self.backbone = build_backbone(backbone) self.head =", "hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous())", "self.head = build_head(head) # 包括 classification head, generation head def forward(self, data): img", "0, :] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed", "batch_size, rnd, max_his_length = his.size() cap = his[:, 0, :] ques_len = data['ques_len']", "self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded =", "rnd, max_his_length = his.size() cap = his[:, 0, :] ques_len = data['ques_len'] hist_len", "his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len,", "= data['hist'] batch_size, rnd, max_his_length = his.size() cap = his[:, 0, :] ques_len", "rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb,", "c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() -", "= build_head(head) # 包括 classification head, generation head def forward(self, data): img =", "data): img = data['img_feat'] ques = data['ques'] his = data['hist'] batch_size, rnd, max_his_length", "= c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded,", ":] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output,", "data['img_feat'] ques = data['ques'] his = data['hist'] batch_size, rnd, max_his_length = his.size() cap", "his = data['hist'] batch_size, rnd, max_his_length = his.size() cap = his[:, 0, :]", "build_head @VQA_MODELS.register_module() class 
VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model", "data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb =", "his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed,", "= q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location,", "padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括 classification", "word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括", "fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size)", "self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location =", "his[:, 0, :] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0]", "backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone =", "head def forward(self, data): img = data['img_feat'] ques = data['ques'] his = data['hist']", "his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1", "hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location =", "build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, 
word_embedding_size, encoder, backbone, head): super().__init__()", "cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat", "build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head):", "cap = his[:, 0, :] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len =", "-1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img,", "data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his =", "= his.size() cap = his[:, 0, :] ques_len = data['ques_len'] hist_len = data['hist_len']", "= his[:, 0, :] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:,", "self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores =", "nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) #", "max_his_length = his.size() cap = his[:, 0, :] ques_len = data['ques_len'] hist_len =", "= self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output,", "nn from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self,", "包括 classification head, generation head def forward(self, data): img = data['img_feat'] ques =", "cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location,", "= 
self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat =", "build_head(head) # 包括 classification head, generation head def forward(self, data): img = data['img_feat']", "..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size,", "- 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded", "as nn from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def", "def forward(self, data): img = data['img_feat'] ques = data['ques'] his = data['hist'] batch_size,", "cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output,", "ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy()", "self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括 classification head,", "forward(self, data): img = data['img_feat'] ques = data['ques'] his = data['hist'] batch_size, rnd,", "= build_backbone(backbone) self.head = build_head(head) # 包括 classification head, generation head def forward(self,", "cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size,", "ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1", "= his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb,", 
"cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1)", "- 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat", "ques = data['ques'] his = data['hist'] batch_size, rnd, max_his_length = his.size() cap =", "1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded =" ]
[ "- 2000., V0 + 2000], [velscale/10, 800.]] for spec in specs: print(\"Processing spectrum", "sky = np.argwhere((wave < line - 10) | (wave > line + 10)).ravel()", "= 0. ######################################################################## # Preparing the fit dv = (logwave_temp[0] - logwave[0]) *", "t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"]", "= pp.gas_component emtable = [] for j, comp in enumerate(pp.component[gas]): t = Table()", "-*- \"\"\" Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018 @author:", "[[V0 - 2000., V0 + 2000], [velscale/10, 800.]] for spec in specs: print(\"Processing", "table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\",", "fluxerr) ################################################################### # Rebinning the data to a logarithmic scale for ppxf wave_range", "table = Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] #", "wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red part", "output): \"\"\" Read all yaml files in a ppf directory to one make", "Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red", "print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml)", "outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN = 100", "wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up the", "object, we have to make it a scalar pp.chi2 = float(pp.chi2) for key", "= np.where(wave_lin < 
7000)[0] wave_lin = wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx]", "Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018 @author: <NAME> Run", "# Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0])", "ppxf from ppxf import ppxf_util from spectres import spectres import context import misc", "if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\":", "gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name", "\"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity object, we have", "ppf directory to one make table for all bins. \"\"\" filenames = sorted([_", "velscale is None else velscale V0 = context.V if V0 is None else", "spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up the gas templates gas_templates, line_names,", "# Setting up the gas templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0],", "flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False,", "scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave", "== '__main__': targetSN = 100 sample = \"kinematics\" velscale = context.velscale tempfile =", "np import matplotlib.pyplot as plt from astropy.io import fits from astropy import constants", "Fitting with two components pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start,", "__name__ == 
'__main__': targetSN = 100 sample = \"kinematics\" velscale = context.velscale tempfile", "if os.path.exists(outyaml) and not redo: continue table = Table.read(spec) wave_lin = table[\"wave\"] flux", "gas_component = components > 0 start = [start0[:2]] * (ngas + 1) bounds", "keys], names=[names[k] for k in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True)", "Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) #", "constants from astropy.table import Table, vstack, hstack from ppxf.ppxf import ppxf from ppxf", "yaml import numpy as np import matplotlib.pyplot as plt from astropy.io import fits", "component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name", "1) bounds = [bounds0] * (ngas + 1) moments = [2] * (ngas", "one make table for all bins. 
\"\"\" filenames = sorted([_ for _ in", "\"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_", "from ppxf import ppxf_util from spectres import spectres import context import misc from", "from der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None): \"\"\" Running", "redo: continue table = Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr =", "# Removing red part of the spectrum idx = np.where(wave_lin < 7000)[0] wave_lin", "hstack from ppxf.ppxf import ppxf from ppxf import ppxf_util from spectres import spectres", "+ 1) moments = [2] * (ngas + 1) ######################################################################## # Fitting with", "= yaml.load(f) data = Table([[props[k]] for k in keys], names=[names[k] for k in", "data = Table([[props[k]] for k in keys], names=[names[k] for k in keys]) outtable.append(data)", "= getattr(pp, key) klist = [\"V\", \"sigma\"] for j, sol in enumerate(pp.sol): for", "outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN =", "a logarithmic scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux,", "import os import yaml import numpy as np import matplotlib.pyplot as plt from", "plot save(pp, outdir) def save(pp, outdir): \"\"\" Save results from pPXF into files", "context.V if V0 is None else V0 # Reading templates ssp_templates = fits.getdata(templates_file,", "= ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component,", "> 0 start = [start0[:2]] * (ngas + 1) bounds = [bounds0] *", "None else velscale V0 = context.V if V0 is None else V0 #", "hdu=1) nssps = ssp_templates.shape[1] logwave_temp = 
Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use", "in skylines: sky = np.argwhere((wave < line - 10) | (wave > line", "os.path.exists(outyaml) and not redo: continue table = Table.read(spec) wave_lin = table[\"wave\"] flux =", "tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir =", "= [] for fname in filenames: with open(os.path.join(direc, fname)) as f: props =", "[pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable =", "in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)),", "Run pPXF in data \"\"\" import os import yaml import numpy as np", "a astropy.unit.quantity object, we have to make it a scalar pp.chi2 = float(pp.chi2)", "= fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ################################################################### # Rebinning", "velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1]", "pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read all yaml files in a ppf", "np.exp(logwave_temp) # Use first spectrum to set emission lines start0 = [V0, 100.,", "= [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] =", "extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp", "6863]) goodpixels = np.arange(len(wave)) for line in skylines: sky = np.argwhere((wave < line", "5889, 6300, 6360, 6863]) goodpixels = 
np.arange(len(wave)) for line in skylines: sky =", "\"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue table = Table.read(spec) wave_lin = table[\"wave\"]", "the spectrum idx = np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx] flux = flux[idx]", "0 start = [start0[:2]] * (ngas + 1) bounds = [bounds0] * (ngas", "= table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red part of the spectrum idx", "fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing the fit dv =", "fluxerr = table[\"fluxerr\"] # Removing red part of the spectrum idx = np.where(wave_lin", "outdir): \"\"\" Save results from pPXF into files excluding fitting arrays. \"\"\" array_keys", "for fname in filenames: with open(os.path.join(direc, fname)) as f: props = yaml.load(f) data", "= (logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components =", "import matplotlib.pyplot as plt from astropy.io import fits from astropy import constants from", "np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the data to a logarithmic scale for", "import numpy as np import matplotlib.pyplot as plt from astropy.io import fits from", "= [\"V\", \"sigma\"] for j, sol in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i],", "hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first spectrum to set emission lines start0", "\"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\",", "import yaml import numpy as np import matplotlib.pyplot as plt from astropy.io import", "with emission lines gas = pp.gas_component emtable = [] for j, comp in", "specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") 
outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if", "the gas templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas", "ppxf import ppxf_util from spectres import spectres import context import misc from der_snr", "= \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### # Masking bad", "# Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps =", "\"{}.png\".format(name)), dpi=250) plt.close() pp.name = name # Saving results and plot save(pp, outdir)", "isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir,", "[pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]]", "in a ppf directory to one make table for all bins. \"\"\" filenames", "Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) #", "open(os.path.join(direc, fname)) as f: props = yaml.load(f) data = Table([[props[k]] for k in", "0.] bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]] for spec", "= wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin,", "pPXF into files excluding fitting arrays. 
\"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\",", "wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up the gas templates gas_templates, line_names, line_wave", "10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0])", "(ngas + 1) ######################################################################## # Fitting with two components pp = ppxf(templates, flux,", "= name # Saving results and plot save(pp, outdir) def save(pp, outdir): \"\"\"", "for k in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__", "as plt from astropy.io import fits from astropy import constants from astropy.table import", "= float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict,", "np.ndarray)] table = Table([getattr(pp, key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True)", "wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) ####################################################################", "with two components pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv,", "moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir,", "logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first spectrum to set", "misc from der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None): 
\"\"\"", "= np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing the fit dv = (logwave_temp[0]", "from astropy.io import fits from astropy import constants from astropy.table import Table, vstack,", "\"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity object, we", "Read all yaml files in a ppf directory to one make table for", "fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## #", "= context.V if V0 is None else V0 # Reading templates ssp_templates =", "import misc from der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None):", "spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and", "arrays. 
\"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys =", "klist = [\"V\", \"sigma\"] for j, sol in enumerate(pp.sol): for i in range(len(sol)):", "vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250)", "for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave =", "is None else velscale V0 = context.V if V0 is None else V0", "format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN = 100 sample = \"kinematics\" velscale", "* \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component", "spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. 
######################################################################## # Preparing the fit dv", "[2] * (ngas + 1) ######################################################################## # Fitting with two components pp =", "[ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] =", "######################################################################## # Preparing the fit dv = (logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value", "= [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable", "wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### # Masking bad pixels skylines = np.array([4785,", "[_ for _ in array_keys if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key)", "\"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table with emission lines gas", "V0 + 2000], [velscale/10, 800.]] for spec in specs: print(\"Processing spectrum {}\".format(spec)) name", "np.argwhere((wave < line - 10) | (wave > line + 10)).ravel() goodpixels =", "array_keys if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key) for key in array_keys],", "overwrite=True) def make_table(direc, output): \"\"\" Read all yaml files in a ppf directory", "part of the spectrum idx = np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx] flux", "it a scalar pp.chi2 = float(pp.chi2) for key in save_keys: ppdict[key] = getattr(pp,", "2.95) ngas = gas_templates.shape[1] #################################################################### # Masking bad pixels skylines = np.array([4785, 5577,", "spectres import context import misc from der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir,", "= sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", 
\"Verr_0\",", "context.velscale if velscale is None else velscale V0 = context.V if V0 is", "if V0 is None else V0 # Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T", "V0 = context.V if V0 is None else V0 # Reading templates ssp_templates", "fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the", "= np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components > 0 start = [start0[:2]] *", "= np.argwhere((wave < line - 10) | (wave > line + 10)).ravel() goodpixels", "data \"\"\" import os import yaml import numpy as np import matplotlib.pyplot as", "spectres import spectres import context import misc from der_snr import DER_SNR def run_ppxf(specs,", "from Hydra/MUSE on Feb 19, 2018 @author: <NAME> Run pPXF in data \"\"\"", "= [V0, 100., 0., 0.] bounds0 = [[V0 - 2000., V0 + 2000],", "os.mkdir(outdir) specs = sorted([_ for _ in os.listdir(\".\") if _.endswith(\".fits\")]) run_ppxf(specs, tempfile, outdir,", "- logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype(", "= [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is a", "import Table, vstack, hstack from ppxf.ppxf import ppxf from ppxf import ppxf_util from", "overwrite=True) if __name__ == '__main__': targetSN = 100 sample = \"kinematics\" velscale =", "degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name # Saving results", "ngas = gas_templates.shape[1] #################################################################### # Masking bad pixels skylines = np.array([4785, 5577, 5889,", "der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, 
V0=None): \"\"\" Running pPXF.", "= Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing", "= \"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir =", "red part of the spectrum idx = np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx]", "_.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\",", "goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels", "{\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable", "\"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for fname in", "pp.chi2 = float(pp.chi2) for key in save_keys: ppdict[key] = getattr(pp, key) klist =", "os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs =", "spectrum to set emission lines start0 = [V0, 100., 0., 0.] bounds0 =", "save(pp, outdir): \"\"\" Save results from pPXF into files excluding fitting arrays. 
\"\"\"", "keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\":", "= [bounds0] * (ngas + 1) moments = [2] * (ngas + 1)", "# Fitting with two components pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments,", "\"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\",", "table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red part of the", "wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave >", "6300, 6360, 6863]) goodpixels = np.arange(len(wave)) for line in skylines: sky = np.argwhere((wave", "sample = \"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir", "= [[V0 - 2000., V0 + 2000], [velscale/10, 800.]] for spec in specs:", "2000., V0 + 2000], [velscale/10, 800.]] for spec in specs: print(\"Processing spectrum {}\".format(spec))", "# Saving results and plot save(pp, outdir) def save(pp, outdir): \"\"\" Save results", "DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None): \"\"\" Running pPXF. \"\"\" velscale", "flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up the gas", "all bins. 
\"\"\" filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys", "in specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name))", "spec_errs=fluxerr) #################################################################### # Setting up the gas templates gas_templates, line_names, line_wave = \\", "t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"]", "into files excluding fitting arrays. \"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\",", "fit dv = (logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates))", "[start0[:2]] * (ngas + 1) bounds = [bounds0] * (ngas + 1) moments", "wave_lin = wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn", "j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) #", "in enumerate(pp.component[gas]): t = Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"]", "#################################################################### # Masking bad pixels skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863])", "goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum", "names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\",", "[bounds0] * (ngas + 1) moments = [2] * (ngas + 1) ########################################################################", "= np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = 
np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)]", "lines gas = pp.gas_component emtable = [] for j, comp in enumerate(pp.component[gas]): t", "gas = pp.gas_component emtable = [] for j, comp in enumerate(pp.component[gas]): t =", "up the gas templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95)", "= Table([getattr(pp, key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict =", "fname)) as f: props = yaml.load(f) data = Table([[props[k]] for k in keys],", "\"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir,", "flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing the fit dv = (logwave_temp[0] - logwave[0])", "\"\"\" import os import yaml import numpy as np import matplotlib.pyplot as plt", "is a astropy.unit.quantity object, we have to make it a scalar pp.chi2 =", "= components > 0 start = [start0[:2]] * (ngas + 1) bounds =", "flux, velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave > wave_lin[0]) & (wave <", "(ngas + 1) bounds = [bounds0] * (ngas + 1) moments = [2]", "logarithmic scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1]", "components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components > 0 start = [start0[:2]]", "fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up the gas templates", "np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) 
gas_component = components > 0 start", "table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red part of the spectrum idx =", "= os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue table = Table.read(spec) wave_lin", "line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### #", "np.int) gas_component = components > 0 start = [start0[:2]] * (ngas + 1)", "files excluding fitting arrays. \"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\",", "= Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp)", "der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the data", "make table for all bins. \"\"\" filenames = sorted([_ for _ in os.listdir(direc)", "k in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ ==", "lines start0 = [V0, 100., 0., 0.] bounds0 = [[V0 - 2000., V0", "velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds,", "wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave,", "from pPXF into files excluding fitting arrays. 
\"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\",", "= [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"]", "= table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red part of", "\"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\",", "os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names =", "= [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\",", "= fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file,", "yaml files in a ppf directory to one make table for all bins.", "Hydra IMF from Hydra/MUSE on Feb 19, 2018 @author: <NAME> Run pPXF in", "* (ngas + 1) bounds = [bounds0] * (ngas + 1) moments =", "IMF from Hydra/MUSE on Feb 19, 2018 @author: <NAME> Run pPXF in data", "\"\"\" velscale = context.velscale if velscale is None else velscale V0 = context.V", "outdir, velscale=None, redo=False, V0=None): \"\"\" Running pPXF. \"\"\" velscale = context.velscale if velscale", "Table([[props[k]] for k in keys], names=[names[k] for k in keys]) outtable.append(data) outtable =", "redo=False, V0=None): \"\"\" Running pPXF. 
\"\"\" velscale = context.velscale if velscale is None", "in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] =", "= [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] =", "100 sample = \"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample))", "to a logarithmic scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range,", "np.arange(len(wave)) for line in skylines: sky = np.argwhere((wave < line - 10) |", "in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names", "None else V0 # Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file,", "\"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for fname in filenames: with", "Save results from pPXF into files excluding fitting arrays. 
\"\"\" array_keys = [\"lam\",", "j, sol in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i],", "[] for j, comp in enumerate(pp.component[gas]): t = Table() t[\"name\"] = [ pp.gas_names[j]]", "= vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read all yaml", "filenames: with open(os.path.join(direc, fname)) as f: props = yaml.load(f) data = Table([[props[k]] for", "key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys = [\"name\",", "gas_templates.shape[1] #################################################################### # Masking bad pixels skylines = np.array([4785, 5577, 5889, 6300, 6360,", "the fit dv = (logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates,", "= vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN = 100 sample", "np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels = np.arange(len(wave)) for line in skylines:", "have to make it a scalar pp.chi2 = float(pp.chi2) for key in save_keys:", "= os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs", "\"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\",", "a ppf directory to one make table for all bins. 
\"\"\" filenames =", "\"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _ in os.listdir(\".\") if", "astropy import constants from astropy.table import Table, vstack, hstack from ppxf.ppxf import ppxf", "= Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first spectrum to set emission", "\"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for _ in array_keys", "scalar pp.chi2 = float(pp.chi2) for key in save_keys: ppdict[key] = getattr(pp, key) klist", "\"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_", "np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr)", "vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN = 100 sample =", "import ppxf from ppxf import ppxf_util from spectres import spectres import context import", "ppdict[key] = getattr(pp, key) klist = [\"V\", \"sigma\"] for j, sol in enumerate(pp.sol):", "2018 @author: <NAME> Run pPXF in data \"\"\" import os import yaml import", "else V0 # Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1)", "ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### # Masking bad pixels skylines", "= np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels =", "if __name__ == '__main__': targetSN = 100 sample = \"kinematics\" velscale = context.velscale", "\"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, 
\"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if", "the data to a logarithmic scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave", "skylines: sky = np.argwhere((wave < line - 10) | (wave > line +", "= gas_templates.shape[1] #################################################################### # Masking bad pixels skylines = np.array([4785, 5577, 5889, 6300,", "key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys", "pPXF in data \"\"\" import os import yaml import numpy as np import", "Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing the", "\"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity object, we have to make it", "save_keys: ppdict[key] = getattr(pp, key) klist = [\"V\", \"sigma\"] for j, sol in", "constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components", "outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _", "\"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for fname", "\"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for fname in filenames:", "i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir,", "t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]]", "> wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, 
wave_lin, flux, spec_errs=fluxerr)", "= np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the data to a logarithmic scale", "default_flow_style=False) # Saving table with emission lines gas = pp.gas_component emtable = []", "in data \"\"\" import os import yaml import numpy as np import matplotlib.pyplot", "ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names,", "[] for fname in filenames: with open(os.path.join(direc, fname)) as f: props = yaml.load(f)", "def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None): \"\"\" Running pPXF. \"\"\" velscale =", "in Hydra IMF from Hydra/MUSE on Feb 19, 2018 @author: <NAME> Run pPXF", "if velscale is None else velscale V0 = context.V if V0 is None", "np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx] der_sn", "Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first spectrum to set emission lines", "velscale = context.velscale if velscale is None else velscale V0 = context.V if", "2000], [velscale/10, 800.]] for spec in specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\",", "fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15,", "getattr(pp, key) klist = [\"V\", \"sigma\"] for j, sol in enumerate(pp.sol): for i", "\"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not", "enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) 
ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i])", "as np import matplotlib.pyplot as plt from astropy.io import fits from astropy import", "fits from astropy import constants from astropy.table import Table, vstack, hstack from ppxf.ppxf", "@author: <NAME> Run pPXF in data \"\"\" import os import yaml import numpy", "comp in enumerate(pp.component[gas]): t = Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]]", "we have to make it a scalar pp.chi2 = float(pp.chi2) for key in", "pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]]", "sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir):", "line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### # Masking", "Masking bad pixels skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels =", "from astropy import constants from astropy.table import Table, vstack, hstack from ppxf.ppxf import", "continue table = Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr = table[\"fluxerr\"]", "templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp", "templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components >", "in keys], names=[names[k] for k in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\",", "\"\"\" Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018 @author: <NAME>", "excluding fitting arrays. 
\"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"]", "bins. \"\"\" filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys =", "table = Table([getattr(pp, key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict", "logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int)", "components pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components,", "= np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components > 0", "(wave > line + 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels mask", "velscale=None, redo=False, V0=None): \"\"\" Running pPXF. 
\"\"\" velscale = context.velscale if velscale is", "6360, 6863]) goodpixels = np.arange(len(wave)) for line in skylines: sky = np.argwhere((wave <", "in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys = [\"name\", \"regul\",", "table[\"fluxerr\"] # Removing red part of the spectrum idx = np.where(wave_lin < 7000)[0]", "Removing red part of the spectrum idx = np.where(wave_lin < 7000)[0] wave_lin =", "\"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable =", "params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp =", "from astropy.table import Table, vstack, hstack from ppxf.ppxf import ppxf from ppxf import", "nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first", "set emission lines start0 = [V0, 100., 0., 0.] 
bounds0 = [[V0 -", "logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave > wave_lin[0])", "f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table with emission lines gas = pp.gas_component", "################################################################### # Rebinning the data to a logarithmic scale for ppxf wave_range =", "misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the data to a", "\"apoly\"] array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _), np.ndarray)] table", "plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels)", "context import misc from der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False,", "to make it a scalar pp.chi2 = float(pp.chi2) for key in save_keys: ppdict[key]", "\"chi2\"] # Chi2 is a astropy.unit.quantity object, we have to make it a", "\"sigma\"] for j, sol in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] =", "\"\"\" filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\",", "= Table([[props[k]] for k in keys], names=[names[k] for k in keys]) outtable.append(data) outtable", "\"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity object, we have to", "\"clean\", \"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity object, we have to make", "names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\":", "skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels = np.arange(len(wave)) for line", "# -*- coding: utf-8 -*- \"\"\" Forked in Hydra IMF from Hydra/MUSE on", "coding: utf-8 -*- \"\"\" 
Forked in Hydra IMF from Hydra/MUSE on Feb 19,", "V0 # Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps", "gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name #", "float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table", "= context.velscale if velscale is None else velscale V0 = context.V if V0", "goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name # Saving results and plot", "ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False)", "for key in save_keys: ppdict[key] = getattr(pp, key) klist = [\"V\", \"sigma\"] for", "bad pixels skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels = np.arange(len(wave))", "_), np.ndarray)] table = Table([getattr(pp, key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)),", "flux, spec_errs=fluxerr) #################################################################### # Setting up the gas templates gas_templates, line_names, line_wave =", "# Chi2 is a astropy.unit.quantity object, we have to make it a scalar", "a scalar pp.chi2 = float(pp.chi2) for key in save_keys: ppdict[key] = getattr(pp, key)", "t = Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]]", "with open(os.path.join(direc, fname)) as f: props = yaml.load(f) data = Table([[props[k]] for k", "components > 0 start = [start0[:2]] * (ngas + 1) bounds = [bounds0]", "+ 2000], [velscale/10, 800.]] for spec in specs: print(\"Processing 
spectrum {}\".format(spec)) name =", "for all bins. \"\"\" filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")])", "start = [start0[:2]] * (ngas + 1) bounds = [bounds0] * (ngas +", "fname in filenames: with open(os.path.join(direc, fname)) as f: props = yaml.load(f) data =", "V0=None): \"\"\" Running pPXF. \"\"\" velscale = context.velscale if velscale is None else", "ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave)", "Saving table with emission lines gas = pp.gas_component emtable = [] for j,", "table with emission lines gas = pp.gas_component emtable = [] for j, comp", "pp.gas_component emtable = [] for j, comp in enumerate(pp.component[gas]): t = Table() t[\"name\"]", "gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components > 0 start =", "for _ in array_keys if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key) for", "= {} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] #", "for line in skylines: sky = np.argwhere((wave < line - 10) | (wave", "= float(pp.chi2) for key in save_keys: ppdict[key] = getattr(pp, key) klist = [\"V\",", "\\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### # Masking bad pixels", "vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read all yaml files", "for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with", "= float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving", 
"yaml.load(f) data = Table([[props[k]] for k in keys], names=[names[k] for k in keys])", "t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc,", "ppxf_util from spectres import spectres import context import misc from der_snr import DER_SNR", "names=[names[k] for k in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if", "= spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up the gas templates gas_templates,", "bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name # Saving results and", "vstack, hstack from ppxf.ppxf import ppxf from ppxf import ppxf_util from spectres import", "[pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\"", "wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin, flux,", "yaml.dump(ppdict, f, default_flow_style=False) # Saving table with emission lines gas = pp.gas_component emtable", "\"mpoly\", \"apoly\"] array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _), np.ndarray)]", "fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data", "make_table(direc, output): \"\"\" Read all yaml files in a ppf directory to one", "800.]] for spec in specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml", "flux = table[\"flux\"] fluxerr = table[\"fluxerr\"] # Removing red part of the spectrum", "> line + 10)).ravel() 
goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels", "[velscale/10, 800.]] for spec in specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\")", "f: props = yaml.load(f) data = Table([[props[k]] for k in keys], names=[names[k] for", "<NAME> Run pPXF in data \"\"\" import os import yaml import numpy as", "emission lines gas = pp.gas_component emtable = [] for j, comp in enumerate(pp.component[gas]):", "plt.close() pp.name = name # Saving results and plot save(pp, outdir) def save(pp,", "np.arange(ngas)+1)).astype( np.int) gas_component = components > 0 start = [start0[:2]] * (ngas +", "# Saving table with emission lines gas = pp.gas_component emtable = [] for", "= {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"}", "# Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing", "name # Saving results and plot save(pp, outdir) def save(pp, outdir): \"\"\" Save", "import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None): \"\"\" Running pPXF. 
\"\"\"", "= [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True)", "= [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output):", "pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave, component=components, mdegree=-1,", "< line - 10) | (wave > line + 10)).ravel() goodpixels = np.intersect1d(goodpixels,", "\"der_sn\": \"SNR\"} outtable = [] for fname in filenames: with open(os.path.join(direc, fname)) as", "def save(pp, outdir): \"\"\" Save results from pPXF into files excluding fitting arrays.", "save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is", "specs = sorted([_ for _ in os.listdir(\".\") if _.endswith(\".fits\")]) run_ppxf(specs, tempfile, outdir, redo=False)", "= np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels = np.arange(len(wave)) for line in", "= np.exp(logwave) wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr", "Table([getattr(pp, key) for key in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {}", "= ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave > wave_lin[0]) &", "ppxf.ppxf import ppxf from ppxf import ppxf_util from spectres import spectres import context", "wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir)", "two components pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True, moments=moments, start=start, vsyst=dv, lam=wave,", 
"dv = (logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components", "emtable = [] for j, comp in enumerate(pp.component[gas]): t = Table() t[\"name\"] =", "\"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for fname in filenames: with open(os.path.join(direc,", "os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\")", "to set emission lines start0 = [V0, 100., 0., 0.] bounds0 = [[V0", "and not redo: continue table = Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"]", "os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue table = Table.read(spec) wave_lin =", "- 10) | (wave > line + 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) #", "t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t)", "1) moments = [2] * (ngas + 1) ######################################################################## # Fitting with two", "[wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave", "Saving results and plot save(pp, outdir) def save(pp, outdir): \"\"\" Save results from", "0., 0.] bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]] for", "fitting arrays. 
\"\"\" array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys", "keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN", "outtable = [] for fname in filenames: with open(os.path.join(direc, fname)) as f: props", "\\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component =", "\"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\":", "import spectres import context import misc from der_snr import DER_SNR def run_ppxf(specs, templates_file,", "input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing the fit", "[V0, 100., 0., 0.] 
bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10,", "= np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] =", "= [start0[:2]] * (ngas + 1) bounds = [bounds0] * (ngas + 1)", "j, comp in enumerate(pp.component[gas]): t = Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] =", "if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key) for key in array_keys], names=array_keys)", "ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave = wave[(wave > wave_lin[0]) & (wave", "from spectres import spectres import context import misc from der_snr import DER_SNR def", "outdir) def save(pp, outdir): \"\"\" Save results from pPXF into files excluding fitting", "utf-8 -*- \"\"\" Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018", "ppdict = {} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"]", "os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _ in os.listdir(\".\")", "numpy as np import matplotlib.pyplot as plt from astropy.io import fits from astropy", "{}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not", "wave_temp = np.exp(logwave_temp) # Use first spectrum to set emission lines start0 =", "\"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _),", "props = yaml.load(f) data = Table([[props[k]] for k in keys], names=[names[k] for k", "t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable)", "for k in keys], names=[names[k] for k in keys]) outtable.append(data) outtable = vstack(outtable)", "quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) 
plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name # Saving", "start0 = [V0, 100., 0., 0.] bounds0 = [[V0 - 2000., V0 +", "open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table with emission", "spectrum idx = np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx] flux = flux[idx] fluxerr", "+ 1) ######################################################################## # Fitting with two components pp = ppxf(templates, flux, fluxerr,", "moments = [2] * (ngas + 1) ######################################################################## # Fitting with two components", "sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\",", "pp.name = name # Saving results and plot save(pp, outdir) def save(pp, outdir):", "Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] =", "\"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue table =", "lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close()", "(logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value templates = np.column_stack((ssp_templates, gas_templates)) components = np.hstack((np.zeros(nssps),", "\"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for _ in array_keys if isinstance(getattr(pp,", "ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1] logwave_temp =", "Hydra/MUSE on Feb 19, 2018 @author: <NAME> Run pPXF in data \"\"\" import", "f, default_flow_style=False) # Saving table with emission lines gas = pp.gas_component 
emtable =", "first spectrum to set emission lines start0 = [V0, 100., 0., 0.] bounds0", "[pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def", "sky) # Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(", "targetSN = 100 sample = \"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\",", "\"\"\" Read all yaml files in a ppf directory to one make table", "enumerate(pp.component[gas]): t = Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] =", "emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read all", "\"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = []", "is None else V0 # Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params =", "= wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn =", "velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN))", "= os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0],", "= os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _ in", "of the spectrum idx = np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx] flux =", "Rebinning the 
data to a logarithmic scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]]", "7000)[0] wave_lin = wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2]", "table for all bins. \"\"\" filenames = sorted([_ for _ in os.listdir(direc) if", "array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _), np.ndarray)] table =", "5577, 5889, 6300, 6360, 6863]) goodpixels = np.arange(len(wave)) for line in skylines: sky", "Table, vstack, hstack from ppxf.ppxf import ppxf from ppxf import ppxf_util from spectres", "goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning", "np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. ######################################################################## # Preparing the fit dv = (logwave_temp[0] -", "all yaml files in a ppf directory to one make table for all", "= [_ for _ in array_keys if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp,", "array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys = [\"name\", \"regul\", \"degree\",", "else velscale V0 = context.V if V0 is None else V0 # Reading", "as f: props = yaml.load(f) data = Table([[props[k]] for k in keys], names=[names[k]", "fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ################################################################### #", "import fits from astropy import constants from astropy.table import Table, vstack, hstack from", "= Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"] = [pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"]", "np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype( np.int) gas_component = components > 0 start = [start0[:2]] * (ngas", "= [] for j, comp in enumerate(pp.component[gas]): t = Table() t[\"name\"] = [", 
"\"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read all yaml files in a", "run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None): \"\"\" Running pPXF. \"\"\" velscale = context.velscale", "flux[idx] fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ###################################################################", "'__main__': targetSN = 100 sample = \"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir,", "pPXF. \"\"\" velscale = context.velscale if velscale is None else velscale V0 =", "= ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first spectrum", "np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels,", "# Use first spectrum to set emission lines start0 = [V0, 100., 0.,", "os.chdir(wdir) outdir = os.path.join(os.path.split(wdir)[0], \"ppxf\") if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for", "& (wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### #", "np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] =", "= spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue", "k in keys], names=[names[k] for k in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output,", "name = spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo:", "wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx] 
der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux", "astropy.io import fits from astropy import constants from astropy.table import Table, vstack, hstack", "\"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for fname in filenames: with open(os.path.join(direc, fname))", "100., 0., 0.] bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]]", "in save_keys: ppdict[key] = getattr(pp, key) klist = [\"V\", \"sigma\"] for j, sol", "V0 is None else V0 # Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params", "{} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2", "as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table with emission lines gas =", "= misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the data to", "Running pPXF. \"\"\" velscale = context.velscale if velscale is None else velscale V0", "not redo: continue table = Table.read(spec) wave_lin = table[\"wave\"] flux = table[\"flux\"] fluxerr", "np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0. 
########################################################################", "######################################################################## # Fitting with two components pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True,", "= [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir,", "on Feb 19, 2018 @author: <NAME> Run pPXF in data \"\"\" import os", "for spec in specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml =", "[\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity", "\"SNR\"} outtable = [] for fname in filenames: with open(os.path.join(direc, fname)) as f:", "np.exp(logwave) wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux, fluxerr =", "Chi2 is a astropy.unit.quantity object, we have to make it a scalar pp.chi2", "range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\")", "line in skylines: sky = np.argwhere((wave < line - 10) | (wave >", "19, 2018 @author: <NAME> Run pPXF in data \"\"\" import os import yaml", "bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]] for spec in", "save(pp, outdir) def save(pp, outdir): \"\"\" Save results from pPXF into files excluding", "dpi=250) plt.close() pp.name = name # Saving results and plot save(pp, outdir) def", "and plot save(pp, outdir) def save(pp, outdir): \"\"\" Save results from pPXF into", "os import yaml import numpy as np import matplotlib.pyplot as plt from astropy.io", "\"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\", \"chi2\"] # Chi2 is a astropy.unit.quantity object,", "# Preparing the fit dv = (logwave_temp[0] - logwave[0]) * \\ 
constants.c.to(\"km/s\").value templates", "idx = np.where(wave_lin < 7000)[0] wave_lin = wave_lin[idx] flux = flux[idx] fluxerr =", "directory to one make table for all bins. \"\"\" filenames = sorted([_ for", "= table[\"fluxerr\"] # Removing red part of the spectrum idx = np.where(wave_lin <", "wave = np.exp(logwave) wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1] flux,", "spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue table", "def make_table(direc, output): \"\"\" Read all yaml files in a ppf directory to", "\"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\": \"Verr\", \"sigma_0\":", "gas templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas =", "np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)] = 0.", "for j, comp in enumerate(pp.component[gas]): t = Table() t[\"name\"] = [ pp.gas_names[j]] t[\"flux\"]", "files in a ppf directory to one make table for all bins. \"\"\"", "(wave < wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting", "\"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for _ in array_keys if", "emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read all yaml files in", "\"\"\" Running pPXF. 
\"\"\" velscale = context.velscale if velscale is None else velscale", "in filenames: with open(os.path.join(direc, fname)) as f: props = yaml.load(f) data = Table([[props[k]]", "< wave_lin[-1])][1:-1] flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr) #################################################################### # Setting up", "results from pPXF into files excluding fitting arrays. \"\"\" array_keys = [\"lam\", \"galaxy\",", "if not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _ in os.listdir(\".\") if _.endswith(\".fits\")])", "# Rebinning the data to a logarithmic scale for ppxf wave_range = [wave_lin[0],", "key in save_keys: ppdict[key] = getattr(pp, key) klist = [\"V\", \"sigma\"] for j,", "goodpixels = np.arange(len(wave)) for line in skylines: sky = np.argwhere((wave < line -", "1) ######################################################################## # Fitting with two components pp = ppxf(templates, flux, fluxerr, velscale=velscale,", "to one make table for all bins. \"\"\" filenames = sorted([_ for _", "float(pp.chi2) for key in save_keys: ppdict[key] = getattr(pp, key) klist = [\"V\", \"sigma\"]", "* (ngas + 1) ######################################################################## # Fitting with two components pp = ppxf(templates,", "array_keys = [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for", "= [\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for _", "\"\"\" Save results from pPXF into files excluding fitting arrays. 
\"\"\" array_keys =", "wave_range = [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave", "plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name = name # Saving results and plot save(pp,", "make it a scalar pp.chi2 = float(pp.chi2) for key in save_keys: ppdict[key] =", "key) klist = [\"V\", \"sigma\"] for j, sol in enumerate(pp.sol): for i in", "os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _ in os.listdir(\".\") if _.endswith(\".fits\")]) run_ppxf(specs, tempfile,", "# Masking bad pixels skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels", "line - 10) | (wave > line + 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky)", "for j, sol in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i])", "pixels skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863]) goodpixels = np.arange(len(wave)) for", "Setting up the gas templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]],", "[\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"] names = {\"name\": \"spec\", \"V_0\": \"V\", \"Verr_0\":", "mask goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0]) goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input", "in keys]) outtable.append(data) outtable = vstack(outtable) outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__':", "(ngas + 1) moments = [2] * (ngas + 1) ######################################################################## # Fitting", "start=start, vsyst=dv, lam=wave, component=components, mdegree=-1, gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)),", "mdegree=-1, 
gas_component=gas_component, gas_names=line_names, quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels) plt.savefig(os.path.join(outdir, \"{}.png\".format(name)), dpi=250) plt.close() pp.name =", "data_sn = np.nanmedian(flux / fluxerr) ################################################################### # Rebinning the data to a logarithmic", "plt from astropy.io import fits from astropy import constants from astropy.table import Table,", "flux = flux[idx] fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux /", "| (wave > line + 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels", "\"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table with emission lines", "* (ngas + 1) moments = [2] * (ngas + 1) ######################################################################## #", "import ppxf_util from spectres import spectres import context import misc from der_snr import", "matplotlib.pyplot as plt from astropy.io import fits from astropy import constants from astropy.table", "= [wave_lin[0], wave_lin[-1]] logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1] wave = np.exp(logwave) wave =", "\"V\", \"Verr_0\": \"Verr\", \"sigma_0\": \"sigma\", \"sigmaerr_0\": \"sigmaerr\", \"der_sn\": \"SNR\"} outtable = [] for", "10) | (wave > line + 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) # Making", "= [2] * (ngas + 1) ######################################################################## # Fitting with two components pp", "overwrite=True) ppdict = {} save_keys = [\"name\", \"regul\", \"degree\", \"mdegree\", \"reddening\", \"clean\", \"ncomp\",", "Preparing the fit dv = (logwave_temp[0] - logwave[0]) * \\ constants.c.to(\"km/s\").value templates =", "#################################################################### # Setting up the gas templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp,", "for key 
in array_keys], names=array_keys) table.write(os.path.join(outdir, \"{}_bestfit.fits\".format(pp.name)), overwrite=True) ppdict = {} save_keys =", "outyaml = os.path.join(outdir, \"{}.yaml\".format(name)) if os.path.exists(outyaml) and not redo: continue table = Table.read(spec)", "ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as", "gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] ####################################################################", "0. ######################################################################## # Preparing the fit dv = (logwave_temp[0] - logwave[0]) * \\", "_ in array_keys if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key) for key", "j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f:", "+ 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels = np.intersect1d(goodpixels,", "Reading templates ssp_templates = fits.getdata(templates_file, extname=\"SSPS\").T params = Table.read(templates_file, hdu=1) nssps = ssp_templates.shape[1]", "[wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1] #################################################################### # Masking bad pixels skylines =", "[pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format(", "emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)), overwrite=True) def make_table(direc, output): \"\"\" Read", "Use first spectrum to set emission lines start0 = 
[V0, 100., 0., 0.]", "ssp_templates.shape[1] logwave_temp = Table.read(templates_file, hdu=2)[\"loglam\"].data wave_temp = np.exp(logwave_temp) # Use first spectrum to", "[pp.gas_flux[j]] t[\"fluxerr\"] = [pp.gas_flux_error[j]] t[\"V\"] = [pp.sol[comp][0]] t[\"Verr\"] = [pp.error[comp][0]] t[\"sigma\"] = [pp.sol[comp][1]]", "_ in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\", \"der_sn\"]", "[\"V\", \"sigma\"] for j, sol in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)]", "import constants from astropy.table import Table, vstack, hstack from ppxf.ppxf import ppxf from", "spec in specs: print(\"Processing spectrum {}\".format(spec)) name = spec.replace(\".fits\", \"\") outyaml = os.path.join(outdir,", "Feb 19, 2018 @author: <NAME> Run pPXF in data \"\"\" import os import", "= flux[idx] fluxerr = fluxerr[idx] der_sn = misc.snr(flux)[2] data_sn = np.nanmedian(flux / fluxerr)", "velscale V0 = context.V if V0 is None else V0 # Reading templates", "templates_file, outdir, velscale=None, redo=False, V0=None): \"\"\" Running pPXF. 
\"\"\" velscale = context.velscale if", "templates gas_templates, line_names, line_wave = \\ ppxf_util.emission_lines(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95) ngas = gas_templates.shape[1]", "bounds = [bounds0] * (ngas + 1) moments = [2] * (ngas +", "= np.exp(logwave_temp) # Use first spectrum to set emission lines start0 = [V0,", "goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite( fluxerr))[0]) # Cleaning input spectrum fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr) flux[~np.isfinite(flux)]", "[\"lam\", \"galaxy\", \"noise\", \"bestfit\", \"gas_bestfit\", \"mpoly\", \"apoly\"] array_keys = [_ for _ in", "outtable.write(output, format=\"fits\", overwrite=True) if __name__ == '__main__': targetSN = 100 sample = \"kinematics\"", "= 100 sample = \"kinematics\" velscale = context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale),", "= context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir)", "data to a logarithmic scale for ppxf wave_range = [wave_lin[0], wave_lin[-1]] logwave =", "emission lines start0 = [V0, 100., 0., 0.] 
bounds0 = [[V0 - 2000.,", "= np.arange(len(wave)) for line in skylines: sky = np.argwhere((wave < line - 10)", "float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)] = float(pp.error[j][i]) with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f,", "+ 1) bounds = [bounds0] * (ngas + 1) moments = [2] *", "from ppxf.ppxf import ppxf from ppxf import ppxf_util from spectres import spectres import", "not os.path.exists(outdir): os.mkdir(outdir) specs = sorted([_ for _ in os.listdir(\".\") if _.endswith(\".fits\")]) run_ppxf(specs,", "< 7000)[0] wave_lin = wave_lin[idx] flux = flux[idx] fluxerr = fluxerr[idx] der_sn =", "in array_keys if isinstance(getattr(pp, _), np.ndarray)] table = Table([getattr(pp, key) for key in", "t[\"sigma\"] = [pp.sol[comp][1]] t[\"sigmaerr\"] = [pp.error[comp][1]] emtable.append(t) emtable = vstack(emtable) emtable.write(os.path.join(outdir, \"{}_emission_lines.fits\".format( pp.name)),", "line + 10)).ravel() goodpixels = np.intersect1d(goodpixels, sky) # Making goodpixels mask goodpixels =", "with open(os.path.join(outdir, \"{}.yaml\".format(pp.name)), \"w\") as f: yaml.dump(ppdict, f, default_flow_style=False) # Saving table with", "/ fluxerr) ################################################################### # Rebinning the data to a logarithmic scale for ppxf", "import context import misc from der_snr import DER_SNR def run_ppxf(specs, templates_file, outdir, velscale=None,", "for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\", \"Verr_0\", \"sigma_0\", \"sigmaerr_0\",", "context.velscale tempfile = os.path.join(context.data_dir, \"templates\", \"emiles_vel{}_{}_fwhm2.95.fits\".format(int(velscale), sample)) wdir = os.path.join(context.data_dir, \"MUSE/sn{}/sci\".format(targetSN)) os.chdir(wdir) outdir", "astropy.table import Table, vstack, hstack from ppxf.ppxf import ppxf from ppxf import ppxf_util", "-*- coding: utf-8 -*- \"\"\" Forked in Hydra IMF from Hydra/MUSE on 
Feb", "astropy.unit.quantity object, we have to make it a scalar pp.chi2 = float(pp.chi2) for", "results and plot save(pp, outdir) def save(pp, outdir): \"\"\" Save results from pPXF", "sol in enumerate(pp.sol): for i in range(len(sol)): ppdict[\"{}_{}\".format(klist[i], j)] = float(sol[i]) ppdict[\"{}err_{}\".format(klist[i], j)]", "filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(\".yaml\")]) keys = [\"name\", \"V_0\"," ]
[ "parser.add_argument('filter',metavar='FILTER', help='the filter to use') args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font", "import importlib import argparse import sys from glob import glob parser = argparse.ArgumentParser(description='Filter", "= parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp']", "find filter '%s'.\\nTry one of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1) save_file =", "importlib import argparse import sys from glob import glob parser = argparse.ArgumentParser(description='Filter a", "= args.filter sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError as e:", "glob import glob parser = argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs", "in x] print(\"Couldn't find filter '%s'.\\nTry one of: %s\" % (filter_script, \", \".join(modules)))", "metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args = parser.parse_args() base_path", "help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args = parser.parse_args() base_path =", "e: modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x]", "import glob parser = argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file')", "help='the filter to use') args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font =", "for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't find filter '%s'.\\nTry", "import argparse import sys from glob import glob parser = 
argparse.ArgumentParser(description='Filter a font", "file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path)", "= glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script) except", "glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for", "if \"/NaN\" not in x] print(\"Couldn't find filter '%s'.\\nTry one of: %s\" %", "%s\" % (filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\") glyphsLib.Glyphs.font.save(save_file) print(\"Saved on", "% (filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\") glyphsLib.Glyphs.font.save(save_file) print(\"Saved on %s\"", "parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] =", "filter to use') args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input)", "from glob import glob parser = argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the", "as e: modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in", "= importlib.import_module(filter_script) except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\"))", "import glyphsLib import importlib import argparse import sys from glob import glob parser", "parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args = parser.parse_args()", 
"Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args = parser.parse_args() base_path = \"NaNGlyphFilters\"", "[x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't find filter", "(filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\") glyphsLib.Glyphs.font.save(save_file) print(\"Saved on %s\" %", "\", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\") glyphsLib.Glyphs.font.save(save_file) print(\"Saved on %s\" % save_file)", "= glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3]", "i = importlib.import_module(filter_script) except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for x in", "glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError", "glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script)", "x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't find filter '%s'.\\nTry one", "one of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\") glyphsLib.Glyphs.font.save(save_file)", "of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\") glyphsLib.Glyphs.font.save(save_file) print(\"Saved", "\"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try: i", "sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't find filter 
'%s'.\\nTry one of: %s\"", "filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError as", "argparse import sys from glob import glob parser = argparse.ArgumentParser(description='Filter a font file')", "sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try: i =", "sys from glob import glob parser = argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS',", "glob parser = argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER',", "except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\"", "= [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't find", "args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter", "try: i = importlib.import_module(filter_script) except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for x", "args.filter sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError as e: modules", "base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib", "ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not", "filter '%s'.\\nTry one of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\",", "print(\"Couldn't find filter '%s'.\\nTry one of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1) 
save_file", "font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args", "modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't", "importlib.import_module(filter_script) except ModuleNotFoundError as e: modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+\"/*.py\")) if", "'%s'.\\nTry one of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1) save_file = args.input.replace(\".glyphs\", \"-\"+filter_script+\".glyphs\")", "= argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter", "glyphsLib import importlib import argparse import sys from glob import glob parser =", "file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use') args =", "sys.modules['GlyphsApp'] = glyphsLib try: i = importlib.import_module(filter_script) except ModuleNotFoundError as e: modules =", "in sorted(glob(base_path+\"/*.py\")) if \"/NaN\" not in x] print(\"Couldn't find filter '%s'.\\nTry one of:", "\"/NaN\" not in x] print(\"Couldn't find filter '%s'.\\nTry one of: %s\" % (filter_script,", "import sys from glob import glob parser = argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input',", "a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to use')", "use') args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script =", "x] print(\"Couldn't find filter '%s'.\\nTry one of: %s\" % (filter_script, \", \".join(modules))) sys.exit(1)", "parser = 
argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the", "to use') args = parser.parse_args() base_path = \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script", "argparse.ArgumentParser(description='Filter a font file') parser.add_argument('input', metavar='GLYPHS', help='the Glyphs file') parser.add_argument('filter',metavar='FILTER', help='the filter to", "not in x] print(\"Couldn't find filter '%s'.\\nTry one of: %s\" % (filter_script, \",", "= \"NaNGlyphFilters\" sys.path.append(base_path) glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input) filter_script = args.filter sys.modules['GlyphsApp'] = glyphsLib try:" ]
[ "import time import sys auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth)", "def limit_handle(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping", "= api.home_timeline() for tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while True:", "tweepy import time import sys auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api =", "in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError:", "try: while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping for 7", "True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping for 7 minutes.\") time.sleep(10)", "except StopIteration: return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python", "for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in", "keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the", "StopIteration: return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2", "try: tweet.favorite() print('I liked the tweet') except tweepy.TweepError as e: print(e.reason) except StopIteration:", "cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. 
Sleeping for 7 minutes.\") time.sleep(10) except StopIteration:", "api.home_timeline() for tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while True: yield", "follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets):", "print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I", "time import sys auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me()", "tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for", "= tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline()", "'''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor):", "numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet') except", "tweet.favorite() print('I liked the tweet') except tweepy.TweepError as e: print(e.reason) except StopIteration: break", "sys auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets", 
"'<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet", "= tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet in public_tweets: print(tweet.text) '''", "auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets =", "print(tweet.text) ''' def limit_handle(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle", "tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet') except tweepy.TweepError as e:", "import sys auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count)", "public_tweets = api.home_timeline() for tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while", "tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping for 7 minutes.\") time.sleep(10) except StopIteration: return #Generous", "import tweepy import time import sys auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>') auth.set_access_token('<KEY>', '<KEY>') api", "public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit", "limit_handle(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. 
Sleeping for", "bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet", "except tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping for 7 minutes.\") time.sleep(10) except StopIteration: return", "in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet') except tweepy.TweepError as e: print(e.reason)", "yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping for 7 minutes.\") time.sleep(10) except", "while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded. Sleeping for 7 minutes.\")", "limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite()", "search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet') except tweepy.TweepError", "tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while True: yield cursor.next() except", "for tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try: while True: yield cursor.next()", "Handle Exceeded. Sleeping for 7 minutes.\") time.sleep(10) except StopIteration: return #Generous bot for", "tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet in public_tweets: print(tweet.text) ''' def", "Exceeded. 
Sleeping for 7 minutes.\") time.sleep(10) except StopIteration: return #Generous bot for follower", "Sleeping for 7 minutes.\") time.sleep(10) except StopIteration: return #Generous bot for follower in", "7 minutes.\") time.sleep(10) except StopIteration: return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count)", "return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale'", "python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet')", "for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet') except tweepy.TweepError as", "tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked the tweet') except tweepy.TweepError as e: print(e.reason) except", "for 7 minutes.\") time.sleep(10) except StopIteration: return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()):", "in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try:", "time.sleep(10) except StopIteration: return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords", "minutes.\") time.sleep(10) except StopIteration: return #Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach", "api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = 
api.home_timeline() for tweet in public_tweets: print(tweet.text)", "print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet in public_tweets: print(tweet.text) ''' def limit_handle(cursor): try:", "'<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet in public_tweets:", "auth.set_access_token('<KEY>', '<KEY>') api = tweepy.API(auth) '''user=api.me() print(user.name,user.screen_name,user.followers_count) public_tweets = api.home_timeline() for tweet in", "#Generous bot for follower in limit_handle(tweepy.Cursor(api.followers).items()): print(follower.name,follower.followers_count) #seach keywords python numberOfTweets=2 search_str='indiaforsale' for", "print(\"Limit Handle Exceeded. Sleeping for 7 minutes.\") time.sleep(10) except StopIteration: return #Generous bot", "''' def limit_handle(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: print(\"Limit Handle Exceeded.", "#seach keywords python numberOfTweets=2 search_str='indiaforsale' for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets): try: tweet.favorite() print('I liked" ]
[ "Km ) # Estado del tiempo usando el ID de la estacion print(ow.get_weather(4885))", "Radio en Km ) # Estado del tiempo usando el ID de la", "tiempo usando el ID de la estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10)", "start_date = datetime(2013,9,10) end_date = datetime(2013, 9, 15) # Estado del tiempo en", "fechas start_date = datetime(2013,9,10) end_date = datetime(2013, 9, 15) # Estado del tiempo", "# Obtener las estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0, # Longitud 50.0,", "Altitud 100 # Radio en Km ) # Estado del tiempo usando el", "Estado del tiempo usando el ID de la estacion print(ow.get_weather(4885)) # fechas start_date", "del tiempo usando el ID de la estacion print(ow.get_weather(4885)) # fechas start_date =", "datetime import datetime ow = openweather.openweather() # Obtener las estaciones metereologicas cercanas stations", "= ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud 100 # Radio en Km", "print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date = datetime(2013, 9, 15) # Estado", "end_date = datetime(2013, 9, 15) # Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885,", "from datetime import datetime ow = openweather.openweather() # Obtener las estaciones metereologicas cercanas", "la estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date = datetime(2013, 9, 15)", "Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885, start_date, end_date)) print(ow.get_historic_weather(4885, start_date, end_date, \"day\"))", "9, 15) # Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885, start_date, end_date)) print(ow.get_historic_weather(4885,", "ow = openweather.openweather() # Obtener las estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0,", "las estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud", "# 
Estado del tiempo usando el ID de la estacion print(ow.get_weather(4885)) # fechas", "# Altitud 100 # Radio en Km ) # Estado del tiempo usando", "de la estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date = datetime(2013, 9,", ") # Estado del tiempo usando el ID de la estacion print(ow.get_weather(4885)) #", "7.0, # Longitud 50.0, # Altitud 100 # Radio en Km ) #", "# Longitud 50.0, # Altitud 100 # Radio en Km ) # Estado", "= datetime(2013,9,10) end_date = datetime(2013, 9, 15) # Estado del tiempo en intervalo", "datetime(2013, 9, 15) # Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885, start_date, end_date))", "en Km ) # Estado del tiempo usando el ID de la estacion", "estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud 100", "cercanas stations = ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud 100 # Radio", "datetime ow = openweather.openweather() # Obtener las estaciones metereologicas cercanas stations = ow.find_stations_near(", "metereologicas cercanas stations = ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud 100 #", "stations = ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud 100 # Radio en", "Longitud 50.0, # Altitud 100 # Radio en Km ) # Estado del", "openweather.openweather() # Obtener las estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0, # Longitud", "<reponame>juan1305/0.11-incremento_descremento import openweather from datetime import datetime ow = openweather.openweather() # Obtener las", "el ID de la estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date =", "estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date = datetime(2013, 9, 15) #", "= datetime(2013, 9, 15) # Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885, start_date,", "import datetime ow = openweather.openweather() # Obtener las estaciones metereologicas 
cercanas stations =", "ow.find_stations_near( 7.0, # Longitud 50.0, # Altitud 100 # Radio en Km )", "Obtener las estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0, # Longitud 50.0, #", "15) # Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885, start_date, end_date)) print(ow.get_historic_weather(4885, start_date,", "# Radio en Km ) # Estado del tiempo usando el ID de", "# Estado del tiempo en intervalo diario print(ow.get_historic_weather(4885, start_date, end_date)) print(ow.get_historic_weather(4885, start_date, end_date,", "ID de la estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date = datetime(2013,", "100 # Radio en Km ) # Estado del tiempo usando el ID", "usando el ID de la estacion print(ow.get_weather(4885)) # fechas start_date = datetime(2013,9,10) end_date", "# fechas start_date = datetime(2013,9,10) end_date = datetime(2013, 9, 15) # Estado del", "import openweather from datetime import datetime ow = openweather.openweather() # Obtener las estaciones", "datetime(2013,9,10) end_date = datetime(2013, 9, 15) # Estado del tiempo en intervalo diario", "50.0, # Altitud 100 # Radio en Km ) # Estado del tiempo", "= openweather.openweather() # Obtener las estaciones metereologicas cercanas stations = ow.find_stations_near( 7.0, #", "openweather from datetime import datetime ow = openweather.openweather() # Obtener las estaciones metereologicas" ]
[ "@since 06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import", "'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')])", "# # @since 06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as", "= pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted =", "from Crossref and publishers might not have pushed comprehensive metadata (especially in the", "API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) # Public", "a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume, dateReceived, dateAccepted, dateOnline, title,", "crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name", "years). 
# # @since 06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful", "'your URL', 'your email')) # Public Crossref API with no etiquette crossref =", "in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume, dateReceived, dateAccepted, dateOnline, title, authors,", "in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume') dateReceived =", "pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume, dateReceived, dateAccepted, dateOnline, title, authors, sep='\\t')", "(especially in the early years). # # @since 06-SEP-2021 # @version 29-SEP-2021 #", "# Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath # Please use the", "# @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath #", "Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) #", "with no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi", "(https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data", "Caveat: data come from Crossref and publishers might not have pushed comprehensive metadata", 
"<filename>1-code/editorialAssessment.py # Harvest Crossref for editorial assessment dates for ISSN 1866-7538: Arabian Journal", "cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0]", "polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0',", "not have pushed comprehensive metadata (especially in the early years). # # @since", "pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name", "editorial assessment dates for ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517) # See", "comprehensive metadata (especially in the early years). 
# # @since 06-SEP-2021 # @version", "Arabian Journal of Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/", "Public Crossref API with no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub", "= jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline =", "(https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath # Please use the polite method", "'your email')) # Public Crossref API with no etiquette crossref = cr.Journals() #", "Journal of Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ #", "29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath # Please use", "'.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi,", "jmespath # Please use the polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service)", "06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath", "# Public Crossref API with no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for", "jmespath.search(\"assertion[?name == 
'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors = ';", "crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title", "Crossref and publishers might not have pushed comprehensive metadata (especially in the early", "= pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\",", "API with no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'):", "come from Crossref and publishers might not have pushed comprehensive metadata (especially in", "@version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath # Please", "dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME')", "https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from Crossref and publishers might not have", "pub) authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if", "dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors", "= jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for", "query the 
Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your", "method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your", "publishers might not have pushed comprehensive metadata (especially in the early years). #", "title = pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted", "the early years). # # @since 06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi)", "'1.0', 'your URL', 'your email')) # Public Crossref API with no etiquette crossref", "# Caveat: data come from Crossref and publishers might not have pushed comprehensive", "authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author')", "use the polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref =", "'; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS'", "Please use the polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref", "= cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title =", "== 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name ==", "for ISSN 1866-7538: Arabian Journal of Geosciences 
(https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ #", "= '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else", "the polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment',", "# # Caveat: data come from Crossref and publishers might not have pushed", "Crossref API with no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in", "# Harvest Crossref for editorial assessment dates for ISSN 1866-7538: Arabian Journal of", "See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from", "pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub)", "no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi =", "# crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) # Public Crossref API", "# @since 06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr", "See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from Crossref and publishers might", 
"+', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume,", "Harvest Crossref for editorial assessment dates for ISSN 1866-7538: Arabian Journal of Geosciences", "volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name ==", "email')) # Public Crossref API with no etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters", "= cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) # Public Crossref API with no", "for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume')", "import crossref.restful as cr import jmespath # Please use the polite method to", "== 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in", "https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0] volume =", "early years). 
# # @since 06-SEP-2021 # @version 29-SEP-2021 # Crossref (https://github.com/fabiobatalha/crossrefapi) import", "pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub)", "and publishers might not have pushed comprehensive metadata (especially in the early years).", "https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from Crossref and", "as cr import jmespath # Please use the polite method to query the", "also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from Crossref", "'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\",", "Crossref for editorial assessment dates for ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517)", "data come from Crossref and publishers might not have pushed comprehensive metadata (especially", "# https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0] volume", "metadata (especially in the early years). 
# # @since 06-SEP-2021 # @version 29-SEP-2021", "pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume') dateReceived", "== 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family')", "etiquette crossref = cr.Journals() # https://github.com/CrossRef/rest-api-doc#multiple-filters for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'): doi = pub.get('DOI')", "pushed comprehensive metadata (especially in the early years). # # @since 06-SEP-2021 #", "cr import jmespath # Please use the polite method to query the Crossref", "ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See", "might not have pushed comprehensive metadata (especially in the early years). 
# #", "assessment dates for ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517) # See also", "import jmespath # Please use the polite method to query the Crossref API", "to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL',", "doi = pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name ==", "1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also", "dates for ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/", "# See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come", "for editorial assessment dates for ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517) #", "the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email'))", "URL', 'your email')) # Public Crossref API with no etiquette crossref = cr.Journals()", "(https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) # crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) # Public Crossref", "jmespath.search(\"assertion[?name 
== 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a", "'+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume, dateReceived,", "# Please use the polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service) #", "also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from Crossref and publishers might not", "Crossref (https://github.com/fabiobatalha/crossrefapi) import crossref.restful as cr import jmespath # Please use the polite", "jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name", "cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) # Public Crossref API with no etiquette", "pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +', '+a.get('given',", "= pub.get('DOI') title = pub.get('title')[0] volume = pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\",", "= jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors =", "# See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat: data come from Crossref and publishers", "of Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also 
https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # #", "Geosciences (https://www.springer.com/journal/12517) # See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/ # See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/ # # Caveat:", "crossref.restful as cr import jmespath # Please use the polite method to query", "'accepted'].value|[0]\", pub) dateOnline = jmespath.search(\"assertion[?name == 'first_online'].value|[0]\", pub) authors = '; '.join([a.get('family') +',", "for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume, dateReceived, dateAccepted, dateOnline,", "have pushed comprehensive metadata (especially in the early years). # # @since 06-SEP-2021", "pub.get('volume') dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub)", "in the early years). # # @since 06-SEP-2021 # @version 29-SEP-2021 # Crossref", "crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email')) # Public Crossref API with", "'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS' print(doi, volume, dateReceived, dateAccepted,", "dateReceived = jmespath.search(\"assertion[?name == 'received'].value|[0]\", pub) dateAccepted = jmespath.search(\"assertion[?name == 'accepted'].value|[0]\", pub) dateOnline" ]
[ "return colormap[mod(N,7)] def get_cmap(N): '''Returns a function that maps each index in 0,", "cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N = 30 fig=plt.figure()", "a function that maps each index in 0, 1, ... N-1 to a", "matplotlib.cm as cmx import matplotlib.colors as colors def my_color_map(N): from numpy import mod", "distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index):", "scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N", "fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i", "map_index_to_rgb_color def main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5,", "cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col) ax.add_artist(rect) ax.set_yticks([]) plt.show() if __name__=='__main__':", "plt import matplotlib.cm as cmx import matplotlib.colors as colors def my_color_map(N): from numpy", "0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i in range(N): col =", "def main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5])", "maps each index in 0, 1, ... N-1 to a distinct RGB color.'''", "RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return", "cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N = 30", "index in 0, 1, ... 
N-1 to a distinct RGB color.''' color_norm =", "rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col) ax.add_artist(rect) ax.set_yticks([]) plt.show() if __name__=='__main__': main()", "... N-1 to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map =", "def my_color_map(N): from numpy import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N):", "N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap =", "colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def", "ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i in range(N): col = cmap(i) rect", "scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0,", "0, 1, ... N-1 to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1)", "= cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N =", "import matplotlib.colors as colors def my_color_map(N): from numpy import mod colormap = ['r','g','b','k','c','m','y']", "in 0, 1, ... 
N-1 to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0,", "plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i in range(N):", "main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap", "colormap[mod(N,7)] def get_cmap(N): '''Returns a function that maps each index in 0, 1,", "def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111)", "import matplotlib.pyplot as plt import matplotlib.cm as cmx import matplotlib.colors as colors def", "as colors def my_color_map(N): from numpy import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)]", "function that maps each index in 0, 1, ... N-1 to a distinct", "import matplotlib.cm as cmx import matplotlib.colors as colors def my_color_map(N): from numpy import", "numpy import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a function", "color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index)", "= colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color", "i in range(N): col = cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col)", "each index in 0, 1, ... N-1 to a distinct RGB color.''' color_norm", "my_color_map(N): from numpy import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns", "N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i in range(N): col = cmap(i)", "col = cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col) ax.add_artist(rect) ax.set_yticks([]) plt.show()", "1, ... 
N-1 to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map", "that maps each index in 0, 1, ... N-1 to a distinct RGB", "mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a function that maps", "get_cmap(N) for i in range(N): col = cmap(i) rect = plt.Rectangle((i, -0.5), 1,", "N-1 to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm,", "cmx import matplotlib.colors as colors def my_color_map(N): from numpy import mod colormap =", "colors def my_color_map(N): from numpy import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def", "vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main():", "as cmx import matplotlib.colors as colors def my_color_map(N): from numpy import mod colormap", "color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return", "matplotlib.pyplot as plt import matplotlib.cm as cmx import matplotlib.colors as colors def my_color_map(N):", "return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([", "= get_cmap(N) for i in range(N): col = cmap(i) rect = plt.Rectangle((i, -0.5),", "30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for", "import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a function that", "as plt import matplotlib.cm as cmx import matplotlib.colors as colors def my_color_map(N): from", "map_index_to_rgb_color(index): return scalar_map.to_rgba(index) return map_index_to_rgb_color def main(): N 
= 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled')", "<gh_stars>10-100 import matplotlib.pyplot as plt import matplotlib.cm as cmx import matplotlib.colors as colors", "in range(N): col = cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col) ax.add_artist(rect)", "from numpy import mod colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a", "range(N): col = cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col) ax.add_artist(rect) ax.set_yticks([])", "def get_cmap(N): '''Returns a function that maps each index in 0, 1, ...", "'''Returns a function that maps each index in 0, 1, ... N-1 to", "for i in range(N): col = cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1,", "ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i in", "a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv') def", "['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a function that maps each index in", "0.5]) cmap = get_cmap(N) for i in range(N): col = cmap(i) rect =", "return map_index_to_rgb_color def main(): N = 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N])", "colormap = ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a function that maps each", "to a distinct RGB color.''' color_norm = colors.Normalize(vmin=0, vmax=N-1) scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')", "cmap = get_cmap(N) for i in range(N): col = cmap(i) rect = plt.Rectangle((i,", "ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N) for i in range(N): col", "= 30 fig=plt.figure() ax=fig.add_subplot(111) plt.axis('scaled') ax.set_xlim([ 0, N]) ax.set_ylim([-0.5, 0.5]) cmap = get_cmap(N)", "get_cmap(N): '''Returns a function that maps each index in 0, 1, ... 
N-1", "= cmap(i) rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col) ax.add_artist(rect) ax.set_yticks([]) plt.show() if", "= ['r','g','b','k','c','m','y'] return colormap[mod(N,7)] def get_cmap(N): '''Returns a function that maps each index", "matplotlib.colors as colors def my_color_map(N): from numpy import mod colormap = ['r','g','b','k','c','m','y'] return" ]
[ "data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid),", "assert machine.hostname == 'localhost' assert Service.query.count() == 1 service = Service.query.first() assert isinstance(service.created_at,", "pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report", "collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code == 415 assert", "{'load': {'avg_15_min': 1.62939453125}} assert report.service == service def test_create_json(self, client, collection_valid): res =", "Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid =", "import Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import", "0 def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections',", "= json.loads(res.data) assert res.status_code == 422 assert data == { 'message': '422: Unprocessable", "'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 422 assert data", "1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert Collection.query.count()", "0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_empty(self, client): res", "= json.loads(res.data) assert res.status_code == 415 assert data == { 'message': '415: Unsupported", "report.service == 
service def test_create_json(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type':", "'system' assert Collection.query.count() == 1 collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at", "1, 424242) assert collection.hostname == 'localhost' assert collection.machine == machine assert collection.pid ==", "424242) assert collection.hostname == 'localhost' assert collection.machine == machine assert collection.pid == 42", "client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type':", "json.loads(res.data) assert res.status_code == 422 assert data == { 'message': '422: Unprocessable Entity'}", "data == { 'message': '422: Unprocessable Entity'} assert Machine.query.count() == 1 assert Service.query.count()", "0 def test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR':", "Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service", "assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service == service def test_create_json(self, client,", "'127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res = client.post('/collections',", "1 machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address ==", "Not Found'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count() ==", "res.status_code == 
415 assert data == { 'message': '415: Unsupported Media Type'} assert", "== 0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for',", "collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data =", "collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count() == 1 report =", "report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service == service def test_create_json(self, client, collection_valid):", "Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname", "Found'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count() == 0", "data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code ==", "isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977) assert", "'127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 422 assert data == { 'message':", "client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code", "def assert_create_success(self, res, data): assert res.status_code == 201 assert data == {} assert", "test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 
'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data", "res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data)", "assert report.collection == collection assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service ==", "isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname == 'localhost'", "0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid),", "assert collection.machine == machine assert collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__ assert", "== 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res =", "collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'},", "assert Service.query.count() == 1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code ==", "Report.query.count() == 1 report = Report.query.first() assert report.collection == collection assert report.data ==", "res.status_code == 422 assert data == { 'message': '422: Unprocessable Entity'} assert Machine.query.count()", "== 42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count() ==", "== service def test_create_json(self, client, collection_valid): res = 
client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'},", "Report.query.count() == 0 def test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'},", "== 1 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() ==", "= collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR':", "import pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import", "assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_empty(self, client): res =", "19, 33, 1, 424242) assert collection.hostname == 'localhost' assert collection.machine == machine assert", "class TestCollections: def assert_create_success(self, res, data): assert res.status_code == 201 assert data ==", "= Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert Collection.query.count() == 1", "client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data)", "assert Collection.query.count() == 1 collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at ==", "environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 422 assert data ==", "assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def", "assert data == { 'message': '404: Not Found'} 
assert Machine.query.count() == 0 assert", "assert res.status_code == 415 assert data == { 'message': '415: Unsupported Media Type'}", "collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015,", "== {'load': {'avg_15_min': 1.62939453125}} assert report.service == service def test_create_json(self, client, collection_valid): res", "environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 404 assert data ==", "'message': '415: Unsupported Media Type'} assert Machine.query.count() == 0 assert Service.query.count() == 0", "data): assert res.status_code == 201 assert data == {} assert Machine.query.count() == 1", "== 1 collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4,", "import Report from pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self, res, data): assert", "client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data)", "data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code == 415 assert data == { 'message':", "collection.machine == machine assert collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector", "'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res =", "== 0 def test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={", "service.code == 'system' assert Collection.query.count() == 1 collection = Collection.query.first() 
assert isinstance(collection.created_at, datetime.datetime)", "machine.hostname == 'localhost' assert Service.query.count() == 1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime)", "'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 404 assert", "'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 404 assert data == {", "res, data): assert res.status_code == 201 assert data == {} assert Machine.query.count() ==", "'404: Not Found'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count()", "client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code == 415 assert data == {", "Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count()", "== {} assert Machine.query.count() == 1 machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert", "data) def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert", "headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 422", "404 assert data == { 'message': '404: Not Found'} assert Machine.query.count() == 0", "{'avg_15_min': 1.62939453125}} assert report.service == service def test_create_json(self, client, collection_valid): res = client.post('/collections',", "def test_create_json(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR':", "data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = 
json.loads(res.data) assert res.status_code ==", "assert Machine.query.count() == 1 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert", "422 assert data == { 'message': '422: Unprocessable Entity'} assert Machine.query.count() == 1", "collection.version_collector == '1.2.3' assert Report.query.count() == 1 report = Report.query.first() assert report.collection ==", "data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code ==", "== 'localhost' assert Service.query.count() == 1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert", "json.loads(res.data) assert res.status_code == 404 assert data == { 'message': '404: Not Found'}", "== '1.2.3' assert Report.query.count() == 1 report = Report.query.first() assert report.collection == collection", "machine assert collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3'", "== 422 assert data == { 'message': '422: Unprocessable Entity'} assert Machine.query.count() ==", "Machine.query.count() == 1 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count()", "'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 422 assert data == {", "'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 422 assert", "environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res", "== 1 machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address", "assert res.status_code == 422 assert data == { 'message': 
'422: Unprocessable Entity'} assert", "assert Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname']", "= json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data", "collection assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service == service def test_create_json(self,", "1 report = Report.query.first() assert report.collection == collection assert report.data == {'load': {'avg_15_min':", "Report.query.first() assert report.collection == collection assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service", "= Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert", "service def test_create_json(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={", "Service.query.count() == 1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code == 'system'", "report = Report.query.first() assert report.collection == collection assert report.data == {'load': {'avg_15_min': 1.62939453125}}", "def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code", "== datetime.datetime(2015, 4, 4, 19, 33, 1, 424242) assert collection.hostname == 'localhost' assert", "datetime.datetime(2015, 4, 4, 19, 33, 1, 424242) assert collection.hostname == 'localhost' assert collection.machine", "415 assert data == { 'message': '415: Unsupported Media Type'} assert 
Machine.query.count() ==", "Media Type'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count() ==", "== 0 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() ==", "datetime from flask import json import msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection", "'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client,", "'422: Unprocessable Entity'} assert Machine.query.count() == 1 assert Service.query.count() == 0 assert Collection.query.count()", "data == {} assert Machine.query.count() == 1 machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime)", "Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_empty(self,", "'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 404 assert data", "201 assert data == {} assert Machine.query.count() == 1 machine = Machine.query.first() assert", "== 1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert", "data = json.loads(res.data) assert res.status_code == 422 assert data == { 'message': '422:", "== 201 assert data == {} assert Machine.query.count() == 1 machine = Machine.query.first()", "assert res.status_code == 404 assert data == { 'message': '404: Not Found'} assert", "19, 32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1,", "assert res.status_code == 201 assert data == {} assert Machine.query.count() == 1 machine", "assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977)", 
"pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service", "collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data =", "client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data", "datetime.datetime(2015, 4, 4, 19, 32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4,", "== 1 report = Report.query.first() assert report.collection == collection assert report.data == {'load':", "4, 19, 33, 1, 424242) assert collection.hostname == 'localhost' assert collection.machine == machine", "assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname ==", "= client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert", "'415: Unsupported Media Type'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert", "headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self,", "Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self,", "0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid):", "{} assert Machine.query.count() == 1 machine = 
Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at,", "== 0 def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'},", "data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def", "test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={", "assert Machine.query.count() == 1 machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime)", "== 0 assert Report.query.count() == 0 def test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}),", "== { 'message': '415: Unsupported Media Type'} assert Machine.query.count() == 0 assert Service.query.count()", "'127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 404 assert data == { 'message':", "Report from pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self, res, data): assert res.status_code", "616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242) assert collection.hostname", "32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242)", "0 assert Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del", "= client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code == 415 
assert data ==", "headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self,", "def test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'})", "== datetime.datetime(2015, 4, 4, 19, 32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4,", "'127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections',", "collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={", "res.status_code == 404 assert data == { 'message': '404: Not Found'} assert Machine.query.count()", "== machine assert collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector ==", "assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242) assert collection.hostname ==", "json.loads(res.data) assert res.status_code == 415 assert data == { 'message': '415: Unsupported Media", "pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count() == 1 report = Report.query.first() assert", "res.status_code == 201 assert data == {} assert Machine.query.count() == 1 machine =", "collection.hostname == 'localhost' assert collection.machine == machine assert collection.pid == 42 assert collection.version_server", "TestCollections: def assert_create_success(self, res, data): assert res.status_code == 201 assert data == {}", "assert data == { 'message': '415: Unsupported Media Type'} assert Machine.query.count() == 
0", "== '127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count() == 1 service = Service.query.first()", "from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self, res,", "test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code ==", "report.collection == collection assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service == service", "self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'},", "isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert Collection.query.count() == 1 collection = Collection.query.first()", "res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data)", "res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data)", "def test_create_binary(self, client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR':", "from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service class", "Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid): res =", "test_create_collection_invalid_url(self, client, collection_valid): res = 
client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'})", "collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242) assert collection.hostname == 'localhost'", "def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR':", "collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4, 19,", "4, 19, 32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33,", "== 'system' assert Collection.query.count() == 1 collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert", "Unsupported Media Type'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count()", "test_create_binary(self, client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'})", "'127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count() == 1 service = Service.query.first() assert", "== 0 assert Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy()", "data = json.loads(res.data) assert res.status_code == 404 assert data == { 'message': '404:", "= Report.query.first() assert report.collection == collection assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert", "Collection.query.count() == 1 collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert 
collection.collected_at == datetime.datetime(2015,", "collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data =", "datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977) assert collection.collecting_at", "client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data", "assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count()", "Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self,", "collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count()", "33, 1, 424242) assert collection.hostname == 'localhost' assert collection.machine == machine assert collection.pid", "data = json.loads(res.data) assert res.status_code == 415 assert data == { 'message': '415:", "Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res", "environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res", "collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data =", "pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self, res, 
data): assert res.status_code == 201", "client): res = client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data =", "import json import msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import", "pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service class TestCollections:", "0 def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={", "assert collection.hostname == 'localhost' assert collection.machine == machine assert collection.pid == 42 assert", "== 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client,", "{ 'message': '404: Not Found'} assert Machine.query.count() == 0 assert Service.query.count() == 0", "= client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res,", "'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid):", "headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code == 404", "0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid):", "== collection assert report.data == {'load': {'avg_15_min': 1.62939453125}} assert report.service == service def", "'REMOTE_ADDR': 
'127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res =", "Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_partial(self,", "1.62939453125}} assert report.service == service def test_create_json(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid),", "test_create_json(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'})", "'message': '422: Unprocessable Entity'} assert Machine.query.count() == 1 assert Service.query.count() == 0 assert", "= json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={", "del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data", "assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={", "== pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count() == 1 report = Report.query.first()", "res = client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data)", "from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report import Report from", "0 assert Report.query.count() == 0 def 
test_create_collection_empty(self, client): res = client.post('/collections', data=json.dumps({}), headers={", "'1.2.3' assert Report.query.count() == 1 report = Report.query.first() assert report.collection == collection assert", "from pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self, res, data): assert res.status_code ==", "machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1'", "assert report.service == service def test_create_json(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid), headers={", "import msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine from", "client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code", "42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count() == 1", "assert_create_success(self, res, data): assert res.status_code == 201 assert data == {} assert Machine.query.count()", "from flask import json import msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection from", "assert data == { 'message': '422: Unprocessable Entity'} assert Machine.query.count() == 1 assert", "== 415 assert data == { 'message': '415: Unsupported Media Type'} assert Machine.query.count()", "Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_empty(self, client): res = client.post('/collections',", "assert collection.version_collector == '1.2.3' assert Report.query.count() == 1 report = Report.query.first() assert report.collection", "assert 
isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert Collection.query.count() == 1 collection =", "data == { 'message': '404: Not Found'} assert Machine.query.count() == 0 assert Service.query.count()", "datetime.datetime) assert service.code == 'system' assert Collection.query.count() == 1 collection = Collection.query.first() assert", "import datetime from flask import json import msgpack import pikka_bird_server from pikka_bird_server.models.collection import", "= client.post('/collections', data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res,", "datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname == 'localhost' assert", "Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert Collection.query.count() == 1 collection", "machine.address == '127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count() == 1 service =", "assert Report.query.count() == 1 report = Report.query.first() assert report.collection == collection assert report.data", "Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20,", "flask import json import msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine", "client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data", "== 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_empty(self, client):", "client, collection_valid): res = 
client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code == 415", "== 'localhost' assert collection.machine == machine assert collection.pid == 42 assert collection.version_server ==", "json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid): res = client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type':", "assert service.code == 'system' assert Collection.query.count() == 1 collection = Collection.query.first() assert isinstance(collection.created_at,", "Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid): res = client.post('/this-is-not-the-service-you-are-looking-for', data=json.dumps(collection_valid), headers={ 'Content-Type':", "4, 4, 19, 32, 20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19,", "assert data == {} assert Machine.query.count() == 1 machine = Machine.query.first() assert isinstance(machine.created_at,", "'localhost' assert collection.machine == machine assert collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__", "data == { 'message': '415: Unsupported Media Type'} assert Machine.query.count() == 0 assert", "json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data =", "Entity'} assert Machine.query.count() == 1 assert Service.query.count() == 0 assert Collection.query.count() == 0", "'localhost' assert Service.query.count() == 1 service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code", "Unprocessable Entity'} assert Machine.query.count() == 1 assert Service.query.count() == 0 assert Collection.query.count() ==", "data) def test_create_binary(self, client, collection_valid): res = 
client.post('/collections', data=msgpack.packb(collection_valid), headers={ 'Content-Type': 'application/octet-stream'}, environ_base={", "== { 'message': '404: Not Found'} assert Machine.query.count() == 0 assert Service.query.count() ==", "Type'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count() == 0", "import Service class TestCollections: def assert_create_success(self, res, data): assert res.status_code == 201 assert", "'message': '404: Not Found'} assert Machine.query.count() == 0 assert Service.query.count() == 0 assert", "4, 4, 19, 33, 1, 424242) assert collection.hostname == 'localhost' assert collection.machine ==", "= client.post('/collections', data=json.dumps({}), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert", "assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert Report.query.count() == 1 report", "assert Machine.query.count() == 0 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert", "isinstance(machine.updated_at, datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count() ==", "{ 'message': '422: Unprocessable Entity'} assert Machine.query.count() == 1 assert Service.query.count() == 0", "assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_invalid_url(self, client, collection_valid): res", "= json.loads(res.data) assert res.status_code == 404 assert data == { 'message': '404: Not", "pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service class TestCollections: def assert_create_success(self, res, data):", "= Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32,", "assert 
collection.pid == 42 assert collection.version_server == pikka_bird_server.__version__ assert collection.version_collector == '1.2.3' assert", "1 collection = Collection.query.first() assert isinstance(collection.created_at, datetime.datetime) assert collection.collected_at == datetime.datetime(2015, 4, 4,", "== 0 assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_partial(self, client,", "msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine from pikka_bird_server.models.report", "20, 616977) assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242) assert", "res = client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data) assert res.status_code == 415 assert data", "datetime.datetime) assert machine.address == '127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count() == 1", "res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data)", "assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977) assert collection.collecting_at ==", "'Content-Type': 'application/octet-stream'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client,", "client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert res.status_code", "= client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) assert", "collection_valid.copy() del 
collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'})", "== 404 assert data == { 'message': '404: Not Found'} assert Machine.query.count() ==", "1 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() == 0", "data = json.loads(res.data) self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid))", "assert machine.address == '127.0.0.1' assert machine.hostname == 'localhost' assert Service.query.count() == 1 service", "Service class TestCollections: def assert_create_success(self, res, data): assert res.status_code == 201 assert data", "def test_create_collection_partial(self, client, collection_valid): collection_invalid = collection_valid.copy() del collection_invalid['environment']['hostname'] res = client.post('/collections', data=json.dumps(collection_invalid),", "service = Service.query.first() assert isinstance(service.created_at, datetime.datetime) assert service.code == 'system' assert Collection.query.count() ==", "'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def test_create_binary(self, client, collection_valid):", "0 assert Service.query.count() == 0 assert Collection.query.count() == 0 assert Report.query.count() == 0", "assert Collection.query.count() == 0 assert Report.query.count() == 0 def test_create_collection_partial(self, client, collection_valid): collection_invalid", "data=json.dumps(collection_valid), headers={ 'Content-Type': 'application/json'}, environ_base={ 'REMOTE_ADDR': '127.0.0.1'}) data = json.loads(res.data) self.assert_create_success(res, data) def", "assert Report.query.count() == 0 def test_create_collection_empty(self, client): res = 
client.post('/collections', data=json.dumps({}), headers={ 'Content-Type':", "json import msgpack import pikka_bird_server from pikka_bird_server.models.collection import Collection from pikka_bird_server.models.machine import Machine", "{ 'message': '415: Unsupported Media Type'} assert Machine.query.count() == 0 assert Service.query.count() ==", "import Machine from pikka_bird_server.models.report import Report from pikka_bird_server.models.service import Service class TestCollections: def", "== { 'message': '422: Unprocessable Entity'} assert Machine.query.count() == 1 assert Service.query.count() ==", "Machine.query.count() == 1 machine = Machine.query.first() assert isinstance(machine.created_at, datetime.datetime) assert isinstance(machine.updated_at, datetime.datetime) assert", "self.assert_create_success(res, data) def test_create_no_content_type(self, client, collection_valid): res = client.post('/collections', data=json.dumps(collection_valid)) data = json.loads(res.data)" ]
[ "form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method == 'POST': return 'Submitted!' if __name__ ==", "from flask_bootstrap import Bootstrap from models import QuizForm class Config(object): SECRET_KEY = '<KEY>'", "class Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST'])", "Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def", "'POST']) def take_test(): form = QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if", "render_template('take_quiz_template.html', form=form) if request.method == 'POST': return 'Submitted!' if __name__ == '__main__': application.run(host='0.0.0.0',", "def take_test(): form = QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method", "not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method == 'POST': return 'Submitted!' 
if __name__", "methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form)", "'<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form =", "flask_bootstrap import Bootstrap from models import QuizForm class Config(object): SECRET_KEY = '<KEY>' application", "models import QuizForm class Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application)", "import Bootstrap from models import QuizForm class Config(object): SECRET_KEY = '<KEY>' application =", "python from flask import Flask, render_template, request from flask_bootstrap import Bootstrap from models", "QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method == 'POST': return 'Submitted!'", "QuizForm class Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET',", "SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test():", "application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form)", "return render_template('take_quiz_template.html', form=form) if request.method == 'POST': return 'Submitted!' 
if __name__ == '__main__':", "take_test(): form = QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method ==", "import QuizForm class Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/',", "request from flask_bootstrap import Bootstrap from models import QuizForm class Config(object): SECRET_KEY =", "Bootstrap from models import QuizForm class Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__)", "flask import Flask, render_template, request from flask_bootstrap import Bootstrap from models import QuizForm", "= '<KEY>' application = Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form", "Flask, render_template, request from flask_bootstrap import Bootstrap from models import QuizForm class Config(object):", "import Flask, render_template, request from flask_bootstrap import Bootstrap from models import QuizForm class", "#!/usr/bin/env python from flask import Flask, render_template, request from flask_bootstrap import Bootstrap from", "form=form) if request.method == 'POST': return 'Submitted!' 
if __name__ == '__main__': application.run(host='0.0.0.0', debug=True)", "render_template, request from flask_bootstrap import Bootstrap from models import QuizForm class Config(object): SECRET_KEY", "= QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method == 'POST': return", "from flask import Flask, render_template, request from flask_bootstrap import Bootstrap from models import", "Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form) if not form.validate_on_submit(): return", "from models import QuizForm class Config(object): SECRET_KEY = '<KEY>' application = Flask(__name__) application.config.from_object(Config)", "form = QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method == 'POST':", "= Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form) if", "Flask(__name__) application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form) if not", "@application.route('/', methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form) if not form.validate_on_submit(): return render_template('take_quiz_template.html',", "application.config.from_object(Config) Bootstrap(application) @application.route('/', methods=['GET', 'POST']) def take_test(): form = QuizForm(request.form) if not form.validate_on_submit():", "if not form.validate_on_submit(): return render_template('take_quiz_template.html', form=form) if request.method == 'POST': return 'Submitted!' if" ]
[ "# print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit", "owl.feed(meat) # veg = Vegetable(1) # print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10,", "= Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit = Fruit(5) meat = Meat(1)", "10, 10) # print(owl) # meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat) #", "print(owl) # meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1)", "= Vegetable(3) fruit = Fruit(5) meat = Meat(1) print(hen) print(hen.make_sound()) hen.feed(veg) hen.feed(fruit) hen.feed(meat)", "# veg = Vegetable(1) # print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10, 10)", "= Vegetable(1) # print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10, 10) veg =", "Meat, Vegetable, Fruit # owl = Owl(\"Pip\", 10, 10) # print(owl) # meat", "= Owl(\"Pip\", 10, 10) # print(owl) # meat = Meat(4) # print(owl.make_sound()) #", "veg = Vegetable(3) fruit = Fruit(5) meat = Meat(1) print(hen) print(hen.make_sound()) hen.feed(veg) hen.feed(fruit)", "Fruit # owl = Owl(\"Pip\", 10, 10) # print(owl) # meat = Meat(4)", "Owl(\"Pip\", 10, 10) # print(owl) # meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat)", "print(owl) hen = Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit = Fruit(5) meat", "10) veg = Vegetable(3) fruit = Fruit(5) meat = Meat(1) print(hen) print(hen.make_sound()) hen.feed(veg)", "from wild_animals_04.food import Meat, Vegetable, Fruit # owl = Owl(\"Pip\", 10, 10) #", "print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1) # print(owl.feed(veg)) # print(owl) hen =", "from wild_animals_04.animals.birds import Hen from wild_animals_04.food import Meat, Vegetable, Fruit # owl =", "veg = Vegetable(1) # print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10, 10) veg", "meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1) # print(owl.feed(veg))", "= Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1) # 
print(owl.feed(veg)) #", "# print(owl) hen = Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit = Fruit(5)", "Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1) # print(owl.feed(veg)) # print(owl)", "<filename>polymorphism_and_magic_methods/exercise/wild_animals_04/test.py<gh_stars>0 from wild_animals_04.animals.birds import Hen from wild_animals_04.food import Meat, Vegetable, Fruit # owl", "# print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1) # print(owl.feed(veg)) # print(owl) hen", "import Hen from wild_animals_04.food import Meat, Vegetable, Fruit # owl = Owl(\"Pip\", 10,", "owl = Owl(\"Pip\", 10, 10) # print(owl) # meat = Meat(4) # print(owl.make_sound())", "# owl.feed(meat) # veg = Vegetable(1) # print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\",", "print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit =", "# print(owl) # meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg =", "Vegetable, Fruit # owl = Owl(\"Pip\", 10, 10) # print(owl) # meat =", "10, 10) veg = Vegetable(3) fruit = Fruit(5) meat = Meat(1) print(hen) print(hen.make_sound())", "Vegetable(1) # print(owl.feed(veg)) # print(owl) hen = Hen(\"Harry\", 10, 10) veg = Vegetable(3)", "hen = Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit = Fruit(5) meat =", "10) # print(owl) # meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg", "wild_animals_04.animals.birds import Hen from wild_animals_04.food import Meat, Vegetable, Fruit # owl = Owl(\"Pip\",", "# owl = Owl(\"Pip\", 10, 10) # print(owl) # meat = Meat(4) #", "wild_animals_04.food import Meat, Vegetable, Fruit # owl = Owl(\"Pip\", 10, 10) # print(owl)", "Hen(\"Harry\", 10, 10) veg = Vegetable(3) fruit = Fruit(5) meat = Meat(1) print(hen)", "Hen from wild_animals_04.food import Meat, Vegetable, Fruit # owl = Owl(\"Pip\", 10, 10)", "Vegetable(3) fruit = Fruit(5) meat = Meat(1) print(hen) print(hen.make_sound()) hen.feed(veg) hen.feed(fruit) hen.feed(meat) 
print(hen)", "import Meat, Vegetable, Fruit # owl = Owl(\"Pip\", 10, 10) # print(owl) #", "# meat = Meat(4) # print(owl.make_sound()) # owl.feed(meat) # veg = Vegetable(1) #" ]
[ "mapping from words to their indexes max_features: max features to use embed_size: size", "= all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1,", "found words \"\"\" if embeddings_type not in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible", "import Word2Vec from simple_elmo import ElmoModel from torch import nn def pad_sequences( sequences:", "elif padding == 'pre': x[idx, -len(trunc) :] = trunc else: raise ValueError(f'Padding type", "matrix, number of of words and the list of not found words \"\"\"", "sample {trunc.shape[1:]} of sequence at position {idx}' f'is different from expected shape {sample_shape}'", "a sequence. maxlen: Int, maximum length of all sequences. dtype: Type of the", "nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x)", "shape for a `sequences` entry. \"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must", "in the main loop below. sample_shape = () for s in sequences: if", "in word_idx.items(): # possible variants of the word to be found in word", "embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess =", "strings, you can use `object`. 
padding: String, 'pre' or 'post': pad either before", "np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for", "\"{truncating}\" ' 'not understood') # check `trunc` has expected shape trunc = np.asarray(trunc,", "pad_sequences( sequences: List, maxlen: Optional[int], dtype: str = 'int32', padding: str = 'post',", "sequences: List, maxlen: Optional[int], dtype: str = 'int32', padding: str = 'post', truncating:", "embedding_size = embed_size if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean,", "values: word2vec, glove') model = Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index = {}", "the main loop below. sample_shape = () for s in sequences: if len(s)", "Creating Embedding Index embedding_index = {} for word in model.wv.vocab: coefs = np.asarray(model.wv[word])", "\"\"\" Create embedding matrix Args: embedding_path: path to embeddings embeddings_type: type of pretrained", "`value` at the end. Sequences longer than `num_timesteps` are truncated so that they", "found in word to idx dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for", "dtype: str = 'int32', padding: str = 'post', truncating: str = 'post', value:", "below. sample_shape = () for s in sequences: if len(s) > 0: sample_shape", "sample_shape = () for s in sequences: if len(s) > 0: sample_shape =", "== 'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] else:", "sequences. value: Float or String, padding value. # Returns x: Numpy array with", "To pad sequences with variable length strings, you can use `object`. padding: String,", "int = 300, ) -> Tuple[np.array, int, List]: \"\"\" Create embedding matrix Args:", "2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument", "fit the desired length. 
The position where padding or truncation happens is determined", "value: Float or String, padding value. # Returns x: Numpy array with shape", "of invalid shape for a `sequences` entry. \"\"\" if not hasattr(sequences, '__len__'): raise", "np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx, s", "self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def", "their indexes max_features: max features to use embed_size: size of embeddings Returns: embedding", "= 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix", "or `padding`, or in case of invalid shape for a `sequences` entry. \"\"\"", "enumerate(sequences): if not len(s): continue # empty list/array was found if truncating ==", "invalid values for `truncating` or `padding`, or in case of invalid shape for", "else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>' not", "'glove']: embedding_size = embed_size if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values())", "-> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x, self.sess) embed = torch.Tensor(embed)", "to the same length. from Keras This function transforms a list of `num_samples`", "is a sequence. maxlen: Int, maximum length of all sequences. dtype: Type of", "else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens", "provided, or the length of the longest sequence otherwise. 
Sequences that are shorter", "Args: embedding_path: path to embeddings embeddings_type: type of pretrained embeddings ('word2vec', 'glove'') word_idx:", "before or after each sequence. truncating: String, 'pre' or 'post': remove values from", "length. from Keras This function transforms a list of `num_samples` sequences (lists of", "+ 1, embed_size)) for word, num in word_idx.items(): # possible variants of the", "= ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def forward(self,", "of of words and the list of not found words \"\"\" if embeddings_type", "`padding`, or in case of invalid shape for a `sequences` entry. \"\"\" if", "of sample {trunc.shape[1:]} of sequence at position {idx}' f'is different from expected shape", "x def build_matrix( word_idx: Dict, embedding_path: str = '', embeddings_type: str = 'word2vec',", "padded with `value` at the end. Sequences longer than `num_timesteps` are truncated so", "raise ValueError( f'Shape of sample {trunc.shape[1:]} of sequence at position {idx}' f'is different", "[] for x in sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be", "as np import torch from gensim.models import Word2Vec from simple_elmo import ElmoModel from", "than `num_timesteps` are truncated so that they fit the desired length. The position", "determined by the arguments `padding` and `truncating`, respectively. Pre-padding is the default. #", "the desired length. The position where padding or truncation happens is determined by", "\"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session()", "if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]} of sequence at", "Arguments sequences: List of lists, where each element is a sequence. 
maxlen: Int,", "where padding or truncation happens is determined by the arguments `padding` and `truncating`,", "for x in sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be a", "sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen)", "str = 'word2vec', max_features: int = 100000, embed_size: int = 300, ) ->", "!= 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if", "= nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor) -> torch.Tensor: embed =", "padding: String, 'pre' or 'post': pad either before or after each sequence. truncating:", "TypeError: raise ValueError('`sequences` must be a list of iterables. ' 'Found non-iterable: '", "found if truncating == 'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc", "take the sample shape from the first non empty sequence # checking for", "output sequences. To pad sequences with variable length strings, you can use `object`.", "= 'post', truncating: str = 'post', value: int = 0, ) -> np.array:", "trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]} of sequence at position", "Dict, embedding_path: str = '', embeddings_type: str = 'word2vec', max_features: int = 100000,", "= 0, ) -> np.array: \"\"\"Pad sequences to the same length. from Keras", "shape `(len(sequences), maxlen)` # Raises ValueError: In case of invalid values for `truncating`", "sequence at position {idx}' f'is different from expected shape {sample_shape}' ) if padding", "'<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean,", "desired length. 
The position where padding or truncation happens is determined by the", "import torch from gensim.models import Word2Vec from simple_elmo import ElmoModel from torch import", "num_timesteps)`. `num_timesteps` is either the `maxlen` argument if provided, or the length of", "coefs nb_words = min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size", "the length of the longest sequence otherwise. Sequences that are shorter than `num_timesteps`", "f'is different from expected shape {sample_shape}' ) if padding == 'post': x[idx, :", "number of of words and the list of not found words \"\"\" if", "padding value. # Returns x: Numpy array with shape `(len(sequences), maxlen)` # Raises", "if embedding_vector is not None: embedding_matrix[num] = embedding_vector break return embedding_matrix else: raise", "from Keras This function transforms a list of `num_samples` sequences (lists of integers)", "is determined by the arguments `padding` and `truncating`, respectively. Pre-padding is the default.", "'glove'') word_idx: mapping from words to their indexes max_features: max features to use", "str = 'post', value: int = 0, ) -> np.array: \"\"\"Pad sequences to", "truncating: String, 'pre' or 'post': remove values from sequences larger than `maxlen`, either", "truncating == 'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not", "main loop below. sample_shape = () for s in sequences: if len(s) >", "raise ValueError('`sequences` must be iterable.') num_samples = len(sequences) lengths = [] for x", "= coefs nb_words = min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size =", "is the default. # Arguments sequences: List of lists, where each element is", "ValueError('`sequences` must be a list of iterables. 
' 'Found non-iterable: ' + str(x))", "trunc else: raise ValueError(f'Padding type \"{padding}\" not understood') return x def build_matrix( word_idx:", "variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector = embedding_index.get(variant)", "in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features, len(word_idx)) if", "values for `truncating` or `padding`, or in case of invalid shape for a", "embedding_vector = embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num] = embedding_vector break return", "of `num_samples` sequences (lists of integers) into a 2D Numpy array of shape", "= self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def forward(self, x: List) -> torch.Tensor:", "size of embeddings Returns: embedding matrix, number of of words and the list", "embedding_matrix[num] = embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec,", "Keras This function transforms a list of `num_samples` sequences (lists of integers) into", "embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words +", ": len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc) :] = trunc", "def build_matrix( word_idx: Dict, embedding_path: str = '', embeddings_type: str = 'word2vec', max_features:", "from typing import List, Optional, Dict, Tuple import numpy as np import torch", "else: raise ValueError(f'Padding type \"{padding}\" not understood') return x def build_matrix( word_idx: Dict,", "build_matrix( word_idx: Dict, embedding_path: str = '', embeddings_type: str = 'word2vec', max_features: int", "embedding_path: path to embeddings embeddings_type: type of pretrained embeddings ('word2vec', 'glove'') word_idx: mapping", 
"shorter than `num_timesteps` are padded with `value` at the end. Sequences longer than", "Embedding Model is Loaded') def forward(self, x: List) -> torch.Tensor: # embed =", "torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens", "# Arguments sequences: List of lists, where each element is a sequence. maxlen:", "def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0): super().__init__()", "by the arguments `padding` and `truncating`, respectively. Pre-padding is the default. # Arguments", "for idx, s in enumerate(sequences): if not len(s): continue # empty list/array was", "each sequence. truncating: String, 'pre' or 'post': remove values from sequences larger than", "Tuple[np.array, int, List]: \"\"\" Create embedding matrix Args: embedding_path: path to embeddings embeddings_type:", "possible variants of the word to be found in word to idx dictionary", "embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size if embed_size != 0 else len(list(embedding_index.values())[0])", "'word2vec', max_features: int = 100000, embed_size: int = 300, ) -> Tuple[np.array, int,", "a list of `num_samples` sequences (lists of integers) into a 2D Numpy array", "max_features: int = 100000, embed_size: int = 300, ) -> Tuple[np.array, int, List]:", "elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" '", "Tuple import numpy as np import torch from gensim.models import Word2Vec from simple_elmo", "sequences with variable length strings, you can use `object`. 
padding: String, 'pre' or", "x in sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be a list", "torch import nn def pad_sequences( sequences: List, maxlen: Optional[int], dtype: str = 'int32',", "to use embed_size: size of embeddings Returns: embedding matrix, number of of words", "'pre' or 'post': remove values from sequences larger than `maxlen`, either at the", "= s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood') # check `trunc`", "or the length of the longest sequence otherwise. Sequences that are shorter than", "pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from words to their indexes max_features: max", "checking for consistency in the main loop below. sample_shape = () for s", "with `value` at the end. Sequences longer than `num_timesteps` are truncated so that", "if truncating == 'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc =", "all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size))", "__init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding", "`maxlen`, either at the beginning or at the end of the sequences. value:", "at the end of the sequences. value: Float or String, padding value. 
#", "glove') model = Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index = {} for word", "type \"{padding}\" not understood') return x def build_matrix( word_idx: Dict, embedding_path: str =", "than `maxlen`, either at the beginning or at the end of the sequences.", "array with shape `(len(sequences), maxlen)` # Raises ValueError: In case of invalid values", "word_idx: mapping from words to their indexes max_features: max features to use embed_size:", "the first non empty sequence # checking for consistency in the main loop", "length of the longest sequence otherwise. Sequences that are shorter than `num_timesteps` are", "sequence. truncating: String, 'pre' or 'post': remove values from sequences larger than `maxlen`,", "= np.max(lengths) # take the sample shape from the first non empty sequence", "embed_size != 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std()", "Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str,", "x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences):", "('word2vec', 'glove'') word_idx: mapping from words to their indexes max_features: max features to", "from words to their indexes max_features: max features to use embed_size: size of", "check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape:", "embed_size: size of embeddings Returns: embedding matrix, number of of words and the", "is not None: embedding_matrix[num] = embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable embedding", "= np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for word, num in word_idx.items(): #", "len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc) :] = trunc else:", "embeddings_type not in ['word2vec', 'glove']: raise 
ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model", "position {idx}' f'is different from expected shape {sample_shape}' ) if padding == 'post':", "List, maxlen: Optional[int], dtype: str = 'int32', padding: str = 'post', truncating: str", "remove values from sequences larger than `maxlen`, either at the beginning or at", "trunc = s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating", "s in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x =", "if embeddings_type not in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove')", "\"\"\"Pad sequences to the same length. from Keras This function transforms a list", "Float or String, padding value. # Returns x: Numpy array with shape `(len(sequences),", "not in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model =", "self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def forward(self, x: List) ->", "all sequences. dtype: Type of the output sequences. To pad sequences with variable", "for word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features,", "of all sequences. dtype: Type of the output sequences. To pad sequences with", "to their indexes max_features: max features to use embed_size: size of embeddings Returns:", "forward(self, x: List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x, self.sess)", "embedding_vector is not None: embedding_matrix[num] = embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable", "maxlen = np.max(lengths) # take the sample shape from the first non empty", "sequence otherwise. 
Sequences that are shorter than `num_timesteps` are padded with `value` at", "Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument if", "the default. # Arguments sequences: List of lists, where each element is a", ") -> Tuple[np.array, int, List]: \"\"\" Create embedding matrix Args: embedding_path: path to", "word, num in word_idx.items(): # possible variants of the word to be found", "nn def pad_sequences( sequences: List, maxlen: Optional[int], dtype: str = 'int32', padding: str", "tokens to embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim:", "pad sequences with variable length strings, you can use `object`. padding: String, 'pre'", "dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor) -> torch.Tensor:", "max_features: max features to use embed_size: size of embeddings Returns: embedding matrix, number", "for word, num in word_idx.items(): # possible variants of the word to be", "import List, Optional, Dict, Tuple import numpy as np import torch from gensim.models", "not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std,", "not None: embedding_matrix[num] = embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible", "for s in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x", "has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError(", "embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for word, num in", "Dict, Tuple import numpy as np import torch from gensim.models import Word2Vec from", "embed = self.embedding(x) return embed class 
ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\"", "to embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess", "'post': x[idx, : len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc) :]", "longest sequence otherwise. Sequences that are shorter than `num_timesteps` are padded with `value`", "model = Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index = {} for word in", "embed_size: int = 300, ) -> Tuple[np.array, int, List]: \"\"\" Create embedding matrix", "' 'not understood') # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype)", "into a 2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the", "# Raises ValueError: In case of invalid values for `truncating` or `padding`, or", "List, Optional, Dict, Tuple import numpy as np import torch from gensim.models import", "list of iterables. ' 'Found non-iterable: ' + str(x)) if maxlen is None:", "torch from gensim.models import Word2Vec from simple_elmo import ElmoModel from torch import nn", "class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__()", "the same length. from Keras This function transforms a list of `num_samples` sequences", "otherwise. 
Sequences that are shorter than `num_timesteps` are padded with `value` at the", "embedding_index = {} for word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs", "() for s in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break", "sequences larger than `maxlen`, either at the beginning or at the end of", "sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for", "words \"\"\" if embeddings_type not in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values:", "self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def forward(self, x: List)", "embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int =", "expected shape {sample_shape}' ) if padding == 'post': x[idx, : len(trunc)] = trunc", "+ str(x)) if maxlen is None: maxlen = np.max(lengths) # take the sample", "if embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size if embed_size != 0 else", "either at the beginning or at the end of the sequences. value: Float", "List]: \"\"\" Create embedding matrix Args: embedding_path: path to embeddings embeddings_type: type of", "except TypeError: raise ValueError('`sequences` must be a list of iterables. ' 'Found non-iterable:", "`num_samples` sequences (lists of integers) into a 2D Numpy array of shape `(num_samples,", "`(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument if provided, or the length", "each element is a sequence. maxlen: Int, maximum length of all sequences. dtype:", "{trunc.shape[1:]} of sequence at position {idx}' f'is different from expected shape {sample_shape}' )", "happens is determined by the arguments `padding` and `truncating`, respectively. 
Pre-padding is the", "np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]} of", "value. # Returns x: Numpy array with shape `(len(sequences), maxlen)` # Raises ValueError:", "from the first non empty sequence # checking for consistency in the main", "List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x, self.sess) embed =", "= len(sequences) lengths = [] for x in sequences: try: lengths.append(len(x)) except TypeError:", "the end. Sequences longer than `num_timesteps` are truncated so that they fit the", "of invalid values for `truncating` or `padding`, or in case of invalid shape", "try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be a list of iterables. '", "padding or truncation happens is determined by the arguments `padding` and `truncating`, respectively.", "Pre-padding is the default. # Arguments sequences: List of lists, where each element", "False def forward(self, x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return embed class", "or after each sequence. truncating: String, 'pre' or 'post': remove values from sequences", "of integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is", "expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape", "was found if truncating == 'pre': trunc = s[-maxlen:] elif truncating == 'post':", "Sequences that are shorter than `num_timesteps` are padded with `value` at the end.", "(1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for word, num", "embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean,", "same length. 
from Keras This function transforms a list of `num_samples` sequences (lists", "# check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] !=", "-len(trunc) :] = trunc else: raise ValueError(f'Padding type \"{padding}\" not understood') return x", "words and the list of not found words \"\"\" if embeddings_type not in", "# embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x, self.sess) embed = torch.Tensor(embed) return embed", "consistency in the main loop below. sample_shape = () for s in sequences:", "Transform tokens to embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel()", "'post', value: int = 0, ) -> np.array: \"\"\"Pad sequences to the same", "Transform tokens to embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str,", "\"\"\" Transform tokens to embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model =", "if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(),", "<reponame>angelinaku/wsd_pipeline<gh_stars>0 from typing import List, Optional, Dict, Tuple import numpy as np import", "Raises ValueError: In case of invalid values for `truncating` or `padding`, or in", "or 'post': remove values from sequences larger than `maxlen`, either at the beginning", "from sequences larger than `maxlen`, either at the beginning or at the end", "The position where padding or truncation happens is determined by the arguments `padding`", "`sequences` entry. 
\"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.') num_samples", "str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is", "x[idx, : len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc) :] =", "numpy as np import torch from gensim.models import Word2Vec from simple_elmo import ElmoModel", "`truncating` or `padding`, or in case of invalid shape for a `sequences` entry.", "Loaded') def forward(self, x: List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed =", "-> np.array: \"\"\"Pad sequences to the same length. from Keras This function transforms", "word.upper()] for variant in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is not None:", "ValueError(f'Padding type \"{padding}\" not understood') return x def build_matrix( word_idx: Dict, embedding_path: str", "maxlen)` # Raises ValueError: In case of invalid values for `truncating` or `padding`,", "x: List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x, self.sess) embed", "300, ) -> Tuple[np.array, int, List]: \"\"\" Create embedding matrix Args: embedding_path: path", "length of all sequences. dtype: Type of the output sequences. 
To pad sequences", "the `maxlen` argument if provided, or the length of the longest sequence otherwise.", "embed_std = all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std,", "num_samples = len(sequences) lengths = [] for x in sequences: try: lengths.append(len(x)) except", "= np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]}", "None: maxlen = np.max(lengths) # take the sample shape from the first non", "ValueError( f'Shape of sample {trunc.shape[1:]} of sequence at position {idx}' f'is different from", "a `sequences` entry. \"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.')", "== 'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood')", "x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\" Transform", "'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] else: raise", "empty list/array was found if truncating == 'pre': trunc = s[-maxlen:] elif truncating", "= embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num] = embedding_vector break return embedding_matrix", "= 'post', value: int = 0, ) -> np.array: \"\"\"Pad sequences to the", "torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x, self.sess) embed = torch.Tensor(embed) return", "['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path) #", "pad either before or after each sequence. truncating: String, 'pre' or 'post': remove", "invalid shape for a `sequences` entry. 
\"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences`", "\"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.') num_samples = len(sequences)", "-> Tuple[np.array, int, List]: \"\"\" Create embedding matrix Args: embedding_path: path to embeddings", "ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path) # Creating Embedding Index", "`object`. padding: String, 'pre' or 'post': pad either before or after each sequence.", "word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features, len(word_idx))", "sequence # checking for consistency in the main loop below. sample_shape = ()", "be iterable.') num_samples = len(sequences) lengths = [] for x in sequences: try:", "np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for word, num in word_idx.items(): # possible", "dtype=dtype) for idx, s in enumerate(sequences): if not len(s): continue # empty list/array", "max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False", "with variable length strings, you can use `object`. padding: String, 'pre' or 'post':", "variable length strings, you can use `object`. 
padding: String, 'pre' or 'post': pad", "idx, s in enumerate(sequences): if not len(s): continue # empty list/array was found", "-> torch.Tensor: embed = self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to", "str = '', embeddings_type: str = 'word2vec', max_features: int = 100000, embed_size: int", "tokens to embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path)", "coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features, len(word_idx)) if embeddings_type in", "max features to use embed_size: size of embeddings Returns: embedding matrix, number of", "or truncation happens is determined by the arguments `padding` and `truncating`, respectively. Pre-padding", "# Creating Embedding Index embedding_index = {} for word in model.wv.vocab: coefs =", "'pre': x[idx, -len(trunc) :] = trunc else: raise ValueError(f'Padding type \"{padding}\" not understood')", "maxlen) + sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences): if not len(s):", "sequences: List of lists, where each element is a sequence. maxlen: Int, maximum", "break return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module):", "embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\"", "dtype: Type of the output sequences. To pad sequences with variable length strings,", "shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of", "from torch import nn def pad_sequences( sequences: List, maxlen: Optional[int], dtype: str =", "longer than `num_timesteps` are truncated so that they fit the desired length. 
The", "'post', truncating: str = 'post', value: int = 0, ) -> np.array: \"\"\"Pad", "if maxlen is None: maxlen = np.max(lengths) # take the sample shape from", "= [] for x in sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must", "ValueError(f'Truncating type \"{truncating}\" ' 'not understood') # check `trunc` has expected shape trunc", "the word to be found in word to idx dictionary variants_of_word = [word,", "def forward(self, x: List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed = self.model.get_elmo_vectors_session(x,", "typing import List, Optional, Dict, Tuple import numpy as np import torch from", "sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be a list of iterables.", "be a list of iterables. ' 'Found non-iterable: ' + str(x)) if maxlen", "= trunc else: raise ValueError(f'Padding type \"{padding}\" not understood') return x def build_matrix(", "ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, embeddings_path: str): super().__init__() self.model", "0: sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)", "embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1,", "This function transforms a list of `num_samples` sequences (lists of integers) into a", "with shape `(len(sequences), maxlen)` # Raises ValueError: In case of invalid values for", "embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model", "100000, embed_size: int = 300, ) -> Tuple[np.array, int, List]: \"\"\" Create embedding", "trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of sample", "embeddings embeddings_type: type of 
pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from words to", "embeddings_type: type of pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from words to their", "= np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size))", "= np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features, len(word_idx)) if embeddings_type in ['word2vec',", "path to embeddings embeddings_type: type of pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from", "in sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be a list of", "ValueError('`sequences` must be iterable.') num_samples = len(sequences) lengths = [] for x in", "= 'int32', padding: str = 'post', truncating: str = 'post', value: int =", "indexes max_features: max features to use embed_size: size of embeddings Returns: embedding matrix,", "def __init__(self, embeddings_path: str): super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo", "if padding == 'post': x[idx, : len(trunc)] = trunc elif padding == 'pre':", "a list of iterables. ' 'Found non-iterable: ' + str(x)) if maxlen is", "argument if provided, or the length of the longest sequence otherwise. 
Sequences that", "raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path) # Creating Embedding", "embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad =", "word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0): super().__init__() self.weights_matrix =", "truncation happens is determined by the arguments `padding` and `truncating`, respectively. Pre-padding is", "int, List]: \"\"\" Create embedding matrix Args: embedding_path: path to embeddings embeddings_type: type", "= 300, ) -> Tuple[np.array, int, List]: \"\"\" Create embedding matrix Args: embedding_path:", "def forward(self, x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return embed class ELMo_Embedder(nn.Module):", "[word, word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector", "= {} for word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words", "embed_std, (nb_words + 1, embed_size)) for word, num in word_idx.items(): # possible variants", "shape from the first non empty sequence # checking for consistency in the", "\"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0):", "self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x:", "= False def forward(self, x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return embed", "that are shorter than `num_timesteps` are padded with `value` at the end. 
Sequences", "'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path) # Creating", "String, 'pre' or 'post': pad either before or after each sequence. truncating: String,", "word to idx dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for variant in", "first non empty sequence # checking for consistency in the main loop below.", "np import torch from gensim.models import Word2Vec from simple_elmo import ElmoModel from torch", "array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument if provided,", "in case of invalid shape for a `sequences` entry. \"\"\" if not hasattr(sequences,", "return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\"", "features to use embed_size: size of embeddings Returns: embedding matrix, number of of", "gensim.models import Word2Vec from simple_elmo import ElmoModel from torch import nn def pad_sequences(", "either the `maxlen` argument if provided, or the length of the longest sequence", "embedding type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index", "word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is", "str, embeddings_type: str, embeddings_dim: int = 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path,", "different from expected shape {sample_shape}' ) if padding == 'post': x[idx, : len(trunc)]", "for a `sequences` entry. \"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be", "end of the sequences. value: Float or String, padding value. 
# Returns x:", "embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for word, num in word_idx.items():", "Sequences longer than `num_timesteps` are truncated so that they fit the desired length.", "all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix", "can use `object`. padding: String, 'pre' or 'post': pad either before or after", "Word2Vec from simple_elmo import ElmoModel from torch import nn def pad_sequences( sequences: List,", "a 2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen`", "Optional[int], dtype: str = 'int32', padding: str = 'post', truncating: str = 'post',", "understood') # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:]", "the list of not found words \"\"\" if embeddings_type not in ['word2vec', 'glove']:", "np.array: \"\"\"Pad sequences to the same length. from Keras This function transforms a", "larger than `maxlen`, either at the beginning or at the end of the", "str(x)) if maxlen is None: maxlen = np.max(lengths) # take the sample shape", "raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood') # check `trunc` has expected shape", "to embeddings embeddings_type: type of pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from words", "embed_size if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std =", "at the end. 
Sequences longer than `num_timesteps` are truncated so that they fit", "embeddings_dim: int = 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim", "0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>'", "Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index = {} for word in model.wv.vocab: coefs", "padding == 'pre': x[idx, -len(trunc) :] = trunc else: raise ValueError(f'Padding type \"{padding}\"", "is either the `maxlen` argument if provided, or the length of the longest", "must be a list of iterables. ' 'Found non-iterable: ' + str(x)) if", "== 'post': x[idx, : len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc)", "{sample_shape}' ) if padding == 'post': x[idx, : len(trunc)] = trunc elif padding", "= '', embeddings_type: str = 'word2vec', max_features: int = 100000, embed_size: int =", "is None: maxlen = np.max(lengths) # take the sample shape from the first", "self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self,", "default. # Arguments sequences: List of lists, where each element is a sequence.", "# Returns x: Numpy array with shape `(len(sequences), maxlen)` # Raises ValueError: In", "maxlen is None: maxlen = np.max(lengths) # take the sample shape from the", "not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.') num_samples = len(sequences) lengths =", "of the word to be found in word to idx dictionary variants_of_word =", "the end of the sequences. value: Float or String, padding value. 
# Returns", "glove') class Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, word_to_idx: Dict,", "the sample shape from the first non empty sequence # checking for consistency", "import nn def pad_sequences( sequences: List, maxlen: Optional[int], dtype: str = 'int32', padding:", "Returns: embedding matrix, number of of words and the list of not found", "= build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding", "from gensim.models import Word2Vec from simple_elmo import ElmoModel from torch import nn def", "= () for s in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:]", "__init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0): super().__init__() self.weights_matrix", "word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix)", "torch.Tensor: embed = self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings", "at the beginning or at the end of the sequences. value: Float or", "truncating == 'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen]", "entry. \"\"\" if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.') num_samples =", "In case of invalid values for `truncating` or `padding`, or in case of", "of lists, where each element is a sequence. maxlen: Int, maximum length of", "num in word_idx.items(): # possible variants of the word to be found in", "`num_timesteps` are padded with `value` at the end. 
Sequences longer than `num_timesteps` are", "if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) +", "transforms a list of `num_samples` sequences (lists of integers) into a 2D Numpy", "{idx}' f'is different from expected shape {sample_shape}' ) if padding == 'post': x[idx,", "truncating: str = 'post', value: int = 0, ) -> np.array: \"\"\"Pad sequences", "`(len(sequences), maxlen)` # Raises ValueError: In case of invalid values for `truncating` or", "len(s): continue # empty list/array was found if truncating == 'pre': trunc =", "s in enumerate(sequences): if not len(s): continue # empty list/array was found if", ":] = trunc else: raise ValueError(f'Padding type \"{padding}\" not understood') return x def", "len(sequences) lengths = [] for x in sequences: try: lengths.append(len(x)) except TypeError: raise", "'post': remove values from sequences larger than `maxlen`, either at the beginning or", "are truncated so that they fit the desired length. The position where padding", "position where padding or truncation happens is determined by the arguments `padding` and", "than `num_timesteps` are padded with `value` at the end. Sequences longer than `num_timesteps`", "['word2vec', 'glove']: embedding_size = embed_size if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs =", "word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is not", "embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx,", "loop below. sample_shape = () for s in sequences: if len(s) > 0:", "respectively. Pre-padding is the default. # Arguments sequences: List of lists, where each", "that they fit the desired length. 
The position where padding or truncation happens", "dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector =", "break x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx, s in", "type of pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from words to their indexes", "= 'word2vec', max_features: int = 100000, embed_size: int = 300, ) -> Tuple[np.array,", "np.max(lengths) # take the sample shape from the first non empty sequence #", "self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32)", "list of `num_samples` sequences (lists of integers) into a 2D Numpy array of", "variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num] = embedding_vector break", "s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood') # check `trunc` has", "they fit the desired length. The position where padding or truncation happens is", "or at the end of the sequences. value: Float or String, padding value.", "of not found words \"\"\" if embeddings_type not in ['word2vec', 'glove']: raise ValueError('Unacceptable", "trunc elif padding == 'pre': x[idx, -len(trunc) :] = trunc else: raise ValueError(f'Padding", "\"\"\" if embeddings_type not in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec,", "you can use `object`. 
padding: String, 'pre' or 'post': pad either before or", "self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def forward(self, x: List) -> torch.Tensor: #", "torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor) ->", "'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood') #", "of pretrained embeddings ('word2vec', 'glove'') word_idx: mapping from words to their indexes max_features:", "continue # empty list/array was found if truncating == 'pre': trunc = s[-maxlen:]", "int = 100000, embed_size: int = 300, ) -> Tuple[np.array, int, List]: \"\"\"", "class Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path:", "' + str(x)) if maxlen is None: maxlen = np.max(lengths) # take the", "matrix Args: embedding_path: path to embeddings embeddings_type: type of pretrained embeddings ('word2vec', 'glove'')", "value: int = 0, ) -> np.array: \"\"\"Pad sequences to the same length.", "0, ) -> np.array: \"\"\"Pad sequences to the same length. from Keras This", "type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index =", "= self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def", "if '<unk>' not in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix =", "words to their indexes max_features: max features to use embed_size: size of embeddings", "case of invalid shape for a `sequences` entry. \"\"\" if not hasattr(sequences, '__len__'):", "non empty sequence # checking for consistency in the main loop below. sample_shape", "maxlen: Int, maximum length of all sequences. 
dtype: Type of the output sequences.", "> 0: sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) + sample_shape, value,", "to idx dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word:", "Embedding Index embedding_index = {} for word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word]", "variants of the word to be found in word to idx dictionary variants_of_word", "int = 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim )", "# checking for consistency in the main loop below. sample_shape = () for", "# empty list/array was found if truncating == 'pre': trunc = s[-maxlen:] elif", "end. Sequences longer than `num_timesteps` are truncated so that they fit the desired", "either before or after each sequence. truncating: String, 'pre' or 'post': remove values", "embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, embeddings_path: str):", "trunc = s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood') # check", "== 'pre': x[idx, -len(trunc) :] = trunc else: raise ValueError(f'Padding type \"{padding}\" not", "List of lists, where each element is a sequence. 
maxlen: Int, maximum length", "None: embedding_matrix[num] = embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values:", "print('ELMo Embedding Model is Loaded') def forward(self, x: List) -> torch.Tensor: # embed", "\"\"\" Transform tokens to embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type:", "variant in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num] =", "'Found non-iterable: ' + str(x)) if maxlen is None: maxlen = np.max(lengths) #", "of the output sequences. To pad sequences with variable length strings, you can", "length strings, you can use `object`. padding: String, 'pre' or 'post': pad either", ") -> np.array: \"\"\"Pad sequences to the same length. from Keras This function", "element is a sequence. maxlen: Int, maximum length of all sequences. dtype: Type", "0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix =", "= [word, word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector = embedding_index.get(variant) if", "dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]} of sequence", "'pre' or 'post': pad either before or after each sequence. truncating: String, 'pre'", "np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>'] =", "'', embeddings_type: str = 'word2vec', max_features: int = 100000, embed_size: int = 300,", "`num_timesteps` are truncated so that they fit the desired length. 
The position where", "Index embedding_index = {} for word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] =", "use embed_size: size of embeddings Returns: embedding matrix, number of of words and", "in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') model = Word2Vec.load(embedding_path)", "# take the sample shape from the first non empty sequence # checking", "s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating type \"{truncating}\"", "raise ValueError('`sequences` must be a list of iterables. ' 'Found non-iterable: ' +", "= np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx,", "'not understood') # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if", "String, 'pre' or 'post': remove values from sequences larger than `maxlen`, either at", "embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embed_size)) for word,", "in enumerate(sequences): if not len(s): continue # empty list/array was found if truncating", "Optional, Dict, Tuple import numpy as np import torch from gensim.models import Word2Vec", "for variant in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num]", "str, embeddings_dim: int = 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx),", "arguments `padding` and `truncating`, respectively. Pre-padding is the default. # Arguments sequences: List", "list of not found words \"\"\" if embeddings_type not in ['word2vec', 'glove']: raise", "beginning or at the end of the sequences. 
value: Float or String, padding", "embeddings_type: str, embeddings_dim: int = 0): super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type,", "raise ValueError(f'Padding type \"{padding}\" not understood') return x def build_matrix( word_idx: Dict, embedding_path:", "the arguments `padding` and `truncating`, respectively. Pre-padding is the default. # Arguments sequences:", "`truncating`, respectively. Pre-padding is the default. # Arguments sequences: List of lists, where", "integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either", "Type of the output sequences. To pad sequences with variable length strings, you", "Numpy array with shape `(len(sequences), maxlen)` # Raises ValueError: In case of invalid", "`padding` and `truncating`, respectively. Pre-padding is the default. # Arguments sequences: List of", "np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']:", "Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0): super().__init__() self.weights_matrix = build_matrix(", "for `truncating` or `padding`, or in case of invalid shape for a `sequences`", "lists, where each element is a sequence. 
maxlen: Int, maximum length of all", "padding == 'post': x[idx, : len(trunc)] = trunc elif padding == 'pre': x[idx,", "not understood') return x def build_matrix( word_idx: Dict, embedding_path: str = '', embeddings_type:", "of embeddings Returns: embedding matrix, number of of words and the list of", "min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size if embed_size !=", "embedding_path: str = '', embeddings_type: str = 'word2vec', max_features: int = 100000, embed_size:", "ValueError: In case of invalid values for `truncating` or `padding`, or in case", "embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num] = embedding_vector break return embedding_matrix else:", "length. The position where padding or truncation happens is determined by the arguments", "= Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index = {} for word in model.wv.vocab:", "nb_words = min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size if", "is Loaded') def forward(self, x: List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x) embed", "else: raise ValueError(f'Truncating type \"{truncating}\" ' 'not understood') # check `trunc` has expected", "shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument if provided, or the", "embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class", ") if padding == 'post': x[idx, : len(trunc)] = trunc elif padding ==", "use `object`. padding: String, 'pre' or 'post': pad either before or after each", "maxlen: Optional[int], dtype: str = 'int32', padding: str = 'post', truncating: str =", "lengths.append(len(x)) except TypeError: raise ValueError('`sequences` must be a list of iterables. 
' 'Found", "ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens to embeddings", "def pad_sequences( sequences: List, maxlen: Optional[int], dtype: str = 'int32', padding: str =", "word_idx.items(): # possible variants of the word to be found in word to", "hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.') num_samples = len(sequences) lengths = []", "len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples, maxlen) + sample_shape,", "import ElmoModel from torch import nn def pad_sequences( sequences: List, maxlen: Optional[int], dtype:", "(nb_words + 1, embed_size)) for word, num in word_idx.items(): # possible variants of", "of the sequences. value: Float or String, padding value. # Returns x: Numpy", "list/array was found if truncating == 'pre': trunc = s[-maxlen:] elif truncating ==", "sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences): if not len(s): continue #", "self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor) -> torch.Tensor: embed", "sequences to the same length. from Keras This function transforms a list of", "super().__init__() self.model = ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is Loaded')", "in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x = np.full((num_samples,", "lengths = [] for x in sequences: try: lengths.append(len(x)) except TypeError: raise ValueError('`sequences`", "= embed_size if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std", "`maxlen` argument if provided, or the length of the longest sequence otherwise. Sequences", "of shape `(num_samples, num_timesteps)`. 
`num_timesteps` is either the `maxlen` argument if provided, or", "x: Numpy array with shape `(len(sequences), maxlen)` # Raises ValueError: In case of", "str = 'post', truncating: str = 'post', value: int = 0, ) ->", "word_idx: Dict, embedding_path: str = '', embeddings_type: str = 'word2vec', max_features: int =", "in variants_of_word: embedding_vector = embedding_index.get(variant) if embedding_vector is not None: embedding_matrix[num] = embedding_vector", "# possible variants of the word to be found in word to idx", "\"{padding}\" not understood') return x def build_matrix( word_idx: Dict, embedding_path: str = '',", "str = 'int32', padding: str = 'post', truncating: str = 'post', value: int", "len(list(embedding_index.values())[0]) all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>' not in", "sample shape from the first non empty sequence # checking for consistency in", "build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding =", "ElmoModel() self.model.load(embeddings_path) self.sess = self.model.get_elmo_session() print('ELMo Embedding Model is Loaded') def forward(self, x:", "iterables. ' 'Found non-iterable: ' + str(x)) if maxlen is None: maxlen =", "if provided, or the length of the longest sequence otherwise. Sequences that are", "Model is Loaded') def forward(self, x: List) -> torch.Tensor: # embed = self.model.get_elmo_vectors(x)", "or String, padding value. # Returns x: Numpy array with shape `(len(sequences), maxlen)`", "or 'post': pad either before or after each sequence. truncating: String, 'pre' or", "'int32', padding: str = 'post', truncating: str = 'post', value: int = 0,", "int = 0, ) -> np.array: \"\"\"Pad sequences to the same length. from", "the output sequences. 
To pad sequences with variable length strings, you can use", "raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens to", "x[idx, -len(trunc) :] = trunc else: raise ValueError(f'Padding type \"{padding}\" not understood') return", "String, padding value. # Returns x: Numpy array with shape `(len(sequences), maxlen)` #", "embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def", "and `truncating`, respectively. Pre-padding is the default. # Arguments sequences: List of lists,", "embed_size)) for word, num in word_idx.items(): # possible variants of the word to", "after each sequence. truncating: String, 'pre' or 'post': remove values from sequences larger", "are shorter than `num_timesteps` are padded with `value` at the end. Sequences longer", "sequence. maxlen: Int, maximum length of all sequences. dtype: Type of the output", "sequences. To pad sequences with variable length strings, you can use `object`. padding:", "word2vec, glove') model = Word2Vec.load(embedding_path) # Creating Embedding Index embedding_index = {} for", "values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self,", "not len(s): continue # empty list/array was found if truncating == 'pre': trunc", "(lists of integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`. 
`num_timesteps`", "from expected shape {sample_shape}' ) if padding == 'post': x[idx, : len(trunc)] =", "= torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor)", "type \"{truncating}\" ' 'not understood') # check `trunc` has expected shape trunc =", "if not len(s): continue # empty list/array was found if truncating == 'pre':", "shape {sample_shape}' ) if padding == 'post': x[idx, : len(trunc)] = trunc elif", "of sequence at position {idx}' f'is different from expected shape {sample_shape}' ) if", "all_embs = np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index:", "padding: str = 'post', truncating: str = 'post', value: int = 0, )", "= np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences): if", "maximum length of all sequences. dtype: Type of the output sequences. To pad", "where each element is a sequence. maxlen: Int, maximum length of all sequences.", "= s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError(f'Truncating type", "model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words = min(max_features, len(word_idx)) if embeddings_type", "at position {idx}' f'is different from expected shape {sample_shape}' ) if padding ==", "= 100000, embed_size: int = 300, ) -> Tuple[np.array, int, List]: \"\"\" Create", "return x def build_matrix( word_idx: Dict, embedding_path: str = '', embeddings_type: str =", "embedding_index[word] = coefs nb_words = min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size", "the longest sequence otherwise. 
Sequences that are shorter than `num_timesteps` are padded with", "if not hasattr(sequences, '__len__'): raise ValueError('`sequences` must be iterable.') num_samples = len(sequences) lengths", "Returns x: Numpy array with shape `(len(sequences), maxlen)` # Raises ValueError: In case", "empty sequence # checking for consistency in the main loop below. sample_shape =", "Int, maximum length of all sequences. dtype: Type of the output sequences. To", "{} for word in model.wv.vocab: coefs = np.asarray(model.wv[word]) embedding_index[word] = coefs nb_words =", "super().__init__() self.weights_matrix = build_matrix( word_idx=word_to_idx, embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix,", "in word to idx dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for variant", "ElmoModel from torch import nn def pad_sequences( sequences: List, maxlen: Optional[int], dtype: str", "'post': pad either before or after each sequence. truncating: String, 'pre' or 'post':", "'__len__'): raise ValueError('`sequences` must be iterable.') num_samples = len(sequences) lengths = [] for", "return embed class ELMo_Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, embeddings_path:", "= min(max_features, len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size if embed_size", "np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences): if not", "' 'Found non-iterable: ' + str(x)) if maxlen is None: maxlen = np.max(lengths)", "embeddings Returns: embedding matrix, number of of words and the list of not", "forward(self, x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return embed class ELMo_Embedder(nn.Module): \"\"\"", "for consistency in the main loop below. 
sample_shape = () for s in", "1, embed_size)) for word, num in word_idx.items(): # possible variants of the word", "or in case of invalid shape for a `sequences` entry. \"\"\" if not", "= trunc elif padding == 'pre': x[idx, -len(trunc) :] = trunc else: raise", "in embedding_index: embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size)) embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words", "understood') return x def build_matrix( word_idx: Dict, embedding_path: str = '', embeddings_type: str", "f'Shape of sample {trunc.shape[1:]} of sequence at position {idx}' f'is different from expected", "`num_timesteps` is either the `maxlen` argument if provided, or the length of the", "sequences (lists of integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`.", ") self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad = False def forward(self,", "embeddings_type: str = 'word2vec', max_features: int = 100000, embed_size: int = 300, )", "non-iterable: ' + str(x)) if maxlen is None: maxlen = np.max(lengths) # take", "= embedding_vector break return embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove')", "function transforms a list of `num_samples` sequences (lists of integers) into a 2D", "case of invalid values for `truncating` or `padding`, or in case of invalid", "of the longest sequence otherwise. 
Sequences that are shorter than `num_timesteps` are padded", "sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]} of sequence at position {idx}' f'is", "embedding_path=embeddings_path, embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim ) self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32) self.embedding = nn.Embedding.from_pretrained(self.weights_matrix) self.embedding.weight.requires_grad", "from simple_elmo import ElmoModel from torch import nn def pad_sequences( sequences: List, maxlen:", "value, dtype=dtype) for idx, s in enumerate(sequences): if not len(s): continue # empty", "= np.stack(embedding_index.values()) embed_mean, embed_std = all_embs.mean(), all_embs.std() if '<unk>' not in embedding_index: embedding_index['<unk>']", "Create embedding matrix Args: embedding_path: path to embeddings embeddings_type: type of pretrained embeddings", "simple_elmo import ElmoModel from torch import nn def pad_sequences( sequences: List, maxlen: Optional[int],", "!= sample_shape: raise ValueError( f'Shape of sample {trunc.shape[1:]} of sequence at position {idx}'", "in ['word2vec', 'glove']: embedding_size = embed_size if embed_size != 0 else len(list(embedding_index.values())[0]) all_embs", "of words and the list of not found words \"\"\" if embeddings_type not", "of iterables. ' 'Found non-iterable: ' + str(x)) if maxlen is None: maxlen", "word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def __init__(self, word_to_idx:", "be found in word to idx dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()]", "truncated so that they fit the desired length. 
The position where padding or", "to be found in word to idx dictionary variants_of_word = [word, word.lower(), word.capitalize(),", "`trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise", "embedding_matrix else: raise ValueError('Unacceptable embedding type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform", "embedding matrix Args: embedding_path: path to embeddings embeddings_type: type of pretrained embeddings ('word2vec',", "type.\\nPermissible values: word2vec, glove') class Embedder(nn.Module): \"\"\" Transform tokens to embeddings \"\"\" def", "not found words \"\"\" if embeddings_type not in ['word2vec', 'glove']: raise ValueError('Unacceptable embedding", "the beginning or at the end of the sequences. value: Float or String,", "idx dictionary variants_of_word = [word, word.lower(), word.capitalize(), word.upper()] for variant in variants_of_word: embedding_vector", "word to be found in word to idx dictionary variants_of_word = [word, word.lower(),", "import numpy as np import torch from gensim.models import Word2Vec from simple_elmo import", "must be iterable.') num_samples = len(sequences) lengths = [] for x in sequences:", "sequences. dtype: Type of the output sequences. To pad sequences with variable length", "to embeddings \"\"\" def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int", "values from sequences larger than `maxlen`, either at the beginning or at the", "+ sample_shape, value, dtype=dtype) for idx, s in enumerate(sequences): if not len(s): continue", "iterable.') num_samples = len(sequences) lengths = [] for x in sequences: try: lengths.append(len(x))", "and the list of not found words \"\"\" if embeddings_type not in ['word2vec',", "the sequences. value: Float or String, padding value. 
# Returns x: Numpy array", "self.embedding.weight.requires_grad = False def forward(self, x: torch.LongTensor) -> torch.Tensor: embed = self.embedding(x) return", "embedding matrix, number of of words and the list of not found words", "so that they fit the desired length. The position where padding or truncation", "embeddings ('word2vec', 'glove'') word_idx: mapping from words to their indexes max_features: max features", "len(word_idx)) if embeddings_type in ['word2vec', 'glove']: embedding_size = embed_size if embed_size != 0", "are padded with `value` at the end. Sequences longer than `num_timesteps` are truncated" ]
[ "( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"),", "from .views import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns = [", "HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\", update_hackathon, name=\"update_hackathon\"), path(\"<int:hackathon_id>/delete_hackathon\", delete_hackathon,", ") urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'),", "update_hackathon, delete_hackathon, judging ) urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"),", "django.urls import path from .views import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging )", "= [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\", update_hackathon,", "[ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\", update_hackathon, name=\"update_hackathon\"),", "from django.urls import path from .views import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging", "delete_hackathon, judging ) 
urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\",", ".views import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns = [ path('',", "path from .views import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns =", "create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging,", "name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\", update_hackathon, name=\"update_hackathon\"), path(\"<int:hackathon_id>/delete_hackathon\", delete_hackathon, name=\"delete_hackathon\"),", "path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\", update_hackathon, name=\"update_hackathon\"), path(\"<int:hackathon_id>/delete_hackathon\",", "import path from .views import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns", "judging ) urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon,", "urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\",", 
"HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns = [ path('', HackathonListView.as_view(), name=\"hackathon-list\"), path(\"<int:hack_id>/team/<int:team_id>/judging/\",", "import ( HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging ) urlpatterns = [ path('', HackathonListView.as_view(),", "path(\"<int:hack_id>/team/<int:team_id>/judging/\", judging, name=\"judging\"), path(\"create_hackathon\", create_hackathon, name='create_hackathon'), path(\"<int:hackathon_id>/update_hackathon\", update_hackathon, name=\"update_hackathon\"), path(\"<int:hackathon_id>/delete_hackathon\", delete_hackathon, name=\"delete_hackathon\"), ]" ]
[ "Iterable) -> None: super().__init__() self.data = data def __getitem__(self, index: int): return self.data[index]", "[] for idx in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) -> int: return", "Optional from torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable) -> None:", "= transform def __getitem__(self, index: int): return self.transform(self.data[index]) def __len__(self) -> int: return", "= [] for idx in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) -> int:", "transform def __getitem__(self, index: int): return self.transform(self.data[index]) def __len__(self) -> int: return len(self.data)", "def __init__( self, data: Iterable, transform, debug: Optional[bool] = False ) -> None:", "results.append(self.data[idx]) return results def __len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__(", "int): return self.data[index] def __len__(self) -> int: return len(self.data) class CachedBagREDataset(Dataset): def __init__(self,", "data = data[:128] self.data = data self.transform = transform def __getitem__(self, index: int):", "int: return len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__() data, scopes", "super().__init__() self.data = data def __getitem__(self, index: int): return self.data[index] def __len__(self) ->", "= False ) -> None: super().__init__() if debug: data = data[:128] self.data =", "from torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable) -> None: super().__init__()", "torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable) -> None: super().__init__() self.data", "class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__() data, scopes = data_with_scopes self.data", "self.data = data def 
__getitem__(self, index: int): return self.data[index] def __len__(self) -> int:", "len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__() data, scopes = data_with_scopes", "data self.scopes = scopes def __getitem__(self, index: int): results = [] for idx", "def __getitem__(self, index: int): return self.data[index] def __len__(self) -> int: return len(self.data) class", "StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform, debug: Optional[bool] = False ) ->", "return self.data[index] def __len__(self) -> int: return len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes)", "class CachedDataset(Dataset): def __init__(self, data: Iterable) -> None: super().__init__() self.data = data def", "= data self.transform = transform def __getitem__(self, index: int): return self.transform(self.data[index]) def __len__(self)", "Iterable, Optional from torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable) ->", "data self.transform = transform def __getitem__(self, index: int): return self.transform(self.data[index]) def __len__(self) ->", "__getitem__(self, index: int): return self.data[index] def __len__(self) -> int: return len(self.data) class CachedBagREDataset(Dataset):", "CachedDataset(Dataset): def __init__(self, data: Iterable) -> None: super().__init__() self.data = data def __getitem__(self,", "Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable) -> None: super().__init__() self.data = data", "data, scopes = data_with_scopes self.data = data self.scopes = scopes def __getitem__(self, index:", "<reponame>Spico197/REx from typing import Iterable, Optional from torch.utils.data import Dataset class CachedDataset(Dataset): def", "-> None: super().__init__() data, scopes = data_with_scopes self.data = data self.scopes = scopes", "data[:128] self.data = data self.transform = transform def 
__getitem__(self, index: int): return self.transform(self.data[index])", "results def __len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data:", "self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset): def", "self.transform = transform def __getitem__(self, index: int): return self.transform(self.data[index]) def __len__(self) -> int:", "self.data = data self.transform = transform def __getitem__(self, index: int): return self.transform(self.data[index]) def", "CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__() data, scopes = data_with_scopes self.data =", "__init__(self, data: Iterable) -> None: super().__init__() self.data = data def __getitem__(self, index: int):", "len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform, debug: Optional[bool] = False", "False ) -> None: super().__init__() if debug: data = data[:128] self.data = data", "import Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable) -> None: super().__init__() self.data =", "debug: Optional[bool] = False ) -> None: super().__init__() if debug: data = data[:128]", "class StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform, debug: Optional[bool] = False )", "= data self.scopes = scopes def __getitem__(self, index: int): results = [] for", "data_with_scopes) -> None: super().__init__() data, scopes = data_with_scopes self.data = data self.scopes =", "None: super().__init__() data, scopes = data_with_scopes self.data = data self.scopes = scopes def", "self.scopes = scopes def __getitem__(self, index: int): results = [] for idx in", "from typing import Iterable, Optional from torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self,", "super().__init__() if debug: data = data[:128] self.data = data 
self.transform = transform def", "Iterable, transform, debug: Optional[bool] = False ) -> None: super().__init__() if debug: data", "-> int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform, debug:", "self.data[index] def __len__(self) -> int: return len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) ->", "int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform, debug: Optional[bool]", "__getitem__(self, index: int): results = [] for idx in self.scopes[index]: results.append(self.data[idx]) return results", "__len__(self) -> int: return len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__()", "import Iterable, Optional from torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self, data: Iterable)", "def __len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data: Iterable,", "= data_with_scopes self.data = data self.scopes = scopes def __getitem__(self, index: int): results", "results = [] for idx in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) ->", "Optional[bool] = False ) -> None: super().__init__() if debug: data = data[:128] self.data", "int): results = [] for idx in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self)", "scopes def __getitem__(self, index: int): results = [] for idx in self.scopes[index]: results.append(self.data[idx])", "= data[:128] self.data = data self.transform = transform def __getitem__(self, index: int): return", "__init__( self, data: Iterable, transform, debug: Optional[bool] = False ) -> None: super().__init__()", "typing import Iterable, Optional from torch.utils.data import Dataset class CachedDataset(Dataset): def __init__(self, data:", "def __len__(self) -> int: return 
len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None:", "self.data = data self.scopes = scopes def __getitem__(self, index: int): results = []", "idx in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) -> int: return len(self.scopes) class", "data: Iterable) -> None: super().__init__() self.data = data def __getitem__(self, index: int): return", ") -> None: super().__init__() if debug: data = data[:128] self.data = data self.transform", "-> int: return len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__() data,", "= data def __getitem__(self, index: int): return self.data[index] def __len__(self) -> int: return", "return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform, debug: Optional[bool] =", "= scopes def __getitem__(self, index: int): results = [] for idx in self.scopes[index]:", "debug: data = data[:128] self.data = data self.transform = transform def __getitem__(self, index:", "for idx in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) -> int: return len(self.scopes)", "return results def __len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self,", "data def __getitem__(self, index: int): return self.data[index] def __len__(self) -> int: return len(self.data)", "def __init__(self, data_with_scopes) -> None: super().__init__() data, scopes = data_with_scopes self.data = data", "super().__init__() data, scopes = data_with_scopes self.data = data self.scopes = scopes def __getitem__(self,", "__len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset): def __init__( self, data: Iterable, transform,", "data_with_scopes self.data = data self.scopes = scopes def __getitem__(self, index: int): results =", "index: int): results = [] for idx in self.scopes[index]: 
results.append(self.data[idx]) return results def", "self, data: Iterable, transform, debug: Optional[bool] = False ) -> None: super().__init__() if", "__init__(self, data_with_scopes) -> None: super().__init__() data, scopes = data_with_scopes self.data = data self.scopes", "scopes = data_with_scopes self.data = data self.scopes = scopes def __getitem__(self, index: int):", "def __init__(self, data: Iterable) -> None: super().__init__() self.data = data def __getitem__(self, index:", "if debug: data = data[:128] self.data = data self.transform = transform def __getitem__(self,", "index: int): return self.data[index] def __len__(self) -> int: return len(self.data) class CachedBagREDataset(Dataset): def", "-> None: super().__init__() if debug: data = data[:128] self.data = data self.transform =", "None: super().__init__() self.data = data def __getitem__(self, index: int): return self.data[index] def __len__(self)", "return len(self.data) class CachedBagREDataset(Dataset): def __init__(self, data_with_scopes) -> None: super().__init__() data, scopes =", "in self.scopes[index]: results.append(self.data[idx]) return results def __len__(self) -> int: return len(self.scopes) class StreamTransformDataset(Dataset):", "-> None: super().__init__() self.data = data def __getitem__(self, index: int): return self.data[index] def", "transform, debug: Optional[bool] = False ) -> None: super().__init__() if debug: data =", "data: Iterable, transform, debug: Optional[bool] = False ) -> None: super().__init__() if debug:", "None: super().__init__() if debug: data = data[:128] self.data = data self.transform = transform", "def __getitem__(self, index: int): results = [] for idx in self.scopes[index]: results.append(self.data[idx]) return" ]
[ "def findMultiple(n,k): a = 0 b = 1 count = 1 while(True): c", "b,c if __name__ == \"__main__\": k = int(input(\"Enter the number which is in", "value of n : \")) print(\"{}th/nd/rd multiple of {} in Fibonacci Series is", "of a number in Fibonacci Series def findMultiple(n,k): a = 0 b =", "0 b = 1 count = 1 while(True): c = a+b if c", "= a+b if c % k==0: if count == n: return(c) count +=", "= 1 while(True): c = a+b if c % k==0: if count ==", "c = a+b if c % k==0: if count == n: return(c) count", "a+b if c % k==0: if count == n: return(c) count += 1", "__name__ == \"__main__\": k = int(input(\"Enter the number which is in the Fibonacci", "\")) n = int(input(\"Enter the value of n : \")) print(\"{}th/nd/rd multiple of", "count = 1 while(True): c = a+b if c % k==0: if count", "+= 1 a,b = b,c if __name__ == \"__main__\": k = int(input(\"Enter the", "k = int(input(\"Enter the number which is in the Fibonacci Series: \")) n", "1 count = 1 while(True): c = a+b if c % k==0: if", "in the Fibonacci Series: \")) n = int(input(\"Enter the value of n :", "in Fibonacci Series def findMultiple(n,k): a = 0 b = 1 count =", "k==0: if count == n: return(c) count += 1 a,b = b,c if", "the value of n : \")) print(\"{}th/nd/rd multiple of {} in Fibonacci Series", "% k==0: if count == n: return(c) count += 1 a,b = b,c", "if __name__ == \"__main__\": k = int(input(\"Enter the number which is in the", "while(True): c = a+b if c % k==0: if count == n: return(c)", "= int(input(\"Enter the number which is in the Fibonacci Series: \")) n =", "b = 1 count = 1 while(True): c = a+b if c %", "return(c) count += 1 a,b = b,c if __name__ == \"__main__\": k =", "the Fibonacci Series: \")) n = int(input(\"Enter the value of n : \"))", "== \"__main__\": k = int(input(\"Enter the number which is in the Fibonacci Series:", "n = int(input(\"Enter the value of n : \")) print(\"{}th/nd/rd multiple of {}", "\"__main__\": k = int(input(\"Enter the number which is in the Fibonacci Series: \"))", "Series def 
findMultiple(n,k): a = 0 b = 1 count = 1 while(True):", "int(input(\"Enter the number which is in the Fibonacci Series: \")) n = int(input(\"Enter", "is in the Fibonacci Series: \")) n = int(input(\"Enter the value of n", "Python Program for n\\’th multiple of a number in Fibonacci Series def findMultiple(n,k):", "a number in Fibonacci Series def findMultiple(n,k): a = 0 b = 1", "== n: return(c) count += 1 a,b = b,c if __name__ == \"__main__\":", "of n : \")) print(\"{}th/nd/rd multiple of {} in Fibonacci Series is {}\".format(n,k,findMultiple(n,k)))", "= 0 b = 1 count = 1 while(True): c = a+b if", "Program for n\\’th multiple of a number in Fibonacci Series def findMultiple(n,k): a", "n\\’th multiple of a number in Fibonacci Series def findMultiple(n,k): a = 0", "count += 1 a,b = b,c if __name__ == \"__main__\": k = int(input(\"Enter", "number in Fibonacci Series def findMultiple(n,k): a = 0 b = 1 count", "multiple of a number in Fibonacci Series def findMultiple(n,k): a = 0 b", "findMultiple(n,k): a = 0 b = 1 count = 1 while(True): c =", "Fibonacci Series: \")) n = int(input(\"Enter the value of n : \")) print(\"{}th/nd/rd", "= b,c if __name__ == \"__main__\": k = int(input(\"Enter the number which is", "Series: \")) n = int(input(\"Enter the value of n : \")) print(\"{}th/nd/rd multiple", "n: return(c) count += 1 a,b = b,c if __name__ == \"__main__\": k", "1 while(True): c = a+b if c % k==0: if count == n:", "= int(input(\"Enter the value of n : \")) print(\"{}th/nd/rd multiple of {} in", "int(input(\"Enter the value of n : \")) print(\"{}th/nd/rd multiple of {} in Fibonacci", "number which is in the Fibonacci Series: \")) n = int(input(\"Enter the value", "if count == n: return(c) count += 1 a,b = b,c if __name__", "Fibonacci Series def findMultiple(n,k): a = 0 b = 1 count = 1", "c % k==0: if count == n: return(c) count += 1 a,b =", "if c % k==0: if count == n: return(c) count += 1 a,b", "a = 0 b = 1 count = 1 while(True): c = a+b", "the number which is in the 
Fibonacci Series: \")) n = int(input(\"Enter the", "for n\\’th multiple of a number in Fibonacci Series def findMultiple(n,k): a =", "1 a,b = b,c if __name__ == \"__main__\": k = int(input(\"Enter the number", "which is in the Fibonacci Series: \")) n = int(input(\"Enter the value of", "count == n: return(c) count += 1 a,b = b,c if __name__ ==", "a,b = b,c if __name__ == \"__main__\": k = int(input(\"Enter the number which", "= 1 count = 1 while(True): c = a+b if c % k==0:", "# Python Program for n\\’th multiple of a number in Fibonacci Series def" ]
[ "attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True):", "} # Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate with", "jump more than one hour in time. Should be back at returning HTTP_400_BAD_REQUEST", "'username': \"readonly\", \"password\": \"password\" } resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) #", "override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient() # request token url = reverse('api:auth-token')", "min jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp", "resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have a", "= '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6", "HTTP_400_BAD_REQUEST while attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in", "be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp = client.post(url, data=data,", "Test that the token received can be used for authentication :return: \"\"\" client", "Prevent brute force authentication by preventing the API user issuing too many auth-token", "class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that when hitting the auth_token", "self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt should", "= APIClient() # request token url = reverse('api:auth-token') user = self.readonly_user 
self.assertTrue(user.check_password('password')) data", "user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"password\" } resp", "from freezegun import freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def", "resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token ' +", "\"\"\" Use case: Prevent brute force authentication by preventing the API user issuing", "test that a hacker sending auth request with wrong password will be blocked", "rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt): resp = client.post(url, data=data,", "preventing the API user issuing too many auth-token request \"\"\" def test_brute_force(self): \"\"\"", "import status from rest_framework.test import APIClient from freezegun import freeze_time from main.tests.api import", "test_brute_force(self): \"\"\" test that a hacker sending auth request with wrong password will", "test_token_valid(self): \"\"\" Test that the token received can be used for authentication :return:", "many auth-token request \"\"\" def test_brute_force(self): \"\"\" test that a hacker sending auth", "resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one", "APIClient() # request token url = reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data =", "self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): 
\"\"\" Test that the token received can be", "status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min jump in time. Should still return", "client = APIClient() # request token url = reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password'))", "test_token_auth_end_point(self): \"\"\" Test that when hitting the auth_token end point we receive a", "self.assertTrue(token) # can't get dataset list without token url = reverse('api:dataset-list') resp =", "jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp =", "status.HTTP_400_BAD_REQUEST) # next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json')", ":return: \"\"\" client = APIClient() # request token url = reverse('api:auth-token') user =", "wrong password will be blocked after n attempts :return: \"\"\" rate = '6/hour'", "format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min jump in time. 
Should", "django.conf import settings from django.urls import reverse from django.test import override_settings from rest_framework", "range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return", "set credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class", "should return HTTP_400_BAD_REQUEST while attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for", "} resp = client.post(url, data=data, format='json') token = resp.data.get('token') self.assertTrue(token) # can't get", "+ token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent", "password will be blocked after n attempts :return: \"\"\" rate = '6/hour' drf_settings", "= client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min jump", "time. 
Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data,", "APIClient from freezegun import freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)", "resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return a", "blocked after n attempts :return: \"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth']", "HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) #", "with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's", "status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force authentication by preventing the", "than one hour in time. 
Should be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29", "resp = client.post(url, data=data, format='json') token = resp.data.get('token') self.assertTrue(token) # can't get dataset", "from django.test import override_settings from rest_framework import status from rest_framework.test import APIClient from", "client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp", "\"password\" } resp = client.post(url, data=data, format='json') token = resp.data.get('token') self.assertTrue(token) # can't", "we have a token self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)", "# check that we have a token self.assertTrue('token' in resp.data) token = resp.data.get('token')", "back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp = client.post(url, data=data, format='json')", "brute force authentication by preventing the API user issuing too many auth-token request", "resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min", "django.test import override_settings from rest_framework import status from rest_framework.test import APIClient from freezegun", "from rest_framework.test import APIClient from freezegun import freeze_time from main.tests.api import helpers class", "client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = {", "import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def 
test_token_auth_end_point(self): \"\"\" Test that when hitting", "# can't get dataset list without token url = reverse('api:dataset-list') resp = client.get(url)", "data=data, format='json') token = resp.data.get('token') self.assertTrue(token) # can't get dataset list without token", "returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "import reverse from django.test import override_settings from rest_framework import status from rest_framework.test import", "# next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code,", "client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp", "self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"password\" } resp = client.post(url, data=data,", "client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min jump in", "= reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\"", "tick=True): for attempt in range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) #", "= resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test that the token received", "resp.data.get('token') self.assertTrue(token) # can't get dataset list without token url = reverse('api:dataset-list') resp", "settings 
from django.urls import reverse from django.test import override_settings from rest_framework import status", "\"\"\" client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data =", "for attempt in range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next", "more than one hour in time. Should be back at returning HTTP_400_BAD_REQUEST with", "have a token self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def", "= 6 client = APIClient() # request token url = reverse('api:auth-token') user =", "that when hitting the auth_token end point we receive a token :return: \"\"\"", "format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one hour in time. 
Should", "= self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"password\" } resp =", "= client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force authentication", "received can be used for authentication :return: \"\"\" client = APIClient() user =", "with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt): resp = client.post(url, data=data, format='json')", "\"password\": \"password\" } resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that", "auth-token request \"\"\" def test_brute_force(self): \"\"\" test that a hacker sending auth request", "too many auth-token request \"\"\" def test_brute_force(self): \"\"\" test that a hacker sending", "freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code,", "url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token", "simulate a 30 min jump in time. 
Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29", "still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code,", "reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\" }", "'6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client", "be blocked after n attempts :return: \"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK", "at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code,", "\"password\": \"password\" } resp = client.post(url, data=data, format='json') token = resp.data.get('token') self.assertTrue(token) #", "status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one hour in time. 
Should be back", "\"password\": \"<PASSWORD>\" } # Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle", "auth_token end point we receive a token :return: \"\"\" client = APIClient() #", "import override_settings from rest_framework import status from rest_framework.test import APIClient from freezegun import", "self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt", "self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data,", "\"\"\" Test that when hitting the auth_token end point we receive a token", "freezegun import freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self):", "import freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\"", "self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = { 'username': user.username, \"password\": \"password\" } resp", "client.post(url, data=data, format='json') token = resp.data.get('token') self.assertTrue(token) # can't get dataset list without", "the auth_token end point we receive a token :return: \"\"\" client = APIClient()", "token :return: \"\"\" client = APIClient() # request token url = reverse('api:auth-token') user", "tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than", "= settings.REST_FRAMEWORK 
drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient()", "helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that when hitting the", "TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force authentication by preventing the API user", "request token url = reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username':", "Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\",", "\"\"\" Test that the token received can be used for authentication :return: \"\"\"", "force authentication by preventing the API user issuing too many auth-token request \"\"\"", "be used for authentication :return: \"\"\" client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password'))", "with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient() # request token url =", "data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have a token self.assertTrue('token' in", "from rest_framework import status from rest_framework.test import APIClient from freezegun import freeze_time from", "we receive a token :return: \"\"\" client = APIClient() # request token url", "\"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt should return HTTP_400_BAD_REQUEST while attempts <", "= { 'username': \"readonly\", \"password\": \"password\" } resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code,", "main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): 
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that when", "API user issuing too many auth-token request \"\"\" def test_brute_force(self): \"\"\" test that", "client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use", "user issuing too many auth-token request \"\"\" def test_brute_force(self): \"\"\" test that a", "status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url) self.assertEqual(resp.status_code,", "data = { 'username': \"readonly\", \"password\": \"password\" } resp = client.post(url, data=data, format='json')", "the API user issuing too many auth-token request \"\"\" def test_brute_force(self): \"\"\" test", "Test that when hitting the auth_token end point we receive a token :return:", "\"readonly\", \"password\": \"password\" } resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check", "token received can be used for authentication :return: \"\"\" client = APIClient() user", "can't get dataset list without token url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code,", "the token received can be used for authentication :return: \"\"\" client = APIClient()", "while attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt):", "django.urls import reverse from django.test import override_settings from rest_framework import status from rest_framework.test", "= reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"password\"", "# set credential token 
client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK)", "rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient() # request token url", "def test_token_auth_end_point(self): \"\"\" Test that when hitting the auth_token end point we receive", "return HTTP_400_BAD_REQUEST while attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt", "= client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have a token", "authentication :return: \"\"\" client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token')", "def test_brute_force(self): \"\"\" test that a hacker sending auth request with wrong password", "user.username, \"password\": \"password\" } resp = client.post(url, data=data, format='json') token = resp.data.get('token') self.assertTrue(token)", "attempts :return: \"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with", "after n attempts :return: \"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] =", "6 client = APIClient() # request token url = reverse('api:auth-token') user = self.readonly_user", "reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token '", "class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force authentication by preventing the API", "override_settings from rest_framework import status from rest_framework.test import APIClient from freezegun import freeze_time", "token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class 
TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute", "format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url,", "issuing too many auth-token request \"\"\" def test_brute_force(self): \"\"\" test that a hacker", "rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt =", "'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt should return HTTP_400_BAD_REQUEST while attempts", "'username': user.username, \"password\": \"password\" } resp = client.post(url, data=data, format='json') token = resp.data.get('token')", "= client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token)", "in range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should", "data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp =", "self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have a token self.assertTrue('token' in resp.data) token", "HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30", "data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min jump in time.", "token = resp.data.get('token') self.assertTrue(token) # can't get dataset list without token url =", "' + token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): 
\"\"\" Use case:", "one hour in time. Should be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\",", "attempt in range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt", "{ 'username': \"readonly\", \"password\": \"password\" } resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK)", "APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = { 'username': user.username,", "token self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\"", "30 min jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True):", "request \"\"\" def test_brute_force(self): \"\"\" test that a hacker sending auth request with", "reverse from django.test import override_settings from rest_framework import status from rest_framework.test import APIClient", "# Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate with freeze_time(\"2018-05-29", "\"\"\" client = APIClient() # request token url = reverse('api:auth-token') user = self.readonly_user", "resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test that the", "a 30 min jump in time. 
Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\",", "sending auth request with wrong password will be blocked after n attempts :return:", "attempts < throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt): resp", "when hitting the auth_token end point we receive a token :return: \"\"\" client", "rest_framework import status from rest_framework.test import APIClient from freezegun import freeze_time from main.tests.api", "token url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential", "will be blocked after n attempts :return: \"\"\" rate = '6/hour' drf_settings =", "= resp.data.get('token') self.assertTrue(token) # can't get dataset list without token url = reverse('api:dataset-list')", "freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test", "in time. Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url,", "reverse('api:auth-token') data = { 'username': user.username, \"password\": \"password\" } resp = client.post(url, data=data,", "12:30:00\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more", "next attempt should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS)", "= { 'username': user.username, \"password\": \"password\" } resp = client.post(url, data=data, format='json') token", "in time. 
Should be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp", "# let's simulate a 30 min jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS", "self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a 30 min jump in time. Should still", "\"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt", "= rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient() # request token", "= client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one hour", "hitting the auth_token end point we receive a token :return: \"\"\" client =", "that the token received can be used for authentication :return: \"\"\" client =", "status from rest_framework.test import APIClient from freezegun import freeze_time from main.tests.api import helpers", "drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client =", "= APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = { 'username':", "receive a token :return: \"\"\" client = APIClient() # request token url =", "credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase):", "a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate a", "authentication by preventing the API user issuing too many auth-token request \"\"\" def", "= client.post(url, data=data, format='json') token = 
resp.data.get('token') self.assertTrue(token) # can't get dataset list", "in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test that", "Should be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp = client.post(url,", "dataset list without token url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN])", "auth request with wrong password will be blocked after n attempts :return: \"\"\"", "self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force authentication by preventing", "with wrong password will be blocked after n attempts :return: \"\"\" rate =", "import settings from django.urls import reverse from django.test import override_settings from rest_framework import", "settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient() #", "freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump", "for authentication :return: \"\"\" client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url =", "\"\"\" def test_brute_force(self): \"\"\" test that a hacker sending auth request with wrong", "\"<PASSWORD>\" } # Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate", "resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), 
REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test that the token received can", "user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = { 'username': user.username, \"password\":", "user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\" } #", "} resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have", "used for authentication :return: \"\"\" client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url", "[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url)", "hour in time. Should be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True):", "url = reverse('api:auth-token') data = { 'username': user.username, \"password\": \"password\" } resp =", "let's simulate a 30 min jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS with", "time. 
Should be back at returning HTTP_400_BAD_REQUEST with freeze_time(\"2018-05-29 13:00:05\", tick=True): resp =", "should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's", "resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force", "client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have a token self.assertTrue('token'", "\"password\" } resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we", "case: Prevent brute force authentication by preventing the API user issuing too many", "self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp =", "= self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking", "drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings): max_attempt = 6 client = APIClient() # request", "rest_framework.test import APIClient from freezegun import freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase):", "max_attempt = 6 client = APIClient() # request token url = reverse('api:auth-token') user", "token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test that the token", "from django.conf import settings from django.urls import reverse from django.test import override_settings from", "format='json') token = resp.data.get('token') 
self.assertTrue(token) # can't get dataset list without token url", "can be used for authentication :return: \"\"\" client = APIClient() user = self.readonly_user", "url = reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\":", "return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS)", "= self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = { 'username': user.username, \"password\": \"password\"", "< throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt): resp =", "# let's jump more than one hour in time. Should be back at", "format='json') self.assertEqual(resp.status_code, status.HTTP_200_OK) # check that we have a token self.assertTrue('token' in resp.data)", "12:00:00\", tick=True): for attempt in range(max_attempt): resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "a token self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self):", "request with wrong password will be blocked after n attempts :return: \"\"\" rate", "n attempts :return: \"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate", "def test_token_valid(self): \"\"\" Test that the token received can be used for authentication", "{ 'username': user.username, \"password\": \"password\" } resp = client.post(url, data=data, format='json') token =", "REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def 
test_token_valid(self): \"\"\" Test that the token received can be used for", "attempt should return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) #", "client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\" Use case: Prevent brute force authentication by", "= client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) # next attempt should return a HTTP_429_TOO_MANY_REQUESTS", "self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"password\" } resp = client.post(url,", "status.HTTP_200_OK) # check that we have a token self.assertTrue('token' in resp.data) token =", "@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that when hitting the auth_token end point", "list without token url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) #", "from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that", "by preventing the API user issuing too many auth-token request \"\"\" def test_brute_force(self):", "that we have a token self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),", "without token url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, 
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set", "REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that when hitting the auth_token end point we", ":return: \"\"\" rate = '6/hour' drf_settings = settings.REST_FRAMEWORK drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate with override_settings(REST_FRAMEWORK=drf_settings):", "point we receive a token :return: \"\"\" client = APIClient() # request token", "check that we have a token self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token)", "get dataset list without token url = reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED,", "data = { 'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt should return", "that a hacker sending auth request with wrong password will be blocked after", "\"\"\" test that a hacker sending auth request with wrong password will be", "= { 'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt should return HTTP_400_BAD_REQUEST", "self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one hour in time. 
Should be", "end point we receive a token :return: \"\"\" client = APIClient() # request", "token client.credentials(HTTP_AUTHORIZATION='Token ' + token) resp = client.get(url) self.assertEqual(resp.status_code, status.HTTP_200_OK) class TestUserAuthThrottling(helpers.BaseUserTestCase): \"\"\"", "from django.urls import reverse from django.test import override_settings from rest_framework import status from", "return a HTTP_429_TOO_MANY_REQUESTS resp = client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's simulate", "reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\", \"password\": \"password\" }", "TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_auth_end_point(self): \"\"\" Test that when hitting the auth_token end", "data = { 'username': user.username, \"password\": \"password\" } resp = client.post(url, data=data, format='json')", "Should still return HTTP_429_TOO_MANY_REQUESTS with freeze_time(\"2018-05-29 12:30:00\", tick=True): resp = client.post(url, data=data, format='json')", "client.post(url, data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one hour in", ":return: \"\"\" client = APIClient() user = self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data", "a token :return: \"\"\" client = APIClient() # request token url = reverse('api:auth-token')", "throttle rate with freeze_time(\"2018-05-29 12:00:00\", tick=True): for attempt in range(max_attempt): resp = client.post(url,", "= reverse('api:dataset-list') resp = client.get(url) self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]) # set credential 
token client.credentials(HTTP_AUTHORIZATION='Token", "Use case: Prevent brute force authentication by preventing the API user issuing too", "a hacker sending auth request with wrong password will be blocked after n", "= reverse('api:auth-token') data = { 'username': user.username, \"password\": \"password\" } resp = client.post(url,", "self.assertTrue('token' in resp.data) token = resp.data.get('token') self.assertTrue(token) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test", "let's jump more than one hour in time. Should be back at returning", "data=data, format='json') self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS) # let's jump more than one hour in time.", "token url = reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = { 'username': \"readonly\",", "import APIClient from freezegun import freeze_time from main.tests.api import helpers class TestAuth(helpers.BaseUserTestCase): @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),", "@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',), REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS) def test_token_valid(self): \"\"\" Test that the token received can be used", "# request token url = reverse('api:auth-token') user = self.readonly_user self.assertTrue(user.check_password('password')) data = {", "{ 'username': \"readonly\", \"password\": \"<PASSWORD>\" } # Hacking attempt should return HTTP_400_BAD_REQUEST while", "hacker sending auth request with wrong password will be blocked after n attempts", "self.readonly_user self.assertTrue(user.check_password('password')) url = reverse('api:auth-token') data = { 'username': user.username, \"password\": \"password\" }" ]
[ "help_text='Date time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance',", "}, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date", "('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True,", "max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ),", "], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso',", "('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30,", "name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created", "['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True,", "models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)),", "instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), 
('fecha', models.DateField(default=datetime.date.today)), ('importe',", "null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel(", "models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by':", "null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract':", "('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar',", "null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True,", "Django 2.0.9 on 2019-07-28 03:39 import datetime from django.db import migrations, models class", "dependencies = [ ] operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio',", "models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by':", "on 2019-07-28 03:39 import datetime from django.db import migrations, models class Migration(migrations.Migration): initial", "from django.db import migrations, models class 
Migration(migrations.Migration): initial = True dependencies = [", "[ ] operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')), ('modified',", "['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Egreso', fields=[", "models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [", "for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año',", "migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for", "('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True,", "null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={", "= [ ] operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False,", "max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero',", "models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract':", "('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ),", "False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True,", "models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)),", "modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)),", "migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for", "verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)),", "null=True)), ('lugar', models.CharField(blank=True, 
max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago',", "models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified", "models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)),", "('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering':", "by Django 2.0.9 on 2019-07-28 03:39 import datetime from django.db import migrations, models", "('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30,", "max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering':", "('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'],", "('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), 
('usuario',", "2019-07-28 03:39 import datetime from django.db import migrations, models class Migration(migrations.Migration): initial =", "('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado',", "max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30,", "verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True,", "('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad',", "for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente',", "django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ]", "('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30,", "null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', 
models.BooleanField(default=False)),", "verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)),", "models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True,", "max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'],", "('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False,", "models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), ]", "serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date", "('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('concepto',", "operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True,", "models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True,", "[ 
migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time", "null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago',", "models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30,", "import datetime from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies", "for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('fecha',", "import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time", "('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'],", "models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, },", "('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time 
for", "Generated by Django 2.0.9 on 2019-07-28 03:39 import datetime from django.db import migrations,", "null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)),", "models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True,", "= [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')),", "primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True,", "time for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)),", "models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30,", "'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "('importante', models.BooleanField(default=False)), ('adeudo_acumulado', 
models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False,", "models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={", "help_text='Date time for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30,", "max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante',", "null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)),", "options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[", "instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe',", "2.0.9 on 2019-07-28 03:39 import datetime from django.db import migrations, models class Migration(migrations.Migration):", "] operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created',", "True dependencies = [ ] operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id', models.AutoField(auto_created=True,", "models.PositiveIntegerField(default=2019)), ('cliente', 
models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)),", "migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)),", "('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes',", "('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True,", "null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ], options={ 'ordering': ['-created',", "models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente', models.CharField(blank=True, max_length=30, null=True)),", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel(", "created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)),", "models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', 
models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created',", "), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time", "models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, },", "('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30, null=True)), ],", "null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)),", "('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes',", "max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True, max_length=30,", "models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True,", "max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('importe', models.PositiveIntegerField(default=0)), ('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', 
models.PositiveIntegerField(default=0)), ],", "03:39 import datetime from django.db import migrations, models class Migration(migrations.Migration): initial = True", "time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)),", "null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)),", "('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes',", "models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0,", "<reponame>albertoaldanar/serecsinAPI # Generated by Django 2.0.9 on 2019-07-28 03:39 import datetime from django.db", "max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None,", "null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi',", "name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created", "for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, 
null=True)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año',", "modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)),", "Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Egreso',", "models.CharField(blank=True, max_length=30, null=True)), ('concepto', models.CharField(blank=True, max_length=30, null=True)), ('genero', models.CharField(blank=True, max_length=30, null=True)), ('cantidad', models.FloatField(blank=True,", "verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)),", "created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True,", "max_length=30, null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30,", "= True dependencies = [ ] operations = [ migrations.CreateModel( name='Egreso', fields=[ ('id',", "('adeudo_mes', models.PositiveIntegerField(default=0)), ('importante', models.BooleanField(default=False)), ('adeudo_acumulado', models.PositiveIntegerField(default=0)), ], options={ 'ordering': ['-created', '-modified'], 'get_latest_by': ['created'],", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance',", 
"null=True)), ('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)),", "max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)),", "instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30,", "instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True, max_length=30, null=True)), ('año', models.PositiveIntegerField(default=2019)), ('cliente',", "time for created instance', verbose_name='created_a')), ('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')),", "models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')), ('fecha', models.DateField(default=datetime.date.today)), ('importe', models.PositiveIntegerField(default=0)), ('mes', models.CharField(blank=True,", "'ordering': ['-created', '-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id',", "default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen', models.PositiveIntegerField(default=0, null=True)),", "datetime from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies =", "models.CharField(blank=True, max_length=30, null=True)), 
('cuenta_origen', models.PositiveIntegerField(default=0, null=True)), ('metodo_pago', models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30,", "help_text='Date time for modified instance', verbose_name='modified_at')), ('cliente', models.CharField(blank=True, max_length=30, null=True)), ('mes', models.CharField(blank=True, max_length=30,", "('cantidad', models.FloatField(blank=True, default=None, null=True)), ('usuario', models.CharField(blank=True, max_length=30, null=True)), ('lugar', models.CharField(blank=True, max_length=30, null=True)), ('cuenta_origen',", "# Generated by Django 2.0.9 on 2019-07-28 03:39 import datetime from django.db import", "'-modified'], 'get_latest_by': ['created'], 'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models.CharField(blank=True, max_length=30, null=True)), ('forma_pago', models.CharField(blank=True, max_length=30, null=True)), ('cfdi', models.CharField(blank=True, max_length=30, null=True)), ('folio', models.CharField(blank=True,", "'abstract': False, }, ), migrations.CreateModel( name='Ingreso', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created'," ]
[ "[ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255,", "migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations = [", "model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On',", "models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField(", "max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ),", "on 2021-04-20 06:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField(", "Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device', name='state',", "by Django 2.1.15 on 2021-04-20 06:56 from django.db import migrations, models class Migration(migrations.Migration):", "null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), ]", "class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device',", "= [ ('core', '0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField(", "('core', 
'0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo',", "06:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'),", "Generated by Django 2.1.15 on 2021-04-20 06:56 from django.db import migrations, models class", "Django 2.1.15 on 2021-04-20 06:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations", "migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal',", "name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'),", "[ ('core', '0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device',", "'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')],", "# Generated by Django 2.1.15 on 2021-04-20 06:56 from django.db import migrations, models", "('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255,", "'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True),", "), migrations.AddField( model_name='device', 
name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device',", "operations = [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off',", "] operations = [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'),", "2021-04-20 06:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core',", "= [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')],", "'0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ]", "model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ),", "migrations.RemoveField( model_name='device', name='state', ), migrations.AddField( model_name='device', name='stateAlgo', field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations =", "field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True), ), migrations.AddField( model_name='device', name='stateReal', field=models.CharField(choices=[('On', 'On'), ('Off',", 
"<filename>app/core/migrations/0003_auto_20210420_0656.py # Generated by Django 2.1.15 on 2021-04-20 06:56 from django.db import migrations,", "dependencies = [ ('core', '0002_auto_20210420_0653'), ] operations = [ migrations.RemoveField( model_name='device', name='state', ),", "2.1.15 on 2021-04-20 06:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies =" ]
[ "that lead to the selected edge p_edges = list() for p_edge in e_chart.edges():", "else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence", "sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch", "0 for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return prob", "= sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist", "= 'approach pinch twist unpinch move' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist' tokens", "rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge):", "unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach", "print selected_edge, production.prob() return production.prob() def find_parent(selected_edge): # Find the parent edges that", "edge, and return its probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs():", "end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate = list() for s,", "'approach pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence =", "move' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach", "= 'approach pinch twist unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "print prediction 
sentence = 'approach pinch twist unpinch move grasp_right twist' tokens =", "selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute the", "prediction sentence = 'approach pinch twist unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "rules = [rule.strip() for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def", "lead to the selected edge p_edges = list() for p_edge in e_chart.edges(): if", "tokens) print prediction sentence = 'approach pinch twist' tokens = sentence.split() prediction =", "print prediction sentence = 'approach pinch twist unpinch move' tokens = sentence.split() prediction", "print prediction sentence = 'approach pinch twist unpinch move grasp_right' tokens = sentence.split()", "for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return prob symbols", "prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch' tokens = sentence.split()", "'approach pinch twist unpinch move grasp_right twist' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "to the selected edge p_edges = list() for p_edge in e_chart.edges(): if p_edge.end()", "*= parent_prob return prob symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart =", "duplicate symbols_no_duplicate = list() probs_no_duplicate = list() for s, p in zip(symbols, probs):", "list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate", "the edge by recursion prob = get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob =", "tokens) print prediction sentence = 'approach pinch twist unpinch move grasp_right twist' tokens", "return grammar def predict_next_symbols(grammar, tokens): def 
get_production_prob(selected_edge): # Find the corresponding production rule", "nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list() for edge in e_chart.edges(): #", "the parent edges that lead to the selected edge p_edges = list() for", "e_chart.edges(): # print edge if edge.end() == len(tokens): # Only add terminal nodes", "print edge if edge.end() == len(tokens): # Only add terminal nodes if isinstance(edge.nextsym(),", "len(tokens): # Only add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs =", "Only add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for", "grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach", "twist unpinch move grasp_right' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "'approach pinch twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence", "= list() for edge in e_chart.edges(): # print edge if edge.end() == len(tokens):", "+= p return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach'", "probs): if s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return", "probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split() prediction", "= sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach 
pinch twist'", "parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return prob symbols = list() earley_parser =", "= predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move' tokens", "sentence = 'approach pinch twist unpinch move' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "move grasp_right' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence =", "for edge in e_chart.edges(): # print edge if edge.end() == len(tokens): # Only", "def read_induced_grammar(path): with open(path) as f: rules = [rule.strip() for rule in f.readlines()]", "Find the parent edges that lead to the selected edge p_edges = list()", "of the edge by recursion prob = get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob", "in e_chart.edges(): # print edge if edge.end() == len(tokens): # Only add terminal", "def find_parent(selected_edge): # Find the parent edges that lead to the selected edge", "parent edges that lead to the selected edge p_edges = list() for p_edge", "sentence = 'approach pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "selected_edge.rhs(): # print selected_edge, production.prob() return production.prob() def find_parent(selected_edge): # Find the parent", "# Only add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list()", "end_edges = list() for edge in e_chart.edges(): # print edge if edge.end() ==", "parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return prob symbols =", "as f: rules = [rule.strip() for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return", "symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return 
zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar", "unpinch move' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence =", "= predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist' tokens = sentence.split()", "tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch", "def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split() prediction =", "predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find the corresponding production rule of the edge,", "# print edge if edge.end() == len(tokens): # Only add terminal nodes if", "p_edges = list() for p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start() and p_edge.nextsym()", "probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence =", "sentence = 'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence", "'approach pinch twist unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob() return", "list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list() for edge", "symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar =", "parent_prob return prob symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens)", "and 
return its probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): #", "symbols_no_duplicate = list() probs_no_duplicate = list() for s, p in zip(symbols, probs): if", "tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction if __name__ == '__main__':", "== selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute", "edge if edge.end() == len(tokens): # Only add terminal nodes if isinstance(edge.nextsym(), unicode):", "print prediction sentence = 'approach pinch twist unpinch' tokens = sentence.split() prediction =", "p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return", "[rule.strip() for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens):", "production.prob() return production.prob() def find_parent(selected_edge): # Find the parent edges that lead to", "sentence = 'approach pinch twist unpinch move grasp_right' tokens = sentence.split() prediction =", "unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate", "if s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate,", "edge p_edges = list() for p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start() and", "= predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move grasp_right'", "earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list() for edge in", "grasp_right' tokens = sentence.split() prediction = 
predict_next_symbols(grammar, tokens) print prediction sentence = 'approach", "find_parent(selected_edge): # Find the parent edges that lead to the selected edge p_edges", "def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find the corresponding production rule of the", "twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach", "in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob() return production.prob() def", "nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge in end_edges:", "earley_parser.chart_parse(tokens) end_edges = list() for edge in e_chart.edges(): # print edge if edge.end()", "e_chart.edges(): if p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def", "# Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate = list() for s, p in", "= earley_parser.chart_parse(tokens) end_edges = list() for edge in e_chart.edges(): # print edge if", "predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move' tokens =", "end_edges.append(edge) probs = list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate", "edge.end() == len(tokens): # Only add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge)", "read_induced_grammar(path): with open(path) as f: rules = [rule.strip() for rule in f.readlines()] grammar", "return p_edges def get_edge_prob(selected_edge): # Compute the probability of the edge by recursion", "# Compute the probability of the edge by recursion prob = get_production_prob(selected_edge) if", "s, p in zip(symbols, probs): if s not in symbols_no_duplicate: 
symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else:", "print prediction sentence = 'approach pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist' tokens =", "pinch twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence =", "prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move", "of the edge, and return its probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs()", "end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate = list()", "grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find the", "prob = get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob = 0 for parent_edge in", "list() for p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs():", "symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate", "return production.prob() def find_parent(selected_edge): # Find the parent edges that lead to the", "!= 0: parent_prob = 0 for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob", "selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute the probability of the edge", "prob symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges =", "predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist' tokens = sentence.split() prediction", "with 
open(path) as f: rules = [rule.strip() for rule in f.readlines()] grammar =", "unpinch move grasp_right twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "= sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction if __name__ == '__main__': main()", "the edge, and return its probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() ==", "get_production_prob(selected_edge): # Find the corresponding production rule of the edge, and return its", "probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob()", "open(path) as f: rules = [rule.strip() for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules)", "if selected_edge.start() != 0: parent_prob = 0 for parent_edge in find_parent(selected_edge): parent_prob +=", "tokens) print prediction sentence = 'approach pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "the selected edge p_edges = list() for p_edge in e_chart.edges(): if p_edge.end() ==", "f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find", "= sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch' tokens", "selected_edge, production.prob() return production.prob() def find_parent(selected_edge): # Find the parent edges that lead", "print prediction sentence = 'approach pinch twist' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "= predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch' tokens =", "nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find the corresponding production", "sentence.split() 
prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch' tokens =", "p in zip(symbols, probs): if s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)]", "if edge.end() == len(tokens): # Only add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym())", "predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch' tokens = sentence.split() prediction =", "twist unpinch move' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence", "p return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens", "pinch twist unpinch move grasp_right' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge", "pinch twist unpinch move grasp_right twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "= 'approach pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence", "+= get_edge_prob(parent_edge) prob *= parent_prob return prob symbols = list() earley_parser = nltk.EarleyChartParser(grammar,", "symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list()", "parent_prob = 0 for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob", "Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate = list() for s, p in zip(symbols,", "edge by recursion prob = get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob = 0", "in symbols_no_duplicate: 
symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate) def main():", "sentence = 'approach pinch twist unpinch move grasp_right twist' tokens = sentence.split() prediction", "if p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge):", "p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute the probability of the edge by", "tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch'", "tokens) print prediction sentence = 'approach pinch twist unpinch move' tokens = sentence.split()", "recursion prob = get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob = 0 for parent_edge", "= 'approach pinch twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move grasp_right' tokens", "corresponding production rule of the edge, and return its probability for production in", "get_edge_prob(selected_edge): # Compute the probability of the edge by recursion prob = get_production_prob(selected_edge)", "= nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list() for edge in e_chart.edges():", "= list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list() for", "Find the corresponding production rule of the edge, and return its probability for", "selected edge p_edges = list() for p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start()", "grasp_right twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction if __name__", "in e_chart.edges(): if 
p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges", "edges that lead to the selected edge p_edges = list() for p_edge in", "move grasp_right twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction if", "grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob() return production.prob() def find_parent(selected_edge):", "pinch twist unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence", "tokens) print prediction sentence = 'approach pinch twist unpinch' tokens = sentence.split() prediction", "= nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find the corresponding", "grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): # Find the corresponding production rule of", "= get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob = 0 for parent_edge in find_parent(selected_edge):", "not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate) def", "prediction sentence = 'approach pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "sentence = 'approach pinch twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "list() for edge in e_chart.edges(): # print edge if edge.end() == len(tokens): #", "terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge in", "in zip(symbols, probs): if s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: 
probs_no_duplicate[symbols_no_duplicate.index(s)] +=", "predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move grasp_right twist'", "isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) #", "trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges = list() for edge in e_chart.edges(): # print", "p_edges def get_edge_prob(selected_edge): # Compute the probability of the edge by recursion prob", "= 'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence =", "zip(symbols, probs): if s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p", "import nltk def read_induced_grammar(path): with open(path) as f: rules = [rule.strip() for rule", "= 0 for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return", "production rule of the edge, and return its probability for production in grammar.productions(lhs=selected_edge.lhs()):", "= 'approach pinch twist unpinch move grasp_right twist' tokens = sentence.split() prediction =", "= read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "'approach pinch twist unpinch move grasp_right' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return prob symbols = list() earley_parser", "= list() for p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start() and p_edge.nextsym() ==", "0: parent_prob = 0 for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *=", "prob *= parent_prob return prob 
symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart", "for p_edge in e_chart.edges(): if p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge)", "= list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list()", "= list() for s, p in zip(symbols, probs): if s not in symbols_no_duplicate:", "probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt')", "pinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach", "def get_production_prob(selected_edge): # Find the corresponding production rule of the edge, and return", "production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob() return production.prob()", "selected_edge.start() != 0: parent_prob = 0 for parent_edge in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge)", "its probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print selected_edge,", "# Find the corresponding production rule of the edge, and return its probability", "production.prob() def find_parent(selected_edge): # Find the parent edges that lead to the selected", "def get_edge_prob(selected_edge): # Compute the probability of the edge by recursion prob =", "probs_no_duplicate = list() for s, p in zip(symbols, probs): if s not in", "prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch' tokens", "probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate = list() for s, p", "tokens) print prediction sentence = 
'approach pinch twist unpinch move grasp_right' tokens =", "Compute the probability of the edge by recursion prob = get_production_prob(selected_edge) if selected_edge.start()", "return prob symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0) e_chart = earley_parser.chart_parse(tokens) end_edges", "twist unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence =", "zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split()", "sentence = 'approach pinch twist unpinch' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "return its probability for production in grammar.productions(lhs=selected_edge.lhs()): if production.rhs() == selected_edge.rhs(): # print", "edge in e_chart.edges(): # print edge if edge.end() == len(tokens): # Only add", "main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "return zip(symbols_no_duplicate, probs_no_duplicate) def main(): grammar = read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens =", "twist unpinch move grasp_right twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob() return production.prob() def find_parent(selected_edge): # Find", "prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move'", "twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction if __name__ ==", "# Find the parent edges that lead to the selected edge p_edges =", "sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch'", 
"for s, p in zip(symbols, probs): if s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p)", "pinch twist unpinch move' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "<reponame>tdonca/OpenBottle<filename>grammar_induction/earley_parser/earley_parser.py import nltk def read_induced_grammar(path): with open(path) as f: rules = [rule.strip() for", "= predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch move grasp_right", "the probability of the edge by recursion prob = get_production_prob(selected_edge) if selected_edge.start() !=", "the corresponding production rule of the edge, and return its probability for production", "get_edge_prob(parent_edge) prob *= parent_prob return prob symbols = list() earley_parser = nltk.EarleyChartParser(grammar, trace=0)", "= 'approach pinch twist unpinch move grasp_right' tokens = sentence.split() prediction = predict_next_symbols(grammar,", "'approach pinch twist unpinch move' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print", "unpinch move grasp_right' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction sentence", "for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def", "if production.rhs() == selected_edge.rhs(): # print selected_edge, production.prob() return production.prob() def find_parent(selected_edge): #", "f: rules = [rule.strip() for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar", "list() for s, p in zip(symbols, probs): if s not in symbols_no_duplicate: symbols_no_duplicate.append(s)", "prediction sentence = 'approach pinch twist unpinch move' tokens = sentence.split() prediction =", "if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs = list() for end_edge in end_edges: 
probs.append(get_edge_prob(end_edge))", "# print selected_edge, production.prob() return production.prob() def find_parent(selected_edge): # Find the parent edges", "prediction sentence = 'approach pinch twist unpinch move grasp_right' tokens = sentence.split() prediction", "prediction sentence = 'approach pinch twist unpinch move grasp_right twist' tokens = sentence.split()", "= predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch' tokens = sentence.split() prediction", "== len(tokens): # Only add terminal nodes if isinstance(edge.nextsym(), unicode): symbols.append(edge.nextsym()) end_edges.append(edge) probs", "p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): #", "p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute the probability of", "probs = list() for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate =", "s not in symbols_no_duplicate: symbols_no_duplicate.append(s) probs_no_duplicate.append(p) else: probs_no_duplicate[symbols_no_duplicate.index(s)] += p return zip(symbols_no_duplicate, probs_no_duplicate)", "nltk def read_induced_grammar(path): with open(path) as f: rules = [rule.strip() for rule in", "get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob = 0 for parent_edge in find_parent(selected_edge): parent_prob", "for end_edge in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate =", "list() probs_no_duplicate = list() for s, p in zip(symbols, probs): if s not", "rule of the edge, and return its probability for production in grammar.productions(lhs=selected_edge.lhs()): if", "prediction sentence = 'approach pinch twist' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens)", "and 
p_edge.nextsym() == selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute the probability", "in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar, tokens): def get_production_prob(selected_edge): #", "predict_next_symbols(grammar, tokens) print prediction sentence = 'approach pinch twist unpinch' tokens = sentence.split()", "= list() probs_no_duplicate = list() for s, p in zip(symbols, probs): if s", "tokens): def get_production_prob(selected_edge): # Find the corresponding production rule of the edge, and", "== selected_edge.lhs(): p_edges.append(p_edge) return p_edges def get_edge_prob(selected_edge): # Compute the probability of the", "by recursion prob = get_production_prob(selected_edge) if selected_edge.start() != 0: parent_prob = 0 for", "in find_parent(selected_edge): parent_prob += get_edge_prob(parent_edge) prob *= parent_prob return prob symbols = list()", "read_induced_grammar('../grammars/parser_input.txt') sentence = 'approach' tokens = sentence.split() prediction = predict_next_symbols(grammar, tokens) print prediction", "in end_edges: probs.append(get_edge_prob(end_edge)) # Eliminate duplicate symbols_no_duplicate = list() probs_no_duplicate = list() for", "= [rule.strip() for rule in f.readlines()] grammar = nltk.PCFG.fromstring(rules) return grammar def predict_next_symbols(grammar,", "probability of the edge by recursion prob = get_production_prob(selected_edge) if selected_edge.start() != 0:", "== selected_edge.rhs(): # print selected_edge, production.prob() return production.prob() def find_parent(selected_edge): # Find the", "e_chart = earley_parser.chart_parse(tokens) end_edges = list() for edge in e_chart.edges(): # print edge" ]
[ "'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources),", "from .dataset import Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch =", "= \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda", "Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self,", "# -*- coding: utf-8 -*- import fuel.datasets from .dataset import Dataset class CIFAR100(Dataset):", "sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def load_data(sets=None, sources=None, fuel_dir=False): return", "basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources): return", "CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources):", ".dataset import Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100", "class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets)", "map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def load_data(sets=None, sources=None, fuel_dir=False): return CIFAR100().load_data(sets, sources, fuel_dir);", "-*- coding: utf-8 -*- import fuel.datasets from .dataset import Dataset class CIFAR100(Dataset): basename", "sources): return map(lambda s: 
fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def load_data(sets=None, sources=None, fuel_dir=False): return CIFAR100().load_data(sets,", "return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def load_data(sets=None, sources=None, fuel_dir=False): return CIFAR100().load_data(sets, sources,", "coding: utf-8 -*- import fuel.datasets from .dataset import Dataset class CIFAR100(Dataset): basename =", "\"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda s:", "-*- import fuel.datasets from .dataset import Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features',", "build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def load_data(sets=None, sources=None, fuel_dir=False):", "class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets,", "default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s],", "utf-8 -*- import fuel.datasets from .dataset import Dataset class CIFAR100(Dataset): basename = \"cifar100\"", "fuel.datasets from .dataset import Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch", "fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def load_data(sets=None,", "import fuel.datasets from .dataset import Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels']", "def build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], 
sources=sources), sets) def load_data(sets=None, sources=None,", "<reponame>dribnet/kerosene # -*- coding: utf-8 -*- import fuel.datasets from .dataset import Dataset class", "= fuel.datasets.CIFAR100 def build_data(self, sets, sources): return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets) def", "import Dataset class CIFAR100(Dataset): basename = \"cifar100\" default_sources=['features', 'coarse_labels'] class_for_filename_patch = fuel.datasets.CIFAR100 def" ]
[ "[] for image_key in imageItems: imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail'] =", "size_infos = {} size_values = [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id", "if len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){ return productDetail; } ''' %", "x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id']", "colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images']", "= category baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender'] = gender baseItem['brand'] =", "= item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url,", "color_names baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc']", "= sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type']", "len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages,", "'gender' : gender, 'category_url' : category_url}) def parse_item(self, response): baseItem = response.meta['baseItem'] return", "callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url})", "= 
color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0]", "ImageItem, Color, SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price import re", "len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url +", "= {} for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0:", "= image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images:", "key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys():", "ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response) category = response.meta['category'] product_type = response.meta['product_type']", "import handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel =", "gender, 'category_url' : category_url}) def parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem)", "= sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if", "from gorden_crawler.utils.item_field_handler import handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response):", "= Selector(response) product_id = 
sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name baseItem['show_product_id'] =", "colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images yield", "baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){ return productDetail;", "sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in color_price_blocks: color_name =", "color_price_mapping = {} for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) >", "0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names =", "meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url}) def", "size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block", "item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, 
callback=self.parse_item,", "self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images yield colorItem sizes", "'product_type' : product_type, 'gender' : gender, 'category_url' : category_url}) def parse_item(self, response): baseItem", "skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size']", "= size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = sel.xpath('//div[@itemprop=\"description\"]').extract()[0] baseItem['dimensions'] = ['size', 'color']", "image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1])", "for image_key in imageItems: imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail']", "key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color'", "size_values = [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price =", "[] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks", "= list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price']", "next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type'", "response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response, 
baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context", "def handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s function", "re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response) category =", ": gender, 'category_url' : category_url}) def parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response,", "skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else:", "= Selector(response) category = response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender'] category_url =", "product_id size_js_infos = product_detail['sizes'] size_infos = {} size_values = [] for size_id in", "# -*- coding: utf-8 -*- from gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import Selector", "item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type']", "gorden_crawler.utils.item_field_handler import handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel", "next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url +", "product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos", "response.meta['gender'] 
category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for", "!= next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category,", "import Request from gorden_crawler.utils.item_field_handler import handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider): def", "def parse_pages(self, response): sel = Selector(response) category = response.meta['category'] product_type = response.meta['product_type'] gender", "handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract()", "'base' baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender'] = gender", "and color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price']", "for item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem", "imageItems: imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom']", "category = response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis", "imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in", ": for item_link_li in item_link_lis: 
item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri", "callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]):", "response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if", "response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){ return", "color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = [] for key in image_items: imageItems =", "size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name", "= regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = [] for", "image_items[key]['sizes'] for size in sizes: size_name = size_infos[size] skuItem = SkuItem() skuItem['type'] =", "category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li", "import BaseItem, ImageItem, Color, SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price", "image_items = product_detail['colors'] color_names = [] for key in image_items: imageItems = image_items[key]['images']", "skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container", "baseItem['type'] = 'base' 
baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender']", "skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0]", "'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size", "+ item_link_uri baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category'] = category baseItem['product_type'] =", "sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name baseItem['show_product_id']", "return productDetail; } ''' % (product_detail_str)) product_detail = context.call('get_product_detail') sel = Selector(response) product_id", "image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple", "= item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] =", "baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url", "[] for key in image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[]", "size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = 
sel.xpath('//div[@itemprop=\"description\"]').extract()[0] baseItem['dimensions'] = ['size',", "import Selector from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from scrapy import Request", "Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' :", "for key in image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images", "for size in sizes: size_name = size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku'", "gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0])", "= product_type baseItem['url'] = url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] =", "= response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0:", "BaseItem() baseItem['type'] = 'base' baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url'] = url", "= 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] =", "= [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0]", "size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = 
size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]')", "category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url}) def parse_item(self, response):", "response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis:", "gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import Selector from gorden_crawler.items import BaseItem, ImageItem, Color,", "baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price']", "= url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] =", "self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category'] = category baseItem['product_type']", "baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = sel.xpath('//div[@itemprop=\"description\"]').extract()[0] baseItem['dimensions'] = ['size', 'color'] yield baseItem", "size_js_infos = product_detail['sizes'] size_infos = {} size_values = [] for size_id in size_js_infos:", "return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context =", "self.handle_parse_item(response, baseItem) def 
handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile('''", "in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping", "% (product_detail_str)) product_detail = context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus =", "handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response)", "if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield Request(url,", "baseItem) def handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s", "execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response) category = response.meta['category'] product_type", "response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body))", "= key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name in", "product\")]') if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url =", "item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = 
item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0])", "= SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id'] =", "if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = sel.xpath('//div[@itemprop=\"description\"]').extract()[0] baseItem['dimensions']", "image_key in imageItems: imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image']", "tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem", "skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus']", "= gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] =", "sizes = image_items[key]['sizes'] for size in sizes: size_name = size_infos[size] skuItem = SkuItem()", "product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images", "baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos = {} size_values = [] for", 
"sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name)", "category baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0]", "skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id']", "= [] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos =", "= Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover'] =", "item_link_uri baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category'] = category baseItem['product_type'] = product_type", "coding: utf-8 -*- from gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import Selector from gorden_crawler.items", "color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items", "size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {}", "{} size_values = [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price", "product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price 
if len(color_price_mapping)>0 and", "for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span =", "Color, SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price import re import", "= size_values baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit", "color_names = [] for key in image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip()", "''' % (product_detail_str)) product_detail = context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus", "ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images", "colorItem['images'] = images yield colorItem sizes = image_items[key]['sizes'] for size in sizes: size_name", "def parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response, baseItem):", "= response.meta['gender'] category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 :", "response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class,", "= context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] =", "size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] 
color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in", "SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id", "gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler import", "size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price'] =", "False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]')", "= response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li in", "imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'],", "color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract()", "image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for", "url = self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' :", "image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for image_key in imageItems: imageItem = ImageItem()", "Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch']", "baseItem['url'] = 
url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover']", "color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract()", "sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes']", "color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for image_key in imageItems: imageItem", "= self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size'] =", "else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names", "= [] for key in image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name)", "colorItem sizes = image_items[key]['sizes'] for size in sizes: size_name = size_infos[size] skuItem =", "colorItem['name'] = color_name colorItem['images'] = images yield colorItem sizes = image_items[key]['sizes'] for size", "len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){ return productDetail; } ''' % (product_detail_str))", "= sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri =", "in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id 
colorItem['from_site']", "else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = [] for key in", "images=[] tmp_images = [] for image_key in imageItems: imageItem = ImageItem() image =", "baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url']", "execjs.compile(''' %s function get_product_detail(){ return productDetail; } ''' % (product_detail_str)) product_detail = context.call('get_product_detail')", ": category_url}) def parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self,", "context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name", "image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for image_key in imageItems:", "item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category']", "= sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url + next_page_link[0]", "= color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] =", "BaseSpider from scrapy.selector import Selector from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from", "= BaseItem() baseItem['type'] = 'base' baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url'] =", "= handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = 
handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link =", "for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks =", "skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price']", "get_product_detail(){ return productDetail; } ''' % (product_detail_str)) product_detail = context.call('get_product_detail') sel = Selector(response)", "productDetail; } ''' % (product_detail_str)) product_detail = context.call('get_product_detail') sel = Selector(response) product_id =", "= color_name colorItem['images'] = images yield colorItem sizes = image_items[key]['sizes'] for size in", "skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id']", "len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else:", "size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc']", "tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id", "baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0]", ": product_type, 'gender' : gender, 'category_url' : 
category_url}) def parse_item(self, response): baseItem =", "skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False", "= response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]')", "tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] =", "= self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos = {} size_values =", "yield colorItem sizes = image_items[key]['sizes'] for size in sizes: size_name = size_infos[size] skuItem", "if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url", "handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and", "color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = []", "= color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0:", "'category_url' : category_url}) def parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def", "imageItems = image_items[key]['images'] color_name = 
image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for image_key", "color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price", "baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem'", "'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name", "in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price']", "= skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\"", "= product_id size_js_infos = product_detail['sizes'] size_infos = {} size_values = [] for size_id", "= image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for image_key in imageItems: imageItem =", "%s function get_product_detail(){ return productDetail; } ''' % (product_detail_str)) product_detail = context.call('get_product_detail') sel", "self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos = {} size_values = []", "scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider):", "} ''' % (product_detail_str)) product_detail = 
context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0]", "= sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem)", "baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender'] = gender baseItem['brand']", "= size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color'] =", "import re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response) category", "gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0", "skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name", "import BaseSpider from scrapy.selector import Selector from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem", "Request from gorden_crawler.utils.item_field_handler import handle_price import re import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self,", "= sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in color_price_blocks: color_name", "color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = [] for key in image_items:", "product_detail['sizes'] size_infos = {} size_values = [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] =", "imageItem['thumbnail'] = 
image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0])", "baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield", "= color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = [] for key in image_items: imageItems", "in image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = []", "= skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus'] =", "color_name colorItem['images'] = images yield colorItem sizes = image_items[key]['sizes'] for size in sizes:", "sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes']", "= sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] =", "Selector from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from scrapy import Request from", "product_type, 'gender' : gender, 'category_url' : category_url}) def parse_item(self, response): baseItem = response.meta['baseItem']", "= color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False 
skus.append(skuItem) baseItem['sizes'] = size_values", "> 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else:", "colorItem['type'] = 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name']", "in imageItems: imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] =", "baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price']", "baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if", "Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id", "image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images yield colorItem sizes = image_items[key]['sizes'] for", "baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link", "skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus'] = skus 
size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if", "product_type baseItem['url'] = url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0]", "sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = sel.xpath('//div[@itemprop=\"description\"]').extract()[0]", "if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']]", "if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] =", "\"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url", "color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]]", "handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){", "class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response) category = response.meta['category'] product_type =", "parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response, 
baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\",", "utf-8 -*- from gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import Selector from gorden_crawler.items import", "if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items =", "color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] =", "= [] for image_key in imageItems: imageItem = ImageItem() image = imageItems[image_key] imageItem['thumbnail']", "from scrapy.selector import Selector from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from scrapy", ": category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url}) def parse_item(self,", "from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler", "BaseItem, ImageItem, Color, SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price import", "product_type = response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis = sel.xpath('//li[contains(@class, \"hproduct", "product_detail['colors'] color_names = [] for key in image_items: imageItems = image_items[key]['images'] color_name =", "skuItem['size'] = size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): #", "len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors']", 
"= ImageItem() image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem))", "product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){ return productDetail; }", "skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price if", "= 'sku' skuItem['from_site'] = self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id'] =", "sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] =", "baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0:", "= product_detail['sizes'] size_infos = {} size_values = [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']]", "= execjs.compile(''' %s function get_product_detail(){ return productDetail; } ''' % (product_detail_str)) product_detail =", "= sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos =", "color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']=", "images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name", "sizes: size_name = size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name", "= imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom'] 
tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images,", "= {} size_values = [] for size_id in size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id)", "in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem = BaseItem()", "color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) >", "images yield colorItem sizes = image_items[key]['sizes'] for size in sizes: size_name = size_infos[size]", "Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] !=", "color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock']", "baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos = {} size_values", "> 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names", "{} for color_price_block in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span", "in sizes: size_name = size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] =", "parse_pages(self, response): sel = Selector(response) category = response.meta['category'] product_type = response.meta['product_type'] gender 
=", "# skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] =", "list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for color_price_block in color_price_blocks:", "[] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos = {}", "in color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if", "= self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category'] = category", "len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0]", "colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id'] = product_id colorItem['from_site'] = self.name colorItem['cover']", "tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color()", "colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images yield colorItem sizes =", "response.body)) if len(product_detail_str)>0: context = execjs.compile(''' %s function get_product_detail(){ return productDetail; } '''", "= size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price']", "-*- from gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import 
Selector from gorden_crawler.items import BaseItem,", "regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]] =", "item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem =", "item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem})", "url = self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type'] = 'base' baseItem['category'] =", ": baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url =", "= 'base' baseItem['category'] = category baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender'] =", "size_js_infos: size_infos[size_js_infos[size_id]['sizeCode']] = size_id size_values.append(size_id) list_price = sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping =", "color_price_blocks: color_name = color_price_block.xpath('./span[@class=\"priceColors\"]/text()').extract() if len(color_name) > 0: regular_price_span = color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span)", "scrapy.selector import Selector from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem from scrapy import", "(product_detail_str)) product_detail = 
context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = []", "SkuItem from scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price import re import execjs", "= self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type,", "from scrapy import Request from gorden_crawler.utils.item_field_handler import handle_price import re import execjs class", "= self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images yield colorItem", "size_values baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit =", "Selector(response) category = response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url']", "size_name = size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site'] = self.name skuItem['color']", "import execjs class ShopbopEastdaneCommon(BaseSpider): def parse_pages(self, response): sel = Selector(response) category = response.meta['category']", "item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type'] =", "= image_items[key]['sizes'] for size in sizes: size_name = size_infos[size] skuItem = SkuItem() skuItem['type']", "and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category'", "skus = [] baseItem['from_site'] = self.name baseItem['show_product_id'] = product_id size_js_infos = product_detail['sizes'] size_infos", "baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def 
handle_parse_item(self, response, baseItem): product_detail_str=\"\".join(re.findall(r\"var\\s+productDetail[^;]+\", response.body)) if", "skuItem['current_price'] = color_price_mapping[colorItem['name']] else: skuItem['current_price'] = skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] =", "= image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] = images yield colorItem sizes = image_items[key]['sizes']", "skuItem['list_price'] skuItem['is_outof_stock'] = False skus.append(skuItem) baseItem['sizes'] = size_values baseItem['colors']= color_names baseItem['skus'] = skus", "response): sel = Selector(response) category = response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender']", "len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = sel.xpath('//div[@itemprop=\"description\"]').extract()[0] baseItem['dimensions'] =", "size in sizes: size_name = size_infos[size] skuItem = SkuItem() skuItem['type'] = 'sku' skuItem['from_site']", "regular_price_span[0] else: color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class=\"salePrice\"]/text()').extract()[0] image_items = product_detail['colors'] color_names = [] for key", "= response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender'] category_url = response.meta['category_url'] item_link_lis =", "= item_link_li.xpath('./div/a/@href').extract()[0] url = self.shopbop_base_url + item_link_uri baseItem = BaseItem() baseItem['type'] = 'base'", "category_url}) def parse_item(self, response): baseItem = response.meta['baseItem'] return self.handle_parse_item(response, baseItem) def handle_parse_item(self, response,", "0: regular_price_span = 
color_price_block.xpath('./span[@class=\"regularPrice\"]/text()').extract() if len(regular_price_span) > 0: color_price_mapping[color_name[0]] = regular_price_span[0] else: color_price_mapping[color_name[0]]", "yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url'", "imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem =", "from gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import Selector from gorden_crawler.items import BaseItem, ImageItem,", "size_fit_container.extract()[0] baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop=\"description\"]').extract()[0]+size_fit+\"</div>\" else: baseItem['desc'] = sel.xpath('//div[@itemprop=\"description\"]').extract()[0] baseItem['dimensions'] = ['size', 'color'] yield", "for tmp_tuple in tmp_images: images.append(tmp_tuple[1]) colorItem = Color() colorItem['type'] = 'color' colorItem['show_product_id'] =", "context = execjs.compile(''' %s function get_product_detail(){ return productDetail; } ''' % (product_detail_str)) product_detail", "item_link_lis = sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri", "= product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name skuItem['list_price'] = list_price if len(color_price_mapping)>0", "self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender'", "= product_id colorItem['from_site'] = self.name colorItem['cover'] = image_items[key]['swatch'] colorItem['name'] = color_name colorItem['images'] =", "list_price if len(color_price_mapping)>0 and color_name in color_price_mapping.keys(): # skuItem['current_price'] = sale_price_span.re(r'\\d+.?\\d*')[0] skuItem['current_price'] =", "function 
get_product_detail(){ return productDetail; } ''' % (product_detail_str)) product_detail = context.call('get_product_detail') sel =", "(category_url[category] != next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' :", "-*- coding: utf-8 -*- from gorden_crawler.spiders.shiji_base import BaseSpider from scrapy.selector import Selector from", "= item_link_li.xpath('.//img/@src').extract()[0] baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class=\"retail-price\"]/text()').extract()[0]) baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' :", "tmp_images = [] for image_key in imageItems: imageItem = ImageItem() image = imageItems[image_key]", "sel = Selector(response) category = response.meta['category'] product_type = response.meta['product_type'] gender = response.meta['gender'] category_url", "= images yield colorItem sizes = image_items[key]['sizes'] for size in sizes: size_name =", "= product_detail['colors'] color_names = [] for key in image_items: imageItems = image_items[key]['images'] color_name", "sel.xpath('//li[contains(@class, \"hproduct product\")]') if len(item_link_lis.extract())>0 : for item_link_li in item_link_lis: item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0]", "imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda", "url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title'] = item_link_li.xpath('.//div[@class=\"title\"]/text()').extract()[0] baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0]", "= size_id size_values.append(size_id) list_price = 
sel.xpath('//div[@id=\"productPrices\"]//meta[@itemprop=\"price\"]/@content').extract()[0] color_price_blocks = sel.xpath('//div[@id=\"productPrices\"]//div[@class=\"priceBlock\"]') color_price_mapping = {} for", "next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender,", "= image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images = [] for image_key in", "baseItem['skus'] = skus size_fit_container = sel.xpath('//div[@id=\"sizeFitContainer\"]') if len(size_fit_container)>0: size_fit = size_fit_container.extract()[0] baseItem['desc'] =", "yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category]", "color_names.append(color_name) images=[] tmp_images = [] for image_key in imageItems: imageItem = ImageItem() image", "image = imageItems[image_key] imageItem['thumbnail'] = image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images =", "product_detail = context.call('get_product_detail') sel = Selector(response) product_id = sel.xpath('//div[@id=\"productId\"]/text()').extract()[0] skus = [] baseItem['from_site']", "baseItem['product_type'] = product_type baseItem['url'] = url baseItem['gender'] = gender baseItem['brand'] = item_link_li.xpath('.//div[@class=\"brand\"]/text()').extract()[0] baseItem['title']", "self.name skuItem['color'] = color_name skuItem['show_product_id'] = product_id skuItem['id'] = key+\"-\"+size skuItem['size'] = size_name", "meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url", "= False skus.append(skuItem) baseItem['sizes'] = size_values 
baseItem['colors']= color_names baseItem['skus'] = skus size_fit_container =", "sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0 and (category_url[category] != next_page_link[0]): url = self.shopbop_base_url + next_page_link[0] yield", "= image['thumbnail'] imageItem['image'] = image['zoom'] tmp_images.append((image['index'], imageItem)) tmp_images = sorted(tmp_images, key=lambda x:x[0]) for", "+ next_page_link[0] yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' :", "= handle_price(item_link_li.xpath('.//span[@class=\"sale-price-low\"]/text()').extract()[0]) yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem}) next_page_link = sel.xpath('//span[@data-at=\"nextPage\"]/@data-next-link').extract() if len(next_page_link)>0", "key in image_items: imageItems = image_items[key]['images'] color_name = image_items[key]['colorName'].strip() color_names.append(color_name) images=[] tmp_images =" ]
[ "regex module # check if date is valid (yyyy-mm-dd) def date_validation(self, date): if", "module # check if date is valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\",", "date is valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True else:", "(yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True else: return False date_validation(\"2022-02-29\")", "import regex module # check if date is valid (yyyy-mm-dd) def date_validation(self, date):", "date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True else: return False date_validation(\"2022-02-29\") # False/True", "# check if date is valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date):", "re # import regex module # check if date is valid (yyyy-mm-dd) def", "# import regex module # check if date is valid (yyyy-mm-dd) def date_validation(self,", "def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True else: return False date_validation(\"2022-02-29\") #", "check if date is valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return", "is valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True else: return", "if date is valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True", "import re # import regex module # check if date is valid (yyyy-mm-dd)", "valid (yyyy-mm-dd) def date_validation(self, date): if re.fullmatch(r\"/^\\d{4}-\\d{2}-\\d{2}$/\", date): return True else: return False" ]
[ "import Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import", "event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, ))", "str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for", "RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result import Result from cfripper.config.logger import get_logger", "{}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0 or len(result.warnings)", "the Lambda function. :param event: { \"stack_template_url\": String } :param context: :return: \"\"\"", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES from", "context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter 'stack_template_url'", "template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case of an ivalid script", "S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case of an ivalid", "Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), )", "raise ValueError(\"Invalid event type: no parameter 'stack_template_url' in request.\") result = Result() s3", "RuleProcessor(*rules) 
processor.process_cf_template(template, config, result) if not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name,", "result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else:", "[x.args[0] for x in result.exceptions], } # Process Rules config = Config( project_name=event.get(\"project\"),", "2018 Skyscanner Ltd Licensed under the Apache License, Version 2.0 (the \"License\"); you", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "def handler(event, context): \"\"\" Main entry point of the Lambda function. :param event:", "from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result import Result from", "{}, stack - {}. {} {} URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules),", "String } :param context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "} # Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"),", "an ivalid script log a warning and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"])))", "and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '',", "config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0: log_results(", "the License for the specific language governing permissions and limitations under the License.", "result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r", ")) def handler(event, context): \"\"\" Main entry point of the Lambda function. :param", "json.dumps(rules), str(warnings), template_url, )) def handler(event, context): \"\"\" Main entry point of the", "stack - {}. {} {} URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings),", "service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event, context): \"\"\" Main entry point", "context): \"\"\" Main entry point of the Lambda function. 
:param event: { \"stack_template_url\":", "\"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0]", "from cfripper.config.logger import get_logger logger = get_logger() def log_results(project_name, service_name, stack_name, rules, _type,", "License for the specific language governing permissions and limitations under the License. \"\"\"", "= Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for:", "Unless required by applicable law or agreed to in writing, software distributed under", "{}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for rule in", ":param event: { \"stack_template_url\": String } :param context: :return: \"\"\" if not event.get(\"stack_template_url\"):", "\"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for x in result.exceptions], } # Process", "Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {};", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan", "License, Version 2.0 (the \"License\"); you may not use this file except in", "\"\"\" Main entry point of the Lambda 
function. :param event: { \"stack_template_url\": String", "service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project - {}, service- {}, stack", "r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for x in result.exceptions],", "str(warnings), template_url, )) def handler(event, context): \"\"\" Main entry point of the Lambda", "Skyscanner Ltd Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "for the specific language governing permissions and limitations under the License. \"\"\" import", "CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0]", "in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for x in result.exceptions], \"warnings\": result.failed_monitored_rules, }", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License", "or len(result.warnings) > 0: log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings,", "\"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format(", "config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "in compliance with the License. You may obtain a copy of the License", "config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name,", "KIND, either express or implied. See the License for the specific language governing", "in writing, software distributed under the License is distributed on an \"AS IS\"", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL:", "or agreed to in writing, software distributed under the License is distributed on", "cfripper.model.result import Result from cfripper.config.logger import get_logger logger = get_logger() def log_results(project_name, service_name,", "In case of an ivalid script log a warning and return early result.add_exception(TypeError(\"Malformated", "entry point of the Lambda function. 
:param event: { \"stack_template_url\": String } :param", "warning and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\":", "{ \"stack_template_url\": String } :param context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid", "config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"],", "processor.process_cf_template(template, config, result) if not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules,", "\"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter 'stack_template_url' in request.\")", "template: # In case of an ivalid script log a warning and return", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "OF ANY KIND, either express or implied. See the License for the specific", "service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {}; {}; {};\".format(", "return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules,", "ALL_RULES from cfripper.model.result import Result from cfripper.config.logger import get_logger logger = get_logger() def", "- {}, service- {}, stack - {}. 
{} {} URL: {}\".format( _type, project_name,", "log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return {", "may not use this file except in compliance with the License. You may", "import RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result import Result from cfripper.config.logger import", "type: no parameter 'stack_template_url' in request.\") result = Result() s3 = S3Adapter() template", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "service- {}, stack - {}. {} {} URL: {}\".format( _type, project_name, service_name, stack_name,", "rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name,", "project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event, context): \"\"\" Main entry", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"),", "project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {}; {};", "{}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for x", "rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, 
event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\":", "config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if", "limitations under the License. \"\"\" import json from cfripper.config.config import Config from cfripper.s3_adapter", "See the License for the specific language governing permissions and limitations under the", "stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project - {}, service- {}, stack -", "= get_logger() def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project -", "of an ivalid script log a warning and return early result.add_exception(TypeError(\"Malformated CF script:", "log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {};", "stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event, context): \"\"\" Main entry point of", "{}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for rule", "logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format(", "= [ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result)", "event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {};", "this file except in compliance with the License. 
You may obtain a copy", "{}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name,", "\"License\"); you may not use this file except in compliance with the License.", "[ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"stack_template_url\": String } :param context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event", "ValueError(\"Invalid event type: no parameter 'stack_template_url' in request.\") result = Result() s3 =", "you may not use this file except in compliance with the License. You", "script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for", "agreed to in writing, software distributed under the License is distributed on an", "{ \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\":", "> 0: log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], )", "in result.exceptions], } # Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"),", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Main entry point of the Lambda function. :param event: { \"stack_template_url\": String }", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "implied. 
See the License for the specific language governing permissions and limitations under", "cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules", "ivalid script log a warning and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return", "request.\") result = Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template:", "Ltd Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "a warning and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\",", "get_logger() def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project - {},", "result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\":", "_type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event, context): \"\"\" Main", "use this file except in compliance with the License. 
You may obtain a", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case of an ivalid script log a", ")) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) >", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "for x in result.exceptions], } # Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"),", "for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for x in result.exceptions], \"warnings\":", "started for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result)", "S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result import Result", "\"\"\" Copyright 2018 Skyscanner Ltd Licensed under the Apache License, Version 2.0 (the", "\"\"\" import json from cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "import json from cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import", "_type, warnings, template_url): logger.info(\"{}: project - {}, service- {}, stack - {}. 
{}", "\"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for x in result.exceptions], }", "result) for rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not", "return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '', \"failed_rules\":", "config.stack_name, )) if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0: log_results( \"Failed monitored", "{} URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event,", "cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result import Result from cfripper.config.logger", "event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]),", "else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0", "rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not result.valid: log_results(", "if not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], )", "config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started", "stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan 
started for: {}; {}; {};\".format( config.project_name,", "\",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for x in", "required by applicable law or agreed to in writing, software distributed under the", "Lambda function. :param event: { \"stack_template_url\": String } :param context: :return: \"\"\" if", "the License. \"\"\" import json from cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter", "from cfripper.rules import ALL_RULES from cfripper.model.result import Result from cfripper.config.logger import get_logger logger", "no parameter 'stack_template_url' in request.\") result = Result() s3 = S3Adapter() template =", "event type: no parameter 'stack_template_url' in request.\") result = Result() s3 = S3Adapter()", "= s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case of an ivalid script log", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "not use this file except in compliance with the License. You may obtain", "cfripper.rules import ALL_RULES from cfripper.model.result import Result from cfripper.config.logger import get_logger logger =", "\"exceptions\": [x.args[0] for x in result.exceptions], } # Process Rules config = Config(", "under the License. 
\"\"\" import json from cfripper.config.config import Config from cfripper.s3_adapter import", "{}).get(\"name\"), rules=ALL_RULES.keys(), event=event.get(\"event\"), template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {}; {}; {};\".format( config.project_name, config.service_name,", "if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter 'stack_template_url' in request.\") result", "log a warning and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\":", "config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor =", "script log a warning and return early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return {", "\"failed_rules\": [], \"exceptions\": [x.args[0] for x in result.exceptions], } # Process Rules config", "not template: # In case of an ivalid script log a warning and", "result = Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: #", "ANY KIND, either express or implied. See the License for the specific language", "if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0: log_results( \"Failed monitored rules\", config.project_name,", "file except in compliance with the License. 
You may obtain a copy of", "'stack_template_url' in request.\") result = Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if", ":return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter 'stack_template_url' in", "for rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not result.valid:", "2.0 (the \"License\"); you may not use this file except in compliance with", "project - {}, service- {}, stack - {}. {} {} URL: {}\".format( _type,", "in request.\") result = Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "{}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0:", "{ \"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for x in result.exceptions],", "r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for x in result.exceptions], \"warnings\": result.failed_monitored_rules,", "the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "result.warnings, event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in", "(the \"License\"); you may not use this file except in compliance with the", "return { \"valid\": \"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for x in", "{}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name,", "json from cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor", "logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0 or", ") return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\":", "License. \"\"\" import json from cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter from", "len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0: log_results( \"Failed monitored rules\", config.project_name, config.service_name,", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "template_url): logger.info(\"{}: project - {}, service- {}, stack - {}. {} {} URL:", "early result.add_exception(TypeError(\"Malformated CF script: {}\".format(event[\"stack_template_url\"]))) return { \"valid\": \"true\", \"reason\": '', \"failed_rules\": [],", "{}. 
{} {} URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, ))", "<gh_stars>0 \"\"\" Copyright 2018 Skyscanner Ltd Licensed under the Apache License, Version 2.0", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not result.valid: log_results( \"Failed", "config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, ))", "case of an ivalid script log a warning and return early result.add_exception(TypeError(\"Malformated CF", "config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0: log_results( \"Failed", "law or agreed to in writing, software distributed under the License is distributed", "config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, ))", ") logger.info(\"Scan started for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules =", "{};\".format( config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES]", "Version 2.0 (the \"License\"); you may not use this file except in compliance", ") logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {};", "\"true\", \"reason\": '', \"failed_rules\": [], \"exceptions\": [x.args[0] for x in result.exceptions], } #", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "handler(event, context): \"\"\" Main entry point of the Lambda function. 
:param event: {", "rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config,", "result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {};", "'', \"failed_rules\": [], \"exceptions\": [x.args[0] for x in result.exceptions], } # Process Rules", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "} :param context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no", "= S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case of an", "not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter 'stack_template_url' in request.\") result =", "either express or implied. See the License for the specific language governing permissions", "rules, _type, warnings, template_url): logger.info(\"{}: project - {}, service- {}, stack - {}.", "logger.info(\"Scan started for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config,", "governing permissions and limitations under the License. \"\"\" import json from cfripper.config.config import", "[], \"exceptions\": [x.args[0] for x in result.exceptions], } # Process Rules config =", "config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor = RuleProcessor(*rules)", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. See the License for the specific language governing permissions and limitations", "and limitations under the License. 
\"\"\" import json from cfripper.config.config import Config from", ")) rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template,", "config.RULES] processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not result.valid: log_results( \"Failed rules\",", "import Result from cfripper.config.logger import get_logger logger = get_logger() def log_results(project_name, service_name, stack_name,", "template_url, )) def handler(event, context): \"\"\" Main entry point of the Lambda function.", "s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case of", "{} {} URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def", "the specific language governing permissions and limitations under the License. \"\"\" import json", "import get_logger logger = get_logger() def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url):", "import S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result import", "Result from cfripper.config.logger import get_logger logger = get_logger() def log_results(project_name, service_name, stack_name, rules,", "{}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name,", "permissions and limitations under the License. \"\"\" import json from cfripper.config.config import Config", "function. :param event: { \"stack_template_url\": String } :param context: :return: \"\"\" if not", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "to in writing, software distributed under the License is distributed on an \"AS", "except in compliance with the License. 
You may obtain a copy of the", "= Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "from cfripper.model.result import Result from cfripper.config.logger import get_logger logger = get_logger() def log_results(project_name,", "config, result) if not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings,", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "len(result.warnings) > 0: log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"],", "event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter 'stack_template_url' in request.\") result = Result()", "logger.info(\"{}: project - {}, service- {}, stack - {}. 
{} {} URL: {}\".format(", "Copyright 2018 Skyscanner Ltd Licensed under the Apache License, Version 2.0 (the \"License\");", "\"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return { \"valid\":", "def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project - {}, service-", "event: { \"stack_template_url\": String } :param context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise", "\"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"]) for r in result.failed_rules]), \"failed_rules\": result.failed_rules, \"exceptions\": [x.args[0] for x", "= RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name,", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "- {}. 
{} {} URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url,", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "result.exceptions], } # Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\", {}).get(\"name\"), rules=ALL_RULES.keys(),", "cfripper.config.logger import get_logger logger = get_logger() def log_results(project_name, service_name, stack_name, rules, _type, warnings,", "x in result.exceptions], } # Process Rules config = Config( project_name=event.get(\"project\"), service_name=event.get(\"serviceName\"), stack_name=event.get(\"stack\",", "URL: {}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event, context):", "# In case of an ivalid script log a warning and return early", "compliance with the License. You may obtain a copy of the License at", "template_url=event.get(\"stack_template_url\"), ) logger.info(\"Scan started for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules", "express or implied. See the License for the specific language governing permissions and", "config.stack_name, )) else: logger.info(\"PASS: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules)", "parameter 'stack_template_url' in request.\") result = Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"])", "of the Lambda function. 
:param event: { \"stack_template_url\": String } :param context: :return:", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "{}\".format( _type, project_name, service_name, stack_name, json.dumps(rules), str(warnings), template_url, )) def handler(event, context): \"\"\"", "applicable law or agreed to in writing, software distributed under the License is", "language governing permissions and limitations under the License. \"\"\" import json from cfripper.config.config", "rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name,", "processor = RuleProcessor(*rules) processor.process_cf_template(template, config, result) if not result.valid: log_results( \"Failed rules\", config.project_name,", ")) if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0: log_results( \"Failed monitored rules\",", "config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name,", "point of the Lambda function. :param event: { \"stack_template_url\": String } :param context:", "specific language governing permissions and limitations under the License. 
\"\"\" import json from", ":param context: :return: \"\"\" if not event.get(\"stack_template_url\"): raise ValueError(\"Invalid event type: no parameter", "from cfripper.config.config import Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor from", "result.warnings, event[\"stack_template_url\"], ) logger.info(\"FAIL: {}; {}; {}\".format( config.project_name, config.service_name, config.stack_name, )) else: logger.info(\"PASS:", "0 or len(result.warnings) > 0: log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project - {}, service- {},", "if not template: # In case of an ivalid script log a warning", "import ALL_RULES from cfripper.model.result import Result from cfripper.config.logger import get_logger logger = get_logger()", "0: log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return", "monitored rules\", config.project_name, config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(),", "Result() s3 = S3Adapter() template = s3.download_template_to_dictionary(event[\"stack_template_url\"]) if not template: # In case", "logger = get_logger() def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}: project", "{}; {}\".format( config.project_name, config.service_name, config.stack_name, )) if len(result.failed_monitored_rules) > 0 or len(result.warnings) >", "warnings, template_url): logger.info(\"{}: project - {}, service- {}, stack - {}. 
{} {}", "Config from cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES", "result) if not result.valid: log_results( \"Failed rules\", config.project_name, config.service_name, config.stack_name, result.failed_rules, result.warnings, event[\"stack_template_url\"],", "get_logger logger = get_logger() def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url): logger.info(\"{}:", "> 0 or len(result.warnings) > 0: log_results( \"Failed monitored rules\", config.project_name, config.service_name, config.stack_name,", "{}, service- {}, stack - {}. {} {} URL: {}\".format( _type, project_name, service_name,", "config.service_name, config.stack_name, result.failed_monitored_rules, result.warnings, event[\"stack_template_url\"], ) return { \"valid\": str(result.valid).lower(), \"reason\": \",\".join([\"{}-{}\".format(r[\"rule\"], r[\"reason\"])", "for: {}; {}; {};\".format( config.project_name, config.service_name, config.stack_name, )) rules = [ALL_RULES.get(rule)(config, result) for", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software", "cfripper.s3_adapter import S3Adapter from cfripper.model.rule_processor import RuleProcessor from cfripper.rules import ALL_RULES from cfripper.model.result" ]
[ "for i in arr: if i < pivot: less.append(i) elif i > pivot:", "print(\"start qs\", stdin.readline()) # Ignore first line with number of inputs on it", "main(): print(\"start qs\", stdin.readline()) # Ignore first line with number of inputs on", "question recursion @@ points: 500 for i in arr: if i < pivot:", "question base case @@ description: Find the base case @@ points: 100 return", "sys import stdin @@ begin hide def main(): print(\"start qs\", stdin.readline()) # Ignore", "less.append(i) elif i > pivot: more.append(i) else: pivotList.append(i) @@ end question less =", "base case @@ points: 100 return arr @@ end question else: pivot =", "return less + pivotList + more @@ begin hide if __name__ == '__main__':", "base case @@ description: Find the base case @@ points: 100 return arr", "@@ points: 100 return arr @@ end question else: pivot = arr[0] @@", "pivotList = [] more = [] if len(arr) <= 1: @@ begin question", "stdin.readline() print(array_in) presort = list(map(int, array_in.split(' '))) sort = quickSort(presort) print(sort) @@ end", "arr[0] @@ begin question recursion @@ points: 500 for i in arr: if", "i < pivot: less.append(i) elif i > pivot: more.append(i) else: pivotList.append(i) @@ end", "begin question base case @@ description: Find the base case @@ points: 100", "@@ begin hide def main(): print(\"start qs\", stdin.readline()) # Ignore first line with", "'))) sort = quickSort(presort) print(sort) @@ end hide def quickSort(arr): less = []", "= arr[0] @@ begin question recursion @@ points: 500 for i in arr:", "begin hide def main(): print(\"start qs\", stdin.readline()) # Ignore first line with number", "i in arr: if i < pivot: less.append(i) elif i > pivot: more.append(i)", "qs\", stdin.readline()) # Ignore first line with number of inputs on it array_in", "= [] if len(arr) <= 1: @@ begin question base case @@ description:", "the base case @@ points: 100 return arr @@ end question else: pivot", "pivot: less.append(i) elif i > pivot: more.append(i) 
else: pivotList.append(i) @@ end question less", "print(sort) @@ end hide def quickSort(arr): less = [] pivotList = [] more", "= list(map(int, array_in.split(' '))) sort = quickSort(presort) print(sort) @@ end hide def quickSort(arr):", "@@ description: Find the base case @@ points: 100 return arr @@ end", "[] more = [] if len(arr) <= 1: @@ begin question base case", "@@ end question else: pivot = arr[0] @@ begin question recursion @@ points:", "line with number of inputs on it array_in = stdin.readline() print(array_in) presort =", "of inputs on it array_in = stdin.readline() print(array_in) presort = list(map(int, array_in.split(' ')))", "@@ begin question base case @@ description: Find the base case @@ points:", "quickSort(presort) print(sort) @@ end hide def quickSort(arr): less = [] pivotList = []", "> pivot: more.append(i) else: pivotList.append(i) @@ end question less = quickSort(less) more =", "import stdin @@ begin hide def main(): print(\"start qs\", stdin.readline()) # Ignore first", "more.append(i) else: pivotList.append(i) @@ end question less = quickSort(less) more = quickSort(more) return", "= [] more = [] if len(arr) <= 1: @@ begin question base", "pivot: more.append(i) else: pivotList.append(i) @@ end question less = quickSort(less) more = quickSort(more)", "elif i > pivot: more.append(i) else: pivotList.append(i) @@ end question less = quickSort(less)", "list(map(int, array_in.split(' '))) sort = quickSort(presort) print(sort) @@ end hide def quickSort(arr): less", "[] if len(arr) <= 1: @@ begin question base case @@ description: Find", "1: @@ begin question base case @@ description: Find the base case @@", "@@ points: 500 for i in arr: if i < pivot: less.append(i) elif", "def main(): print(\"start qs\", stdin.readline()) # Ignore first line with number of inputs", "len(arr) <= 1: @@ begin question base case @@ description: Find the base", "question less = quickSort(less) more = quickSort(more) return less + pivotList + more", 
"<reponame>RaphaelArkadyMeyer/LiveCoding<gh_stars>0 from sys import stdin @@ begin hide def main(): print(\"start qs\", stdin.readline())", "number of inputs on it array_in = stdin.readline() print(array_in) presort = list(map(int, array_in.split('", "= stdin.readline() print(array_in) presort = list(map(int, array_in.split(' '))) sort = quickSort(presort) print(sort) @@", "case @@ description: Find the base case @@ points: 100 return arr @@", "else: pivotList.append(i) @@ end question less = quickSort(less) more = quickSort(more) return less", "quickSort(more) return less + pivotList + more @@ begin hide if __name__ ==", "return arr @@ end question else: pivot = arr[0] @@ begin question recursion", "quickSort(arr): less = [] pivotList = [] more = [] if len(arr) <=", "question else: pivot = arr[0] @@ begin question recursion @@ points: 500 for", "stdin @@ begin hide def main(): print(\"start qs\", stdin.readline()) # Ignore first line", "@@ end hide def quickSort(arr): less = [] pivotList = [] more =", "points: 500 for i in arr: if i < pivot: less.append(i) elif i", "Ignore first line with number of inputs on it array_in = stdin.readline() print(array_in)", "case @@ points: 100 return arr @@ end question else: pivot = arr[0]", "hide def quickSort(arr): less = [] pivotList = [] more = [] if", "recursion @@ points: 500 for i in arr: if i < pivot: less.append(i)", "it array_in = stdin.readline() print(array_in) presort = list(map(int, array_in.split(' '))) sort = quickSort(presort)", "Find the base case @@ points: 100 return arr @@ end question else:", "description: Find the base case @@ points: 100 return arr @@ end question", "on it array_in = stdin.readline() print(array_in) presort = list(map(int, array_in.split(' '))) sort =", "pivotList + more @@ begin hide if __name__ == '__main__': main() @@ end", "@@ end question less = quickSort(less) more = quickSort(more) return less + pivotList", "presort = list(map(int, array_in.split(' '))) sort = quickSort(presort) 
print(sort) @@ end hide def", "end hide def quickSort(arr): less = [] pivotList = [] more = []", "arr @@ end question else: pivot = arr[0] @@ begin question recursion @@", "arr: if i < pivot: less.append(i) elif i > pivot: more.append(i) else: pivotList.append(i)", "500 for i in arr: if i < pivot: less.append(i) elif i >", "points: 100 return arr @@ end question else: pivot = arr[0] @@ begin", "# Ignore first line with number of inputs on it array_in = stdin.readline()", "sort = quickSort(presort) print(sort) @@ end hide def quickSort(arr): less = [] pivotList", "first line with number of inputs on it array_in = stdin.readline() print(array_in) presort", "+ pivotList + more @@ begin hide if __name__ == '__main__': main() @@", "pivotList.append(i) @@ end question less = quickSort(less) more = quickSort(more) return less +", "array_in = stdin.readline() print(array_in) presort = list(map(int, array_in.split(' '))) sort = quickSort(presort) print(sort)", "less = quickSort(less) more = quickSort(more) return less + pivotList + more @@", "< pivot: less.append(i) elif i > pivot: more.append(i) else: pivotList.append(i) @@ end question", "hide def main(): print(\"start qs\", stdin.readline()) # Ignore first line with number of", "end question else: pivot = arr[0] @@ begin question recursion @@ points: 500", "less + pivotList + more @@ begin hide if __name__ == '__main__': main()", "inputs on it array_in = stdin.readline() print(array_in) presort = list(map(int, array_in.split(' '))) sort", "with number of inputs on it array_in = stdin.readline() print(array_in) presort = list(map(int,", "+ more @@ begin hide if __name__ == '__main__': main() @@ end hide", "= [] pivotList = [] more = [] if len(arr) <= 1: @@", "= quickSort(less) more = quickSort(more) return less + pivotList + more @@ begin", "more = quickSort(more) return less + pivotList + more @@ begin hide if", "array_in.split(' '))) sort = quickSort(presort) print(sort) @@ end hide def quickSort(arr): less =", "if 
len(arr) <= 1: @@ begin question base case @@ description: Find the", "[] pivotList = [] more = [] if len(arr) <= 1: @@ begin", "if i < pivot: less.append(i) elif i > pivot: more.append(i) else: pivotList.append(i) @@", "quickSort(less) more = quickSort(more) return less + pivotList + more @@ begin hide", "@@ begin question recursion @@ points: 500 for i in arr: if i", "def quickSort(arr): less = [] pivotList = [] more = [] if len(arr)", "in arr: if i < pivot: less.append(i) elif i > pivot: more.append(i) else:", "else: pivot = arr[0] @@ begin question recursion @@ points: 500 for i", "= quickSort(presort) print(sort) @@ end hide def quickSort(arr): less = [] pivotList =", "i > pivot: more.append(i) else: pivotList.append(i) @@ end question less = quickSort(less) more", "less = [] pivotList = [] more = [] if len(arr) <= 1:", "print(array_in) presort = list(map(int, array_in.split(' '))) sort = quickSort(presort) print(sort) @@ end hide", "more = [] if len(arr) <= 1: @@ begin question base case @@", "end question less = quickSort(less) more = quickSort(more) return less + pivotList +", "stdin.readline()) # Ignore first line with number of inputs on it array_in =", "begin question recursion @@ points: 500 for i in arr: if i <", "= quickSort(more) return less + pivotList + more @@ begin hide if __name__", "pivot = arr[0] @@ begin question recursion @@ points: 500 for i in", "<= 1: @@ begin question base case @@ description: Find the base case", "100 return arr @@ end question else: pivot = arr[0] @@ begin question", "from sys import stdin @@ begin hide def main(): print(\"start qs\", stdin.readline()) #" ]
[ "random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\" }", "{{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def", "noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self, noun): if(noun in", "open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions = { \"adjective\" : lambda :", "template = self.actions[\"template\"]() tokens = template.split(\" \") result = \"\" for token in", "\"\" for token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions):", "open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as", "import random import re infl = inflect.engine() class MadLibber(): def make(self): template =", "import inflect import json import random import re infl = inflect.engine() class MadLibber():", "json.load(thinf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" : lambda :", ": random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun)", "\" return result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives =", "Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as", "lambda : random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives", ": r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with 
open(\"./data/prompt/nouns.json\", \"w\") as nf:", "self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as", "open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions = { \"adjective\" : lambda :", ": lambda : random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf:", "def remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf)", "= self.actions[\"template\"]() tokens = template.split(\" \") result = \"\" for token in tokens:", "self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions = {", "json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions = { \"adjective\" :", "open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf)", "def make(self): template = self.actions[\"template\"]() tokens = template.split(\" \") result = \"\" for", ": lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\" : lambda :", "lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()),", "def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self, noun):", "\"thing\" : lambda : random.choice(self.things), \"template\" : lambda : random.choice(self.templates) } class Prompter(MadLibber):", "json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons", "self.actions = 
{ \"adjective\" : lambda : random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns),", "\"w\") as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective) with open(\"./data/prompt/adjectives.json\",", ": random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives =", "make(self): template = self.actions[\"template\"]() tokens = template.split(\" \") result = \"\" for token", "def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf)", "random.choice(self.persons), \"thing\" : lambda : random.choice(self.things), \"template\" : lambda : random.choice(self.templates) } class", "open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf)", ": lambda : random.choice(self.persons), \"thing\" : lambda : random.choice(self.things), \"template\" : lambda :", "adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions =", "self.actions[action[1]]() else: result += action[0] else: result += token result += \" \"", "open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf)", ": lambda : random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\" : lambda :", "as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with", "\"amount\" : lambda : random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda", "nf: json.dump(nf) def addAdjective(self, adjective): 
self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def", "self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions = {", "nf: json.dump(nf) def remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as", "re infl = inflect.engine() class MadLibber(): def make(self): template = self.actions[\"template\"]() tokens =", "token) if(action): if(action[1] in self.actions): result += self.actions[action[1]]() else: result += action[0] else:", "} class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with", "nf: self.nouns = json.load(nf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"noun\"", "as thinf: self.things = json.load(thinf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives),", "\"noun\" : lambda : random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\" } def", "self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as", "self.nouns = json.load(nf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"noun\" :", "return result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf)", "lambda : random.choice(self.persons), \"thing\" : lambda : random.choice(self.things), \"template\" : lambda : random.choice(self.templates)", "__init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts", "\") result = \"\" for token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action):", "result += \" \" return 
result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as", ": random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\"", "self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives):", "import re infl = inflect.engine() class MadLibber(): def make(self): template = self.actions[\"template\"]() tokens", "class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\")", "parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\")", "lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as", "lambda : random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts),", "as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions", "with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective)", "random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\" :", "r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf)", ": lambda : random.choice(self.things), \"template\" : lambda : random.choice(self.templates) } class Prompter(MadLibber): def", "self.nouns): 
self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with", "} def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self,", "\"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda", ": lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\")", "= inflect.engine() class MadLibber(): def make(self): template = self.actions[\"template\"]() tokens = template.split(\" \")", "as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with", "+= \" \" return result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf:", "as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective) with open(\"./data/prompt/adjectives.json\", \"w\")", "noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self,", "MadLibber(): def make(self): template = self.actions[\"template\"]() tokens = template.split(\" \") result = \"\"", "self.actions): result += self.actions[action[1]]() else: result += action[0] else: result += token result", ": infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\"", "infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" :", ": infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\" : lambda : 
random.choice(self.persons), \"thing\"", "\"template\" : lambda : random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as", "result += self.actions[action[1]]() else: result += action[0] else: result += token result +=", "as amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with", "with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions = { \"adjective\" : lambda", "random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf)", "action[0] else: result += token result += \" \" return result.strip() class Complimenter(MadLibber):", "lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\" : lambda : random.choice(self.persons),", "lambda : random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}}", "adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in", "if(action): if(action[1] in self.actions): result += self.actions[action[1]]() else: result += action[0] else: result", "random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with", "inflect.engine() class MadLibber(): def make(self): template = self.actions[\"template\"]() tokens = template.split(\" \") result", "json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf:", "= json.load(thinf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" : lambda", "with 
open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons =", ": lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\" : lambda :", "with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\")", "as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions", "json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things", "result += token result += \" \" return result.strip() class Complimenter(MadLibber): def __init__(self):", "tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions): result += self.actions[action[1]]() else:", "addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self, noun): if(noun", ": random.choice(self.parts), \"person\" : lambda : random.choice(self.persons), \"thing\" : lambda : random.choice(self.things), \"template\"", "+= self.actions[action[1]]() else: result += action[0] else: result += token result += \"", "\" \" return result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives", "as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf)", "random.choice(self.things), \"template\" : lambda : random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\")", "\"person\" : lambda : random.choice(self.persons), \"thing\" : lambda : 
random.choice(self.things), \"template\" : lambda", "{ \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" :", "perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\")", "__init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns", "lambda : random.choice(self.parts), \"person\" : lambda : random.choice(self.persons), \"thing\" : lambda : random.choice(self.things),", "import json import random import re infl = inflect.engine() class MadLibber(): def make(self):", "random.choice(self.parts), \"person\" : lambda : random.choice(self.persons), \"thing\" : lambda : random.choice(self.things), \"template\" :", "= json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions = { \"adjective\"", "with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf) self.actions = { \"adjective\" : lambda", ": random.choice(self.persons), \"thing\" : lambda : random.choice(self.things), \"template\" : lambda : random.choice(self.templates) }", "addAdjective(self, adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective", "self.actions[\"template\"]() tokens = template.split(\" \") result = \"\" for token in tokens: action", "as nf: self.nouns = json.load(nf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives),", "with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts =", "json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions = { \"adjective\" :", "action = 
re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions): result += self.actions[action[1]]() else: result", "json import random import re infl = inflect.engine() class MadLibber(): def make(self): template", "adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as", "{ \"adjective\" : lambda : random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\" :", "+= token result += \" \" return result.strip() class Complimenter(MadLibber): def __init__(self): with", "= json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions = { \"adjective\"", "self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as", "class Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\")", "json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts", "lambda : random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self, noun):", ": random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\"", "result += action[0] else: result += token result += \" \" return result.strip()", "class MadLibber(): def make(self): template = self.actions[\"template\"]() tokens = template.split(\" \") result =", "with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things =", "= re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in 
self.actions): result += self.actions[action[1]]() else: result +=", "def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf:", "open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self, adjective): if(adjective in self.adjectives): self.adjectives.remove(adjective) with", "\"adjective\" : lambda : random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\" : lambda", ": lambda : random.choice(self.parts), \"person\" : lambda : random.choice(self.persons), \"thing\" : lambda :", "in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions): result += self.actions[action[1]]()", "result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\") as adf: self.adjectives = json.load(adf) with", "open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with", "else: result += action[0] else: result += token result += \" \" return", "self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\",", "= json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as perf:", "json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates", "with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns =", "as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with", "= \"\" for 
token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in", "open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf: self.nouns = json.load(nf)", "random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\" :", "for token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions): result", "with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates =", "self.things = json.load(thinf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" :", "infl = inflect.engine() class MadLibber(): def make(self): template = self.actions[\"template\"]() tokens = template.split(\"", "= json.load(nf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"noun\" : lambda", "self.nouns.append(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self, noun): if(noun in self.nouns):", "json.load(nf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"noun\" : lambda :", ": lambda : random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda :", ": lambda : random.choice(self.nouns), \"template\" : lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self,", "open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf)", "tokens = template.split(\" \") result = \"\" for token in tokens: action =", "+= action[0] else: result += token result += \" \" return result.strip() class", "with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def remNoun(self, noun): if(noun in self.nouns): 
self.nouns.remove(noun)", "\"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\" : lambda", "= json.load(parf) with open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf:", "\"template\" : lambda : r\"{{adjective}} {{noun}}\" } def addNoun(self, noun): self.nouns.append(noun) with open(\"./data/prompt/nouns.json\",", "= template.split(\" \") result = \"\" for token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\",", ": random.choice(self.things), \"template\" : lambda : random.choice(self.templates) } class Prompter(MadLibber): def __init__(self): with", "self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\") as", "\"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts), \"an_amount\" : lambda", "inflect import json import random import re infl = inflect.engine() class MadLibber(): def", "open(\"./data/respect/persons.json\") as perf: self.persons = json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf)", ": random.choice(self.amounts), \"an_amount\" : lambda : infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\"", "\"parts\" : lambda : random.choice(self.parts), \"person\" : lambda : random.choice(self.persons), \"thing\" : lambda", "adf: self.adjectives = json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\")", "infl.an(self.actions[\"amount\"]()), \"parts\" : lambda : random.choice(self.parts), \"person\" : lambda : random.choice(self.persons), \"thing\" :", "\"w\") as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with 
open(\"./data/prompt/adjectives.json\", \"w\") as adf:", "amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts = json.load(parf) with open(\"./data/respect/persons.json\")", "in self.actions): result += self.actions[action[1]]() else: result += action[0] else: result += token", "in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective)", "if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def addAdjective(self, adjective):", "with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with open(\"./data/respect/parts.json\") as parf: self.parts =", "template.split(\" \") result = \"\" for token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token)", "result = \"\" for token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1]", "if(action[1] in self.actions): result += self.actions[action[1]]() else: result += action[0] else: result +=", "random import re infl = inflect.engine() class MadLibber(): def make(self): template = self.actions[\"template\"]()", "= { \"adjective\" : lambda : random.choice(self.adjectives), \"noun\" : lambda : random.choice(self.nouns), \"template\"", "thinf: self.things = json.load(thinf) self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\"", "token result += \" \" return result.strip() class Complimenter(MadLibber): def __init__(self): with open(\"./data/respect/adjectives.json\")", "else: result += token result += \" \" return result.strip() class Complimenter(MadLibber): def", ": lambda : random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda :", "json.dump(nf) def addAdjective(self, adjective): self.adjectives.append(adjective) with 
open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self,", "as nf: json.dump(nf) def remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\")", "self.actions = { \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()),", "= json.load(perf) with open(\"./data/respect/templates.json\") as temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf:", "temf: self.templates = json.load(temf) with open(\"./data/respect/things.json\") as thinf: self.things = json.load(thinf) self.actions =", "lambda : random.choice(self.things), \"template\" : lambda : random.choice(self.templates) } class Prompter(MadLibber): def __init__(self):", "re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions): result += self.actions[action[1]]() else: result += action[0]", "lambda : random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\" : lambda : random.choice(self.amounts),", "remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf: json.dump(nf) def", "json.dump(nf) def remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\", \"w\") as nf:", "Prompter(MadLibber): def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as", "token in tokens: action = re.match(\"\\{\\{(.+?)\\}\\}\", token) if(action): if(action[1] in self.actions): result +=", "\"w\") as nf: json.dump(nf) def remNoun(self, noun): if(noun in self.nouns): self.nouns.remove(noun) with open(\"./data/prompt/nouns.json\",", "= json.load(adf) with open(\"./data/respect/amounts.json\") as amf: self.amounts = json.load(amf) with 
open(\"./data/respect/parts.json\") as parf:", "def __init__(self): with open(\"./data/prompt/adjectives.json\") as adf: self.adjectives = json.load(adf) with open(\"./data/prompt/nouns.json\") as nf:", "= { \"adjective\" : lambda : random.choice(self.adjectives), \"an_adjective\" : lambda : infl.an(self.actions[\"adjective\"]()), \"amount\"", "def addAdjective(self, adjective): self.adjectives.append(adjective) with open(\"./data/prompt/adjectives.json\", \"w\") as adf: json.dump(adf) def remAdjective(self, adjective):" ]
[ "i index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range(", "and x < img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel]) else: res", "res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window", "from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return", "res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon =", "- 1][channel] - img[x][y][channel]) else: res = 0 return res def getDetailsRegions(imgs): region_indexes", "= 0 M_G = 0 M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]):", "1][channel] - img[x][y][channel]) else: res = 0 return res def getDetailsRegions(imgs): region_indexes =", "dtype=float32) for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x, y,", "M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R])", "jit from numpy import float32 from tqdm import tqdm from utils import (", "return cv.merge((b, g, r)) def compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i])", "sigma_x)) + (((y - y_c) ** 2) / (2 * sigma_y))) ) @jit", "= get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i in range(len(imgs)): M.append([]) for", "for j in range(window[1][0], window[1][1]): # for i in range(res.shape[0]): # for j", "map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] += add", "x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y))) M_R", "= 0.0 for i in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den", "get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return v / 255 @jit def", "tqdm 
from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v):", "sigma_x = 100 sigma_y = 100 return exp( -((((x - x_c) ** 2)", "centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit(", "res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i", "region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in", "10) M = [] for i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])):", "M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx", "and abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x, y, x_c, y_c) ->", "channel, x, y): res = 0 if x + 1 < img.shape[0] and", "res = 0 if y - 1 > 0 and x < img.shape[0]:", "-> float: sigma_x = 100 sigma_y = 100 return exp( -((((x - x_c)", "range(res.shape[1]): window = get_window(x, y, channel, 5) # WINDOW VERSION for i in", ") add *= channel[x][y] res[x][y] += add return res def blend(img, regions_indexes): centers_indexes", "y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2,", "): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon", "channel, 5) # WINDOW VERSION for i in range(window[0][0], window[0][1]): for j in", "exp_g(x, y, x_c, y_c) -> float: sigma_x = 100 sigma_y = 100 return", "regions_indexes), regions_indexes) res = res / np.amax(res) res = 255 * res return", "center_indexes[i][1]) den *= center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center):", "x < img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel]) else: res =", "range(len(imgs)): M.append([]) for j in 
tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R", "img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel]) else: res = 0 return", "x, y): res = 0 if x + 1 < img.shape[0] and y", "for y in range(res.shape[1]): window = get_window(x, y, channel, 5) # WINDOW VERSION", ") proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result()", "for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx]", "compute_channel, g, regions_indexes, centers_indexes, pixel_region_center ) proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes,", "add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i,", "res @jit def deltaIy(img, channel, x, y): res = 0 if y -", "for i in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): # for i", "associate_index_to_centers, get_window, ) @jit def P(v): return v / 255 @jit def deltaIx(img,", "[] for i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B = 0", "center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y", "y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y))) M_R +=", "iterate over each region max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] =", "(2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num", "r = cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes,", "def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx", "y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c) den = 
0.0 for", "img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel]) else: res = 0 return", "res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g,", "as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 =", "np from concurrent.futures import ProcessPoolExecutor from numba import jit from numpy import float32", "1 > 0 and x < img.shape[0]: res = abs(img[x][y - 1][channel] -", "for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0,", "region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3): for", "i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0],", "range(3): for r_indx in tqdm(range(M.shape[1])): # iterate over each region max_r = {}", "abs(img[x][y - 1][channel] - img[x][y][channel]) else: res = 0 return res def getDetailsRegions(imgs):", "np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3):", "range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg,", "= np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx in tqdm(range(M.shape[1])): # iterate over", "if y - 1 > 0 and x < img.shape[0]: res = abs(img[x][y", "2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes):", "** 2) / (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c,", "max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)]", "in range(len(imgs)): imgs[i] = 
np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M,", "abs(img[x + 1][y][channel] - img[x][y][channel]) else: res = 0 return res @jit def", "tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R = 0 for x in", "deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M,", "M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res", "get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor() as", "import ProcessPoolExecutor from numba import jit from numpy import float32 from tqdm import", "+ 1][y][channel] - img[x][y][channel]) else: res = 0 return res @jit def deltaIy(img,", "b = proc1.result() g = proc2.result() r = proc3.result() return cv.merge((b, g, r))", "range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num /", "/ (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes):", "blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r =", "getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res / np.amax(res) res", "*= channel[x][y] res[x][y] += add return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes)", "= [] for i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B =", "map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y]", "i in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): # for i in", 
"= np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window =", "y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y]", "= imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2", "region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] += add return res def blend(img,", "M_G = 0 M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y", "res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res / np.amax(res) res =", "center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes, center_indexes,", "j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R = 0 for", "j in range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x,", "centers_indexes, pixel_region_center ) b = proc1.result() g = proc2.result() r = proc3.result() return", "window[1][1]): # for i in range(res.shape[0]): # for j in range(res.shape[1]): add =", "math import exp import cv2 as cv import numpy as np from concurrent.futures", "= exp_g(x, y, x_c, y_c) den = 0.0 for i in range(center_indexes.shape[0]): den", "5) # WINDOW VERSION for i in range(window[0][0], window[0][1]): for j in range(window[1][0],", "add *= channel[x][y] res[x][y] += add return res def blend(img, regions_indexes): centers_indexes =", "x_c, y_c) -> float: sigma_x = 100 sigma_y = 100 return exp( -((((x", "x, y): res = 0 if y - 1 > 0 and x", "res = abs(img[x][y - 1][channel] - img[x][y][channel]) else: res = 0 return res", "= proc3.result() return cv.merge((b, g, r)) def compute(imgs): for i in range(len(imgs)): imgs[i]", "def deltaIx(img, channel, x, y): res = 0 if x + 1 <", "region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3): for 
r_indx in tqdm(range(M.shape[1])): #", "y_c) -> float: sigma_x = 100 sigma_y = 100 return exp( -((((x -", "y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] +=", "exp import cv2 as cv import numpy as np from concurrent.futures import ProcessPoolExecutor", "deltaIy(img, channel, x, y): res = 0 if y - 1 > 0", "x_c, y_c) den = 0.0 for i in range(center_indexes.shape[0]): den += exp_g(x, y,", "exp_g(x, y, x_c, y_c) den = 0.0 for i in range(center_indexes.shape[0]): den +=", "1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y)))", "<= epsilon and abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x, y, x_c,", "= 0 if y - 1 > 0 and x < img.shape[0]: res", "pixel_region_center ) b = proc1.result() g = proc2.result() r = proc3.result() return cv.merge((b,", "/ den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape,", "= 0 return res @jit def deltaIy(img, channel, x, y): res = 0", "in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y)))", "v / 255 @jit def deltaIx(img, channel, x, y): res = 0 if", "= np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y in", "epsilon = 2 return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c)", "exp( -((((x - x_c) ** 2) / (2 * sigma_x)) + (((y -", "in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx]", "y_c) ** 2) / (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c,", "gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c) den", "def compute_channel(channel, region_indexes, center_indexes, 
map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for", "): add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0],", "(2 * sigma_x)) + (((y - y_c) ** 2) / (2 * sigma_y)))", "x, y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i],", "- y_c) <= epsilon @jit def exp_g(x, y, x_c, y_c) -> float: sigma_x", "r_indx in tqdm(range(M.shape[1])): # iterate over each region max_r = {} for i", "as np from concurrent.futures import ProcessPoolExecutor from numba import jit from numpy import", "range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y))) M_G", "map_px_center[(x, y)][1], ): add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1],", "numpy as np from concurrent.futures import ProcessPoolExecutor from numba import jit from numpy", "M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]):", "y_c): epsilon = 2 return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg -", "y, x_c, y_c) -> float: sigma_x = 100 sigma_y = 100 return exp(", "0 if y - 1 > 0 and x < img.shape[0]: res =", "* sigma_x)) + (((y - y_c) ** 2) / (2 * sigma_y))) )", "excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g = proc2.result()", "for i in range(res.shape[0]): # for j in range(res.shape[1]): add = 0 if", "range(res.shape[0]): # for j in range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0],", "P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x,", "in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]):", "joinBestRegions(imgs, M, 
region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx in", "= 0 if x + 1 < img.shape[0] and y < img.shape[1]: res", "M_B = 0 M_G = 0 M_R = 0 for x in range(region_indexes[j][0][0],", "index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0],", "np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx in tqdm(range(M.shape[1])): # iterate over each", "P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M),", "map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for", "= proc2.result() r = proc3.result() return cv.merge((b, g, r)) def compute(imgs): for i", "y)][1], ): add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i,", "j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1 add *=", "U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return abs(x_c_reg - x_c) <= epsilon", "b, g, r = cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel,", "region max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image =", "= 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1],", "numba import jit from numpy import float32 from tqdm import tqdm from utils", "in range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0],", "= get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor()", "excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center ) proc3 = 
excecutor.submit( compute_channel, r, regions_indexes,", "/ (2 * sigma_x)) + (((y - y_c) ** 2) / (2 *", "get_window(x, y, channel, 5) # WINDOW VERSION for i in range(window[0][0], window[0][1]): for", "associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 =", "M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for", "1][y][channel] - img[x][y][channel]) else: res = 0 return res @jit def deltaIy(img, channel,", "for r_indx in tqdm(range(M.shape[1])): # iterate over each region max_r = {} for", "from concurrent.futures import ProcessPoolExecutor from numba import jit from numpy import float32 from", "g = proc2.result() r = proc3.result() return cv.merge((b, g, r)) def compute(imgs): for", "numpy import float32 from tqdm import tqdm from utils import ( get_region_indexes, get_region_centers,", "imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes)", "import exp import cv2 as cv import numpy as np from concurrent.futures import", "< img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel]) else: res = 0", "0 return res @jit def deltaIy(img, channel, x, y): res = 0 if", "proc3.result() return cv.merge((b, g, r)) def compute(imgs): for i in range(len(imgs)): imgs[i] =", "proc1.result() g = proc2.result() r = proc3.result() return cv.merge((b, g, r)) def compute(imgs):", "i in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return", "cv import numpy as np from concurrent.futures import ProcessPoolExecutor from numba import jit", "np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]):", "- 1 > 0 and x < img.shape[0]: res = abs(img[x][y - 1][channel]", 
"range(window[1][0], window[1][1]): # for i in range(res.shape[0]): # for j in range(res.shape[1]): add", "img.shape[0] and y < img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel]) else:", "def deltaIy(img, channel, x, y): res = 0 if y - 1 >", "map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1 add *= gaussianBlendingFunction(", "r = proc3.result() return cv.merge((b, g, r)) def compute(imgs): for i in range(len(imgs)):", "imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return", "region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c) den = 0.0 for i", "exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num / den def compute_channel(channel,", "range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): # for i in range(res.shape[0]): #", "# WINDOW VERSION for i in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]):", "region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y))) M_G +=", "= 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ):", "j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] += add return res def", "+= P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1,", "*= center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes =", "= proc1.result() g = proc2.result() r = proc3.result() return cv.merge((b, g, r)) def", "P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x,", "y_c_reg, x_c, y_c): epsilon = 2 return abs(x_c_reg - x_c) <= epsilon and", "= 2 return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <=", "res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return 
abs(x_c_reg -", "map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] += add return res", "x + 1 < img.shape[0] and y < img.shape[1]: res = abs(img[x +", "@jit def deltaIx(img, channel, x, y): res = 0 if x + 1", "= 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B", "@jit def exp_g(x, y, x_c, y_c) -> float: sigma_x = 100 sigma_y =", "2 return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon", "# for j in range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i,", "for i in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0]", "pixel_region_center ) proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b =", "1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes,", "j in range(window[1][0], window[1][1]): # for i in range(res.shape[0]): # for j in", "np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x,", "excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes,", "0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B +=", "deltaIx(img, channel, x, y): res = 0 if x + 1 < img.shape[0]", "for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit", "y): res = 0 if y - 1 > 0 and x <", "i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G =", "= i index_image = max_r[max(max_r)] for i in 
range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in", "2) / (2 * sigma_x)) + (((y - y_c) ** 2) / (2", "float32 from tqdm import tqdm from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window,", "@jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return abs(x_c_reg - x_c)", "j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def", "U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1", "compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x", "range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes),", "y < img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel]) else: res =", "res = np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx in tqdm(range(M.shape[1])): # iterate", "/ 255 @jit def deltaIx(img, channel, x, y): res = 0 if x", "in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x,", "= excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center ) proc3 = excecutor.submit( compute_channel, r,", "y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs,", "res = abs(img[x + 1][y][channel] - img[x][y][channel]) else: res = 0 return res", "0 M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0],", "def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return abs(x_c_reg - x_c) <=", "y_c) <= epsilon @jit def exp_g(x, y, x_c, y_c) -> float: sigma_x =", 
"channel, x, y): res = 0 if y - 1 > 0 and", "r)) def compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes =", "over each region max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i", "*= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, )", "+ 1 < img.shape[0] and y < img.shape[1]: res = abs(img[x + 1][y][channel]", "M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R =", "center_indexes): num = exp_g(x, y, x_c, y_c) den = 0.0 for i in", "g, r)) def compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes", "r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g = proc2.result() r =", "def compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs)", "import float32 from tqdm import tqdm from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers,", "return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in", "@jit def P(v): return v / 255 @jit def deltaIx(img, channel, x, y):", "M = [] for i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B", "<= epsilon @jit def exp_g(x, y, x_c, y_c) -> float: sigma_x = 100", "+= exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num / den def", "x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B,", "getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i in range(len(imgs)):", "for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x, y, channel,", "0.0 for i in range(center_indexes.shape[0]): den += exp_g(x, y, 
center_indexes[i][0], center_indexes[i][1]) den *=", "# iterate over each region max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])]", "x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c) den = 0.0", "in range(3): for r_indx in tqdm(range(M.shape[1])): # iterate over each region max_r =", "+= add return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes,", ") @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y,", "= blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res / np.amax(res) res = 255", "den = 0.0 for i in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1])", "region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c,", "for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res =", "import cv2 as cv import numpy as np from concurrent.futures import ProcessPoolExecutor from", "y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res =", "np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res =", "channel[x][y] res[x][y] += add return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center", "from numpy import float32 from tqdm import tqdm from utils import ( get_region_indexes,", "epsilon @jit def exp_g(x, y, x_c, y_c) -> float: sigma_x = 100 sigma_y", "centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with", "channel_indx in range(3): for r_indx 
in tqdm(range(M.shape[1])): # iterate over each region max_r", "num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res =", "get_window, ) @jit def P(v): return v / 255 @jit def deltaIx(img, channel,", "= getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res / np.amax(res)", "return res @jit def deltaIy(img, channel, x, y): res = 0 if y", "add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes,", "0, x, y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y),", "if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add =", "def exp_g(x, y, x_c, y_c) -> float: sigma_x = 100 sigma_y = 100", "x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x, y,", "float: sigma_x = 100 sigma_y = 100 return exp( -((((x - x_c) **", "proc2.result() r = proc3.result() return cv.merge((b, g, r)) def compute(imgs): for i in", "compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes,", ") proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center ) proc3 = excecutor.submit(", "< img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel]) else: res = 0", "import jit from numpy import float32 from tqdm import tqdm from utils import", "cv2 as cv import numpy as np from concurrent.futures import ProcessPoolExecutor from numba", "0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add", "with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center )", "- x_c) <= epsilon and 
abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x,", "else: res = 0 return res @jit def deltaIy(img, channel, x, y): res", "deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x,", "center_indexes, ) add *= channel[x][y] res[x][y] += add return res def blend(img, regions_indexes):", "max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j", "M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i],", "+= P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2,", "j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] += add return", "for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i in", "regions_indexes) res = res / np.amax(res) res = 255 * res return res", "y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c) den =", "y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes,", "y in range(res.shape[1]): window = get_window(x, y, channel, 5) # WINDOW VERSION for", "M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i],", "region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res", "= {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for", "region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c, y_c):", "return exp( -((((x - x_c) ** 2) / (2 * sigma_x)) + (((y", "** 2) / (2 * sigma_x)) + (((y - y_c) ** 2) /", "regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, 
regions_indexes), regions_indexes) res = res /", "M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx in tqdm(range(M.shape[1])):", "window = get_window(x, y, channel, 5) # WINDOW VERSION for i in range(window[0][0],", "compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res", "0 return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = []", ") b = proc1.result() g = proc2.result() r = proc3.result() return cv.merge((b, g,", "> 0 and x < img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel])", "- y_c) ** 2) / (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y,", "= get_window(x, y, channel, 5) # WINDOW VERSION for i in range(window[0][0], window[0][1]):", "# for i in range(res.shape[0]): # for j in range(res.shape[1]): add = 0", "num = exp_g(x, y, x_c, y_c) den = 0.0 for i in range(center_indexes.shape[0]):", "epsilon and abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x, y, x_c, y_c)", "y - 1 > 0 and x < img.shape[0]: res = abs(img[x][y -", "in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): # for i in range(res.shape[0]):", "y)][0], map_px_center[(x, y)][1], ): add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x,", "= excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g,", "img[x][y][channel]) else: res = 0 return res @jit def deltaIy(img, channel, x, y):", "window[0][1]): for j in range(window[1][0], window[1][1]): # for i in range(res.shape[0]): # for", "and y < img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel]) else: res", "center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes)", "M[i].append([M_B, M_G, M_R]) 
return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape)", "abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon @jit def", "get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return v / 255 @jit", "0 and x < img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel]) else:", "sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x,", "for channel_indx in range(3): for r_indx in tqdm(range(M.shape[1])): # iterate over each region", "= 100 sigma_y = 100 return exp( -((((x - x_c) ** 2) /", "cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center", "excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit(", "concurrent.futures import ProcessPoolExecutor from numba import jit from numpy import float32 from tqdm", "regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g = proc2.result() r = proc3.result()", "res[x][y] += add return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center =", "for i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G", "range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x,", "for j in range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1],", "region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i in range(len(imgs)): M.append([])", "range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = 
imgs[index_image][i][j][channel_indx] return", "add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1],", "1 < img.shape[0] and y < img.shape[1]: res = abs(img[x + 1][y][channel] -", "range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y),", "return v / 255 @jit def deltaIx(img, channel, x, y): res = 0", "= 0 M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in", "= np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res", "= cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes,", "den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32)", "def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r", "import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return v /", "x_c, y_c): epsilon = 2 return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg", "g, r = cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b,", "map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0],", "map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *=", "in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R = 0 for x", "= associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor() as excecutor: 
proc1", "ProcessPoolExecutor from numba import jit from numpy import float32 from tqdm import tqdm", "100 sigma_y = 100 return exp( -((((x - x_c) ** 2) / (2", "= abs(img[x + 1][y][channel] - img[x][y][channel]) else: res = 0 return res @jit", "y): res = 0 if x + 1 < img.shape[0] and y <", "y, x_c, y_c) den = 0.0 for i in range(center_indexes.shape[0]): den += exp_g(x,", "sigma_y = 100 return exp( -((((x - x_c) ** 2) / (2 *", "return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon @jit", "res = 0 return res @jit def deltaIy(img, channel, x, y): res =", "-((((x - x_c) ** 2) / (2 * sigma_x)) + (((y - y_c)", "in range(res.shape[1]): window = get_window(x, y, channel, 5) # WINDOW VERSION for i", "utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return v", "i in range(res.shape[0]): # for j in range(res.shape[1]): add = 0 if U(", "0 if x + 1 < img.shape[0] and y < img.shape[1]: res =", "(((y - y_c) ** 2) / (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x,", "deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x,", "den *= center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes", "pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor() as excecutor:", "img[x][y][channel]) else: res = 0 return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1],", "y_c) den = 0.0 for i in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0],", "pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center ) proc3 =", "from tqdm import tqdm from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, )", "proc2 = excecutor.submit( compute_channel, g, 
regions_indexes, centers_indexes, pixel_region_center ) proc3 = excecutor.submit( compute_channel,", "g, regions_indexes, centers_indexes, pixel_region_center ) proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center", "x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res", "map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1 add", "return num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res", "< img.shape[0] and y < img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel])", "= 0 return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M =", "0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y)))", "tqdm(range(M.shape[1])): # iterate over each region max_r = {} for i in range(len(imgs)):", "x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0,", "else: res = 0 return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10)", "y, channel, 5) # WINDOW VERSION for i in range(window[0][0], window[0][1]): for j", "x_c) ** 2) / (2 * sigma_x)) + (((y - y_c) ** 2)", "def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i in", "i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] =", "def P(v): return v / 255 @jit def deltaIx(img, channel, x, y): res", "centers_indexes, pixel_region_center ) proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) 
b", "from numba import jit from numpy import float32 from tqdm import tqdm from", "as cv import numpy as np from concurrent.futures import ProcessPoolExecutor from numba import", "blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res / np.amax(res) res = 255 *", "proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel,", "j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1 add *= gaussianBlendingFunction( map_px_center[(x,", "{} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i", "@jit def deltaIy(img, channel, x, y): res = 0 if y - 1", "max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ):", "compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g = proc2.result() r", "2, x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes", "region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i],", "P(v): return v / 255 @jit def deltaIx(img, channel, x, y): res =", "return res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return abs(x_c_reg", "tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x, y, channel, 5) # WINDOW", "regions_indexes, centers_indexes, pixel_region_center ) proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center )", "ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2", "WINDOW VERSION for i in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): #", "in range( region_indexes[r_indx][1][0], 
region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg,", "in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num", "x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def", "b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center", "in range(window[1][0], window[1][1]): # for i in range(res.shape[0]): # for j in range(res.shape[1]):", "gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add", "return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for", "get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i in range(len(imgs)): M.append([]) for j", "range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for", "i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs,", "return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b,", "M, regions_indexes), regions_indexes) res = res / np.amax(res) res = 255 * res", "imgs[0].shape[1], 10) M = [] for i in range(len(imgs)): M.append([]) for j in", "- x_c) ** 2) / (2 * sigma_x)) + (((y - y_c) **", "* sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num =", "- img[x][y][channel]) else: res = 0 return res def getDetailsRegions(imgs): region_indexes = 
get_region_indexes(imgs[0].shape[0],", "each region max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image", "y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x,", "= excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g =", "import tqdm from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def", "centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center ) proc3", "0 M_G = 0 M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for", ") @jit def P(v): return v / 255 @jit def deltaIx(img, channel, x,", "+ (((y - y_c) ** 2) / (2 * sigma_y))) ) @jit def", "x, y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i],", "regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img)", "tqdm import tqdm from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit", "res = 0 return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M", "255 @jit def deltaIx(img, channel, x, y): res = 0 if x +", "in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x, y, channel, 5) #", "100 return exp( -((((x - x_c) ** 2) / (2 * sigma_x)) +", "cv.merge((b, g, r)) def compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M,", "import numpy as np from concurrent.futures import ProcessPoolExecutor from numba import jit from", "VERSION for i in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): # for", "y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 
2, x, y))) M[i].append([M_B, M_G,", "2) / (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes,", "add return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes)", "regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center )", "= abs(img[x][y - 1][channel] - img[x][y][channel]) else: res = 0 return res def", "= max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1]", "proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g", "for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i],", "in range(res.shape[0]): # for j in range(res.shape[1]): add = 0 if U( map_px_center[(i,", "if x + 1 < img.shape[0] and y < img.shape[1]: res = abs(img[x", "1, x, y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y),", "x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x, y, channel, 5)", "def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c)", "+= P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return", "@jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c,", "- img[x][y][channel]) else: res = 0 return res @jit def deltaIy(img, channel, x,", "= 100 return exp( -((((x - x_c) ** 2) / (2 * sigma_x))", "for j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R = 0", "in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 
0", "in tqdm(range(M.shape[1])): # iterate over each region max_r = {} for i in", "abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x, y, x_c, y_c) -> float:", "res = 0 if x + 1 < img.shape[0] and y < img.shape[1]:", "from math import exp import cv2 as cv import numpy as np from", "center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])):", "y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1,", "( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return v / 255", "den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num / den" ]
[ "= \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\"", "right images into a single column with respective target steering angle value in", "return undersampled_data ## returning the undersampled data \"\"\"Function that reset the index and", "training and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training", "as np import csv import matplotlib.pyplot as plt import matplotlib.image as mpimg from", "for value in target_bins: bin_ind = list(np.where(out == value)[0]) ## selecting data points", "base_path_img) \"\"\" creating a model\"\"\" model = model_nvidia_updated() ## Compiling the model using", "the batch images target = [] ## list holding the steering values corresponding", "complete data for training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\"", "value X_right = data.loc[:,'right'] ## The image from the right camera y_right =", "selected data points target_indices.extend(random_indices) ## adding undersampled indices to the list undersampled_indices =", "target_indices = [] ## list holding the undersampled data points total_indices = list(range(len(out)))", "model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu'))", "= 
expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data = expanding_data(data) undersampled_data", "model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001)))", "values in undersampled bins are removed,initialized to the total_indices ### iterating through bins", "model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried undersampling the data,", "for training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using", "= train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and validation with batch size of", "## divide the steering values in 30 eqaully sized bins bins, counts =", "divide the steering values in 30 eqaully sized bins bins, counts = np.unique(out,", "img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target =", "above indices target_indices = [] ## list holding the undersampled data points total_indices", "that reset the index and adds an 
\"ID\" columns to the data frame", "python data generator for producing batches of data of size = batch_size to", "from the selected data points target_indices.extend(random_indices) ## adding undersampled indices to the list", "long almost straight sections, therefore the data has a large number of observations", "batch images = [] ## list holding the batch images target = []", "= y_center + 0.3 ## To steer a bit right add a positive", "returning the undersampled data \"\"\"Function that reset the index and adds an \"ID\"", "## average number of values in bins target_counts = int(np.percentile(counts,75)) ## the count", "takes the data frame and merge the three columns for the ceter, left", "= pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes the data frame and merge", "\"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data", "'target_counts' data points from the selected data points target_indices.extend(random_indices) ## adding undersampled indices", "[] ## list holding the batch images target = [] ## list holding", "the unique bins and number of values in each bin avg_counts = np.mean(counts)", "data of size = batch_size to be used in keras fit_generator function\"\"\" def", "target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target) yield images, target ## returning a", "the undersampled data points total_indices = list(range(len(out))) ## Complete list of indices of", "angle value in the second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ## The", "NVIDIA research paper\"\"\" def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) 
model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))", "after the values in undersampled bins are removed,initialized to the total_indices ### iterating", "from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location", "expanding_data(data): X_center = data.loc[:,'center'] ## The central camera image y_center = data.loc[:,'target'] ##", "undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the data into training and", "of data of size = batch_size to be used in keras fit_generator function\"\"\"", "[] ## list holding the undersampled data points total_indices = list(range(len(out))) ## Complete", "with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data points from the", "adds an \"ID\" columns to the data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data", "images = [] ## list holding the batch images target = [] ##", "towards drving straight. 
The data for such low angle value are undersampled.\"\"\" def", "indices = np.where(counts>avg_counts) ## indices where the counts in the bin is greater", "## list containing the indices remaining after the values in undersampled bins are", "with respective target steering angle value in the second column.\"\"\" def expanding_data(data): X_center", "+ 0.3 ## To steer a bit right add a positive value X_right", "camera image y_center = data.loc[:,'target'] ## Respective value for steering X_left = data.loc[:,'left']", "import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.model_selection import train_test_split from", "sections, therefore the data has a large number of observations having low steering", "a model as given in the NVIDIA research paper\"\"\" def model_nvidia_updated(): model =", "target_bins: bin_ind = list(np.where(out == value)[0]) ## selecting data points in the bin", "\"\"\" creating a model\"\"\" model = model_nvidia_updated() ## Compiling the model using Adam", "train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and validation with batch size of 128", "bins for value in target_bins: bin_ind = list(np.where(out == value)[0]) ## selecting data", "function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting all the IDs #print(ids)", "np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ##", "batch = ids[offset:offset+batch_size] ## selectiing a batch images = [] ## list holding", "undersampled_data = reset_and_add(undersampled_data) ### dividing the data into training and validation sets train_data,", "keras.models import Sequential from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation,", "The function is a python data generator for producing batches of data of", "= 
np.where(counts>avg_counts) ## indices where the counts in the bin is greater than", "training and validation with batch size of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator", "y_center = data.loc[:,'target'] ## Respective value for steering X_left = data.loc[:,'left'] ## The", "list holding the steering values corresponding to thte above list ## creating a", "Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file and images", "== value)[0]) ## selecting data points in the bin being iterated remaining_indices =", "= undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data", "and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and", "of values in bins target_counts = int(np.percentile(counts,75)) ## the count to which the", "(len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs = 3) ## saving the model model.save('model_new.h5')", "value \"\"\" Three data frames for central, left, right camera data, each with", "and adds an \"ID\" columns to the data frame input \"\"\" def reset_and_add(undersampled_data):", "in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting all", "undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The function is a python data", "bin avg_counts = np.mean(counts) ## average number of values in bins target_counts =", "from the data frame return undersampled_data ## returning the undersampled data \"\"\"Function that", "bins having value counts greater than avg_counts and undersampling from the those bins", "left, right camera 
data, each with two columns - image location and target", "= list(range(len(undersampled_data))) return undersampled_data \"\"\" The function is a python data generator for", "target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True)", "center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\"", "into a single column with respective target steering angle value in the second", "#print(ids) num = len(ids) ## length of the data frame #indices = np.arange(len(ids))", "and undersampling from the those bins for value in target_bins: bin_ind = list(np.where(out", "model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs = 3) ## saving", "the data, but results were not satisfactory, so end up using complete data", "mpimg from sklearn.model_selection import train_test_split from keras import regularizers from keras.models import Sequential", "size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs =", "randomly selecting 'target_counts' data points from the selected data points target_indices.extend(random_indices) ## adding", "the second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ## The central camera image", "data for batch_id in batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path =", "= Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) 
model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))", "target_counts = int(np.percentile(counts,75)) ## the count to which the value will be undersampled", "The central camera image y_center = data.loc[:,'target'] ## Respective value for steering X_left", "for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the", "average counts target_bins = bins[indices] ## bins corresponding to the above indices target_indices", "images generated using Udacity Car Simulator in training mode.\"\"\" base_path = \"../data/recording/\" base_path_img", "validation with batch size of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128,", "the ceter, left , right images into a single column with respective target", "the data into training and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data", "## list holding the steering values corresponding to thte above list ## creating", "keras import regularizers from keras.models import Sequential from keras.layers import Dense, Flatten, Lambda,", "removed,initialized to the total_indices ### iterating through bins having value counts greater than", "np.array(target) yield images, target ## returning a batch \"\"\" 
Function that creates a", "yield images, target ## returning a batch \"\"\" Function that creates a model", "as input a data frame and returns a data frame with undersampled data", "images = np.array(images) target = np.array(target) yield images, target ## returning a batch", "pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes the data frame and merge the", "dividing the data into training and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create", "\"\"\" This function takes the data frame and merge the three columns for", "steer a bit left add a negative value \"\"\" Three data frames for", "import regularizers from keras.models import Sequential from keras.layers import Dense, Flatten, Lambda, Cropping2D,", "central, left, right camera data, each with two columns - image location and", "in 30 eqaully sized bins bins, counts = np.unique(out, return_counts=True) ## count the", "= np.array(images) target = np.array(target) yield images, target ## returning a batch \"\"\"", "### dividing the data into training and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42)", "remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts,", "= merged_data.loc[undersampled_indices] ## selecting the data points from the data frame return undersampled_data", "#model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) 
#model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization())", "returns a data frame with undersampled data for some target steering values. The", "frame and merge the three columns for the ceter, left , right images", "the data remaining_indices = total_indices ## list containing the indices remaining after the", "data for such low angle value are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False)", "camera y_right = y_center - 0.3 ## To steer a bit left add", "merged_data \"\"\" The function takes as input a data frame and returns a", "where the counts in the bin is greater than average counts target_bins =", "values. The track in the simulator has long almost straight sections, therefore the", "location and target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data", "\"\"\" The function is a python data generator for producing batches of data", "images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target) yield images, target ## returning", "model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization())", "X_left = data.loc[:,'left'] ## The image from left camera y_left = y_center +", "y_left 
= y_center + 0.3 ## To steer a bit right add a", "corresponding to the above indices target_indices = [] ## list holding the undersampled", "sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and validation with", "number of values in each bin avg_counts = np.mean(counts) ## average number of", "for producing batches of data of size = batch_size to be used in", "of the data remaining_indices = total_indices ## list containing the indices remaining after", "128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs = 3) ##", "of values in each bin avg_counts = np.mean(counts) ## average number of values", "## list holding the undersampled data points total_indices = list(range(len(out))) ## Complete list", "image location and target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True)", "from keras import regularizers from keras.models import Sequential from keras.layers import Dense, Flatten,", "- image location and target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data =", "of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a", "columns to the data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"]", "image from the right camera y_right = y_center - 0.3 ## To steer", "data['ID'].values ## selecting all the IDs #print(ids) num = len(ids) ## length of", "the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points", "bit 
left add a negative value \"\"\" Three data frames for central, left,", "#create data generators for training and validation with batch size of 128 train_generator", "so end up using complete data for training \"\"\"undersampled_data = undersampling(data) undersampled_data =", "concatenating the remaining indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the", "= reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ###", "model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried undersampling", "creates a model as given in the NVIDIA research paper\"\"\" def model_nvidia_updated(): model", "Respective value for steering X_left = data.loc[:,'left'] ## The image from left camera", "csv import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.model_selection import train_test_split", "shuffling the data for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a", "def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in 30 eqaully", "percentile indices = np.where(counts>avg_counts) ## indices where the counts in the bin is", "model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) 
model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten())", "selecting 'target_counts' data points from the selected data points target_indices.extend(random_indices) ## adding undersampled", "Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv", "biased towards drving straight. The data for such low angle value are undersampled.\"\"\"", "in the bin is greater than average counts target_bins = bins[indices] ## bins", "the data frame and merge the three columns for the ceter, left ,", "undersampled data points total_indices = list(range(len(out))) ## Complete list of indices of the", "import numpy as np import csv import matplotlib.pyplot as plt import matplotlib.image as", "= img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target", "list holding the batch images target = [] ## list holding the steering", "size of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating", "function model.compile(loss='mse',optimizer='adam') ## training the model using fit_generator, batch size = 128 model.fit_generator(generator=train_generator,", "y_right = y_center - 0.3 ## To steer a bit left add a", "a bit right add a positive value X_right = data.loc[:,'right'] ## The image", "#model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) 
model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001)))", "Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file", "but results were not satisfactory, so end up using complete data for training", "data points in the bin being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ##", "undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled indices undersampled_data =", "data.loc[:,'target'] ## Respective value for steering X_left = data.loc[:,'left'] ## The image from", "BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file and images generated using Udacity Car", "model using fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps", "while True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the data for offset in", "a negative value \"\"\" Three data frames for central, left, right camera data,", "for some target steering values. 
The track in the simulator has long almost", "= pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function takes as input a data", "the value will be undersampled -- 75th percentile indices = np.where(counts>avg_counts) ## indices", "valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model = model_nvidia_updated() ## Compiling", "<gh_stars>0 import pandas as pd import numpy as np import csv import matplotlib.pyplot", "steering angles. Due to this the model may be biased towards drving straight.", "may be biased towards drving straight. The data for such low angle value", "target steering values. The track in the simulator has long almost straight sections,", "img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target) yield images, target ##", "to the data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] =", "for steering X_left = data.loc[:,'left'] ## The image from left camera y_left =", "data, each with two columns - image location and target value for steering\"\"\"", "in the simulator has long almost straight sections, therefore the data has a", "data \"\"\"Function that reset the index and adds an \"ID\" columns to the", "the three columns for the ceter, left , right images into a single", "value in the second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ## The central", "frame #indices = np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling", "import csv import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.model_selection import", "mean squared error as loss function model.compile(loss='mse',optimizer='adam') ## training the model using 
fit_generator,", "= total_indices ## list containing the indices remaining after the values in undersampled", "value will be undersampled -- 75th percentile indices = np.where(counts>avg_counts) ## indices where", "model.compile(loss='mse',optimizer='adam') ## training the model using fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch", "corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points from", "and target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data =", "target_indices.extend(random_indices) ## adding undersampled indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating", "data.loc[:,'left'] ## The image from left camera y_left = y_center + 0.3 ##", "being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices random_indices", "## count the unique bins and number of values in each bin avg_counts", "int(np.percentile(counts,75)) ## the count to which the value will be undersampled -- 75th", "model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu'))", "validation_data 
= train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and validation with batch size", "the those bins for value in target_bins: bin_ind = list(np.where(out == value)[0]) ##", "corresponding to thte above list ## creating a batch of data for batch_id", "the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function takes", "the data frame #indices = np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids)", "of the driving_log.csv file and images generated using Udacity Car Simulator in training", "and number of values in each bin avg_counts = np.mean(counts) ## average number", "which the value will be undersampled -- 75th percentile indices = np.where(counts>avg_counts) ##", "used in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting", "input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\"", "To steer a bit left add a negative value \"\"\" Three data frames", "bins[indices] ## bins corresponding to the above indices target_indices = [] ## list", "total_indices = list(range(len(out))) ## Complete list of indices of the data remaining_indices =", "total_indices ### iterating through bins having value counts greater than avg_counts and undersampling", "target_bins = bins[indices] ## bins corresponding to the above indices target_indices = []", "greater than avg_counts and undersampling from the those bins for value in target_bins:", "= int(np.percentile(counts,75)) ## the count to which the value will be undersampled --", "right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data = 
pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data", "new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target)", "Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file and images generated", "the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled indices", "the total_indices ### iterating through bins having value counts greater than avg_counts and", "frame with undersampled data for some target steering values. The track in the", "and merge the three columns for the ceter, left , right images into", "= pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in 30 eqaully sized bins bins,", "remaining_indices = total_indices ## list containing the indices remaining after the values in", "## adding undersampled indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the", "batch images target = [] ## list holding the steering values corresponding to", "model = model_nvidia_updated() ## Compiling the model using Adam optimizer and mean squared", "two columns - image location and target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True)", "in the drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes", "avg_counts = np.mean(counts) ## average number of values in bins target_counts = int(np.percentile(counts,75))", "reset the index and adds an \"ID\" columns to the data frame input", "## list holding the batch images target = [] ## list holding the", "model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return 
model ### Tried undersampling the data, but results were", "value for steering X_left = data.loc[:,'left'] ## The image from left camera y_left", "add a negative value \"\"\" Three data frames for central, left, right camera", "values corresponding to thte above list ## creating a batch of data for", "bit right add a positive value X_right = data.loc[:,'right'] ## The image from", "as loss function model.compile(loss='mse',optimizer='adam') ## training the model using fit_generator, batch size =", "## returning a batch \"\"\" Function that creates a model as given in", "through bins having value counts greater than avg_counts and undersampling from the those", "the index and adds an \"ID\" columns to the data frame input \"\"\"", "Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization())", "= list(np.where(out == value)[0]) ## selecting data points in the bin being iterated", "unique bins and number of values in each bin avg_counts = np.mean(counts) ##", "that creates a model as given in the NVIDIA research paper\"\"\" def model_nvidia_updated():", "the bin is greater than average counts target_bins = bins[indices] ## bins corresponding", "generators for training and validation with batch size of 128 train_generator = dataGenerator(train_data,", "left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = 
pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True)", "= np.mean(counts) ## average number of values in bins target_counts = int(np.percentile(counts,75)) ##", "the undersampled data \"\"\"Function that reset the index and adds an \"ID\" columns", "base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\")", "negative value \"\"\" Three data frames for central, left, right camera data, each", "steer a bit right add a positive value X_right = data.loc[:,'right'] ## The", "list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points from the selected data points", "remaining indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data points", "error as loss function model.compile(loss='mse',optimizer='adam') ## training the model using fit_generator, batch size", "Adam optimizer and mean squared error as loss function model.compile(loss='mse',optimizer='adam') ## training the", "pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function takes as input a data frame", "the model using Adam optimizer and mean squared error as loss function model.compile(loss='mse',optimizer='adam')", "target = np.array(target) yield images, target ## returning a batch \"\"\" Function that", "base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data =", "data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function takes as", "from keras.models import Sequential from keras.layers import Dense, Flatten, Lambda, 
Cropping2D, Conv2D, MaxPooling2D,", "into training and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for", "fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting all the IDs", "data.loc[:,'center'] ## The central camera image y_center = data.loc[:,'target'] ## Respective value for", "reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The function is", "of data for batch_id in batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path", "avg_counts and undersampling from the those bins for value in target_bins: bin_ind =", "selecting the data points from the data frame return undersampled_data ## returning the", "The image from left camera y_left = y_center + 0.3 ## To steer", "for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a batch images =", "with batch size of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img)", "is greater than average counts target_bins = bins[indices] ## bins corresponding to the", "of the data frame #indices = np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices)", "camera data, each with two columns - image location and target value for", "in bins target_counts = int(np.percentile(counts,75)) ## the count to which the value will", "those bins for value in target_bins: bin_ind = list(np.where(out == value)[0]) ## selecting", "columns - image location and target value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data", "to be used in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = 
data['ID'].values", "## The image from the right camera y_right = y_center - 0.3 ##", "#indices = np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the", "frame return undersampled_data ## returning the undersampled data \"\"\"Function that reset the index", "given in the NVIDIA research paper\"\"\" def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))", "for batch_id in batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img", "dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting all the IDs #print(ids) num =", "count to which the value will be undersampled -- 75th percentile indices =", "The data for such low angle value are undersampled.\"\"\" def undersampling(merged_data): out =", "function takes the data frame and merge the three columns for the ceter,", "second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ## The central camera image y_center", "= list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False))", "undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data points from the data frame return", "bin is greater than average counts target_bins = bins[indices] ## bins corresponding to", "list ## creating a batch of data for batch_id in batch: img_path =", "train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model", "## concatenating the remaining indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting", "model may be biased towards drving straight. 
The data for such low angle", "### using complete data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the", "\"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The", "offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a batch images = []", "## Compiling the model using Adam optimizer and mean squared error as loss", "import train_test_split from keras import regularizers from keras.models import Sequential from keras.layers import", "import Sequential from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout", "X_center = data.loc[:,'center'] ## The central camera image y_center = data.loc[:,'target'] ## Respective", "## The central camera image y_center = data.loc[:,'target'] ## Respective value for steering", "batch_id in batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img +", "data, but results were not satisfactory, so end up using complete data for", "data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes the data frame and", "batch size of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\"", "data points target_indices.extend(random_indices) ## adding undersampled indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices])", "ceter, left , right images into a single column with respective target steering", "batch of data for batch_id in batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1]", "= model_nvidia_updated() ## Compiling the model using Adam optimizer and mean squared error", "producing batches of data of size = batch_size to 
be used in keras", "batch_size to be used in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids =", "\"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data = pd.read_csv(base_path +", "in each bin avg_counts = np.mean(counts) ## average number of values in bins", "-- 75th percentile indices = np.where(counts>avg_counts) ## indices where the counts in the", "## selecting data points in the bin being iterated remaining_indices = list(set(remaining_indices) -", "replace=False)) ## randomly selecting 'target_counts' data points from the selected data points target_indices.extend(random_indices)", "target ## returning a batch \"\"\" Function that creates a model as given", "np.mean(counts) ## average number of values in bins target_counts = int(np.percentile(counts,75)) ## the", "optimizer and mean squared error as loss function model.compile(loss='mse',optimizer='adam') ## training the model", "\"\"\"Reading in the drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function", "data frame with undersampled data for some target steering values. 
The track in", "random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points from the selected", "## bins corresponding to the above indices target_indices = [] ## list holding", "validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and validation", "= data.loc[:,'right'] ## The image from the right camera y_right = y_center -", "data frames for central, left, right camera data, each with two columns -", "train_test_split from keras import regularizers from keras.models import Sequential from keras.layers import Dense,", "left , right images into a single column with respective target steering angle", "single column with respective target steering angle value in the second column.\"\"\" def", "than avg_counts and undersampling from the those bins for value in target_bins: bin_ind", "the data for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a batch", "generator for producing batches of data of size = batch_size to be used", "holding the batch images target = [] ## list holding the steering values", "creating a model\"\"\" model = model_nvidia_updated() ## Compiling the model using Adam optimizer", "frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data", "model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) 
model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001)))", "out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in 30 eqaully sized bins", "bins bins, counts = np.unique(out, return_counts=True) ## count the unique bins and number", "creating a batch of data for batch_id in batch: img_path = data.loc[batch_id,'path'] img_name", "\"\"\"Function that reset the index and adds an \"ID\" columns to the data", "import pandas as pd import numpy as np import csv import matplotlib.pyplot as", "model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))", "## Respective value for steering X_left = data.loc[:,'left'] ## The image from left", "the above indices target_indices = [] ## list holding the undersampled data points", "angles. Due to this the model may be biased towards drving straight. 
The", "data points from the selected data points target_indices.extend(random_indices) ## adding undersampled indices to", "research paper\"\"\" def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization())", "= list(range(len(out))) ## Complete list of indices of the data remaining_indices = total_indices", "= base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target) yield", "\"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This", "the data frame return undersampled_data ## returning the undersampled data \"\"\"Function that reset", "values in bins target_counts = int(np.percentile(counts,75)) ## the count to which the value", "driving_log.csv file and images generated using Udacity Car Simulator in training mode.\"\"\" base_path", "Tried undersampling the data, but results were not satisfactory, so end up using", "import matplotlib.image as mpimg from sklearn.model_selection import train_test_split from keras import regularizers from", "model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) 
#model.add(BatchNormalization()) model.add(Activation('elu'))", "- 0.3 ## To steer a bit left add a negative value \"\"\"", "data has a large number of observations having low steering angles. Due to", "- set(bin_ind)) ## remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly", "the data points from the data frame return undersampled_data ## returning the undersampled", "y_center - 0.3 ## To steer a bit left add a negative value", "This function takes the data frame and merge the three columns for the", "column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ## The central camera image y_center =", "#model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ###", "steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data", "train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators for training and validation with batch", "= ids[offset:offset+batch_size] ## selectiing a batch images = [] ## list holding the", "bins, counts = np.unique(out, return_counts=True) ## count the unique bins and number of", "training mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\"", "Location of the driving_log.csv file and images generated using Udacity Car Simulator in", "undersampled -- 75th percentile indices = np.where(counts>avg_counts) ## indices 
where the counts in", "data frame and merge the three columns for the ceter, left , right", "holding the undersampled data points total_indices = list(range(len(out))) ## Complete list of indices", "### Tried undersampling the data, but results were not satisfactory, so end up", "= 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs = 3)", "steering X_left = data.loc[:,'left'] ## The image from left camera y_left = y_center", "#indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the data for offset in range(0,num,batch_size): batch", "= data.loc[:,'center'] ## The central camera image y_center = data.loc[:,'target'] ## Respective value", "value counts greater than avg_counts and undersampling from the those bins for value", "steering angle value in the second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ##", "\"\"\" Location of the driving_log.csv file and images generated using Udacity Car Simulator", "selectiing a batch images = [] ## list holding the batch images target", "target steering angle value in the second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center']", "iterating through bins having value counts greater than avg_counts and undersampling from the", "and images generated using Udacity Car Simulator in training mode.\"\"\" base_path = \"../data/recording/\"", "not satisfactory, so end up using complete data for training \"\"\"undersampled_data = undersampling(data)", "expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the data into training and validation sets", "list(range(len(out))) ## Complete list of indices of the data remaining_indices = total_indices ##", "pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in 30 eqaully sized bins bins, counts", "generated using Udacity Car Simulator in 
training mode.\"\"\" base_path = \"../data/recording/\" base_path_img =", "has a large number of observations having low steering angles. Due to this", "track in the simulator has long almost straight sections, therefore the data has", "index and adds an \"ID\" columns to the data frame input \"\"\" def", "to the total_indices ### iterating through bins having value counts greater than avg_counts", "simulator has long almost straight sections, therefore the data has a large number", "for training and validation with batch size of 128 train_generator = dataGenerator(train_data, 128,base_path_img)", "data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the data into training", "undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The function is a", "right camera data, each with two columns - image location and target value", "\"\"\" The function takes as input a data frame and returns a data", "The track in the simulator has long almost straight sections, therefore the data", "counts target_bins = bins[indices] ## bins corresponding to the above indices target_indices =", "= list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points from the selected data", "end up using complete data for training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data)", "128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model = model_nvidia_updated() ##", "size = batch_size to be used in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img):", "data frame return undersampled_data ## returning the undersampled data \"\"\"Function that reset the", "def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting all the 
IDs #print(ids) num", "model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model", "Car Simulator in training mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in", "data points total_indices = list(range(len(out))) ## Complete list of indices of the data", "merge the three columns for the ceter, left , right images into a", "satisfactory, so end up using complete data for training \"\"\"undersampled_data = undersampling(data) undersampled_data", "be undersampled -- 75th percentile indices = np.where(counts>avg_counts) ## indices where the counts", "the right camera y_right = y_center - 0.3 ## To steer a bit", "### iterating through bins having value counts greater than avg_counts and undersampling from", "value for steering\"\"\" center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging", "as pd import numpy as np import csv import matplotlib.pyplot as plt import", "indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points from the", "such low angle value are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide", "up using complete data for training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data", "batches of data of 
size = batch_size to be used in keras fit_generator", "low steering angles. Due to this the model may be biased towards drving", "img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target'])", "undersampled data for some target steering values. The track in the simulator has", "= [] ## list holding the steering values corresponding to thte above list", "using Adam optimizer and mean squared error as loss function model.compile(loss='mse',optimizer='adam') ## training", "the model may be biased towards drving straight. The data for such low", "selecting all the IDs #print(ids) num = len(ids) ## length of the data", "count the unique bins and number of values in each bin avg_counts =", "Function that creates a model as given in the NVIDIA research paper\"\"\" def", "base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target) yield images,", "remaining after the values in undersampled bins are removed,initialized to the total_indices ###", "the simulator has long almost straight sections, therefore the data has a large", "return_counts=True) ## count the unique bins and number of values in each bin", "undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in 30 eqaully sized", "a single column with respective target steering angle value in the second column.\"\"\"", "will be undersampled -- 75th percentile indices = np.where(counts>avg_counts) ## indices where the", "np.unique(out, return_counts=True) ## count the unique bins and number of values in each", "bins corresponding to the above indices target_indices = [] ## list holding the", "= [] ## list holding the undersampled data points total_indices = list(range(len(out))) ##", "model ### Tried undersampling 
the data, but results were not satisfactory, so end", "data generators for training and validation with batch size of 128 train_generator =", "points from the data frame return undersampled_data ## returning the undersampled data \"\"\"Function", "= data.loc[:,'left'] ## The image from left camera y_left = y_center + 0.3", "the data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data)))", "therefore the data has a large number of observations having low steering angles.", "merged_data.loc[undersampled_indices] ## selecting the data points from the data frame return undersampled_data ##", "Sequential from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\"", "in undersampled bins are removed,initialized to the total_indices ### iterating through bins having", "angle value are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering", "def expanding_data(data): X_center = data.loc[:,'center'] ## The central camera image y_center = data.loc[:,'target']", "list holding the undersampled data points total_indices = list(range(len(out))) ## Complete list of", "to this the model may be biased towards drving straight. The data for", "data for training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ###", "is a python data generator for producing batches of data of size =", "undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in 30", "having low steering angles. 
Due to this the model may be biased towards", "## remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts'", "## shuffling the data for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing", "this the model may be biased towards drving straight. The data for such", "keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of", "indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with", "dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model = model_nvidia_updated()", "list containing the indices remaining after the values in undersampled bins are removed,initialized", "undersampled bins are removed,initialized to the total_indices ### iterating through bins having value", "[] ## list holding the steering values corresponding to thte above list ##", "## selectiing a batch images = [] ## list holding the batch images", "merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function takes as input a", "in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a batch images = [] ##", "a positive value X_right = data.loc[:,'right'] ## The image from the right camera", "the values in undersampled bins are removed,initialized to the total_indices ### iterating through", "#model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) 
model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1))", "central camera image y_center = data.loc[:,'target'] ## Respective value for steering X_left =", "## the count to which the value will be undersampled -- 75th percentile", "model_nvidia_updated() ## Compiling the model using Adam optimizer and mean squared error as", ", right images into a single column with respective target steering angle value", "75th percentile indices = np.where(counts>avg_counts) ## indices where the counts in the bin", "iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices random_indices =", "steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs = 3) ## saving the", "#model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu'))", "a batch \"\"\" Function that creates a model as given in the NVIDIA", "all the IDs #print(ids) num = len(ids) ## length of the data frame", "undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The function is a python data generator", "from sklearn.model_selection import train_test_split from keras import regularizers from keras.models import Sequential from", "The image from the right 
camera y_right = y_center - 0.3 ## To", "the model using fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator,", "range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a batch images = [] ## list", "np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the data for", "0.3 ## To steer a bit right add a positive value X_right =", "steering values. The track in the simulator has long almost straight sections, therefore", "average number of values in bins target_counts = int(np.percentile(counts,75)) ## the count to", "a large number of observations having low steering angles. Due to this the", "for the ceter, left , right images into a single column with respective", "function takes as input a data frame and returns a data frame with", "batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5)", "Complete list of indices of the data remaining_indices = total_indices ## list containing", "in target_bins: bin_ind = list(np.where(out == value)[0]) ## selecting data points in the", "to which the value will be undersampled -- 75th percentile indices = np.where(counts>avg_counts)", "be used in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ##", "= len(ids) ## length of the data frame #indices = np.arange(len(ids)) np.random.seed(42) while", "results were not satisfactory, so end up using complete data for training \"\"\"undersampled_data", "points target_indices.extend(random_indices) ## adding undersampled indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ##", "model as given in the NVIDIA research paper\"\"\" def model_nvidia_updated(): model = Sequential()", "paper\"\"\" def 
model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu'))", "X_right = data.loc[:,'right'] ## The image from the right camera y_right = y_center", "to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled", "return model ### Tried undersampling the data, but results were not satisfactory, so", "Three data frames for central, left, right camera data, each with two columns", "ids = data['ID'].values ## selecting all the IDs #print(ids) num = len(ids) ##", "straight sections, therefore the data has a large number of observations having low", "## randomly selecting 'target_counts' data points from the selected data points target_indices.extend(random_indices) ##", "takes as input a data frame and returns a data frame with undersampled", "length of the data frame #indices = np.arange(len(ids)) np.random.seed(42) while True: #indices =", "data.loc[:,'right'] ## The image from the right camera y_right = y_center - 0.3", "values in 30 eqaully sized bins bins, counts = np.unique(out, return_counts=True) ## count", "the selected data points target_indices.extend(random_indices) ## adding undersampled indices to the list undersampled_indices", "bins and number of values in each bin avg_counts = np.mean(counts) ## average", "data frame and returns a data frame with undersampled data for some target", "reset_and_add(undersampled_data) ### dividing the data into training and validation sets train_data, validation_data =", "indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the 
data points from", "number of observations having low steering angles. Due to this the model may", "positive value X_right = data.loc[:,'right'] ## The image from the right camera y_right", "a batch of data for batch_id in batch: img_path = data.loc[batch_id,'path'] img_name =", "## Complete list of indices of the data remaining_indices = total_indices ## list", "counts greater than avg_counts and undersampling from the those bins for value in", "= undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The function is a python", "+ img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images) target = np.array(target) yield images, target", "batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs", "mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data", "Simulator in training mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the", "straight. 
The data for such low angle value are undersampled.\"\"\" def undersampling(merged_data): out", "undersampled indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices", "complete data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the data into", "= pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target']", "image from left camera y_left = y_center + 0.3 ## To steer a", "holding the steering values corresponding to thte above list ## creating a batch", "squared error as loss function model.compile(loss='mse',optimizer='adam') ## training the model using fit_generator, batch", "and returns a data frame with undersampled data for some target steering values.", "undersampled data \"\"\"Function that reset the index and adds an \"ID\" columns to", "= np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices]", "of size = batch_size to be used in keras fit_generator function\"\"\" def dataGenerator(data,", "numpy as np import csv import matplotlib.pyplot as plt import matplotlib.image as mpimg", "number of values in bins target_counts = int(np.percentile(counts,75)) ## the count to which", "## length of the data frame #indices = np.arange(len(ids)) np.random.seed(42) while True: #indices", "= y_center - 0.3 ## To steer a bit left add a negative", "np import csv import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.model_selection", "three columns for the ceter, left , right images into a single column", "were not satisfactory, so end up using complete data for training 
\"\"\"undersampled_data =", "using complete data for training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data =", "low angle value are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the", "shuffle(indices) np.random.shuffle(ids) ## shuffling the data for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size]", "len(ids) ## length of the data frame #indices = np.arange(len(ids)) np.random.seed(42) while True:", "data for some target steering values. The track in the simulator has long", "file and images generated using Udacity Car Simulator in training mode.\"\"\" base_path =", "undersampling the data, but results were not satisfactory, so end up using complete", "data for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ## selectiing a batch images", "data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return", "fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1,", "0.3 ## To steer a bit left add a negative value \"\"\" Three", "counts = np.unique(out, return_counts=True) ## count the unique bins and number of values", "an \"ID\" columns to the data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data =", "be biased towards drving straight. 
The data for such low angle value are", "= shuffle(indices) np.random.shuffle(ids) ## shuffling the data for offset in range(0,num,batch_size): batch =", "for central, left, right camera data, each with two columns - image location", "model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu'))", "num = len(ids) ## length of the data frame #indices = np.arange(len(ids)) np.random.seed(42)", "images, target ## returning a batch \"\"\" Function that creates a model as", "large number of observations having low steering angles. 
Due to this the model", "the drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes the", "= [] ## list holding the batch images target = [] ## list", "as plt import matplotlib.image as mpimg from sklearn.model_selection import train_test_split from keras import", "using complete data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the data", "frame and returns a data frame with undersampled data for some target steering", "\"driving_log.csv\") \"\"\" This function takes the data frame and merge the three columns", "respective target steering angle value in the second column.\"\"\" def expanding_data(data): X_center =", "the indices remaining after the values in undersampled bins are removed,initialized to the", "right add a positive value X_right = data.loc[:,'right'] ## The image from the", "a data frame and returns a data frame with undersampled data for some", "Udacity Car Simulator in training mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading", "= dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model =", "of indices of the data remaining_indices = total_indices ## list containing the indices", "Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file and images generated using Udacity", "with two columns - image location and target value for steering\"\"\" center_data =", "the bin being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding", "img_name = img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images = np.array(images)", "a python data generator for producing batches of data of size = batch_size", "undersampled_data \"\"\" The 
function is a python data generator for producing batches of", "= expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing the data into training and validation", "drving_log file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes the data", "training the model using fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1,", "= batch_size to be used in keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids", "the count to which the value will be undersampled -- 75th percentile indices", "## To steer a bit right add a positive value X_right = data.loc[:,'right']", "frames for central, left, right camera data, each with two columns - image", "the data has a large number of observations having low steering angles. Due", "model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu'))", "The function takes as input a data frame and returns a data frame", "return undersampled_data \"\"\" The function is a python data generator for producing batches", "pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The", "from the right camera y_right = y_center - 0.3 ## To steer a", 
"regularizers from keras.models import Sequential from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D,", "each bin avg_counts = np.mean(counts) ## average number of values in bins target_counts", "undersampled_data ## returning the undersampled data \"\"\"Function that reset the index and adds", "left camera y_left = y_center + 0.3 ## To steer a bit right", "input a data frame and returns a data frame with undersampled data for", "= pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data", "np.random.shuffle(ids) ## shuffling the data for offset in range(0,num,batch_size): batch = ids[offset:offset+batch_size] ##", "a model\"\"\" model = model_nvidia_updated() ## Compiling the model using Adam optimizer and", "drving straight. The data for such low angle value are undersampled.\"\"\" def undersampling(merged_data):", "## creating a batch of data for batch_id in batch: img_path = data.loc[batch_id,'path']", "camera y_left = y_center + 0.3 ## To steer a bit right add", "= data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images", "To steer a bit right add a positive value X_right = data.loc[:,'right'] ##", "indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data points from the data frame", "add a positive value X_right = data.loc[:,'right'] ## The image from the right", "as mpimg from sklearn.model_selection import train_test_split from keras import regularizers from keras.models import", "right camera y_right = y_center - 0.3 ## To steer a bit left", "np.where(counts>avg_counts) ## indices where the counts in the bin is greater than average", "+ \"driving_log.csv\") \"\"\" This function takes the data 
frame and merge the three", "y_center + 0.3 ## To steer a bit right add a positive value", "using Udacity Car Simulator in training mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\"", "30 eqaully sized bins bins, counts = np.unique(out, return_counts=True) ## count the unique", "## indices where the counts in the bin is greater than average counts", "\"\"\" Three data frames for central, left, right camera data, each with two", "function is a python data generator for producing batches of data of size", "to thte above list ## creating a batch of data for batch_id in", "thte above list ## creating a batch of data for batch_id in batch:", "the remaining indices with undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data", "pandas as pd import numpy as np import csv import matplotlib.pyplot as plt", "adding undersampled indices to the list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining", "dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model = model_nvidia_updated() ## Compiling the model", "= np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the data", "indices where the counts in the bin is greater than average counts target_bins", "list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ##", "undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data)", "from the those bins for value in target_bins: bin_ind = list(np.where(out == value)[0])", "as given in the NVIDIA research paper\"\"\" def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3)))", 
"pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return", "the NVIDIA research paper\"\"\" def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu'))", "of observations having low steering angles. Due to this the model may be", "model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried undersampling the data, but", "data generator for producing batches of data of size = batch_size to be", "greater than average counts target_bins = bins[indices] ## bins corresponding to the above", "from left camera y_left = y_center + 0.3 ## To steer a bit", "the steering values corresponding to thte above list ## creating a batch of", "steering values corresponding to thte above list ## creating a batch of data", "target = [] ## list holding the steering values corresponding to thte above", "keras fit_generator function\"\"\" def dataGenerator(data, batch_size,base_path_img): ids = data['ID'].values ## selecting all the", "= \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log file\"\"\" data = pd.read_csv(base_path", "data points from the data frame return undersampled_data ## returning the undersampled data", "undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data = expanding_data(data)", "list(range(len(undersampled_data))) return 
undersampled_data \"\"\" The function is a python data generator for producing", "data frame #indices = np.arange(len(ids)) np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids) ##", "value)[0]) ## selecting data points in the bin being iterated remaining_indices = list(set(remaining_indices)", "## The image from left camera y_left = y_center + 0.3 ## To", "columns for the ceter, left , right images into a single column with", "a batch images = [] ## list holding the batch images target =", "each with two columns - image location and target value for steering\"\"\" center_data", "matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.model_selection import train_test_split from keras", "with undersampled data for some target steering values. The track in the simulator", "= np.array(target) yield images, target ## returning a batch \"\"\" Function that creates", "points from the selected data points target_indices.extend(random_indices) ## adding undersampled indices to the", "ids[offset:offset+batch_size] ## selectiing a batch images = [] ## list holding the batch", "np.array(images) target = np.array(target) yield images, target ## returning a batch \"\"\" Function", "= reset_and_add(undersampled_data) ### dividing the data into training and validation sets train_data, validation_data", "indices target_indices = [] ## list holding the undersampled data points total_indices =", "batch \"\"\" Function that creates a model as given in the NVIDIA research", "\"\"\"Merging the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function", "observations having low steering angles. Due to this the model may be biased", "a data frame with undersampled data for some target steering values. 
The track", "Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file and", "model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried undersampling the", "model\"\"\" model = model_nvidia_updated() ## Compiling the model using Adam optimizer and mean", "selecting data points in the bin being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind))", "data into training and validation sets train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42) #create data generators", "import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the", "for such low angle value are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ##", "having value counts greater than avg_counts and undersampling from the those bins for", "merged_data.columns=['path','target'] return merged_data \"\"\" The function takes as input a data frame and", "expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data = expanding_data(data) undersampled_data =", "def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index() undersampled_data[\"ID\"] = list(range(len(undersampled_data))) return undersampled_data \"\"\" The function", "return merged_data \"\"\" The function takes as input a data frame and returns", "and validation with batch size of 128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator =", "model using Adam optimizer and mean squared error as loss function 
model.compile(loss='mse',optimizer='adam') ##", "images target = [] ## list holding the steering values corresponding to thte", "indices of the data remaining_indices = total_indices ## list containing the indices remaining", "has long almost straight sections, therefore the data has a large number of", "Compiling the model using Adam optimizer and mean squared error as loss function", "def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))", "model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried undersampling the data, but results were not", "sklearn.model_selection import train_test_split from keras import regularizers from keras.models import Sequential from keras.layers", "undersampling from the those bins for value in target_bins: bin_ind = list(np.where(out ==", "are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values in", "the steering values in 30 eqaully sized bins bins, counts = np.unique(out, return_counts=True)", "\"ID\" columns to the data frame input \"\"\" def reset_and_add(undersampled_data): undersampled_data = undersampled_data.reset_index()", "images into a single column with respective target steering angle value in the", "model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) 
model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried", "value are undersampled.\"\"\" def undersampling(merged_data): out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values", "points total_indices = list(range(len(out))) ## Complete list of indices of the data remaining_indices", "containing the indices remaining after the values in undersampled bins are removed,initialized to", "set(bin_ind)) ## remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting", "remove the corresponding indices random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data", "image y_center = data.loc[:,'target'] ## Respective value for steering X_left = data.loc[:,'left'] ##", "## To steer a bit left add a negative value \"\"\" Three data", "almost straight sections, therefore the data has a large number of observations having", "eqaully sized bins bins, counts = np.unique(out, return_counts=True) ## count the unique bins", "data remaining_indices = total_indices ## list containing the indices remaining after the values", "indices remaining after the values in undersampled bins are removed,initialized to the total_indices", "are removed,initialized to the total_indices ### iterating through bins having value counts greater", "using fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch = (len(train_data)//128)+1, validation_data=valid_generator, validation_steps =", "some target steering values. 
The track in the simulator has long almost straight", "list undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled indices undersampled_data", "in batch: img_path = data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img + img_name", "bins target_counts = int(np.percentile(counts,75)) ## the count to which the value will be", "total_indices ## list containing the indices remaining after the values in undersampled bins", "batch_size,base_path_img): ids = data['ID'].values ## selecting all the IDs #print(ids) num = len(ids)", "list of indices of the data remaining_indices = total_indices ## list containing the", "sized bins bins, counts = np.unique(out, return_counts=True) ## count the unique bins and", "## selecting all the IDs #print(ids) num = len(ids) ## length of the", "bin_ind = list(np.where(out == value)[0]) ## selecting data points in the bin being", "True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the data for offset in range(0,num,batch_size):", "in the bin being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the", "value in target_bins: bin_ind = list(np.where(out == value)[0]) ## selecting data points in", "IDs #print(ids) num = len(ids) ## length of the data frame #indices =", "bin being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices", "model.add(Activation('elu')) model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Flatten()) model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) 
model.add(Dense(1)) return", "file\"\"\" data = pd.read_csv(base_path + \"driving_log.csv\") \"\"\" This function takes the data frame", "model.add(Dense(1)) return model ### Tried undersampling the data, but results were not satisfactory,", "= (len(train_data)//128)+1, validation_data=valid_generator, validation_steps = (len(validation_data)//128)+1, epochs = 3) ## saving the model", "= pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\"", "plt import matplotlib.image as mpimg from sklearn.model_selection import train_test_split from keras import regularizers", "list(np.where(out == value)[0]) ## selecting data points in the bin being iterated remaining_indices", "bins are removed,initialized to the total_indices ### iterating through bins having value counts", "values in each bin avg_counts = np.mean(counts) ## average number of values in", "data.loc[batch_id,'path'] img_name = img_path.split('\\\\')[-1] new_path = base_path_img + img_name images.append(((mpimg.imread(new_path))/255)-0.5) target.append(data.loc[batch_id,'target']) images =", "frames\"\"\" merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True) merged_data.columns=['path','target'] return merged_data \"\"\" The function takes as input", "left add a negative value \"\"\" Three data frames for central, left, right", "model.add(Activation('elu')) model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001))) model.add(Activation('elu')) model.add(Dense(1)) return model ### Tried undersampling the data, but results", "reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data = expanding_data(data) undersampled_data = reset_and_add(undersampled_data) ### dividing", "## selecting the data points from the data frame return undersampled_data ## returning", "the counts 
in the bin is greater than average counts target_bins = bins[indices]", "column with respective target steering angle value in the second column.\"\"\" def expanding_data(data):", "= bins[indices] ## bins corresponding to the above indices target_indices = [] ##", "= data['ID'].values ## selecting all the IDs #print(ids) num = len(ids) ## length", "the IDs #print(ids) num = len(ids) ## length of the data frame #indices", "= dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\" model = model_nvidia_updated() ## Compiling the", "in training mode.\"\"\" base_path = \"../data/recording/\" base_path_img = \"../data/recording/IMG/\" \"\"\"Reading in the drving_log", "pd.concat([X_center,y_center],axis=1,ignore_index=True) left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True) right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True) \"\"\"Merging the data frames\"\"\" merged_data =", "to the above indices target_indices = [] ## list holding the undersampled data", "and mean squared error as loss function model.compile(loss='mse',optimizer='adam') ## training the model using", "undersampled indices undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data points from the data", "the driving_log.csv file and images generated using Udacity Car Simulator in training mode.\"\"\"", "a bit left add a negative value \"\"\" Three data frames for central,", "\"\"\" Function that creates a model as given in the NVIDIA research paper\"\"\"", "## returning the undersampled data \"\"\"Function that reset the index and adds an", "returning a batch \"\"\" Function that creates a model as given in the", "= np.unique(out, return_counts=True) ## count the unique bins and number of values in", "undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete data undersampled_data =", "pd import numpy as np import csv import 
matplotlib.pyplot as plt import matplotlib.image", "points in the bin being iterated remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove", "steering values in 30 eqaully sized bins bins, counts = np.unique(out, return_counts=True) ##", "than average counts target_bins = bins[indices] ## bins corresponding to the above indices", "= data.loc[:,'target'] ## Respective value for steering X_left = data.loc[:,'left'] ## The image", "MaxPooling2D, Activation, BatchNormalization,Dropout \"\"\" Location of the driving_log.csv file and images generated using", "in the second column.\"\"\" def expanding_data(data): X_center = data.loc[:,'center'] ## The central camera", "np.random.seed(42) while True: #indices = shuffle(indices) np.random.shuffle(ids) ## shuffling the data for offset", "matplotlib.image as mpimg from sklearn.model_selection import train_test_split from keras import regularizers from keras.models", "in the NVIDIA research paper\"\"\" def model_nvidia_updated(): model = Sequential() model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3))) model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001))) #model.add(BatchNormalization())", "## training the model using fit_generator, batch size = 128 model.fit_generator(generator=train_generator, steps_per_epoch =", "loss function model.compile(loss='mse',optimizer='adam') ## training the model using fit_generator, batch size = 128", "128 train_generator = dataGenerator(train_data, 128,base_path_img) valid_generator = dataGenerator(validation_data,128, base_path_img) \"\"\" creating a model\"\"\"", "above list ## creating a batch of data for batch_id in batch: img_path", "training \"\"\"undersampled_data = undersampling(data) undersampled_data = expanding_data(undersampled_data) undersampled_data = reset_and_add(undersampled_data)\"\"\" ### using complete", "counts in the bin is greater than average counts target_bins = bins[indices] ##", "Due to this the 
model may be biased towards drving straight. The data" ]
[ "-> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation", "import re from typing import List, Union, Iterable class NaturalSort: @staticmethod def atoi(text:", "typing import List, Union, Iterable class NaturalSort: @staticmethod def atoi(text: str) -> int:", "natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See", "order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) \"\"\" return [NaturalSort.atoi(c) for c", "-> int: return int(text) if text.isdigit() else text @staticmethod def natural_keys(text: str) ->", "implementation in the comments) \"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod", "Iterable class NaturalSort: @staticmethod def atoi(text: str) -> int: return int(text) if text.isdigit()", "int(text) if text.isdigit() else text @staticmethod def natural_keys(text: str) -> List[Union[str, int]]: \"\"\"", "Union, Iterable class NaturalSort: @staticmethod def atoi(text: str) -> int: return int(text) if", "\"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod def sorted(data: Iterable): return", "str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's", "comments) \"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod def sorted(data: Iterable):", "in the comments) \"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod def", "text @staticmethod def natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human", "List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order 
http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in", "sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) \"\"\" return", "[NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod def sorted(data: Iterable): return sorted(data, key=NaturalSort.natural_keys)", "return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod def sorted(data: Iterable): return sorted(data,", "def natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html", "import List, Union, Iterable class NaturalSort: @staticmethod def atoi(text: str) -> int: return", "@staticmethod def natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order", "@staticmethod def atoi(text: str) -> int: return int(text) if text.isdigit() else text @staticmethod", "text.isdigit() else text @staticmethod def natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts", "from typing import List, Union, Iterable class NaturalSort: @staticmethod def atoi(text: str) ->", "(See Toothy's implementation in the comments) \"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)',", "re from typing import List, Union, Iterable class NaturalSort: @staticmethod def atoi(text: str)", "int]]: \"\"\" alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the", "if text.isdigit() else text @staticmethod def natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys)", "atoi(text: str) -> int: return int(text) if text.isdigit() else text @staticmethod def natural_keys(text:", "def atoi(text: str) -> int: return int(text) if text.isdigit() else text @staticmethod def", "return int(text) if text.isdigit() 
else text @staticmethod def natural_keys(text: str) -> List[Union[str, int]]:", "in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) \"\"\" return [NaturalSort.atoi(c)", "human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) \"\"\" return [NaturalSort.atoi(c) for", "NaturalSort: @staticmethod def atoi(text: str) -> int: return int(text) if text.isdigit() else text", "http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) \"\"\" return [NaturalSort.atoi(c) for c in", "\"\"\" alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments)", "the comments) \"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)] @staticmethod def sorted(data:", "List, Union, Iterable class NaturalSort: @staticmethod def atoi(text: str) -> int: return int(text)", "Toothy's implementation in the comments) \"\"\" return [NaturalSort.atoi(c) for c in re.split(r'(\\d+)', text)]", "class NaturalSort: @staticmethod def atoi(text: str) -> int: return int(text) if text.isdigit() else", "else text @staticmethod def natural_keys(text: str) -> List[Union[str, int]]: \"\"\" alist.sort(key=natural_keys) sorts in", "alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) \"\"\"", "str) -> int: return int(text) if text.isdigit() else text @staticmethod def natural_keys(text: str)", "int: return int(text) if text.isdigit() else text @staticmethod def natural_keys(text: str) -> List[Union[str," ]
[ "filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1,", "create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None,", "TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope)", "_descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None,", "create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8,", ") _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0,", "file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False,", "protocol buffer compiler. DO NOT EDIT! # source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\"", "= _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0,", "extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71,", ") _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor(", "_TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2'", "], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key,", "create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "{ 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # 
@@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) }) _sym_db.RegisterMessage(TokenEnvelope) DESCRIPTOR._options =", "create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody',", "the protocol buffer compiler. DO NOT EDIT! # source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer", "extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type", "_TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope']", "number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,", "reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db =", "index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import descriptor as _descriptor from", "import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = 
_descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto',", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[", "full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "\\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor(", "index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION", "extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False,", "index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ],", "containing_type=None, serialized_options=None, serialized_start=307, 
serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR,", "has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state',", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None,", "}) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2'", "number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,", "containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR,", ": _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,),", "enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, 
serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type", "extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False,", "extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR,", "number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY =", "], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None,", "serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type =", "oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None,", "{ 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope =", "= _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, 
serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3'", "import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state',", "coding: utf-8 -*- # Generated by the protocol buffer compiler. 
DO NOT EDIT!", "= _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12,", "_symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb',", "serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose',", "_descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, )", "_TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version',", "google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf", "default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1,", "buffer code.\"\"\" from google.protobuf import descriptor as _descriptor from google.protobuf import message as", "= _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None,", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type =", "_symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, 
serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01", "= _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ],", "index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ],", "type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),", "has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload',", ") _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE", "from google.protobuf import message as _message from google.protobuf import reflection as _reflection from", "serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None,", "default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1,", "(_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope", "@@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' :", ") _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED',", "TokenBody = 
_reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody)", "serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[", "as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as", "serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None,", "number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None,", "create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1,", "type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody',", "= _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) })", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[],", "= _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) })", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3,", "'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE,", "serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, 
default_value=b\"\",", "_TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY,", "'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope',", "fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None,", "EDIT! # source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import descriptor as", "= _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None,", "_TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) }) _sym_db.RegisterMessage(TokenEnvelope) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)", "default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2,", "full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor(", "name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3,", "], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION", "extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217,", "index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3',", "# @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__'", "full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None,", "containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE,", "create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None,", "values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None,", "serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0,", "\\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = 
_descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR,", "_sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' #", "name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False,", "_TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0,", "filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE',", "DO NOT EDIT! 
# source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import", "_TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), {", "type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169,", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None,", "\\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED',", "syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 
\\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor(", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1,", "fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9,", "_descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None,", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload',", "from google.protobuf import symbol_database as _symbol_database # 
@@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR =", "serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[", "name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14,", "google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf", "number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION =", "\\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[", "create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None,", "# Generated by the 
protocol buffer compiler. DO NOT EDIT! # source: go.chromium.org/luci/buildbucket/proto/token.proto", "_TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0,", "type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307,", "name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION)", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE", "create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1,", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1,", "DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', 
(_message.Message,), { 'DESCRIPTOR'", "= _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' :", "# @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key,", "create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version',", "@@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9,", "oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type =", "full_name='buildbucket.v2.TokenEnvelope', filename=None, 
file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8,", "-*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source:", "type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),", "utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! #", "# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO", "_reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) }) _sym_db.RegisterMessage(TokenEnvelope)", "number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,", "<reponame>xswz8015/infra # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler.", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type", "as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default()", "_sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name='build_id',", "compiler. DO NOT EDIT! # source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf", "filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD',", "_descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None,", "_reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR", "cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ],", "-*- coding: utf-8 -*- # Generated by the protocol buffer compiler. 
DO NOT", "full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1,", "_descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, )", "name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version',", "google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor(", "cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(", "serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\",", 
"(_message.Message,), { 'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) }) _sym_db.RegisterMessage(TokenEnvelope) DESCRIPTOR._options", "message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database", "_descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None,", "\"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import descriptor as _descriptor from google.protobuf import", "file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False,", "index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as", "_TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY", "descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection", "create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 
\\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose',", "], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ],", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor(", "containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0,", "_TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody',", "create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None,", "full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ 
_descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor(", "package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE =", "serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key,", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose',", "index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) 
_TOKENBODY", "DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__'", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2,", "= _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] =", ": _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) }) _sym_db.RegisterMessage(TokenEnvelope) DESCRIPTOR._options = None #", "_descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 
\\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' )", "as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3',", "name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key),", "serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor(", "\\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE = _descriptor.EnumDescriptor( name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key,", ": 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = 
_reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR' :", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY", "create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None,", "from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from", "'__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), { 'DESCRIPTOR'", "'DESCRIPTOR' : _TOKENENVELOPE, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope) }) _sym_db.RegisterMessage(TokenEnvelope) DESCRIPTOR._options = None", "_TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type =", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1,", "go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import descriptor as _descriptor from google.protobuf", "name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, 
label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12,", "serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3',", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[],", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope',", "_TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14,", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None,", "_descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, 
containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1,", "create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import", "index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2',", "], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, serialized_end=372,", "enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION,", "serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', 
full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[", "_reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody)", "has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ],", "cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None,", "= _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody", "from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database #", "_descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection", "], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None,", "serialized_start=217, 
serialized_end=372, ) _TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type", "_descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None,", "_TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE _TOKENBODY_PURPOSE.containing_type = _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody']", "name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key),", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1,", "serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor(", "], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214,", "by the protocol buffer compiler. DO NOT EDIT! 
# source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol", "protocol buffer code.\"\"\" from google.protobuf import descriptor as _descriptor from google.protobuf import message", "DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02", "= _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,),", "name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key),", "file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None,", "index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "_descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ],", "_message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database", "serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor(", "_descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None, type=None,", "extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False,", "default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[],", "nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=217, 
serialized_end=372, )", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1,", "name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02 \\x01(\\x0e\\x32!.buildbucket.v2.TokenBody.Purpose\\x12\\r\\n\\x05state\\x18\\x03 \\x01(\\x0c\\\"-\\n\\x07Purpose\\x12\\x17\\n\\x13PURPOSE_UNSPECIFIED\\x10\\x00\\x12\\t\\n\\x05\\x42UILD\\x10\\x01\\\"\\x9b\\x01\\n\\rTokenEnvelope\\x12\\x36\\n\\x07version\\x18\\x01 \\x01(\\x0e\\x32%.buildbucket.v2.TokenEnvelope.Version\\x12\\x0f\\n\\x07payload\\x18\\x02 \\x01(\\x0c\\\"A\\n\\x07Version\\x12\\x17\\n\\x13VERSION_UNSPECIFIED\\x10\\x00\\x12\\x1d\\n\\x19UNENCRYPTED_PASSWORD_LIKE\\x10\\x01\\x42\\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\\x06proto3' ) _TOKENBODY_PURPOSE", "values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1, serialized_options=None,", "type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),", "label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[", "extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False,", "extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False,", "name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "_descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None,", "], extensions=[ ], nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ],", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TOKENENVELOPE_VERSION, ], serialized_options=None,", "create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214,", "serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None,", "serialized_options=None, serialized_start=307, serialized_end=372, ) _sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION) _TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None,", "_sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='VERSION_UNSPECIFIED', index=0,", "# source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import descriptor as _descriptor", "name='state', full_name='buildbucket.v2.TokenBody.state', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False,", "= _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' :", "Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated", "name='Purpose', full_name='buildbucket.v2.TokenBody.Purpose', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PURPOSE_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key),", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1, number=2,", "name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE)", "_descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None,", "NOT EDIT! 
# source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from google.protobuf import descriptor", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope',", "has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='purpose', full_name='buildbucket.v2.TokenBody.purpose',", "google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports)", "= _TOKENBODY _TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] =", "import message as _message from google.protobuf import reflection as _reflection from google.protobuf import", "serialized_options=None, serialized_start=169, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key,", "code.\"\"\" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message", "_TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR)", "_TOKENBODY_PURPOSE, ], serialized_options=None, 
is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, ) _TOKENENVELOPE =", "_TOKENBODY = _descriptor.Descriptor( name='TokenBody', full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id',", "_descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0, number=1,", "_sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), { 'DESCRIPTOR' : _TOKENBODY, '__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2' #", "number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BUILD', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None,", "nested_types=[], enum_types=[ _TOKENBODY_PURPOSE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=71, serialized_end=214, )", "_TOKENENVELOPE_VERSION _TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) TokenBody =", "number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,", "_TOKENBODY, '__module__' : 
'go.chromium.org.luci.buildbucket.proto.token_pb2' # @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody) }) _sym_db.RegisterMessage(TokenBody) TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), {", "full_name='buildbucket.v2.TokenBody', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0, number=1, type=3, cpp_type=2,", "serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE) _TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor( name='Version', full_name='buildbucket.v2.TokenEnvelope.Version', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor(", ") _TOKENENVELOPE = _descriptor.Descriptor( name='TokenEnvelope', full_name='buildbucket.v2.TokenEnvelope', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='version',", "buffer compiler. DO NOT EDIT! 
# source: go.chromium.org/luci/buildbucket/proto/token.proto \"\"\"Generated protocol buffer code.\"\"\" from", "number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b\"\", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,", "_sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='go.chromium.org/luci/buildbucket/proto/token.proto', package='buildbucket.v2', syntax='proto3', serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb', create_key=_descriptor._internal_create_key, serialized_pb=b'\\n2go.chromium.org/luci/buildbucket/proto/token.proto\\x12\\x0e\\x62uildbucket.v2\\\"\\x8f\\x01\\n\\tTokenBody\\x12\\x10\\n\\x08\\x62uild_id\\x18\\x01 \\x01(\\x03\\x12\\x32\\n\\x07purpose\\x18\\x02" ]
[ "tags # http://www.nltk.org/api/nltk.tag.html # To find the available POS tags: # import nltk.help;", "Print some statistics about the initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts) #", "'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$',", "y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true,", "'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the", "os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse. input_file = args.data_file # Get", "the initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state", "# Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted", "max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test", "a multiclass classifier. Args: args (argparse): arguments. Returns: None. \"\"\" # Directory with", "['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP',", "X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics for the logistic", "'--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as", "import pickle import numpy as np import re from sklearn.metrics import accuracy_score, precision_score,", "tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC', 'CD',", "= args.data_file # Get training data, tokenize and POS tag sentences. # X", "LSTM model. 
save_dir = args.save_dir # Folder to save results. if not os.path.isdir(args.results_dir):", "the overall metrics for the logistic regression classifiers. print('\\n-----> Test results') classifiers_id =", "re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: # all weights X_t_ = X_t else:", "the available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',',", "concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist()", "JJ* into JJ; NN* into NN; NNP* into NNP; RB* into RB. \"\"\")", "...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the whole", "results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id))", "print('\\n-----> Test results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id:", "the LSTM state for each byte in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X,", "for various logistic regression classifiers for different POS tags, as a multiclass classifier.", "= X_t else: # top k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]]", "Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted =", "\"\"\" # Directory with LSTM model. save_dir = args.save_dir # Folder to save", "process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs state_type", "# Folder to save results. 
if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir #", "default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as input to", "tags into VB; JJ* into JJ; NN* into NN; NNP* into NNP; RB*", "hidden_states or cell_states of LSTMs state_type = 'cell_states' # List of concepts to", "Returns: None. \"\"\" # Directory with LSTM model. save_dir = args.save_dir # Folder", "Directory with LSTM model. save_dir = args.save_dir # Folder to save results. if", "If no file is provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results',", "unique_tags, counts = np.unique([y[1] for sublist in Y for y in sublist], return_counts=True)", "for concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept + '_'", "Test results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\\n-", "concepts to the whole set if no grouping is required. unique_tags, counts =", "# X holds the sentences (word1, word2, ...) # Y holds the corresponding", "= 'cell_states' # List of concepts to analyse - Upenn POS tags #", "# Get training data, tokenize and POS tag sentences. # X holds the", "'__main__': \"\"\" Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str,", "type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into", "import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs state_type = 'cell_states' #", "tag sentences. # X holds the sentences (word1, word2, ...) 
# Y holds", "LSTMs state_type = 'cell_states' # List of concepts to analyse - Upenn POS", "int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX, trY, vaY,", "# top k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in", "accuracy for various logistic regression classifiers for different POS tags, as a multiclass", "# Compute the overall metrics for the logistic regression classifiers. print('\\n-----> Test results')", "multiclass classifier. Args: args (argparse): arguments. Returns: None. \"\"\" # Directory with LSTM", "the whole set if no grouping is required. unique_tags, counts = np.unique([y[1] for", "top k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t]", "VB; JJ* into JJ; NN* into NN; NNP* into NNP; RB* into RB.", "# import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT',", "LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as input to the classifier. If", "is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group", "recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': \"\"\" Parse CLI arguments. 
\"\"\" parser", "concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various logistic regression classifiers", "compute_LSTM_states # hidden_states or cell_states of LSTMs state_type = 'cell_states' # List of", "containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as input to the classifier.", "predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred =", "analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find the available POS", "is required. unique_tags, counts = np.unique([y[1] for sublist in Y for y in", "different POS tags, as a multiclass classifier. Args: args (argparse): arguments. Returns: None.", "'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs", "- Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find the available POS tags:", "X holds the sentences (word1, word2, ...) # Y holds the corresponding ((word1,", "[] classes = [] for concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_'", "parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into VB; JJ* into JJ; NN* into", "'top2', 'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs =", "# Computes the LSTM state for each byte in X. X_t, X_t_pos_tags =", "# Data to analyse. input_file = args.data_file # Get training data, tokenize and", "__name__ == '__main__': \"\"\" Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(", "counts) # Computes the LSTM state for each byte in X. 
X_t, X_t_pos_tags", "[] for concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept +", "((word1, tag1), (word2, tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the", "in X_t] trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0])", "+ '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) #", "'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various logistic", "import os import argparse import pickle import numpy as np import re from", "teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest predicted", "np import re from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test,", "X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest predicted probability. concept_classifiers =", "word2, ...) # Y holds the corresponding ((word1, tag1), (word2, tags), ...) X,", "logistic regression classifiers. 
print('\\n-----> Test results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for", "'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept +", "'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for", "concept_classifiers = [] predicted_probs = [] classes = [] for concept in concepts:", "'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\"", "np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes", "= [] predicted_probs = [] classes = [] for concept in concepts: lr_file", "x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: # all weights X_t_ =", "concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept + '_' +", "',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO',", "average='weighted'))) if __name__ == '__main__': \"\"\" Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser(", "\"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB*", "results_dir, 'log_reg_model_' + concept + '_' + classifier_id + '.sav') if not os.path.exists(lr_file):", "# List of concepts to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html #", "(word2, tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to", "None. \"\"\" # Directory with LSTM model. 
save_dir = args.save_dir # Folder to", "axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred)))", "largest predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0)", "for each byte in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute", "type=str, default=None, help=\"\"\"file to use as input to the classifier. If no file", "probability. concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes =", "some statistics about the initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts) # Computes", "# Find the class with largest predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs =", "')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB',", "classifiers. print('\\n-----> Test results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id in", "y in sublist], return_counts=True) if not args.group_tags: global concepts concepts = unique_tags #", "for sublist in Y for y in sublist], return_counts=True) if not args.group_tags: global", "= [] classes = [] for concept in concepts: lr_file = os.path.join( results_dir,", "'_' + classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file,", "holds the corresponding ((word1, tag1), (word2, tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags)", "if __name__ == '__main__': \"\"\" Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter)", "Freiburg. 
<NAME> <<EMAIL>> \"\"\" import os import argparse import pickle import numpy as", "[x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_,", "precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__':", "'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args):", "classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes =", "in Y for y in sublist], return_counts=True) if not args.group_tags: global concepts concepts", "Y) # Compute the overall metrics for the logistic regression classifiers. print('\\n-----> Test", "global concepts concepts = unique_tags # Print some statistics about the initial distribution", "concept + '_' + classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model", "http://www.nltk.org/api/nltk.tag.html # To find the available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts", "trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with", "unique_tags # Print some statistics about the initial distribution of POS tags. print_pos_tag_statistics(unique_tags,", "the accuracy for various logistic regression classifiers for different POS tags, as a", "'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various", "University of Freiburg. 
<NAME> <<EMAIL>> \"\"\" import os import argparse import pickle import", "concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states", "state_type = 'cell_states' # List of concepts to analyse - Upenn POS tags", "sentences (word1, word2, ...) # Y holds the corresponding ((word1, tag1), (word2, tags),", "regression classifiers. print('\\n-----> Test results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id", "various logistic regression classifiers for different POS tags, as a multiclass classifier. Args:", "y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted')))", "vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the", "lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_'", "= [] for concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept", "vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest", "no file is provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory", "Compute the overall metrics for the logistic regression classifiers. 
print('\\n-----> Test results') classifiers_id", "\"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file',", "weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX, vaX,", "in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept + '_' + classifier_id", "whole set if no grouping is required. unique_tags, counts = np.unique([y[1] for sublist", "pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall:", "LSTM state for each byte in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y)", "'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE',", "= np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY,", "to the whole set if no grouping is required. unique_tags, counts = np.unique([y[1]", "X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics for the", "sublist], return_counts=True) if not args.group_tags: global concepts concepts = unique_tags # Print some", "\"\"\" import os import argparse import pickle import numpy as np import re", "sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import", "concepts to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find the", "# http://www.nltk.org/api/nltk.tag.html # To find the available POS tags: # import nltk.help; nltk.help.upenn_tagset()", "and POS tag sentences. # X holds the sentences (word1, word2, ...) 
#", "initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for", "'.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB']", "print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes = [] for concept", "{}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes = [] for concept in", "'log_reg_model_' + concept + '_' + classifier_id + '.sav') if not os.path.exists(lr_file): continue", "args.save_dir # Folder to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir", "continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join(", "Find the class with largest predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs)", "parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use", "if no grouping is required. 
unique_tags, counts = np.unique([y[1] for sublist in Y", "concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept + '_' + classifier_id +", "X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the whole set", "import re from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags", "provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers')", "= re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: # all weights X_t_ = X_t", "= process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the whole set if no", "process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the whole set if no grouping", "Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the whole set if", "pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: # all weights", "POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC',", "= os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_)))", "as a multiclass classifier. Args: args (argparse): arguments. Returns: None. 
\"\"\" # Directory", "[] predicted_probs = [] classes = [] for concept in concepts: lr_file =", "from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or", "= ['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers =", "accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states", "tags, as a multiclass classifier. Args: args (argparse): arguments. Returns: None. \"\"\" #", "recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': \"\"\" Parse CLI arguments.", "for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes", "set if no grouping is required. unique_tags, counts = np.unique([y[1] for sublist in", "coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None:", "tokenize and POS tag sentences. # X holds the sentences (word1, word2, ...)", "= split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest predicted probability.", "os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x", "concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format(", "predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest predicted probability. 
concept_classifiers = np.array(concept_classifiers)", "lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all,", "classifiers for different POS tags, as a multiclass classifier. Args: args (argparse): arguments.", "y_pred, average='weighted'))) if __name__ == '__main__': \"\"\" Parse CLI arguments. \"\"\" parser =", "to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to", "= argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None,", "help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into VB; JJ*", "used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all", "'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various logistic regression", "the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags',", "split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest predicted probability. concept_classifiers", "'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def", "X. 
X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics for", "print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': \"\"\" Parse CLI", "formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to", "k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX,", "'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: # all weights X_t_", "tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the", "= np.unique([y[1] for sublist in Y for y in sublist], return_counts=True) if not", "y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred,", "x in X_t] trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:,", "input to the classifier. If no file is provided, the nltk.corpus.treebank is used", "os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse. input_file = args.data_file #", "not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse. input_file = args.data_file", "X_t else: # top k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for", "with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into VB; JJ* into", "To find the available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(',", "analyse. 
input_file = args.data_file # Get training data, tokenize and POS tag sentences.", "sublist in Y for y in sublist], return_counts=True) if not args.group_tags: global concepts", "classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav')", "os import argparse import pickle import numpy as np import re from sklearn.metrics", "metrics for the logistic regression classifiers. print('\\n-----> Test results') classifiers_id = ['all', 'top1',", "pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test", "Computes the accuracy for various logistic regression classifiers for different POS tags, as", "arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model')", "= pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' +", "results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse. input_file", "is None: # all weights X_t_ = X_t else: # top k weights", "state for each byte in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) #", "np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred", "with largest predicted probability. 
concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs,", "type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as input", "concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs state_type = 'cell_states'", "<<EMAIL>> \"\"\" import os import argparse import pickle import numpy as np import", "# Print some statistics about the initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts)", "= np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: #", "# Set the concepts to the whole set if no grouping is required.", "0]) # Find the class with largest predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs", "corresponding ((word1, tag1), (word2, tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set", "POS tags # http://www.nltk.org/api/nltk.tag.html # To find the available POS tags: # import", "for different POS tags, as a multiclass classifier. Args: args (argparse): arguments. Returns:", "all weights X_t_ = X_t else: # top k weights k = int(x.group('k'))", "# Y holds the corresponding ((word1, tag1), (word2, tags), ...) X, Y =", "np.unique([y[1] for sublist in Y for y in sublist], return_counts=True) if not args.group_tags:", "input_file = args.data_file # Get training data, tokenize and POS tag sentences. #", "(argparse): arguments. Returns: None. \"\"\" # Directory with LSTM model. save_dir = args.save_dir", "help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as input to the", "Args: args (argparse): arguments. Returns: None. \"\"\" # Directory with LSTM model. 
save_dir", "default=None, help=\"\"\"file to use as input to the classifier. If no file is", "# all weights X_t_ = X_t else: # top k weights k =", "Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory", "'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various logistic regression classifiers for", "regression classifiers for different POS tags, as a multiclass classifier. Args: args (argparse):", "print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs state_type = 'cell_states' # List", "Get training data, tokenize and POS tag sentences. # X holds the sentences", "if not args.group_tags: global concepts concepts = unique_tags # Print some statistics about", "print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if", "if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients", "# hidden_states or cell_states of LSTMs state_type = 'cell_states' # List of concepts", "the corresponding ((word1, tag1), (word2, tags), ...) 
X, Y = process_sentence_pos_tags(input_file, args.group_tags) #", "+ '_' + classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model =", "available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.',", "of concepts to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find", "parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str,", "Y for y in sublist], return_counts=True) if not args.group_tags: global concepts concepts =", "\"\"\" Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/',", "teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class", "= unique_tags # Print some statistics about the initial distribution of POS tags.", "precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states #", "classifier. If no file is provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str,", "save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse.", "average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': \"\"\" Parse", "no grouping is required. unique_tags, counts = np.unique([y[1] for sublist in Y for", "of Freiburg. 
<NAME> <<EMAIL>> \"\"\" import os import argparse import pickle import numpy", "{:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': \"\"\" Parse CLI arguments. \"\"\"", "POS tags, as a multiclass classifier. Args: args (argparse): arguments. Returns: None. \"\"\"", "Data to analyse. input_file = args.data_file # Get training data, tokenize and POS", "import numpy as np import re from sklearn.metrics import accuracy_score, precision_score, recall_score from", "= [x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX, trY, vaY, teY =", "of LSTMs state_type = 'cell_states' # List of concepts to analyse - Upenn", "argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file", "import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN',", "concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir,", "grouping is required. unique_tags, counts = np.unique([y[1] for sublist in Y for y", "CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing", "not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all", "use as input to the classifier. If no file is provided, the nltk.corpus.treebank", "with LSTM model. save_dir = args.save_dir # Folder to save results. if not", "required. 
unique_tags, counts = np.unique([y[1] for sublist in Y for y in sublist],", "'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER'])", "= teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted')))", "the sentences (word1, word2, ...) # Y holds the corresponding ((word1, tag1), (word2,", "to analyse. input_file = args.data_file # Get training data, tokenize and POS tag", "+ concept + '_' + classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept)", "logistic regression classifiers for different POS tags, as a multiclass classifier. Args: args", "into VB; JJ* into JJ; NN* into NN; NNP* into NNP; RB* into", "argparse import pickle import numpy as np import re from sklearn.metrics import accuracy_score,", "import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of", "tag1), (word2, tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts", "'cell_states' # List of concepts to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html", "np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is None: # all", "args.group_tags) # Set the concepts to the whole set if no grouping is", "classifier. Args: args (argparse): arguments. Returns: None. \"\"\" # Directory with LSTM model.", "the class with largest predicted probability. 
concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind", "nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true',", "classifier_id) if x is None: # all weights X_t_ = X_t else: #", "['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = []", "action='store_true', help=\"\"\"group all VB* tags into VB; JJ* into JJ; NN* into NN;", "Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find the available POS tags: #", "the classifier. If no file is provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir',", "the logistic regression classifiers. print('\\n-----> Test results') classifiers_id = ['all', 'top1', 'top2', 'top3']", "teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test", "List of concepts to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To", "2018, University of Freiburg. <NAME> <<EMAIL>> \"\"\" import os import argparse import pickle", "+ concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id)", "def concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various logistic regression classifiers for different", "statistics about the initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts) # Computes the", "of POS tags. 
print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for each byte", "classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0])", "weights X_t_ = X_t else: # top k weights k = int(x.group('k')) X_t_", "= ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP',", "is provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with saved", "save_dir = args.save_dir # Folder to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir", "the concepts to the whole set if no grouping is required. unique_tags, counts", "or cell_states of LSTMs state_type = 'cell_states' # List of concepts to analyse", "into JJ; NN* into NN; NNP* into NNP; RB* into RB. \"\"\") args", "classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into VB; JJ* into JJ; NN*", "+ classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb'))", "as np import re from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import", "arguments. Returns: None. \"\"\" # Directory with LSTM model. save_dir = args.save_dir #", "sentences. # X holds the sentences (word1, word2, ...) # Y holds the", "default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into VB;", "parser.add_argument('--data_file', type=str, default=None, help=\"\"\"file to use as input to the classifier. If no", "help=\"\"\"file to use as input to the classifier. If no file is provided,", "POS tags. print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for each byte in", "for the logistic regression classifiers. 
print('\\n-----> Test results') classifiers_id = ['all', 'top1', 'top2',", "'.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest", "np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true,", "'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes", "holds the sentences (word1, word2, ...) # Y holds the corresponding ((word1, tag1),", "print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for each byte in X. X_t,", "+ '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x", "all VB* tags into VB; JJ* into JJ; NN* into NN; NNP* into", "print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format(", "Computes the LSTM state for each byte in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir,", "args.group_tags: global concepts concepts = unique_tags # Print some statistics about the initial", "concepts = unique_tags # Print some statistics about the initial distribution of POS", "== '__main__': \"\"\" Parse CLI arguments. \"\"\" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir',", "return_counts=True) if not args.group_tags: global concepts concepts = unique_tags # Print some statistics", "predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes", "to use as input to the classifier. 
If no file is provided, the", "into NN; NNP* into NNP; RB* into RB. \"\"\") args = parser.parse_args() concept_neurons_accuracy(args)", "overall metrics for the logistic regression classifiers. print('\\n-----> Test results') classifiers_id = ['all',", "pickle import numpy as np import re from sklearn.metrics import accuracy_score, precision_score, recall_score", "from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs state_type =", "'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = []", "re from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from", "concepts concepts = unique_tags # Print some statistics about the initial distribution of", "class with largest predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind =", "(word1, word2, ...) # Y holds the corresponding ((word1, tag1), (word2, tags), ...)", "counts = np.unique([y[1] for sublist in Y for y in sublist], return_counts=True) if", "# Directory with LSTM model. save_dir = args.save_dir # Folder to save results.", "k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX,", "trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find", "classes = [] for concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' +", "'_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if x is", "Folder to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data", "X, Y) # Compute the overall metrics for the logistic regression classifiers. 
print('\\n----->", "classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers", "lr_file = os.path.join( results_dir, 'log_reg_model_' + concept + '_' + classifier_id + '.sav')", "import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics,", "model. save_dir = args.save_dir # Folder to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir)", "<NAME> <<EMAIL>> \"\"\" import os import argparse import pickle import numpy as np", "for x in X_t] trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags)", "None: # all weights X_t_ = X_t else: # top k weights k", "...) # Y holds the corresponding ((word1, tag1), (word2, tags), ...) X, Y", "as input to the classifier. If no file is provided, the nltk.corpus.treebank is", "split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs", "numpy as np import re from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron", "file is provided, the nltk.corpus.treebank is used \"\"\") parser.add_argument('--results_dir', type=str, default='results', help='directory with", "'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): \"\"\" Computes the accuracy", "recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states", "each byte in X. 
X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the", "in classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes = []", "= np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy:", "training data, tokenize and POS tag sentences. # X holds the sentences (word1,", "concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN',", "args (argparse): arguments. Returns: None. \"\"\" # Directory with LSTM model. save_dir =", "X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX, trY, vaY, teY", "in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics", "= args.save_dir # Folder to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir =", "Y holds the corresponding ((word1, tag1), (word2, tags), ...) X, Y = process_sentence_pos_tags(input_file,", "nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD',", "args.results_dir # Data to analyse. input_file = args.data_file # Get training data, tokenize", "help=\"\"\"group all VB* tags into VB; JJ* into JJ; NN* into NN; NNP*", "find the available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')',", "in sublist], return_counts=True) if not args.group_tags: global concepts concepts = unique_tags # Print", "if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse. input_file =", "about the initial distribution of POS tags. 
print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM", "= int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX, trY,", "= concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision:", "results_dir = args.results_dir # Data to analyse. input_file = args.data_file # Get training", "compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics for the logistic regression classifiers.", "pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept", "saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags into VB; JJ* into JJ;", "import argparse import pickle import numpy as np import re from sklearn.metrics import", "parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help=\"\"\"group all VB* tags", "predicted_probs = [] classes = [] for concept in concepts: lr_file = os.path.join(", "if x is None: # all weights X_t_ = X_t else: # top", "tags. print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for each byte in X.", "Set the concepts to the whole set if no grouping is required. unique_tags,", "os.path.join( results_dir, 'log_reg_model_' + concept + '_' + classifier_id + '.sav') if not", "concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\\d)$', classifier_id) if", "X_t] trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) #", "else: # top k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x", "distribution of POS tags. 
print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for each", "\"\"\" Computes the accuracy for various logistic regression classifiers for different POS tags,", "JJ; NN* into NN; NNP* into NNP; RB* into RB. \"\"\") args =", "{:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred,", "concept_neurons_accuracy(args): \"\"\" Computes the accuracy for various logistic regression classifiers for different POS", "# To find the available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts =", "= args.results_dir # Data to analyse. input_file = args.data_file # Get training data,", "not args.group_tags: global concepts concepts = unique_tags # Print some statistics about the", "y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': \"\"\"", "args.data_file # Get training data, tokenize and POS tag sentences. # X holds", "from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron", "precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__", "= np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true,", "results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x =", "data, tokenize and POS tag sentences. 
# X holds the sentences (word1, word2,", "accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true,", "to the classifier. If no file is provided, the nltk.corpus.treebank is used \"\"\")", "cell_states of LSTMs state_type = 'cell_states' # List of concepts to analyse -", "POS tag sentences. # X holds the sentences (word1, word2, ...) # Y", "= os.path.join( results_dir, 'log_reg_model_' + concept + '_' + classifier_id + '.sav') if", "NN* into NN; NNP* into NNP; RB* into RB. \"\"\") args = parser.parse_args()", "coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze(", "{:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ ==", "to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find the available", "\"\"\" 2018, University of Freiburg. <NAME> <<EMAIL>> \"\"\" import os import argparse import", "x is None: # all weights X_t_ = X_t else: # top k", "X_t_ = X_t else: # top k weights k = int(x.group('k')) X_t_ =", "nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ',", "os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all =", "byte in X. 
X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the overall", "for y in sublist], return_counts=True) if not args.group_tags: global concepts concepts = unique_tags", "= compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics for the logistic regression", "VB* tags into VB; JJ* into JJ; NN* into NN; NNP* into NNP;", "classifiers_id: print('\\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes = [] for" ]
[ "z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' in _domain_version) or ('12.2' in", "+ admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion() set_server_log_config(domain_version, log_dir, managed_server_name)", "managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/'", "cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3'", "admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name", "= os.environ['LOG_DIR'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password =", "+ admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version =", "cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if", "+ 
'.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy", "cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5)", "+ ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion() set_server_log_config(domain_version,", "cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url =", "+ '/Log/' + _server_name) cmo.setFileName(_log_dir + '/' + _server_name + '/' + 'general.'", "cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' 
in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30)", "if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address", "cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://'", "cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url", "_server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir + '/' + _server_name + '/' +", "cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in", "python log_dir = os.environ['LOG_DIR'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME']", "_server_name): cd('/Servers/' + _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir + '/' + _server_name", "+ _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') 
cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM", "'.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS", "_domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':' +", "_server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d,", "cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard')", "= os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name =", "###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir", "_server_name + '/' + 'general.' 
+ _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00')", "= os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def", "h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' in _domain_version) or", "+ '/' + _server_name + '/' + 'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') #", "connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion() set_server_log_config(domain_version, log_dir, managed_server_name) save() activate()", "os.environ['LOG_DIR'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>']", "admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' +", "'t3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version", "admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ######################################################################", "# cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') 
cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a", "os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME']", "_log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir + '/' +", "yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' in _domain_version)", "cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' +", "if ('14.' in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000)", "cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z')", "d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' in", "a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' 
in _domain_version) or ('12.2'", "log_dir = os.environ['LOG_DIR'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password", "= os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/' +", "cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace')", "+ _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir + '/' + _server_name + '/'", "':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion() set_server_log_config(domain_version, log_dir,", "cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50)", "###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url)", "cmo.setFileName(_log_dir + '/' + _server_name + '/' + 'general.' 
+ _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log')", "('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10)", "admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir,", "+ _server_name + '/' + 'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime')", "admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit()", "cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True)", "cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info')", "set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir + '/'", "# cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if 
('14.' in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True)", "admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion()", "os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name +", "cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500)", "= os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name):", "+ _server_name) cmo.setFileName(_log_dir + '/' + _server_name + '/' + 'general.' 
+ _server_name", "'10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address +", "in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True)", "admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion() set_server_log_config(domain_version, log_dir, managed_server_name) save() activate() exit()", "os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version,", "'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False)", "'/' + _server_name + '/' + 'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null')", "cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.' 
in _domain_version) or ('12.2' in _domain_version):", "_domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info')", "= 't3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit()", "in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':'", "+ 'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30)", "os.environ['ADMIN_USERNAME'] admin_password = os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/'", "cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username,", "'/' + 'general.' 
+ _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True)", "cd('/Servers/' + _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir + '/' + _server_name +", "cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') #", "def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/' + _server_name) cmo.setFileName(_log_dir +", "+ '/' + 'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log') # cmo.setFileName('/dev/null') cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24)", "= os.environ['<PASSWORD>'] managed_server_name = os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name", "<filename>weblogic/server/set_server_log.py #!/usr/bin/env python log_dir = os.environ['LOG_DIR'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username", "'/Log/' + _server_name) cmo.setFileName(_log_dir + '/' + _server_name + '/' + 'general.' +", "('14.' 
in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info')", "cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True) cmo.setRedirectStderrToServerLogEnabled(True) if ('14.'", "os.environ['MANAGED_SERVER_NAME'] ###################################################################### def set_server_log_config(_domain_version, _log_dir, _server_name): cd('/Servers/' + _server_name + '/Log/' + _server_name)", "admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() domain_version = cmo.getDomainVersion() set_server_log_config(domain_version, log_dir, managed_server_name) save()", "cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version:", "or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info')", "#!/usr/bin/env python log_dir = os.environ['LOG_DIR'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username =", "cmo.setBufferSizeKB(0) 
cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if '10.3' in _domain_version: cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ######################################################################", "_domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0) cmo.setStdoutSeverity('Info') cmo.setDomainLogBroadcastSeverity('Info') cmo.setDomainLogBroadcasterBufferSize(10) cmo.setStdoutLogStack(True) if", "cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password,", "_server_name) cmo.setFileName(_log_dir + '/' + _server_name + '/' + 'general.' 
+ _server_name +", "in _domain_version) or ('12.2' in _domain_version): cmo.setLogMonitoringEnabled(True) cmo.setLogMonitoringIntervalSecs(30) cmo.setLogMonitoringThrottleThreshold(1500) cmo.setLogMonitoringThrottleMessageLength(50) cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000) cmo.setLogFileSeverity('Info') cmo.setBufferSizeKB(0)", "cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info') # cmo.setLoggerSeverity('Trace') cmo.setRedirectStdoutToServerLogEnabled(True)", "cmo.setRotationType('byTime') cmo.setRotationTime('00:00') cmo.setFileTimeSpan(24) cmo.setNumberOfFilesLimited(True) cmo.setFileCount(30) cmo.setRotateLogOnStartup(False) cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z') cmo.setLoggerSeverity('Info')", "cmo.setMemoryBufferSeverity('Info') cmo.setStacktraceDepth(5) cmo.setStdoutFormat('standard') ###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port" ]
[ "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "software and associated documentation files (the \"Software\"), to deal # in the Software", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t = self.first + n_strides * self._stride while", "self._stride = get_timedelta(stride) def message(self, interval): return '{} running from {} to {}", "{} to {} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self,", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "DEALINGS IN THE SOFTWARE. from hyperstream.stream import StreamInstance from hyperstream.tool import Tool, check_input_stream_count", "interval.start = self.first n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t = self.first", "from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from datetime import", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER", "get_timedelta from datetime import datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple", "do so, subject to the following conditions: # # The above copyright notice", "LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "t = self.first + n_strides * self._stride while t <= interval.end: if t", "interval): return '{} running from {} to {} with stride {}s'.format( self.__class__.__name__, str(interval.start),", "got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval): return '{} running from {}", "and to permit persons to whom the Software is # furnished to do", "{}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval): return '{} running from {} to", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the Software without restriction, including without limitation the rights # to use, copy,", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "the following conditions: # # The above copyright notice and this permission notice", "@check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if interval.start < self.first: interval.start = self.first", "stride=1.0): \"\"\" Simple clock ticker tool :param first: Start of the clock :param", "_execute(self, sources, alignment_stream, interval): if interval.start < self.first: interval.start = self.first n_strides =", "interval.start < self.first: interval.start = self.first n_strides = int((interval.start - 
self.first).total_seconds() // self._stride.total_seconds())", "person obtaining a copy # of this software and associated documentation files (the", "first: Start of the clock :param stride: Tick stride as timedelta \"\"\" super(Clock,", "str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if interval.start < self.first:", "# furnished to do so, subject to the following conditions: # # The", "hyperstream.utils import MIN_DATE, get_timedelta from datetime import datetime class Clock(Tool): def __init__(self, first=MIN_DATE,", "the Software, and to permit persons to whom the Software is # furnished", "permit persons to whom the Software is # furnished to do so, subject", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "from hyperstream.stream import StreamInstance from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE,", "Permission is hereby granted, free of charge, to any person obtaining a copy", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE AUTHORS OR", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR", "Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from datetime import datetime class Clock(Tool):", "sources, alignment_stream, interval): if interval.start < self.first: interval.start = self.first n_strides = int((interval.start", "self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride =", "n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t = self.first + n_strides *", "# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "import datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool", "in the Software without restriction, including without limitation the rights # to use,", "FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "Software without restriction, including without limitation the rights # to use, copy, modify,", "from {} to {} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from datetime import datetime class Clock(Tool): def", "\"\"\" Simple clock ticker tool :param first: Start of the clock :param stride:", "def _execute(self, sources, alignment_stream, interval): if interval.start < self.first: interval.start = self.first n_strides", "# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol #", "Start of the 
clock :param stride: Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first,", "stride: Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime):", "datetime import datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker", "copies of the Software, and to permit persons to whom the Software is", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR", "BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR", "# The above copyright notice and this permission notice shall be included in", "included in all # copies or substantial portions of the Software. # #", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR", "__init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool :param first: Start of the", "# of this software and associated documentation files (the \"Software\"), to deal #", "to do so, subject to the following conditions: # # The above copyright", "ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval): return '{} running", "is hereby granted, free of charge, to any person obtaining a copy #", "above copyright notice and this permission notice shall be included in all #", "persons to whom the Software is # furnished to do so, subject to", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #", "the clock :param stride: Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if", "sell # copies of the Software, and to permit persons to whom the", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #", "conditions: # # The above copyright notice and this permission notice shall be", "substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "(MIT) # Copyright (c) 2014-2017 University of Bristol # # Permission is hereby", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "OTHER DEALINGS IN THE SOFTWARE. from hyperstream.stream import StreamInstance from hyperstream.tool import Tool,", "clock :param stride: Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not", "University of Bristol # # Permission is hereby granted, free of charge, to", "interval): if interval.start < self.first: interval.start = self.first n_strides = int((interval.start - self.first).total_seconds()", "THE SOFTWARE. from hyperstream.stream import StreamInstance from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils", "= self.first + n_strides * self._stride while t <= interval.end: if t >", "to permit persons to whom the Software is # furnished to do so,", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, #", "OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool :param first: Start of", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE #", "Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise", "self._stride.total_seconds()) t = self.first + n_strides * self._stride while t <= interval.end: if", "WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "License (MIT) # Copyright (c) 2014-2017 University of Bristol # # Permission is", "notice shall be included in all # copies or substantial portions of the", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "import MIN_DATE, get_timedelta from datetime import datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0):", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "of charge, to any person obtaining a copy # of this software and", "whom the Software is # furnished to do so, subject to the following", "StreamInstance from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from datetime", "timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got", "self.first + n_strides * self._stride while t <= interval.end: if t > interval.start:", "(c) 2014-2017 University of Bristol # # Permission is hereby granted, free of", "KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "self.first n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t = self.first + n_strides", "# # Permission is hereby granted, free of charge, to any person obtaining", "< 
self.first: interval.start = self.first n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t", "free of charge, to any person obtaining a copy # of this software", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT", "datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool :param", "str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if interval.start < self.first: interval.start", "stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride)", "isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval):", "TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "shall be included in all # copies or substantial portions of the Software.", "MIN_DATE, get_timedelta from datetime import datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\"", "datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval): return '{} running from", "n_strides * self._stride while t <= interval.end: if t > interval.start: yield StreamInstance(t,", "'{} running from {} to {} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride))", "BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE.", "The above copyright notice and this permission notice shall be included in all", "* self._stride while t <= 
interval.end: if t > interval.start: yield StreamInstance(t, t)", "and/or sell # copies of the Software, and to permit persons to whom", "so, subject to the following conditions: # # The above copyright notice and", "this permission notice shall be included in all # copies or substantial portions", "WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT", "datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval): return", "self.first).total_seconds() // self._stride.total_seconds()) t = self.first + n_strides * self._stride while t <=", "get_timedelta(stride) def message(self, interval): return '{} running from {} to {} with stride", "SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from hyperstream.stream", "first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool :param first: Start of the clock", "if interval.start < self.first: interval.start = self.first n_strides = int((interval.start - self.first).total_seconds() //", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE,", "clock ticker tool :param first: Start of the clock :param stride: Tick stride", "stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "# copies or substantial portions of the Software. # # THE SOFTWARE IS", "OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS", "THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. 
from", "from hyperstream.utils import MIN_DATE, get_timedelta from datetime import datetime class Clock(Tool): def __init__(self,", "# in the Software without restriction, including without limitation the rights # to", "is # furnished to do so, subject to the following conditions: # #", "hyperstream.stream import StreamInstance from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta", "files (the \"Software\"), to deal # in the Software without restriction, including without", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "to {} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources,", "self._stride while t <= interval.end: if t > interval.start: yield StreamInstance(t, t) t", "copy # of this software and associated documentation files (the \"Software\"), to deal", "CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "tool :param first: Start of the clock :param stride: Tick stride as timedelta", "# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "{} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream,", "alignment_stream, interval): if interval.start < self.first: interval.start = self.first n_strides = int((interval.start -", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER", "to the following conditions: # # The above copyright notice and this permission", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE", "to deal # in the Software without restriction, including without limitation the rights", "OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "to any person obtaining a copy # of this software and associated documentation", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING", "from datetime import datetime class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY,", "following conditions: # # The above copyright notice and this permission notice shall", "of the Software, and to permit persons to whom the Software is #", "in all # copies or substantial portions of the Software. # # THE", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "running from {} to {} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0)", "str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if interval.start < self.first: interval.start =", "message(self, interval): return '{} running from {} to {} with stride {}s'.format( self.__class__.__name__,", "The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol # #", "+ n_strides * self._stride while t <= interval.end: if t > interval.start: yield", "IN THE SOFTWARE. from hyperstream.stream import StreamInstance from hyperstream.tool import Tool, check_input_stream_count from", "and associated documentation files (the \"Software\"), to deal # in the Software without", "LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "SOFTWARE. 
from hyperstream.stream import StreamInstance from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import", "import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from datetime import datetime class", "super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride", "= self.first n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t = self.first +", "with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval):", "Copyright (c) 2014-2017 University of Bristol # # Permission is hereby granted, free", "any person obtaining a copy # of this software and associated documentation files", "ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM,", "# # The above copyright notice and this permission notice shall be included", "self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if interval.start <", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED,", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "class Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool :param first:", "self.first: interval.start = self.first n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t =", "TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "# OR OTHER DEALINGS IN 
THE SOFTWARE. from hyperstream.stream import StreamInstance from hyperstream.tool", "THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. #", "\"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__))", "def message(self, interval): return '{} running from {} to {} with stride {}s'.format(", "sublicense, and/or sell # copies of the Software, and to permit persons to", "a copy # of this software and associated documentation files (the \"Software\"), to", "deal # in the Software without restriction, including without limitation the rights #", "Software is # furnished to do so, subject to the following conditions: #", "OR OTHER DEALINGS IN THE SOFTWARE. from hyperstream.stream import StreamInstance from hyperstream.tool import", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY,", "Software, and to permit persons to whom the Software is # furnished to", "- self.first).total_seconds() // self._stride.total_seconds()) t = self.first + n_strides * self._stride while t", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR #", "raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self, interval): return '{}", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A", "WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN", "import StreamInstance from hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN", "Simple clock ticker tool :param first: Start of the clock :param stride: Tick", "if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def", "return '{} running from {} to {} with stride {}s'.format( self.__class__.__name__, str(interval.start), str(interval.end),", "AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "all # copies or substantial portions of the Software. # # THE SOFTWARE", "{}s'.format( self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride)) @check_input_stream_count(0) def _execute(self, sources, alignment_stream, interval): if interval.start", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "this software and associated documentation files (the \"Software\"), to deal # in the", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR", "# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT", "= get_timedelta(stride) def message(self, interval): return '{} running from {} to {} with", "USE # OR OTHER DEALINGS IN THE SOFTWARE. from hyperstream.stream import StreamInstance from", "copyright notice and this permission notice shall be included in all # copies", "WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE AUTHORS", "charge, to any person obtaining a copy # of this software and associated", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "of the clock :param stride: Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride)", "CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE", "while t <= interval.end: if t > interval.start: yield StreamInstance(t, t) t +=", "OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES", "of Bristol # # Permission is hereby granted, free of charge, to any", "// self._stride.total_seconds()) t = self.first + n_strides * self._stride while t <= interval.end:", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING", "IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT", "2014-2017 University of Bristol # # Permission is hereby granted, free of charge,", "CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "granted, free of charge, to any person obtaining a copy # of this", "ticker tool :param first: Start of the clock :param stride: Tick stride as", "Bristol # # Permission is hereby granted, free of charge, to any person", "# copies of the Software, and to permit persons to whom the Software", "PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN", "Clock(Tool): def __init__(self, first=MIN_DATE, stride=1.0): \"\"\" Simple clock ticker tool :param first: Start", "as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime,", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from hyperstream.stream import", "not isinstance(first, datetime): raise ValueError(\"Expected datetime.datetime, got {}\".format(first.__type__.__name__)) self._stride = get_timedelta(stride) def message(self,", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS", ":param stride: Tick stride as timedelta \"\"\" super(Clock, self).__init__(first=first, stride=stride) if not isinstance(first,", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL", "to whom the Software is # furnished to do so, subject to the", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "# Copyright (c) 2014-2017 University of Bristol # # Permission is hereby granted,", "MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol # # Permission", "permission notice shall be included in all # copies or substantial portions of", "furnished to do so, subject to the following conditions: # # The above", "and this permission notice shall be included in all # copies or substantial", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from hyperstream.stream import StreamInstance", "be included in all # copies or substantial portions of the Software. 
#", "hyperstream.tool import Tool, check_input_stream_count from hyperstream.utils import MIN_DATE, get_timedelta from datetime import datetime", "t <= interval.end: if t > interval.start: yield StreamInstance(t, t) t += self._stride", "# Permission is hereby granted, free of charge, to any person obtaining a", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "= int((interval.start - self.first).total_seconds() // self._stride.total_seconds()) t = self.first + n_strides * self._stride", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "notice and this permission notice shall be included in all # copies or", ":param first: Start of the clock :param stride: Tick stride as timedelta \"\"\"", "HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN" ]
[ "an existing postgres database, first: pip install psycopg2 then overwrite the settings above", "BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret key if using in", "\"HOST\" : \"localhost\", \"PORT\" : \"5432\", } } \"\"\" To connect to an", "\"\"\" To connect to an existing postgres database, first: pip install psycopg2 then", "postgres database, first: pip install psycopg2 then overwrite the settings above with: DATABASES", "install psycopg2 then overwrite the settings above with: DATABASES = { 'default': {", "to an existing postgres database, first: pip install psycopg2 then overwrite the settings", "SECURITY WARNING: Modify this secret key if using in production! SECRET_KEY = \"\"", "this secret key if using in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES =", "DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\",", "psycopg2 then overwrite the settings above with: DATABASES = { 'default': { 'ENGINE':", "= os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret key if using in production!", "paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY", "SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '',", "project like this: os.path.join(BASE_DIR, ...) 
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this", ": \"localhost\", \"PORT\" : \"5432\", } } \"\"\" To connect to an existing", ": \"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\", } }", "{ \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\" : \"\",", "'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', }", "using in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\":", "settings above with: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER':", "DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\"", "connect to an existing postgres database, first: pip install psycopg2 then overwrite the", "existing postgres database, first: pip install psycopg2 then overwrite the settings above with:", "in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",", "'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', } } \"\"\"", "\"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\", } } \"\"\" To connect to", "# SECURITY WARNING: Modify this secret key if using in production! 
SECRET_KEY =", "\"PORT\" : \"5432\", } } \"\"\" To connect to an existing postgres database,", "DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password',", "\"localhost\", \"PORT\" : \"5432\", } } \"\"\" To connect to an existing postgres", "= { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST':", "first: pip install psycopg2 then overwrite the settings above with: DATABASES = {", "inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING:", "'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', } } \"\"\" INSTALLED_APPS", "'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', } } \"\"\" INSTALLED_APPS = (\"db\",)", "Modify this secret key if using in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES", "os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret key if using in production! SECRET_KEY", "<filename>settings.py import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...)", ": \"5432\", } } \"\"\" To connect to an existing postgres database, first:", "WARNING: Modify this secret key if using in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField'", "{ 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost',", "if using in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": {", "then overwrite the settings above with: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2',", "this: os.path.join(BASE_DIR, ...) 
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret key", "\"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\"", "...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret key if using", "production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\":", ": \"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\", } } \"\"\" To connect", "Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) #", "the settings above with: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB',", "\"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' :", "# Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__))", "'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', } }", "key if using in production! 
SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\":", "To connect to an existing postgres database, first: pip install psycopg2 then overwrite", "'', 'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\",", "overwrite the settings above with: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME':", "\"5432\", } } \"\"\" To connect to an existing postgres database, first: pip", "above with: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres',", "\"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\", } } \"\"\" To", "'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT':", "{ \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\" :", "import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR", "os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret key if", "the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify", "} \"\"\" To connect to an existing postgres database, first: pip install psycopg2", "{ 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '',", "secret key if using in production! SECRET_KEY = \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = {", "\"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\", } } \"\"\"", "os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 
BASE_DIR =", "database, first: pip install psycopg2 then overwrite the settings above with: DATABASES =", "= { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\" :", "with: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'YOURDB', 'USER': 'postgres', 'PASSWORD':", "} } \"\"\" To connect to an existing postgres database, first: pip install", "= \"\" DEFAULT_AUTO_FIELD='django.db.models.AutoField' DATABASES = { \"default\": { \"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER'", "'USER': 'postgres', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '', } } \"\"\" INSTALLED_APPS =", "pip install psycopg2 then overwrite the settings above with: DATABASES = { 'default':", "'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\" : \"5432\", }", "\"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\"", "like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # SECURITY WARNING: Modify this secret", "\"NAME\": '', 'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\", \"PORT\" :", "\"ENGINE\": \"django.db.backends.postgresql_psycopg2\", \"NAME\": '', 'USER' : \"\", \"PASSWORD\" : \"\", \"HOST\" : \"localhost\"," ]
[ "1, 1) from .backends import EmailBackend from .models import PRIORITY from .utils import", "(1, 1, 1) from .backends import EmailBackend from .models import PRIORITY from .utils", "VERSION = (1, 1, 1) from .backends import EmailBackend from .models import PRIORITY", "= (1, 1, 1) from .backends import EmailBackend from .models import PRIORITY from", "1) from .backends import EmailBackend from .models import PRIORITY from .utils import send_mail" ]
[ "\"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True,", "= window.read() if event == '-start-': data = np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som =", "key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init', key='random')],", "= FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True: event, values", "sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-',", "while True: event, values = window.read() if event == '-start-': data = np.loadtxt('output.txt',", "if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets", "'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ] codes = ['1', '2',", "canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True: event, values = window.read()", "values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets =", "if event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles',", "without the plot window = sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320, 170))", "True: event2, values2 = window2.read() if event2 == 'OK': window2.close() break if event2", "key='-width-', size=(2, 1)), sg.Text('на'), 
sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6,", "else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else:", "0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True:", "2', 'Автобус 3', 'Грузовик рейнджеров' ] codes = ['1', '2', '3', '4', '5',", "import SOM layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')], # [sg.Text('Path", "sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1)", "'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно',", "= np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if", "для VAST 2017 MC1', font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], #", "sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-',", "key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create the form and show it without the", "plot window = sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas,", "draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 = window2.read() if event2 == 'OK': window2.close()", "target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 
1)), sg.Text('на'), sg.InputText('20', key='-height-',", "event == '-start-': data = np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) #", "# som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png') if event == 'Exit': window.close() break pass", "[sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init',", "name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds #", "figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg", "= sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0,", "som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if values['hill']: if values['random']: som.fit(data,", "- 1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2',", "True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init',", "figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True: event, values = window.read() if", "['1', '2', '3', '4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes) #", "2017 MC1', font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to", "and show it without the plot window = sg.Window('Аналитический инструмент для SOM', layout,", "= np.loadtxt('target.txt', dtype='int') targets = targets - 1 names = ['Автомобиль', 'Грузовик 2',", 
"som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) #", "[[sg.Text('SOM для VAST 2017 MC1', font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')],", "delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if values['hill']: if", "'Грузовик рейнджеров' ] codes = ['1', '2', '3', '4', '5', '6', '2P'] fig1", "figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2", "import FigureCanvasTkAgg from SOM import SOM layout = [[sg.Text('SOM для VAST 2017 MC1',", "init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random')", "decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear')", "FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True: event, values =", "Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while", "SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill',", "if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if", "as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM layout = [[sg.Text('SOM", "# [sg.Text('Path to data'), 
sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер", "from SOM import SOM layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')],", "SOM import SOM layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')], #", "som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets", "key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип", "names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2', 'Автобус 3',", "3', 'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ] codes = ['1',", "window2.read() if event2 == 'OK': window2.close() break if event2 == '-print-': som.plot_point_map(data, targets,", "[sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create the form and show it", "layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')], # [sg.Text('Path to data'),", "values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']:", "figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print", "size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))],", "window2.close() break if event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets,", "size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], 
[sg.Text('Тип инициализации'),", "matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM layout = [[sg.Text('SOM для VAST 2017", "[sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'),", "som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data,", "== '-start-': data = np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize", "SOM layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')], # [sg.Text('Path to", "values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int')", "sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'),", "it without the plot window = sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320,", "key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'),", "window = sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure,", "if event == '-start-': data = np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-']))", "figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds", "key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', 
size=(2, 1))],", "= draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 = window2.read() if event2 == 'OK':", "= window2.read() if event2 == 'OK': window2.close() break if event2 == '-print-': som.plot_point_map(data,", "loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while", "figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM Result', layout2, finalize=True)", "fig1 = som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7,", "if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if", "targets = targets - 1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик", "'6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data, targets, t=0,", "== '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png') if", "figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 =", "] # create the form and show it without the plot window =", "= fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2", "= ['1', '2', '3', '4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes)", "values2 = window2.read() if event2 == 'OK': window2.close() break if event2 == '-print-':", "window2 = sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg =", "if 
event2 == 'OK': window2.close() break if event2 == '-print-': som.plot_point_map(data, targets, names,", "the form and show it without the plot window = sg.Window('Аналитический инструмент для", "init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets = targets -", "4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ] codes = ['1', '2', '3',", "True, key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create", "else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets = targets - 1", "codes) # fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6)", "figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ]", "3', 'Грузовик рейнджеров' ] codes = ['1', '2', '3', '4', '5', '6', '2P']", "[[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM Result', layout2,", "] window2 = sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg", "size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top',", "figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True: event,", "draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 = window2.read() if", "2', 'Грузовик 3', 
'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ] codes", "layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM", "fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 = window2.read() if event2 ==", "['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров'", "sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')],", "names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png') if event == 'Exit': window.close()", "'3', '4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes) # fig2 =", "для SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg =", "codes = ['1', '2', '3', '4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets,", "True: event, values = window.read() if event == '-start-': data = np.loadtxt('output.txt', delimiter=';',", "[sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки", "'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ] codes = ['1', '2', '3', '4',", "som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png') if event ==", "sg.Button('Выход', key=\"Exit\")] ] # create the form and show it without the plot", "sg import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM", "fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 = window2.read() if event2", 
"values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']:", "dtype='int') targets = targets - 1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3',", "finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2,", "инструмент для SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg", "figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg while True: event, values = window.read() if event", "SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure,", "sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'),", "= fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h),", "figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result',", "the SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']),", "SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000,", "key='-print-')] ] window2 = sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2)", "data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер 
сетки SOM:'), sg.InputText('20',", "while True: event2, values2 = window2.read() if event2 == 'OK': window2.close() break if", "def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)", "'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ]", "= som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7)", "= som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w,", "som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h", "int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']),", "key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2,", "'Грузовик 3', 'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик рейнджеров' ] codes =", "рейнджеров' ] codes = ['1', '2', '3', '4', '5', '6', '2P'] fig1 =", "sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create the form", "to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'),", "# fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w,", "key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create the form 
and show", "figure_canvas_agg while True: event, values = window.read() if event == '-start-': data =", "'2', '3', '4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes) # fig2", "'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create the form and", "filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png') if event == 'Exit': window.close() break", "import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM layout", "'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет',", "# create the form and show it without the plot window = sg.Window('Аналитический", "event, values = window.read() if event == '-start-': data = np.loadtxt('output.txt', delimiter=';', usecols=range(40))", "data = np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM", "usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if values['hill']: if values['random']:", "= SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']),", "SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill')", "fill='both', expand=1) return figure_canvas_agg while True: event, values = window.read() if event ==", "'4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data,", "sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg =", "[sg.Text('Размер сетки SOM:'), 
sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество", "layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)", "fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x,", "np.loadtxt('target.txt', dtype='int') targets = targets - 1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик", "fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 =", "key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY',", "som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets = targets - 1 names", "PySimpleGUI as sg import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM", "6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h =", "size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True,", "инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")]", "1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA',", "targets - 1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус", "break if event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0,", "int(values['-epochs-']), 
decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']),", "draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return", "targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h =", "1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип", "int(values['-height-'])) # initialize the SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random')", "fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')],", "'OK': window2.close() break if event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data,", "key=\"Exit\")] ] # create the form and show it without the plot window", "[sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход',", "decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets = targets - 1 names = ['Автомобиль',", "layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True:", "= [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM Result',", "'-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, 
name='Vehicles', filename='images/density.png') if event", "to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20',", "key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM Result', layout2, finalize=True) #", "= sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas,", "from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM layout = [[sg.Text('SOM для VAST", "# [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)),", "== 'OK': window2.close() break if event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') #", "fig1) while True: event2, values2 = window2.read() if event2 == 'OK': window2.close() break", "the plot window = sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320, 170)) def", "= targets - 1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+',", "FigureCanvasTkAgg from SOM import SOM layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any", "= draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 = window2.read()", "7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y,", "initialize the SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else: som.fit(data,", "np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM if values['hill']:", "'init', True, 
key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] #", "сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'),", "1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'),", "sg.FileBrowse('targets.txt', key='-Path-target-')], [sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'), sg.InputText('20', key='-height-', size=(2,", "show it without the plot window = sg.Window('Аналитический инструмент для SOM', layout, finalize=True,", "decay='hill', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear',", "1 names = ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2', 'Автобус", "sg.Window('Аналитический инструмент для SOM', layout, finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)):", "170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both',", "create the form and show it without the plot window = sg.Window('Аналитический инструмент", "# fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2", "numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM layout =", "'Автобус 3', 'Грузовик рейнджеров' ] codes = ['1', '2', '3', '4', '5', '6',", "fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h", "расчет', key='-start-'), 
sg.Button('Выход', key=\"Exit\")] ] # create the form and show it without", "return figure_canvas_agg while True: event, values = window.read() if event == '-start-': data", "targets = np.loadtxt('target.txt', dtype='int') targets = targets - 1 names = ['Автомобиль', 'Грузовик", "key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ] # create the", "] codes = ['1', '2', '3', '4', '5', '6', '2P'] fig1 = som.plot_point_map_gui(data,", "'5', '6', '2P'] fig1 = som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data, targets,", "# figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'),", "as sg import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import", "import PySimpleGUI as sg import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from", "np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from SOM import SOM layout = [[sg.Text('SOM для", "[sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear',", "figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')], [sg.OK('OK'), sg.Button('Print result', key='-print-')]", "[sg.OK('OK'), sg.Button('Print result', key='-print-')] ] window2 = sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg", "18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')],", "font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'), sg.FileBrowse('targets.txt',", "values = window.read() if event == '-start-': data = 
np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som", "som.fit(data, int(values['-epochs-']), decay='hill') if values['linear']: if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data,", "targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png') if event == 'Exit':", "figure1_h = fig1.bbox.bounds # figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds layout2 = [[sg.Canvas(size=(figure1_w,", "decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets = targets", "sg.InputText('20', key='-height-', size=(2, 1))], [sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill',", "fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2) fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1) while True: event2, values2 =", "<gh_stars>1-10 import PySimpleGUI as sg import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg", "targets, codes) # fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6,", "event2 == 'OK': window2.close() break if event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png')", "int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets =", "эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))], [sg.Text('Тип \"Decay\"'), sg.Radio('hill', 'DECAY', True, key='hill'), sg.Radio('linear', 'DECAY',", "= [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt',", "sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 
'init', key='random')], [sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key=\"Exit\")] ]", "finalize=True, size=(320, 170)) def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw()", "sg.Radio('linear', 'DECAY', key='linear')], [sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'), sg.Radio('Случайно', 'init', key='random')], [sg.Button('Начать", "form and show it without the plot window = sg.Window('Аналитический инструмент для SOM',", "'2P'] fig1 = som.plot_point_map_gui(data, targets, codes) # fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0])", "if values['random']: som.fit(data, int(values['-epochs-']), decay='linear', init_type='random') else: som.fit(data, int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt',", "result', key='-print-')] ] window2 = sg.Window('SOM Result', layout2, finalize=True) # fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas,", "# initialize the SOM if values['hill']: if values['random']: som.fit(data, int(values['-epochs-']), decay='hill', init_type='random') else:", "event2, values2 = window2.read() if event2 == 'OK': window2.close() break if event2 ==", "VAST 2017 MC1', font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path", "# fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x,", "t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds", "int(values['-epochs-']), decay='linear') targets = np.loadtxt('target.txt', dtype='int') targets = targets - 1 names =", "window.read() if event == '-start-': data = np.loadtxt('output.txt', delimiter=';', usecols=range(40)) som = SOM(int(values['-width-']),", "'-start-': data = np.loadtxt('output.txt', delimiter=';', 
usecols=range(40)) som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the", "= ['Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2', 'Автобус 3', 'Грузовик", "event2 == '-print-': som.plot_point_map(data, targets, names, filename='images/SOM/som.png') # som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png')", "expand=1) return figure_canvas_agg while True: event, values = window.read() if event == '-start-':", "MC1', font='Any 18')], # [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')], # [sg.Text('Path to target'),", "fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0]) fig1.set_size_inches(7, 7) # fig2.set_size_inches(6, 6) figure1_x, figure1_y," ]
[ "Decimal(binary + (i * digit)) i *= 10 decimal = decimal // 2", "i = 1 while decimal != 0: digit = decimal % 2 binary", "= Decimal(binary + (i * digit)) i *= 10 decimal = decimal //", "\")) binary = 0 i = 1 while decimal != 0: digit =", "decimal % 2 binary = Decimal(binary + (i * digit)) i *= 10", "digit = decimal % 2 binary = Decimal(binary + (i * digit)) i", "2 binary = Decimal(binary + (i * digit)) i *= 10 decimal =", "decimal != 0: digit = decimal % 2 binary = Decimal(binary + (i", "= decimal % 2 binary = Decimal(binary + (i * digit)) i *=", "= 1 while decimal != 0: digit = decimal % 2 binary =", "from decimal import * decimal = int(input(\"Enter a number: \")) binary = 0", "% 2 binary = Decimal(binary + (i * digit)) i *= 10 decimal", "number: \")) binary = 0 i = 1 while decimal != 0: digit", "binary = Decimal(binary + (i * digit)) i *= 10 decimal = decimal", "1 while decimal != 0: digit = decimal % 2 binary = Decimal(binary", "import * decimal = int(input(\"Enter a number: \")) binary = 0 i =", "decimal = int(input(\"Enter a number: \")) binary = 0 i = 1 while", "* decimal = int(input(\"Enter a number: \")) binary = 0 i = 1", "0 i = 1 while decimal != 0: digit = decimal % 2", "+ (i * digit)) i *= 10 decimal = decimal // 2 print(binary)", "while decimal != 0: digit = decimal % 2 binary = Decimal(binary +", "= int(input(\"Enter a number: \")) binary = 0 i = 1 while decimal", "decimal import * decimal = int(input(\"Enter a number: \")) binary = 0 i", "int(input(\"Enter a number: \")) binary = 0 i = 1 while decimal !=", "!= 0: digit = decimal % 2 binary = Decimal(binary + (i *", "0: digit = decimal % 2 binary = Decimal(binary + (i * digit))", "binary = 0 i = 1 while decimal != 0: digit = decimal", "= 0 i = 1 while decimal != 0: digit = decimal %", "a number: \")) binary = 0 i = 1 while decimal != 0:" ]
[ "+ mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed species # Variable", "R1: G1 > G1 + mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3:", "species mRNA1 = 50.0 G1 = 1 mRNA2 = 50.0 G2 = 1", "R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2 > G2 + mRNA2 Ksyn*G2 R4:", "# Reactions R1: G1 > G1 + mRNA1 Ksyn*G1 R2: mRNA1 > $pool", "$pool Kdeg*mRNA1 R3: G2 > G2 + mRNA2 Ksyn*G2 R4: mRNA2 > $pool", "= 50.0 G2 = 1 # Parameters Ksyn = 10 Kdeg = 0.2", "= \"\"\" # Reactions R1: G1 > G1 + mRNA1 Ksyn*G1 R2: mRNA1", "1 mRNA2 = 50.0 G2 = 1 # Parameters Ksyn = 10 Kdeg", "mRNA2 = 50.0 G2 = 1 # Parameters Ksyn = 10 Kdeg =", "Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed species # Variable species mRNA1", "\"\"\" # Reactions R1: G1 > G1 + mRNA1 Ksyn*G1 R2: mRNA1 >", "mRNA1 = 50.0 G1 = 1 mRNA2 = 50.0 G2 = 1 #", "> G2 + mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed species", "model = \"\"\" # Reactions R1: G1 > G1 + mRNA1 Ksyn*G1 R2:", "Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2 > G2 + mRNA2 Ksyn*G2", "Reactions R1: G1 > G1 + mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1", "G1 + mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2 > G2", "mRNA2 > $pool Kdeg*mRNA2 # Fixed species # Variable species mRNA1 = 50.0", "> $pool Kdeg*mRNA1 R3: G2 > G2 + mRNA2 Ksyn*G2 R4: mRNA2 >", "> G1 + mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2 >", "50.0 G1 = 1 mRNA2 = 50.0 G2 = 1 # Parameters Ksyn", "G2 + mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed species #", "G1 > G1 + mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2", "Variable species mRNA1 = 50.0 G1 = 1 mRNA2 = 50.0 G2 =", "mRNA1 > $pool Kdeg*mRNA1 R3: G2 > G2 + mRNA2 Ksyn*G2 R4: mRNA2", "50.0 G2 = 1 # Parameters Ksyn = 10 Kdeg = 0.2 \"\"\"", "species # Variable species mRNA1 = 50.0 G1 = 1 mRNA2 = 50.0", "# Fixed species # Variable species mRNA1 = 50.0 G1 = 1 mRNA2", "Fixed species # Variable species mRNA1 = 50.0 G1 = 1 mRNA2 =", "+ mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2 > G2 +", "R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed species # Variable 
species mRNA1 =", "> $pool Kdeg*mRNA2 # Fixed species # Variable species mRNA1 = 50.0 G1", "= 50.0 G1 = 1 mRNA2 = 50.0 G2 = 1 # Parameters", "G1 = 1 mRNA2 = 50.0 G2 = 1 # Parameters Ksyn =", "Kdeg*mRNA1 R3: G2 > G2 + mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2", "mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed species # Variable species", "$pool Kdeg*mRNA2 # Fixed species # Variable species mRNA1 = 50.0 G1 =", "= 1 mRNA2 = 50.0 G2 = 1 # Parameters Ksyn = 10", "Kdeg*mRNA2 # Fixed species # Variable species mRNA1 = 50.0 G1 = 1", "# Variable species mRNA1 = 50.0 G1 = 1 mRNA2 = 50.0 G2", "R3: G2 > G2 + mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 #", "mRNA1 Ksyn*G1 R2: mRNA1 > $pool Kdeg*mRNA1 R3: G2 > G2 + mRNA2", "G2 > G2 + mRNA2 Ksyn*G2 R4: mRNA2 > $pool Kdeg*mRNA2 # Fixed" ]
[ "default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\")", "output_idx verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0: break", "range(len(control_verb[i])): # caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos", "'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda()", "verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr", "enumerate(this_det_seqs_v): # fixed_len for k, v in enumerate(vs): # max_verb if verb ==", "sr_ == 0: break if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else: verb_rank", "DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation", "import itertools import argparse import munkres from tqdm import tqdm from utils import", "* import torch import random import numpy as np import itertools import argparse", "recons[np.sum(recons, (1, 2)) != 0] last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] =", "{} need_re_rank = set() for j, vs in enumerate(this_det_seqs_v): # fixed_len for k,", "print(opt) print('Loading caption model trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap =", "+= sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) == 1: final_rank = verb_ranks[0]", "captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _,", "np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2)) != 0]", "out = out[0].data.cpu().numpy() for o, caps in zip(out, 
captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update()", "accuracy performance.\") for i, cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap =", "Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _", "val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) val_spice, _ = Spice().compute_score(gts_t, gen_t)", "semantic role and verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] #", "perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank): if j < fixed_len:", "o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute the metric", "= control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx]", "= [] for idx_ in range(len(sr_find[sr])): for a in ass: if a[0] ==", "axis=0)) gt_captions.append(caps) pbar.update() # Compute the metric scores predictions = np.concatenate(predictions, axis=0) gen", "pbar: with torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids = keys", "h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions = [] gt_captions = []", "len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank =", "utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16,", "print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) 
val_spice, _ = Spice().compute_score(gts_t,", "'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field =", "print('Loading caption model trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt']", "in enumerate(this_det_seqs_v): # fixed_len for k, v in enumerate(vs): # max_verb if verb", "values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions", "in enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all,", "model.eval() fixed_len = opt.fixed_len predictions = [] gt_captions = [] # Evaluate with", "PTBTokenizer from models import SinkhornNet, S_SSP from config import * import torch import", "(fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual", "optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the field image_field =", "= [] for verb in this_control_verb: # 找到某个verb对应的semantic role序列 if verb == 0:", "= COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det)", "text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the dataloader _, _, test_dataset = test_dataset.splits test_dataset", "= ' '.join([k for k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] =", "data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities from models import 
ControllableCaptioningModel", "= [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if", "find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb =", "# define the datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root,", "define the datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'),", "_, _, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset,", "# Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it,", "re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank) != 0: for sr in", "permute the verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1 *", "this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {} need_re_rank = set() for j, vs", "m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in range(len(sr_find[sr])):", "re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn',", "default=0, type=int, help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) 
parser.add_argument('--sinkhorn_len', type=int,", "max_verb if verb == v and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in", "if len(verb_ranks) == 1: final_rank = verb_ranks[0] else: final_rank = verb_ranks[0] for j", "= torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres()", "default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether", "import TextField import os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from", "break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {} need_re_rank = set()", "last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last + 1:]", "dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth')))", "for idx in range(len(control_verb[i])): # caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt", "text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions = [] gt_captions", "# (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx] #", "classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), 
verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root,", "saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root,", "num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP", "in ass: if a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re)", "import SinkhornNet, S_SSP from config import * import torch import random import numpy", "'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'),", "img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root,", "role序列 if verb == 0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find", "0] last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last +", "det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _, verb_list, captions = values", "test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # 
S-level SSP", "1: verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if", "+ 1:] = recons[last:last+1] # permute the verb_list perm_mask = (np.sum(perm_matrix, -1) ==", "= recons[last:last+1] # permute the verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx]", "detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt)", "perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len,", "= re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank) != 0: for sr", "verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco',", "= verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {}", "{} gts = {} print(\"Computing accuracy performance.\") for i, cap in enumerate(predictions): pred_cap", "from tqdm import tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda')", "SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model = ControllableCaptioningModel(20,", "detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) 
out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1],", "random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size')", "speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice", "output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] =", "predictions = [] gt_captions = [] # Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test)))", "_, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size,", "in this_control_verb: # 找到某个verb对应的semantic role序列 if verb == 0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0])", "'Bleu_3', 'Bleu_4'] for metric, score in zip(method, val_bleu): print(metric, score) val_meteor, _ =", "= np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目 # visual", "det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目 #", "out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0))", "score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t)", "idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr]))", "= ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in zip(method, val_bleu): print(metric, score)", "idx in range(len(control_verb[i])): 
# caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt =", "= verb_list[i][idx] # visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks", "ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root,", "'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'),", "= output_idx verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0:", "-1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len, feat_dim), det_seqs_recons:", "det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _, verb_list, captions", "help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len',", "out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out", "speaksee.evaluation import PTBTokenizer from models import SinkhornNet, S_SSP from config import * import", ":] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if 
isinstance(mx,", "dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset", "for j, rk in enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)] = 1", "sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0: break if len(sr_find[sr_]) != 1: verb_rank", "sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model", "verb == v and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())]", "(keys, values) in enumerate(iter(dataloader_test)): detections, imgids = keys # b_s, 100, feat if", "detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1,", "from models import SinkhornNet, S_SSP from config import * import torch import random", "= verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank) !=", "= {} print(\"Computing accuracy performance.\") for i, cap in enumerate(predictions): pred_cap = text_field.decode(cap,", "max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0),", "recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last + 1:] = recons[last:last+1]", "[] # Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for", 
"fixed_len)) for j, rk in enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)] =", "= verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk in", "verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {} need_re_rank = set() for", "Meteor, Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer from models import SinkhornNet, S_SSP", "continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank", "= 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons = np.dot(perm_matrix,", "= Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) val_spice,", "verb_list, captions = values for i in range(detections.size(0)): # batch # add a", "argparse import munkres from tqdm import tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234)", "v and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = []", "'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'),", "- 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last + 1:] = recons[last:last+1] #", "sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j,", "add a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx", "this_verb_list) # detections_i: (1, det_len, feat_dim), det_seqs_recons: 
(1, fixed_len, max_det, feat_dim) img_verb_list =", "need_re_rank = set() for j, vs in enumerate(this_det_seqs_v): # fixed_len for k, v", "for it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids = keys # b_s, 100,", "= PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1',", "= detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5,", "dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the dataloader _, _, test_dataset = test_dataset.splits", "sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): #", "sr_re = [] for idx_ in range(len(sr_find[sr])): for a in ass: if a[0]", "'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'),", "= np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx", "# visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = []", "verb_list[i][idx] # visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks =", "join_words=False) pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]]", "= ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ 
h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions", "ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in range(len(sr_find[sr])): for a in", "gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t,", "= [gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _", "in zip(method, val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge,", ":recons.shape[0]] = recons det_seqs_recons[idx, last + 1:] = recons[last:last+1] # permute the verb_list", "from data.dataset import COCOEntities from models import ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset,", "text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(),", "'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>',", "entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root,", "2)) != 0] last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx,", "captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) 
pbar.update() # Compute the metric scores predictions = np.concatenate(predictions,", "# R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth')))", "this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx", "opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'),", "this_verb_list = verb_list[i][idx] # visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1)", "else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr =", "'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'),", "= [] for sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0: break if len(sr_find[sr_])", "(1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons", "speaksee.data import TextField import os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import 
COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField", "# semantic role and verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx]", "parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt',", "sinkhorn_net.eval() # Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device)", "mx = torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m =", "= loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1,", "this_seqs_pos), -1) verb_ranks = [] for verb in this_control_verb: # 找到某个verb对应的semantic role序列 if", "feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device),", "= det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role", "this_seqs_all = det_seqs_all[i][idx] # semantic role and verb this_control_verb = control_verb[i][idx] # (max_verb)", "# final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank): if j", "= torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10 for j, loc in enumerate(sr_find[sr]):", "else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions =", "torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for verb in this_control_verb: # 找到某个verb对应的semantic", "= 
argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number of workers')", "the metric scores predictions = np.concatenate(predictions, axis=0) gen = {} gts = {}", "recons[last:last+1] # permute the verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] =", "verb_ranks[0] else: final_rank = verb_ranks[0] for j in range(len(verb_ranks) - 1): final_rank =", "= det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list =", "last + 1:] = recons[last:last+1] # permute the verb_list perm_mask = (np.sum(perm_matrix, -1)", "parser.add_argument('--nb_workers', default=0, type=int, help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len',", "Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len =", "fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the", "pred_cap = text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)])", "np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)]", "batch # add a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape)", "if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re", "recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = 
recons[np.sum(recons, (1, 2)) != 0] last =", "TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the datasets dataset = COCOEntities(image_field, det_field,", "verb == 0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {}", "det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last + 1:] = recons[last:last+1] # permute the", "1, 2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass =", "pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role and verb this_control_verb = control_verb[i][idx] #", "= COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'),", "= SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model =", "enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = [] for sr_ in", "ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor, Rouge,", "sr_rank = {} if len(need_re_rank) != 0: for sr in need_re_rank: this_sr_perm =", "= dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the dataloader _, _, test_dataset =", "import random import numpy as np import itertools import argparse import munkres from", "det_seqs_sr, control_verb, _, verb_list, captions = values for i in range(detections.size(0)): # batch", "det_field, RawField(), img_root='', 
ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset,", "-1)) # fixed_len, -1 recons = np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons", "out[0].data.cpu().numpy() for o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute", "os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities", "'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset,", "not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _,", "idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root,", "= verb_ranks[0] else: final_rank = verb_ranks[0] for j in range(len(verb_ranks) - 1): final_rank", "img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ =", "import * import torch import random import numpy as np import itertools import", "k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts)", "np.dot(perm_matrix, this_verb_list) # 
detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list", "range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len))", "device = torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0,", "gt_captions = [] # Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with", "unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)):", "0: break if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_]", "+= 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device)", "_, verb_list, captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr,", "= parser.parse_args() print(opt) print('Loading caption model trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth')", "RawField from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer", "2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx))", "in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx", "if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), 
precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'),", "max_sr) this_verb_list = verb_list[i][idx] # visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos),", "gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the datasets dataset", "= torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out,", "== 1: final_rank = verb_ranks[0] else: final_rank = verb_ranks[0] for j in range(len(verb_ranks)", "np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目 # visual feature", "help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str,", "final_rank = [] if len(verb_ranks) == 1: final_rank = verb_ranks[0] else: final_rank =", "this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if", "opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model", "verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank):", "for k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t =", "this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for verb in this_control_verb:", 
"(np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list)", "img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt)", "it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids = keys # b_s, 100, feat", "metric, score in zip(method, val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR',", "caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx]", "fixed_len: perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1", "feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0),", "ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)): detections,", "cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field", "method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in zip(method, val_bleu): print(metric,", "det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # 
define the dataloader", "opt.fixed_len predictions = [] gt_captions = [] # Evaluate with tqdm(desc='Test', unit='it', ncols=110,", "val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ =", "as pbar: with torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids =", "= 0 sr_find = {} need_re_rank = set() for j, vs in enumerate(this_det_seqs_v):", "type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from',", "!= 0: for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len)", "== 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i:", "text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)]) gts[i] =", "# Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict'])", "mode='not-normal') sr_rank = {} if len(need_re_rank) != 0: for sr in need_re_rank: this_sr_perm", "= out[0].data.cpu().numpy() for o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() #", "= opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning", "this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual feature concat", "COCOEntities from models import ControllableCaptioningModel from 
speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation", "= COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset =", "print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t,", "= [] # Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad():", "= detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons,", "parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt = parser.parse_args() print(opt) print('Loading caption model", "caption model trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] #", "min_freq=5) # define the dataloader _, _, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples,", "img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='',", "visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for", "verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank) != 0: for sr in need_re_rank:", "# caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos =", "20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 
'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab),", "from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities from models import", "this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic", "the verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:,", "verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0: break if", "val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t)", "COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root,", "Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len", "this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role and verb", "performance.\") for i, cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap = '", "parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use", "opt = parser.parse_args() 
print(opt) print('Loading caption model trained with CIDEr optimization.') saved_data =", "= keys # b_s, 100, feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all,", "'Bleu_4'] for metric, score in zip(method, val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t,", "# define the dataloader _, _, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields,", "= recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last + 1:] =", "type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt', action='store_true',", "1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons = np.dot(perm_matrix, perm)", "use detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt = parser.parse_args() print(opt)", "keys # b_s, 100, feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v,", "det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual feature concat this_seqs_perm =", "detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt = parser.parse_args() print(opt) print('Loading", "verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch", "detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list),", "values for i in range(detections.size(0)): # batch # add a region sort model", "np import itertools import 
argparse import munkres from tqdm import tqdm from utils", "= opt.fixed_len predictions = [] gt_captions = [] # Evaluate with tqdm(desc='Test', unit='it',", "trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the", "= munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in range(len(sr_find[sr])): for", "else: final_rank = verb_ranks[0] for j in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank,", "# fixed_len for k, v in enumerate(vs): # max_verb if verb == v", "model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目", "= {} need_re_rank = set() for j, vs in enumerate(this_det_seqs_v): # fixed_len for", "this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr =", "det_seqs_all[i][idx] # semantic role and verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v =", "v in enumerate(vs): # max_verb if verb == v and find_sr < 10:", "if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _,", "DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation import", "if j < fixed_len: perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1))", "= model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy()", "= det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx]", "feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # 
pos是position信息 this_seqs_all", "'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm,", "vs in enumerate(this_det_seqs_v): # fixed_len for k, v in enumerate(vs): # max_verb if", "values) in enumerate(iter(dataloader_test)): detections, imgids = keys # b_s, 100, feat if not", "this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10 for j, loc in enumerate(sr_find[sr]): tr_locs[j] =", "from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size',", "= recons[np.sum(recons, (1, 2)) != 0] last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]]", "cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for k,", "= text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)]) gts[i]", "precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field =", "tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix,", "use gt verb\") opt = parser.parse_args() print(opt) print('Loading caption model trained with CIDEr", "[] for idx_ in range(len(sr_find[sr])): for a in ass: if a[0] == idx_:", "det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _, verb_list, captions = values else: det_seqs_txt,", "parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number of workers') 
parser.add_argument('--checkpoint_path', type=str,", "model trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define", "parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt", "in output[0].squeeze().cpu().numpy(): if sr_ == 0: break if len(sr_find[sr_]) != 1: verb_rank +=", "this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb", "det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device)", "[gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ =", "= sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy()", "int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr +=", "classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10,", "output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy():", "remove_punctuation=True, fix_length=20) # define 
the datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root,", "_ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) val_spice, _ = Spice().compute_score(gts_t, gen_t) print('SPICE', val_spice)", "PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for", "tqdm import tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser", "det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions = values for i in range(detections.size(0)):", "torch.ones(sinkhorn_len) * 10 for j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :]", "= np.concatenate(predictions, axis=0) gen = {} gts = {} print(\"Computing accuracy performance.\") for", "import ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor,", "+= list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) ==", "# 找到某个verb对应的semantic role序列 if verb == 0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr =", "'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5)", "loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze()", "sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device)", "need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = 
torch.ones(sinkhorn_len) * 10 for j, loc", "test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda()", "in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = [] for sr_", "from speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor, Rouge, Cider,", "range(detections.size(0)): # batch # add a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list", "enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx =", "predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute the metric scores predictions = np.concatenate(predictions, axis=0)", "sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for", "else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'),", "= this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {} need_re_rank = set() for j,", "'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'),", "'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define", 
"'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20)", "torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10 for j, loc in enumerate(sr_find[sr]): tr_locs[j]", "= torch.ones(sinkhorn_len) * 10 for j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j,", "list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) == 1:", "gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method =", "= Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider,", "import munkres from tqdm import tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device", "# visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] #", "in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len,", "gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in zip(method, val_bleu):", "np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2)) != 0] last = recons.shape[0] -", "type=int, help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10)", "sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import 
COCOEntities from", "= [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t)", "Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it, (keys,", "verb_ranks[0] for j in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置", "for i, cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap = ' '.join([k", "test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers)", "[] for sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0: break if len(sr_find[sr_]) !=", "= torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int,", "type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected", "DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() #", "!= 0] last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last", "visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息", "= np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank): if j < fixed_len: perm_matrix[j,", "region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt = 
parser.parse_args() print(opt) print('Loading caption", "= test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) #", "for sr_ in output[0].squeeze().cpu().numpy(): if sr_ == 0: break if len(sr_find[sr_]) != 1:", "det_seqs_sr, control_verb, _, _, _, verb_list, captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos,", "'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else:", "{} print(\"Computing accuracy performance.\") for i, cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False)", "det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions = values for i", "10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item()", "= sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy(): if", "< fixed_len: perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len,", "recons det_seqs_recons[idx, last + 1:] = recons[last:last+1] # permute the verb_list perm_mask =", "config import * import torch import random import numpy as np import itertools", "and verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb)", "= values for i in range(detections.size(0)): # batch # add a region sort", "predictions = np.concatenate(predictions, axis=0) gen = {} gts = {} print(\"Computing accuracy performance.\")", 
"ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions =", "= (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix,", "= TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the datasets dataset = COCOEntities(image_field,", "sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy(): if sr_", "perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] +", "'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'),", "COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities from models import ControllableCaptioningModel from speaksee.data import", "1 det_seqs_recons[idx, :recons.shape[0]] = recons det_seqs_recons[idx, last + 1:] = recons[last:last+1] # permute", "# pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role and verb this_control_verb = control_verb[i][idx]", "gen = {} gts = {} print(\"Computing accuracy performance.\") for i, cap in", "= det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role and verb this_control_verb", "cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) #", "role and verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # 
(fixed_len,", "import os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import", "type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt", "isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re =", "model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for", "loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device))", "np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det,", "idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = [] for", "TextField import os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset", "from speaksee.evaluation import PTBTokenizer from models import SinkhornNet, S_SSP from config import *", "= DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval()", "# (fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual feature concat this_seqs_perm = torch.cat((this_seqs_vis,", "'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), 
vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field", "output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank) != 0: for", "S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len,", "torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids = keys # b_s,", "10 for j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc]", "ImageDetectionsField from data.dataset import COCOEntities from models import ControllableCaptioningModel from speaksee.data import DataLoader,", "models import ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu,", "mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in", "this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for verb in this_control_verb: # 找到某个verb对应的semantic role序列", "- 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for", "sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item()))", "re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20,", "with tqdm(desc='Test', unit='it', ncols=110, 
total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it, (keys, values)", "j, rk in enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)] = 1 perm", "fixed_len = opt.fixed_len predictions = [] gt_captions = [] # Evaluate with tqdm(desc='Test',", "the dataloader _, _, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test", "import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int,", "define the dataloader _, _, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image')", "scores predictions = np.concatenate(predictions, axis=0) gen = {} gts = {} print(\"Computing accuracy", "= np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2)) != 0] last = recons.shape[0]", "= [] if len(verb_ranks) == 1: final_rank = verb_ranks[0] else: final_rank = verb_ranks[0]", "Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in zip(method,", "0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal')", "filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the", "# max_verb if verb == v and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not", "feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for verb", "1:] = recons[last:last+1] # permute the verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int)", "print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) 
val_cider, _ = Cider().compute_score(gts_t,", "(max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr)", "'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'),", "# (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len,", "range(len(sr_find[sr])): for a in ass: if a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re)", "total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids", "idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10,", "(this_seqs_all.shape[0], -1)) # fixed_len, -1 recons = np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:])", "'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in zip(method, val_bleu): print(metric, score) val_meteor, _", "parser.parse_args() print(opt) print('Loading caption model trained with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap", "= saved_data['opt'] # define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not", "找到某个verb对应的semantic role序列 if verb == 0: break verb_det_seqs_sr = 
this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0", "control_verb, _, verb_list, captions = values for i in range(detections.size(0)): # batch #", "det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root,", "for j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank =", "_, verb_list, captions = values for i in range(detections.size(0)): # batch # add", "max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the datasets", "precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root,", "det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role and verb this_control_verb =", "* perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len, feat_dim), det_seqs_recons: (1,", "np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx]", "detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ =", "_ = 
model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out =", "ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root,", "sr_find = {} need_re_rank = set() for j, vs in enumerate(this_det_seqs_v): # fixed_len", "gt_captions.append(caps) pbar.update() # Compute the metric scores predictions = np.concatenate(predictions, axis=0) gen =", "final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank): if j <", "if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank", "for idx_ in range(len(sr_find[sr])): for a in ass: if a[0] == idx_: sr_re.append(a[1])", "perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons", "munkres from tqdm import tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device =", "_ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score", "load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root,", "COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities from models import ControllableCaptioningModel from speaksee.data", "= np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j, idx_", "import COCOEntities from models import 
ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset, RawField from", "[] gt_captions = [] # Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar:", "== 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr,", "if a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表", "gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _ =", "of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int,", "gts = {} print(\"Computing accuracy performance.\") for i, cap in enumerate(predictions): pred_cap =", "for verb in this_control_verb: # 找到某个verb对应的semantic role序列 if verb == 0: break verb_det_seqs_sr", "import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice from", "Spice from speaksee.evaluation import PTBTokenizer from models import SinkhornNet, S_SSP from config import", "verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) == 1: final_rank =", "= np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2)) !=", "for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10", "default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected region')", "if verb == v and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find:", 
"<filename>coco_scripts/eval_coco.py from speaksee.data import TextField import os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb,", "torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'],", "= this_seqs_perm[loc] tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor):", "train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the dataloader _,", "j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix =", "enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0],", "this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank =", "= mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_", "int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons =", "[] for verb in this_control_verb: # 找到某个verb对应的semantic role序列 if verb == 0: break", "tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx =", "idx_ in range(len(sr_find[sr])): for a in ass: if a[0] == idx_: sr_re.append(a[1]) sr_re", "in enumerate(iter(dataloader_test)): detections, imgids = keys # b_s, 100, feat if not opt.det:", "'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = 
COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'),", "R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval()", "= [] gt_captions = [] # Evaluate with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as", "in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else:", "== 0: break if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else: verb_rank +=", "break if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank)", "a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in", "for j in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix", "gen_t) print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) val_spice, _ =", "np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank", "torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass", "opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _, verb_list,", "default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt', action='store_true', help=\"whether", "the 
datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root,", "'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'),", "fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root,", "batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level", "beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps in zip(out, captions[i]):", "in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for k, g", "det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions = values", "with torch.no_grad(): for it, (keys, values) in enumerate(iter(dataloader_test)): detections, imgids = keys #", "det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions = values for i in", "0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'],", "import 
PTBTokenizer from models import SinkhornNet, S_SSP from config import * import torch", "1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr", "# sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_]", "verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) == 1: final_rank = verb_ranks[0] else: final_rank", "SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() #", "gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root,", "\\ det_seqs_sr, control_verb, _, verb_list, captions = values for i in range(detections.size(0)): #", "val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) val_spice, _ = Spice().compute_score(gts_t, gen_t) print('SPICE',", "numpy as np import itertools import argparse import munkres from tqdm import tqdm", "for j, vs in enumerate(this_det_seqs_v): # fixed_len for k, v in enumerate(vs): #", "len(need_re_rank) != 0: for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs =", "== idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx =", "fix_length=20) # define the datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'),", "= torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for verb 
in this_control_verb: #", "[] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr", "id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) #", "= S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net =", "verb_list, captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb,", "verb\") opt = parser.parse_args() print(opt) print('Loading caption model trained with CIDEr optimization.') saved_data", "saved_data['opt'] # define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det:", "= recons det_seqs_recons[idx, last + 1:] = recons[last:last+1] # permute the verb_list perm_mask", "j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr] = output_idx verb_rank = []", "models import SinkhornNet, S_SSP from config import * import torch import random import", "len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions = []", "_, _, verb_list, captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\", "datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), 
id_root=os.path.join(coco_root, 'annotations'))", "as np import itertools import argparse import munkres from tqdm import tqdm from", "find_sr = 0 sr_find = {} need_re_rank = set() for j, vs in", "if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr", "a in ass: if a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx =", "a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx", "score in zip(method, val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor)", "_ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge)", "verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if", "pbar.update() # Compute the metric scores predictions = np.concatenate(predictions, axis=0) gen = {}", "1: final_rank = verb_ranks[0] else: final_rank = verb_ranks[0] for j in range(len(verb_ranks) -", "= this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue", "[pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method", "img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps", "detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], 
beam_size=5, \\", "'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20,", "'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp',", "this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all =", "= COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'),", "img_verb_list = np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目 # visual feature this_seqs_vis", "action='store_true', help='whether use detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt =", "model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len", "100, feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb,", "eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the datasets dataset = COCOEntities(image_field, det_field, text_field,", 
"type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true',", "control_verb[i][idx] # (max_verb) this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] #", "entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset,", "# batch # add a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list =", "COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset,", "sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10 for", "= set() for j, vs in enumerate(this_det_seqs_v): # fixed_len for k, v in", "'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True,", "_, _, _, verb_list, captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v,", "ass: if a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx = np.argsort(sr_re) #", "final_rank = verb_ranks[0] for j in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1])", "perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons = np.dot(perm_matrix, perm) recons", "sr_idx代表 
output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j] = sr_find[sr][idx_] sr_rank[sr]", "help=\"whether use gt verb\") opt = parser.parse_args() print(opt) print('Loading caption model trained with", "tr_locs = torch.ones(sinkhorn_len) * 10 for j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc", "find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb,", "verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis]", "gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps)", "torch.Tensor): mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = []", "= -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len, feat_dim),", "0: for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) *", "img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len,", "Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer from models import SinkhornNet, S_SSP from", "= m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in range(len(sr_find[sr])): for a in ass:", "dataloader _, _, test_dataset = test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test =", "in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10 for j,", "_ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the dataloader _, _, test_dataset", "\\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) 
model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions = [] gt_captions =", "= DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net", "sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx):", "caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute the metric scores", "zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute the metric scores predictions =", "vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True,", "data.dataset import COCOEntities from models import ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset, RawField", "# add a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for", "i in range(detections.size(0)): # batch # add a region sort model det_seqs_recons =", "= torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'),", "np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons = np.dot(perm_matrix, perm) recons = np.reshape(recons,", "need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output", "size') parser.add_argument('--nb_workers', default=0, type=int, 
help='number of workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None)", "action='store_true', help=\"whether use gt verb\") opt = parser.parse_args() print(opt) print('Loading caption model trained", "Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider) val_spice, _", "< 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] =", "det_seqs_recons, img_verb_list), eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o,", "PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2',", "SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len", "for a in ass: if a[0] == idx_: sr_re.append(a[1]) sr_re = np.array(sr_re) sr_idx", "= np.zeros(verb_list[i].shape) for idx in range(len(control_verb[i])): # caption数目 # visual feature this_seqs_vis =", "'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define", "import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities from models import ControllableCaptioningModel from", "= det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual feature concat this_seqs_perm", "for o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute the", "# S-level SSP 
re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len", "in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t =", "np.array(sr_re) sr_idx = np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in", "in enumerate(vs): # max_verb if verb == v and find_sr < 10: if", "re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len = opt.sinkhorn_len sinkhorn_net", "(fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual feature concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt,", "'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'),", "= verb_ranks[0] for j in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) #", "from models import ControllableCaptioningModel from speaksee.data import DataLoader, DictionaryDataset, RawField from speaksee.evaluation import", "with CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the field", "the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root,", "det_seqs_recons[idx, last + 1:] = recons[last:last+1] # permute the verb_list perm_mask = (np.sum(perm_matrix,", "region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape) img_verb_list = np.zeros(verb_list[i].shape) for idx in 
range(len(control_verb[i])):", "max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'),", "S-level SSP re_sort_net = S_SSP().cuda() re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth'))) re_sort_net.eval() # R-level SSP sinkhorn_len =", "-1], beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps in zip(out,", "for k, v in enumerate(vs): # max_verb if verb == v and find_sr", "val_dataset, min_freq=5) # define the dataloader _, _, test_dataset = test_dataset.splits test_dataset =", "verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks)", "len(verb_ranks) == 1: final_rank = verb_ranks[0] else: final_rank = verb_ranks[0] for j in", "_ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr', val_cider)", "vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) text_field = TextField(init_token='<bos>', eos_token='<eos>',", "g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t", "verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr == 0:", "recons = recons[np.sum(recons, (1, 2)) != 0] last = recons.shape[0] - 1 det_seqs_recons[idx,", "fixed_len for k, v in enumerate(vs): # max_verb if verb == v 
and", "torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _", "if sr_ == 0: break if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_]) else:", "= np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1 recons = np.dot(perm_matrix, perm) recons =", "det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1)) out, _ = model.beam_search_v((detections_i,", "final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk", "this_control_verb: # 找到某个verb对应的semantic role序列 if verb == 0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr", "# define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field", "concat this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1) verb_ranks = [] for verb in", "!= 1: verb_rank += list(sr_rank[sr_]) else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = []", "in range(len(control_verb[i])): # caption数目 # visual feature this_seqs_vis = det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx]", "= np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j]", "random import numpy as np import itertools import argparse import munkres from tqdm", "i, cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for", "torch.manual_seed(1234) device = torch.device('cuda') parser = 
argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers',", "= values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list,", "gen_t = PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3',", "cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt)", "id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root,", "tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar: with torch.no_grad(): for it, (keys, values) in", "= PTBTokenizer.tokenize(gen) val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']", "import argparse import munkres from tqdm import tqdm from utils import verb_rank_merge random.seed(1234)", "verb_ranks = [] for verb in this_control_verb: # 找到某个verb对应的semantic role序列 if verb ==", "default=10) parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\")", "vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root,", 
"output[0].squeeze().cpu().numpy(): if sr_ == 0: break if len(sr_find[sr_]) != 1: verb_rank += list(sr_rank[sr_])", "workers') parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10)", "torch import random import numpy as np import itertools import argparse import munkres", "det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root,", "RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _", "mx = mx.detach().cpu().numpy() m = munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for", "verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'), fix_length=10, max_detections=20, gt_verb=opt.gt) else: det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root,", "val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric,", "opt_cap = saved_data['opt'] # define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if", "parser.add_argument('--fixed_len', type=int, default=10) parser.add_argument('--det', action='store_true', help='whether use detected region') parser.add_argument('--gt', 
action='store_true', help=\"whether use", "captions = values for i in range(detections.size(0)): # batch # add a region", "verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'), cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'), verb_vob_path=os.path.join(coco_root,", "detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i,", "# Compute the metric scores predictions = np.concatenate(predictions, axis=0) gen = {} gts", "fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i =", "Cider, Spice from speaksee.evaluation import PTBTokenizer from models import SinkhornNet, S_SSP from config", "Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer from models import SinkhornNet,", "= det_seqs_all[i][idx] # semantic role and verb this_control_verb = control_verb[i][idx] # (max_verb) this_det_seqs_v", "image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root,", "# b_s, 100, feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\", "# detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1)", "import tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = 
torch.device('cuda') parser =", "torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False)", "det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx]", "verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank)", "Compute the metric scores predictions = np.concatenate(predictions, axis=0) gen = {} gts =", "+ np.dot(perm_matrix, this_verb_list) # detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim)", "-1 recons = np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1,", "verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j, rk in enumerate(final_rank): if", "sinkhorn_net(this_sr_perm.unsqueeze(0).to(device)) mx = torch.transpose(tr_matrix, 1, 2).squeeze() if isinstance(mx, torch.Tensor): mx = mx.detach().cpu().numpy() m", "# fixed_len, -1 recons = np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons =", "import torch import random import numpy as np import itertools import argparse import", "not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1", "recons = np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2))", "CIDEr optimization.') saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth') opt_cap = saved_data['opt'] # define the field image_field", "-1) verb_ranks = [] for verb in this_control_verb: # 找到某个verb对应的semantic role序列 if verb", 
"1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix = np.zeros((fixed_len, fixed_len)) for j,", "verb_det_seqs_sr.unsqueeze(0).to(device) output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal') sr_rank = {} if len(need_re_rank) != 0:", "test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True,", "sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j)", "print(\"Computing accuracy performance.\") for i, cap in enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap", "detections, imgids = keys # b_s, 100, feat if not opt.det: det_seqs_txt, det_seqs_vis,", "fixed_len, -1 recons = np.dot(perm_matrix, perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons,", "help='whether use detected region') parser.add_argument('--gt', action='store_true', help=\"whether use gt verb\") opt = parser.parse_args()", "0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) # detections_i: (1,", "enumerate(predictions): pred_cap = text_field.decode(cap, join_words=False) pred_cap = ' '.join([k for k, g in", "verb in this_control_verb: # 找到某个verb对应的semantic role序列 if verb == 0: break verb_det_seqs_sr =", "import numpy as np import itertools import argparse import munkres from tqdm import", "parser.add_argument('--checkpoint_path', type=str, default=\"res\") parser.add_argument('--start_from', type=str, default=None) parser.add_argument('--sinkhorn_len', type=int, default=10) parser.add_argument('--fixed_len', type=int, default=10) 
parser.add_argument('--det',", "k, v in enumerate(vs): # max_verb if verb == v and find_sr <", "for j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] = this_seqs_perm[loc] tr_matrix", "feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _,", "'.join([k for k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t", "\\ det_seqs_sr, control_verb, _, _, _, verb_list, captions = values else: det_seqs_txt, det_seqs_vis,", "this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs = torch.ones(sinkhorn_len) * 10 for j, loc in", "in zip(out, captions[i]): predictions.append(np.expand_dims(o, axis=0)) gt_captions.append(caps) pbar.update() # Compute the metric scores predictions", "COCOEntities(image_field, det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field,", "det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _, verb_list, captions =", "img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1))", "define the field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field =", "val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L',", "COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), 
img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root,", "j < fixed_len: perm_matrix[j, int(rk)] = 1 perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) #", "(1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device) detections_i", "Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\ h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval()", "'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field, RawField(), img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'),", "from speaksee.data import TextField import os, sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb,", "img_second_lstm=opt_cap.img_second_lstm).to(device) model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions = [] gt_captions = [] #", "enumerate(vs): # max_verb if verb == v and find_sr < 10: if int(this_det_seqs_sr[j][k].item())", "j, vs in enumerate(this_det_seqs_v): # fixed_len for k, v in enumerate(vs): # max_verb", "sr_rank[sr] = output_idx verb_rank = [] for sr_ in output[0].squeeze().cpu().numpy(): if sr_ ==", "find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] 
sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr]", "{} if len(need_re_rank) != 0: for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1])", "if find_sr == 0: continue this_verb = verb.unsqueeze(0).to(device) verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device) output =", "-1) == 0).astype(int) img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list) #", "import Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer from models import", "and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] = [] sr_find[int(this_det_seqs_sr[j][k].item())].append(j)", "pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i]", "if len(need_re_rank) != 0: for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1]) tr_locs", "sinkhorn_len = opt.sinkhorn_len sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda() sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting", "parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number of", "['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in zip(method, val_bleu): print(metric, score) val_meteor,", "det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, _, _, verb_list, captions = values else:", "# permute the verb_list perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int) img_verb_list[idx] = -1", "not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), 
precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root,", "np.concatenate(predictions, axis=0) gen = {} gts = {} print(\"Computing accuracy performance.\") for i,", "axis=0) gen = {} gts = {} print(\"Computing accuracy performance.\") for i, cap", "model.load_state_dict(saved_data['state_dict']) model.eval() fixed_len = opt.fixed_len predictions = [] gt_captions = [] # Evaluate", "det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] # semantic role and", "val_rouge, _ = Rouge().compute_score(gts_t, gen_t) print('ROUGE_L', val_rouge) val_cider, _ = Cider().compute_score(gts_t, gen_t) print('CIDEr',", "control_verb, _, _, _, verb_list, captions = values else: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all,", "val_dataset, _ = dataset.splits text_field.build_vocab(train_dataset, val_dataset, min_freq=5) # define the dataloader _, _,", "from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer from", "for metric, score in zip(method, val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t)", "j in range(len(verb_ranks) - 1): final_rank = verb_rank_merge(final_rank, verb_ranks[j+1]) # final_rank存的是原来idx现在应该在的位置 perm_matrix =", "'coco_detections.hdf5'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'),", "gt verb\") opt = parser.parse_args() print(opt) print('Loading caption model trained with CIDEr optimization.')", "b_s, 100, feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr,", "== 0: break 
verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {} need_re_rank", "eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps in", "speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice from speaksee.evaluation import PTBTokenizer from models", "this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list", "\\ out_size=1, gt=opt.gt) out = out[0].data.cpu().numpy() for o, caps in zip(out, captions[i]): predictions.append(np.expand_dims(o,", "if verb == 0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find =", "[] if len(verb_ranks) == 1: final_rank = verb_ranks[0] else: final_rank = verb_ranks[0] for", "'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), vocab_path=os.path.join(coco_root, 'vocab_tv.json'), vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'), cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'), fix_length=10, max_detections=20,", "* 10 for j, loc in enumerate(sr_find[sr]): tr_locs[j] = loc this_sr_perm[j, :] =", "final_rank = verb_ranks[0] else: final_rank = verb_ranks[0] for j in range(len(verb_ranks) - 1):", "rk in enumerate(final_rank): if j < fixed_len: perm_matrix[j, int(rk)] = 1 perm =", "torch.device('cuda') parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch size') parser.add_argument('--nb_workers', default=0, type=int, help='number", "ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations'), filtering=True, det_filtering=opt.det) train_dataset, val_dataset, _ = dataset.splits", "S_SSP from config 
import * import torch import random import numpy as np", "sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth'))) sinkhorn_net.eval() # Role-shifting Captioning Model model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \\", "this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2)) != 0] last = recons.shape[0] - 1", "tqdm from utils import verb_rank_merge random.seed(1234) torch.manual_seed(1234) device = torch.device('cuda') parser = argparse.ArgumentParser()", "itertools import argparse import munkres from tqdm import tqdm from utils import verb_rank_merge", "from config import * import torch import random import numpy as np import", "field image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'),", "SinkhornNet, S_SSP from config import * import torch import random import numpy as", "0 sr_find = {} need_re_rank = set() for j, vs in enumerate(this_det_seqs_v): #", "det_seqs_vis[i][idx] this_seqs_txt = det_seqs_txt[i][idx] this_seqs_pos = det_seqs_pos[i][idx] # pos是position信息 this_seqs_all = det_seqs_all[i][idx] #", "sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField from data.dataset import COCOEntities from models", "np.argsort(sr_re) # sr_idx代表 output_idx = np.zeros(len(sr_find[sr])) for j, idx_ in enumerate(sr_idx): output_idx[j] =", "' '.join([k for k, g in itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap]", "= {} gts = {} print(\"Computing accuracy performance.\") for i, cap in enumerate(predictions):", "in range(detections.size(0)): # batch # add a region sort model det_seqs_recons = np.zeros(det_seqs_all[i].shape)", "= Bleu(n=4).compute_score(gts_t, 
gen_t) method = ['Blue_1', 'Bleu_2', 'Bleu_3', 'Bleu_4'] for metric, score in", "in range(len(sr_find[sr])): for a in ass: if a[0] == idx_: sr_re.append(a[1]) sr_re =", "itertools.groupby(pred_cap)]) gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen)", "zip(method, val_bleu): print(metric, score) val_meteor, _ = Meteor().compute_score(gts_t, gen_t) print('METEOR', val_meteor) val_rouge, _", "= ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False) if not opt.det: det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'),", "for i in range(detections.size(0)): # batch # add a region sort model det_seqs_recons", "(1, 2)) != 0] last = recons.shape[0] - 1 det_seqs_recons[idx, :recons.shape[0]] = recons", "munkres.Munkres() ass = m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in range(len(sr_find[sr])): for a", "m.compute(munkres.make_cost_matrix(mx)) sr_re = [] for idx_ in range(len(sr_find[sr])): for a in ass: if", "det_field, text_field, img_root='', ann_root=os.path.join(coco_root, 'annotations'), entities_file=os.path.join(coco_root, 'coco_entities.json'), id_root=os.path.join(coco_root, 'annotations')) test_dataset = COCOEntities(image_field, det_field,", "max_verb) this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr) this_verb_list = verb_list[i][idx] # visual feature", "metric scores predictions = np.concatenate(predictions, axis=0) gen = {} gts = {} print(\"Computing", "enumerate(iter(dataloader_test)): detections, imgids = keys # b_s, 100, feat if not opt.det: det_seqs_txt,", "text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20) # define the datasets dataset =", "argparse.ArgumentParser() parser.add_argument('--batch_size', default=16, type=int, help='batch 
size') parser.add_argument('--nb_workers', default=0, type=int, help='number of workers') parser.add_argument('--checkpoint_path',", "lower=True, remove_punctuation=True, fix_length=20) # define the datasets dataset = COCOEntities(image_field, det_field, text_field, img_root='',", "det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim) img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1) detections_i, det_seqs_recons =", "sr_find[int(this_det_seqs_sr[j][k].item())].append(j) verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item() find_sr += 1 else: sr_find[int(this_det_seqs_sr[j][k].item())].append(j) need_re_rank.add(int(this_det_seqs_sr[j][k].item())) if find_sr ==", "else: verb_rank += sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) == 1: final_rank", "'coco_detections.hdf5'), classes_path=os.path.join(coco_root, 'object_class_list.txt'), img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'), precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'), verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'), idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'), cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'),", "== v and find_sr < 10: if int(this_det_seqs_sr[j][k].item()) not in sr_find: sr_find[int(this_det_seqs_sr[j][k].item())] =", "= {} if len(need_re_rank) != 0: for sr in need_re_rank: this_sr_perm = torch.zeros(sinkhorn_len,", "det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \\ det_seqs_sr, control_verb, _, verb_list, captions = values for", "set() for j, vs in enumerate(this_det_seqs_v): # fixed_len for k, v in enumerate(vs):", "test_dataset.splits test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level", "0: break verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0]) find_sr = 0 sr_find = {} 
need_re_rank =", "sr_find[sr_] verb_ranks.append(verb_rank) final_rank = [] if len(verb_ranks) == 1: final_rank = verb_ranks[0] else:", "DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image') dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers) # S-level SSP re_sort_net =", "gts[i] = [gt_captions[i]] gen[i] = [pred_cap] gts_t = PTBTokenizer.tokenize(gts) gen_t = PTBTokenizer.tokenize(gen) val_bleu,", "imgids = keys # b_s, 100, feat if not opt.det: det_seqs_txt, det_seqs_vis, det_seqs_pos,", "perm) recons = np.reshape(recons, this_seqs_all.shape[0:]) recons = recons[np.sum(recons, (1, 2)) != 0] last" ]