diff --git "a/1942.jsonl" "b/1942.jsonl" new file mode 100644--- /dev/null +++ "b/1942.jsonl" @@ -0,0 +1,92 @@ +{"seq_id":"35354145796","text":"# 1608. Special Array With X Elements Greater Than or Equal X\n\n# You are given an array nums of non-negative integers. nums is considered special if there exists a number x such that there are exactly x numbers in nums that are greater than or equal to x.\n\n# Notice that x does not have to be an element in nums.\n\n# Return x if the array is special, otherwise, return -1. It can be proven that if nums is special, the value for x is unique.\n\ndef specialArray(self, nums: List[int]) -> int:\n nums.sort()\n n = len(nums)\n if n<=nums[0]:\n return n\n #binary search\n start,end = 0,n\n while(start<=end):\n mid = (start+end)//2\n #index of middle element\n count = 0\n for i in range(0,n):\n if nums[i]>=mid:\n count = n-i\n break\n if count==mid:\n return count\n elif count5d}.jpg\".format(self.Dowdnum + 1), \"wb\") as f: # 文件写入\n if resp.status_code==200 and len(str(byte)) > 1000 : # 访问成功200且返回页面字节长度大于1000\n f.write(byte)\n # print(len(str(byte)))\n self.Dowdnum = self.Dowdnum + 1 # 递增,表示又多下载了一张图片\n time.sleep(0.5) # 每隔0.5秒下载一张图片,避免由于访问过快被反爬了,认为我在DDS攻击服务器\n print(\"第{}张与{}有关的图片爬取成功!\".format(self.Dowdnum, self.search))\n else:\n break\n\n\n def run(self):\n #print(self.Dowdnum,self.imgNum)\n index=0 #页数\n while(self.Dowdnum j:\n dp[i][j] = dp[i-1][j]\n continue\n dp[i][j] = max(value + dp[i-1][j-weight], dp[i-1][j])\n\n included_items = traceItems(dp, items)\n\n return [dp[-1][-1], included_items]\n\n\ndef traceItems(dp, items):\n row, col = len(dp) - 1, len(dp[0]) - 1\n included_items = []\n while row >= 1 and col >= 1:\n if dp[row][col] != dp[row-1][col]:\n included_items.append(row-1)\n col -= items[row - 1][1]\n row -= 1\n else:\n row -= 1\n\n return included_items\n","repo_name":"dawar-s/algo-ds-sol-python","sub_path":"algoexpert/hard/KnapsackProblem.py","file_name":"KnapsackProblem.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22877541054","text":"import math\n# def main ():\n# principal = eval(input(\"please enter the prinpal\"))\n# air = eval(input(\"please enter the annual interest rate: \"))\n#\n# for i in range(10):\n# principal = principal*(1+air)\n#\n# print (\"total amount: \", princpal)\n#\n#\n#\n\n# def main():\n# n = eval(input(\"enter the number\"))\n# fact = 1\n#\n# for factor in range(n, 1, -1) :\n# fact = fact * factor\n#\n# print('The factorial of '\n# n,\" \"is\" fact)\n#\n\ndef main():\n n = int(input(\"how many num do you have: \"))\n sum = 0.0\n\n for i in range(n):\n x = eval(input(\"enter the number:\"))\n sum = sum + x\n\n print (\"the average of\", n, \"numbers is\", sum/n)\n\n main()","repo_name":"NashidC/Python-csc113-class","sub_path":"10-25.py","file_name":"10-25.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39023991845","text":"import unittest\nimport weakref\n\nfrom mock import patch, Mock, MagicMock, sentinel, call\n\nfrom pysparc import hardware, ftdi_chip, messages\n\n\nclass HiSPARCIIITest(unittest.TestCase):\n\n def test_description(self):\n self.assertEqual(hardware.HiSPARCIII.description,\n \"HiSPARC III Master\")\n\n @patch.object(hardware.BaseHardware, '__init__')\n @patch.object(hardware.HiSPARCIII, 'reset_hardware')\n def test_secondary_description(self, mock_reset, mock_basehardware):\n 
secondary = hardware.HiSPARCIII(secondary=True)\n self.assertEqual(secondary.description, \"HiSPARC III Slave\")\n\n @patch.object(hardware.HiSPARCIII, '__init__')\n @patch.object(hardware.HiSPARCIII, '_burn_firmware')\n @patch('time.sleep')\n @patch('pysparc.hardware.FtdiChip')\n def test_open(self, mock_Device, mock_sleep, mock_burn, mock_init):\n # Using a manager with child mocks allows us to test for the order of\n # calls (see below). The manager itself is not used.\n manager = Mock()\n manager.attach_mock(mock_burn, 'burn')\n manager.attach_mock(mock_sleep, 'sleep')\n manager.attach_mock(mock_Device, 'Device')\n mock_init.return_value = None\n\n hisparc = hardware.HiSPARCIII()\n hisparc.open()\n\n expected = [call.burn(), call.sleep(.5),\n call.Device(hardware.HiSPARCIII.description,\n interface_select=2)]\n self.assertEqual(manager.mock_calls, expected)\n\n\nclass HiSPARCIITest(unittest.TestCase):\n\n @patch.object(hardware.BaseHardware, '__init__')\n @patch('pysparc.hardware.config.Config')\n @patch.object(hardware.HiSPARCII, 'reset_hardware')\n def setUp(self, mock_reset, mock_Config, mock_super):\n self.mock_super = mock_super\n self.mock_Config = mock_Config\n self.mock_config = mock_Config.return_value\n self.mock_reset = mock_reset\n self.mock_device = Mock()\n\n self.hisparc = hardware.HiSPARCII()\n self.hisparc._device = self.mock_device\n self.hisparc._buffer = MagicMock()\n\n def test_description(self):\n self.assertEqual(hardware.HiSPARCII.description,\n \"HiSPARC II Master\")\n\n @patch.object(hardware.BaseHardware, '__init__')\n @patch.object(hardware.HiSPARCII, 'reset_hardware')\n def test_secondary_description(self, mock_reset, mock_basehardware):\n secondary = hardware.HiSPARCII(secondary=True)\n self.assertEqual(secondary.description, \"HiSPARC II Slave\")\n\n def test_init_calls_super(self):\n # test that super *was* called during setUp()\n self.mock_super.assert_called_once_with()\n\n def test_init_creates_device_configuration(self):\n self.mock_Config.assert_called_once_with(self.hisparc)\n self.assertIs(self.hisparc.config, self.mock_config)\n\n def test_init_calls_reset(self):\n self.mock_reset.assert_called_once_with()\n\n @patch.object(hardware.HiSPARCII, 'send_message')\n @patch('pysparc.hardware.GetControlParameterList')\n @patch('pysparc.hardware.ResetMessage')\n @patch('pysparc.hardware.InitializeMessage')\n def test_reset_hardware(self, mock_Init_msg, mock_Reset_msg,\n mock_Parameter_msg, mock_send):\n self.hisparc.config = Mock()\n self.hisparc.reset_hardware()\n msg1 = mock_Reset_msg.return_value\n msg2 = mock_Init_msg.return_value\n msg3 = mock_Parameter_msg.return_value\n mock_send.assert_has_calls([call(msg1), call(msg2), call(msg3)])\n self.hisparc.config.reset_hardware.assert_called_once_with()\n\n @patch.object(hardware.HiSPARCII, 'read_into_buffer')\n @patch('pysparc.hardware.HisparcMessageFactory')\n def test_read_message(self, mock_factory, mock_read_into_buffer):\n self.hisparc.read_message()\n mock_read_into_buffer.assert_called_once_with()\n\n @patch('pysparc.hardware.HisparcMessageFactory')\n def test_read_message_calls_message_factory(self, mock_factory):\n self.hisparc.read_message()\n mock_factory.assert_called_once_with(self.hisparc._buffer)\n\n @patch('pysparc.hardware.HisparcMessageFactory')\n def test_read_message_sets_config_parameters(self, mock_factory):\n mock_config_message = Mock(spec=messages.ControlParameterList)\n mock_other_message = Mock()\n\n mock_factory.return_value = mock_other_message\n self.hisparc.read_message()\n 
self.assertFalse(self.mock_config.update_from_config_message.called)\n\n mock_factory.return_value = mock_config_message\n self.hisparc.read_message()\n self.mock_config.update_from_config_message.assert_called_once_with(\n mock_config_message)\n\n @patch('pysparc.hardware.HisparcMessageFactory')\n def test_read_message_returns_message(self, mock_factory):\n mock_factory.return_value = sentinel.msg\n actual = self.hisparc.read_message()\n self.assertIs(actual, sentinel.msg)\n\n @patch('pysparc.hardware.HisparcMessageFactory')\n def test_read_message_returns_config_message(self, mock_factory):\n mock_config_message = Mock(spec=messages.ControlParameterList)\n mock_factory.return_value = mock_config_message\n actual = self.hisparc.read_message()\n self.assertIs(actual, mock_config_message)\n\n @patch.object(hardware.HiSPARCII, 'flush_device')\n @patch.object(hardware.HiSPARCII, 'read_message')\n def test_flush_and_get_measured_data_message_calls_flush(self,\n mock_read, mock_flush):\n self.hisparc.flush_and_get_measured_data_message(timeout=.01)\n mock_flush.assert_called_once_with()\n\n @patch.object(hardware.HiSPARCII, 'read_message')\n def test_flush_and_get_measured_data_message_calls_read_message(self,\n mock_read):\n self.hisparc.flush_and_get_measured_data_message(timeout=.01)\n self.assertTrue(mock_read.called)\n\n @patch.object(hardware.HiSPARCII, 'read_message')\n def test_flush_and_get_measured_data_message_returns_correct_type(\n self, mock_read):\n mock_msg = Mock(spec=messages.MeasuredDataMessage)\n mock_read.side_effect = [Mock(), Mock(), mock_msg, Mock()]\n msg = self.hisparc.flush_and_get_measured_data_message(\n timeout=.01)\n self.assertIs(msg, mock_msg)\n\n\nclass BaseHardwareTest(unittest.TestCase):\n\n @patch.object(hardware.BaseHardware, 'open')\n def setUp(self, mock_open):\n self.mock_open = mock_open\n self.mock_device = Mock()\n self.hisparc = hardware.BaseHardware()\n self.hisparc._device = self.mock_device\n self.mock_device.closed = False\n\n def test_description(self):\n self.assertEqual(hardware.BaseHardware.description,\n \"BaseHardware\")\n\n def test_device_is_none_before_instantiation(self):\n self.assertIs(hardware.BaseHardware._device, None)\n\n def test_init_calls_open(self):\n self.mock_open.assert_called_once_with()\n\n @patch('pysparc.hardware.FtdiChip')\n def test_open_opens_and_saves_device(self, mock_Device):\n mock_device = Mock()\n mock_Device.return_value = mock_device\n\n self.hisparc.open()\n\n mock_Device.assert_called_once_with(self.hisparc.description)\n self.assertIs(self.hisparc._device, mock_device)\n\n @patch.object(hardware.BaseHardware, 'close')\n def test_destructor_calls_close(self, mock_close):\n del self.hisparc\n mock_close.assert_called_once_with()\n\n def test_close_closes_device(self):\n self.hisparc.close()\n self.mock_device.close.assert_called_once_with()\n\n def test_close_does_nothing_if_device_is_closed(self):\n self.mock_device.closed = True\n self.hisparc.close()\n self.assertFalse(self.mock_device.close.called)\n\n def test_close_does_nothing_if_device_is_none(self):\n self.hisparc._device = None\n self.hisparc.close()\n self.assertFalse(self.mock_device.close.called)\n\n def test_buffer_is_none_before_instantiation(self):\n self.assertIs(hardware.BaseHardware._buffer, None)\n\n def test_buffer_attribute_is_bytearray(self):\n self.assertIs(type(self.hisparc._buffer), bytearray)\n\n def test_flush_device_flushes_device(self):\n self.hisparc._buffer = MagicMock()\n self.hisparc.flush_device()\n 
self.mock_device.flush.assert_called_once_with()\n\n def test_flush_device_clears_buffer(self):\n self.hisparc._buffer = bytearray([0x1, 0x2, 0x3])\n self.hisparc.flush_device()\n self.assertEqual(len(self.hisparc._buffer), 0)\n\n def test_send_message_calls_msg_encode(self):\n msg = Mock()\n self.hisparc.send_message(msg)\n msg.encode.assert_called_once_with()\n\n def test_send_message_writes_to_device(self):\n msg = Mock()\n msg.encode.return_value = sentinel.encoded_msg\n self.hisparc.send_message(msg)\n self.mock_device.write.assert_called_once_with(\n sentinel.encoded_msg)\n\n def test_read_into_buffer_reads_from_device(self):\n self.hisparc._buffer = MagicMock()\n self.mock_device.read.return_value = MagicMock()\n self.hisparc.read_into_buffer()\n self.mock_device.read.assert_called_once_with(hardware.READ_SIZE)\n\n def test_read_into_buffer_reads_into_buffer(self):\n mock_buffer = Mock()\n self.hisparc._buffer = mock_buffer\n read_data = self.mock_device.read.return_value\n self.hisparc.read_into_buffer()\n mock_buffer.extend.assert_called_once_with(read_data)\n\n @patch.object(hardware.BaseHardware, 'read_into_buffer')\n def test_read_message(self, mock_read_into_buffer):\n self.assertRaises(NotImplementedError, self.hisparc.read_message)\n mock_read_into_buffer.assert_called_once_with()\n\n\nclass TrimbleGPSTest(unittest.TestCase):\n\n # mock __init__ of the parent class, so we do not depend on that\n # implementation\n @patch.object(hardware.TrimbleGPS, '__init__')\n def setUp(self, mock_init):\n mock_init.return_value = None\n\n self.gps = hardware.TrimbleGPS()\n self.gps._buffer = sentinel.buffer\n self.mock_device = Mock()\n self.gps._device = self.mock_device\n\n patcher1 = patch.object(hardware.TrimbleGPS, 'read_into_buffer')\n patcher2 = patch('pysparc.hardware.GPSMessageFactory')\n self.mock_read_into_buffer = patcher1.start()\n self.mock_factory = patcher2.start()\n\n self.addCleanup(patcher1.stop)\n self.addCleanup(patcher2.stop)\n\n def test_type_is_basehardware(self):\n self.assertIsInstance(self.gps, hardware.BaseHardware)\n\n @patch.object(hardware.BaseHardware, 'open')\n def test_open_calls_super(self, mock_super):\n self.gps.open()\n mock_super.assert_called_once_with()\n\n @patch.object(hardware.BaseHardware, 'open')\n def test_open_sets_line_settings(self, mock_super):\n self.gps.open()\n self.gps._device.set_line_settings.assert_called_once_with(\n ftdi_chip.BITS_8, ftdi_chip.PARITY_ODD, ftdi_chip.STOP_BIT_1)\n\n def test_description(self):\n self.assertEqual(hardware.TrimbleGPS.description,\n \"FT232R USB UART\")\n\n def test_read_message(self):\n self.gps.read_message()\n self.mock_read_into_buffer.assert_called_once_with()\n\n def test_read_message_calls_message_factory(self):\n self.gps.read_message()\n self.mock_factory.assert_called_once_with(sentinel.buffer)\n\n def test_read_message_returns_message(self):\n self.mock_factory.return_value = sentinel.msg\n actual = self.gps.read_message()\n self.assertIs(actual, sentinel.msg)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"HiSPARC/pysparc","sub_path":"pysparc/tests/test_hardware.py","file_name":"test_hardware.py","file_ext":"py","file_size_in_byte":11760,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"7503459446","text":"#!/usr/bin/env python\n# Four spaces as indentation [no tabs]\n\nimport re\nfrom pddl.action import Action\n\n\nclass PDDLParser:\n\n SUPPORTED_REQUIREMENTS = [':strips', ':negative-preconditions', 
':typing']\n\n # ------------------------------------------\n # Tokens\n # ------------------------------------------\n\n def scan_tokens(self, filename):\n with open(filename,'r') as f:\n # Remove single line comments\n str = re.sub(r';.*$', '', f.read(), flags=re.MULTILINE).lower()\n # Tokenize\n stack = []\n list = []\n for t in re.findall(r'[()]|[^\\s()]+', str):\n if t == '(':\n stack.append(list)\n list = []\n elif t == ')':\n if stack:\n l = list\n list = stack.pop()\n list.append(l)\n else:\n raise Exception('Missing open parentheses')\n else:\n list.append(t)\n if stack:\n raise Exception('Missing close parentheses')\n if len(list) != 1:\n raise Exception('Malformed expression')\n return list[0]\n\n #-----------------------------------------------\n # Parse domain\n #-----------------------------------------------\n\n def parse_domain(self, domain_filename):\n tokens = self.scan_tokens(domain_filename)\n if type(tokens) is list and tokens.pop(0) == 'define':\n self.domain_name = 'unknown'\n self.requirements = []\n self.types = []\n self.actions = []\n self.predicates = {}\n while tokens:\n group = tokens.pop(0)\n t = group.pop(0)\n if t == 'domain':\n self.domain_name = group[0]\n elif t == ':requirements':\n for req in group:\n if not req in self.SUPPORTED_REQUIREMENTS:\n raise Exception('Requirement ' + req + ' not supported')\n self.requirements = group\n elif t == ':predicates':\n self.parse_predicates(group)\n elif t == ':types':\n self.types = group\n elif t == ':action':\n self.parse_action(group)\n else: print(str(t) + ' is not recognized in domain')\n else:\n raise Exception('File ' + domain_filename + ' does not match domain pattern')\n\n #-----------------------------------------------\n # Parse predicates\n #-----------------------------------------------\n\n def parse_predicates(self, group):\n for pred in group:\n predicate_name = pred.pop(0)\n if predicate_name in self.predicates:\n raise Exception('Predicate ' + predicate_name + ' redefined')\n arguments = {}\n untyped_variables = []\n while pred:\n t = pred.pop(0)\n if t == '-':\n if not untyped_variables:\n raise Exception('Unexpected hyphen in predicates')\n type = pred.pop(0)\n while untyped_variables:\n arguments[untyped_variables.pop(0)] = type\n else:\n untyped_variables.append(t)\n while untyped_variables:\n arguments[untyped_variables.pop(0)] = 'object'\n self.predicates[predicate_name] = arguments\n\n #-----------------------------------------------\n # Parse action\n #-----------------------------------------------\n\n def parse_action(self, group):\n name = group.pop(0)\n if not type(name) is str:\n raise Exception('Action without name definition')\n for act in self.actions:\n if act.name == name:\n raise Exception('Action ' + name + ' redefined')\n parameters = []\n positive_preconditions = []\n negative_preconditions = []\n add_effects = []\n del_effects = []\n while group:\n t = group.pop(0)\n if t == ':parameters':\n if not type(group) is list:\n raise Exception('Error with ' + name + ' parameters')\n parameters = []\n untyped_parameters = []\n p = group.pop(0)\n while p:\n t = p.pop(0)\n if t == '-':\n if not untyped_parameters:\n raise Exception('Unexpected hyphen in ' + name + ' parameters')\n ptype = p.pop(0)\n while untyped_parameters:\n parameters.append([untyped_parameters.pop(0), ptype])\n else:\n untyped_parameters.append(t)\n while untyped_parameters:\n parameters.append([untyped_parameters.pop(0), 'object'])\n elif t == ':precondition':\n self.split_propositions(group.pop(0), 
positive_preconditions, negative_preconditions, name, ' preconditions')\n elif t == ':effect':\n self.split_propositions(group.pop(0), add_effects, del_effects, name, ' effects')\n else: print(str(t) + ' is not recognized in action')\n self.actions.append(Action(name, tuple(parameters), frozenset(positive_preconditions), frozenset(negative_preconditions), frozenset(add_effects), frozenset(del_effects)))\n\n #-----------------------------------------------\n # Parse problem\n #-----------------------------------------------\n\n def parse_problem(self, problem_filename):\n tokens = self.scan_tokens(problem_filename)\n if type(tokens) is list and tokens.pop(0) == 'define':\n self.problem_name = 'unknown'\n self.objects = dict()\n self.state = frozenset()\n self.positive_goals = frozenset()\n self.negative_goals = frozenset()\n while tokens:\n group = tokens.pop(0)\n t = group[0]\n if t == 'problem':\n self.problem_name = group[-1]\n elif t == ':domain':\n if self.domain_name != group[-1]:\n raise Exception('Different domain specified in problem file')\n elif t == ':requirements':\n pass # TODO\n elif t == ':objects':\n group.pop(0)\n object_list = []\n while group:\n if group[0] == '-':\n group.pop(0)\n self.objects[group.pop(0)] = object_list\n object_list = []\n else:\n object_list.append(group.pop(0))\n if object_list:\n if not 'object' in self.objects:\n self.objects['object'] = []\n self.objects['object'] += object_list\n elif t == ':init':\n group.pop(0)\n self.state = self.state_to_tuple(group)\n elif t == ':goal':\n pos = []\n neg = []\n self.split_propositions(group[1], pos, neg, '', 'goals')\n self.positive_goals = frozenset(pos)\n self.negative_goals = frozenset(neg)\n else: print(str(t) + ' is not recognized in problem')\n else:\n raise Exception('File ' + problem_filename + ' does not match problem pattern')\n\n #-----------------------------------------------\n # Split propositions\n #-----------------------------------------------\n\n def split_propositions(self, group, pos, neg, name, part):\n if not type(group) is list:\n raise Exception('Error with ' + name + part)\n if group[0] == 'and':\n group.pop(0)\n else:\n group = [group]\n for proposition in group:\n if proposition[0] == 'not':\n if len(proposition) != 2:\n raise Exception('Unexpected not in ' + name + part)\n neg.append(tuple(proposition[-1]))\n else:\n pos.append(tuple(proposition))\n\n #-----------------------------------------------\n # State to tuple\n #-----------------------------------------------\n\n def state_to_tuple(self, state):\n return frozenset(tuple(fact) for fact in state)\n\n\n# ==========================================\n# Main\n# ==========================================\nif __name__ == '__main__':\n import sys\n import pprint\n domain = sys.argv[1]\n problem = sys.argv[2]\n parser = PDDLParser()\n print('----------------------------')\n pprint.pprint(parser.scan_tokens(domain))\n print('----------------------------')\n pprint.pprint(parser.scan_tokens(problem))\n print('----------------------------')\n parser.parse_domain(domain)\n parser.parse_problem(problem)\n print('Domain name: ' + parser.domain_name)\n for act in parser.actions:\n print(act)\n print('----------------------------')\n print('Problem name: ' + parser.problem_name)\n print('Objects: ' + str(parser.objects))\n print('State: ' + str(parser.state))\n print('Positive goals: ' + str(parser.positive_goals))\n print('Negative goals: ' + 
str(parser.negative_goals))","repo_name":"pucrs-automated-planning/heuristic-planning","sub_path":"pddl/pddl_parser.py","file_name":"pddl_parser.py","file_ext":"py","file_size_in_byte":9514,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"2637230616","text":"#[백준] 바이러스 (실버3) 231208 🅾\ndef solution():\n n = int(input()) #컴퓨터의 수\n pn = int(input()) #컴퓨터 쌍의 수\n\n #네트워크 상 연결된 컴퓨터 쌍의 정보\n graph = [[] for i in range(n + 1)]\n for _ in range(pn):\n a, b = map(int, input().split())\n #연결되어 있으면 모두 바이러스 걸리니까 양방향 고려\n graph[a].append(b)\n graph[b].append(a)\n\n #방문 정보 리스트\n visited = [False] * (n + 1)\n\n #DFS\n def dfs(graph, v, visited):\n visited[v] = True #현재 방문 중인 노드 방문 처리\n\n for i in graph[v]:\n if not visited[i]:\n dfs(graph, i, visited)\n\n #1번 컴퓨터를 통해 방문한 컴퓨터의 수(= 바이러스 걸리게 되는 컴퓨터 수)\n return sum(visited) - 1\n\n #DFS 호출\n print(dfs(graph, 1, visited))\n","repo_name":"eun417/replit_test","sub_path":"algorithm/DFS_BFS/BJ/bj_17.py","file_name":"bj_17.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36519658160","text":"from conf import *\nimport random\nimport matplotlib.pyplot as plt\nfrom Users.Users import UserManager\nimport datetime\nfrom DatasetCollection import DataSet\nfrom lib.NeuralLinear import NeuralLinearAlgorithm\n\n\nclass randomStruct:\n def __init__(self):\n self.reward = 0\n self.regret = 0\n\n\nclass Article:\n def __init__(self, aid, FV=None):\n self.id = aid\n self.featureVector = FV\n self.contextFeatureVector = FV\n\n\nclass L2RRewardManager:\n def __init__(self, arg_dict):\n for key in arg_dict:\n setattr(self, key, arg_dict[key])\n\n def runAlgorithms(self, algorithms, diffLists):\n timeRun = datetime.datetime.now().strftime(\"_%m_%d_%H_%M\")\n filenameWriteRegret = os.path.join(self.save_address, \"AccRegret\" + timeRun + \".csv\")\n self.set_up_regret_file(filenameWriteRegret, algorithms)\n\n tsave = 60 * 60 * 47 # Time interval for saving model.\n tstart = datetime.datetime.now()\n save_flag = 0\n\n tim_ = []\n AlgReward = {}\n AlgPicked = {} # Records what article each algorithm picks\n AlgRegret = {}\n AlgRewardRatio_vsRandom = {}\n BatchCumulateRegret = {}\n RandomChoice = randomStruct()\n RandomChoiceRegret = []\n\n for alg_name, alg in algorithms.items(): # \"NeuralLinear\"\n AlgReward[alg_name] = []\n AlgPicked[alg_name] = []\n AlgRegret[alg_name] = []\n BatchCumulateRegret[alg_name] = []\n AlgRewardRatio_vsRandom[alg_name] = []\n\n print(\"Preparing the dataset...\")\n data = DataSet(self.address, self.context_dimension)\n data.read_data()\n n_queries = data.n_queries\n # random shuffle the query list\n query_sequence = random.sample(range(n_queries), n_queries)\n\n # only one user\n UserID = 0\n cumulativeOptimalReward = 0\n print(\"Start simulation\")\n for i, qid in enumerate(query_sequence):\n print(i, qid)\n articlePool = []\n s_i = data.DoclistRanges[qid]\n e_i = data.DoclistRanges[qid + 1]\n label_vector = data.LabelVector[s_i:e_i]\n feature = data.FeatureMatrix[s_i:e_i]\n for DocId in range(e_i - s_i):\n articlePool.append(Article(DocId, feature[DocId]))\n RandomArticlePickled = random.choice(articlePool)\n RandomChoice.reward += label_vector[RandomArticlePickled.id]\n optimalReward = max(label_vector)\n cumulativeOptimalReward += optimalReward\n RandomChoice.regret = cumulativeOptimalReward - RandomChoice.reward\n for alg_name, alg in algorithms.items():\n pickedArticle = 
alg.createRecommendation(articlePool, UserID, self.k).articles[0]\n                reward = label_vector[pickedArticle.id]\n                alg.updateParameters(pickedArticle, reward, UserID)\n\n                AlgReward[alg_name].append(reward)\n                AlgPicked[alg_name].append(pickedArticle.id)\n                AlgRegret[alg_name].append(optimalReward - reward)\n                if i % 100 == 0:\n                    BatchCumulateRegret[alg_name].append(sum(AlgRegret[alg_name]))\n                    if RandomChoice.reward != 0:\n                        AlgRewardRatio_vsRandom[alg_name].append(\n                            (cumulativeOptimalReward - BatchCumulateRegret[alg_name][-1]) / (1.0 * RandomChoice.reward))\n                    else:\n                        AlgRewardRatio_vsRandom[alg_name].append(0)\n            if i % 100 == 0:\n                tim_.append(i)\n                RandomChoiceRegret.append(RandomChoice.regret)\n            if i % 1000 == 0 or i == 9999:\n                self.batchRecord(algorithms, i, tstart, RandomChoice, AlgPicked)\n                self.write_regret_to_file(filenameWriteRegret, algorithms, BatchCumulateRegret, i,\n                                          RandomChoice.regret)\n\n    def set_up_regret_file(self, filenameWriteRegret, algorithms):\n        with open(filenameWriteRegret, \"w\") as f:\n            f.write(\"Time(Iteration),Random\")\n            f.write(\",\" + \",\".join([str(alg_name) for alg_name in algorithms.keys()]))\n            f.write(\"\\n\")\n\n    def batchRecord(self, algorithms, iter_, tstart, articles_random, AlgPicked):\n        # Include the elapsed time in the format string; the second argument was previously ignored.\n        print(\"Datapoint #{} Elapsed time {}\".format(iter_, datetime.datetime.now() - tstart))\n\n    def write_regret_to_file(self, filenameWriteRegret, algorithms, BatchCumulateRegret, iter_, randomRegret):\n        with open(filenameWriteRegret, \"a+\") as f:\n            f.write(str(iter_))\n            f.write(\",\" + str(randomRegret))\n            f.write(\",\" + \",\".join([str(BatchCumulateRegret[alg_name][-1]) for alg_name in algorithms.keys()]))\n            f.write(\"\\n\")\n","repo_name":"HCDM/BanditLib","sub_path":"L2RRewardManager.py","file_name":"L2RRewardManager.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"18"} +{"seq_id":"25806052590","text":"from boto3 import client\nfrom syndicate.commons.log_helper import get_logger\n\n_LOG = get_logger(\n    'syndicate.connection.resource_groups_tagging_api_connection')\n\n\nclass ResourceGroupsTaggingAPIConnection:\n    \"\"\"Resource Groups Tagging API connection class.\"\"\"\n\n    def __init__(self, region=None, aws_access_key_id=None,\n                 aws_secret_access_key=None, aws_session_token=None):\n        self.region = region\n        self.aws_access_key_id = aws_access_key_id\n        self.aws_secret_access_key = aws_secret_access_key\n        self.aws_session_token = aws_session_token\n        self.client = client('resourcegroupstaggingapi', region,\n                             aws_access_key_id=aws_access_key_id,\n                             aws_secret_access_key=aws_secret_access_key,\n                             aws_session_token=aws_session_token)\n        _LOG.debug('Opened new Resource Groups Tagging API connection.')\n\n    def tag_resources(self, resources_arns: list, tags: dict):\n        params = dict(ResourceARNList=resources_arns,\n                      Tags=tags)\n        response = self.client.tag_resources(**params)\n        return response.get('FailedResourcesMap')\n\n    def untag_resources(self, resources_arns: list, tag_keys: list):\n        params = dict(ResourceARNList=resources_arns,\n                      TagKeys=tag_keys)\n        response = self.client.untag_resources(**params)\n        return response.get('FailedResourcesMap')\n","repo_name":"epam/aws-syndicate","sub_path":"syndicate/connection/resource_groups_tagging_api_connection.py","file_name":"resource_groups_tagging_api_connection.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"18"}
+{"seq_id":"70924041639","text":"#!/usr/bin/python3\n\n# Descente de gradient \n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef descente_stochastique(f, grad_fi, points, X0, delta=0.1, nmax=10):\n liste_X = [X0]\n liste_grad = []\n X = X0\n N = len(points)\n for i in range(nmax):\n xi, yi = points[i%N]\n gradienti = grad_fi(*X,xi,yi)\n X = X - delta*gradienti\n liste_X.append(X)\n liste_grad.append(gradienti)\n return liste_X, liste_grad\n\n\ndef affiche_descente_stochastique(f, grad_fi, points, X0, delta=0.1, nmax=10):\n liste_X, liste_grad = descente_stochastique(f, grad_fi, points, X0, delta=delta, nmax=nmax)\n print(\"Delta\",delta)\n print(\"Nombre d'itérations\", nmax)\n print(\"Point initial\", X0)\n for i in range(len(liste_X)-1): # flèches\n print(\"--- Etape\",i)\n print(\"Point :\", *liste_X[i])\n print(\"Gradient \", *liste_grad[i])\n print(\"Valeur de la fonction \", f(*liste_X[i]))\n print(\"Dernier point :\", *liste_X[-1])\n print(\"Dernière valeur de la fonction \", f(*liste_X[-1]))\n return\n\n\ndef graphique_descente_stochastique(f, grad_fi, points, X0, delta=0.1, nmax=10, zone = (-3.0,3.0,-3.0,3.0)):\n # 1. Points et gradients\n liste_X, liste_grad = descente_stochastique(f, grad_fi, points, X0, delta=delta, nmax=nmax)\n for x, y in liste_X: # points\n plt.scatter(x, y, color='red')\n\n for i in range(len(liste_X)-1): # flèches\n plt.arrow(*liste_X[i], *(-delta*liste_grad[i]), linewidth=2, color='green', length_includes_head=True, head_width=0.05, head_length=0.1)\n\n # 2. lignes de niveaux\n xmin, xmax, ymin, ymax = zone\n num = 40\n VX = np.linspace(xmin, xmax, num)\n VY = np.linspace(ymin, ymax, num)\n\n X, Y = np.meshgrid(VX, VY)\n Z = f(X, Y)\n\n # 3. affichage\n plt.contour(X, Y, Z, 30, colors='black')\n plt.scatter(-3,2, color='blue') # minimum\n # plt.colorbar();\n plt.axis('equal')\n plt.tight_layout()\n plt.savefig('stochastique.png')\n plt.show()\n return","repo_name":"exo7math/deepmath-exo7","sub_path":"descente/python/descente_stochastique.py","file_name":"descente_stochastique.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"fr","doc_type":"code","stars":39,"dataset":"github-code","pt":"18"} +{"seq_id":"11182007476","text":"# para rodar primeiro precisa ativar o env com o comando => source env/bin/activate\n# e depois rodar o comando => uvicorn main:app --reload\n# no link http://127.0.0.1:8000/docs# é possível testar a api\n\nfrom fastapi import FastAPI, File, UploadFile, status, HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\n\nimport src.mad as mad\nimport src.dbscan as dbscan\nimport src.graficos as graficos\nimport src.boxplot as boxplot\n\nfrom schemas.usuarios import Usuarios\nfrom schemas.historicos import Historicos\nfrom config.db import con\nfrom models.usuarios import tab_users\nfrom models.historicos import tab_historic\n\napp=FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n \"http://localhost:3000\",\n \"*\"\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n## Api's database\n## Usuarios\n@app.post('/usuarios')\nasync def create_usuario(usuario: Usuarios):\n data=con.execute(tab_users.insert().values( \n nome=usuario.nome,\n email=usuario.email,\n senha=usuario.senha,\n departamento=usuario.departamento\n ))\n if data.is_insert: \n return {\n \"sucesso\": True,\n \"usuario\": 
data\n }\n else:\n return {\n \"sucesso\": False,\n \"msg\": data\n }\n \n \n@app.get('/usuarios/{email}') \nasync def get_usuarios(email: str):\n data = con.execute(tab_users.select().where(tab_users.c.email == email)).fetchall() \n if data: \n return {\n \"sucesso\": True,\n \"usuario\": data\n }\n else:\n return {\n \"sucesso\": False,\n \"msg\": \"Usuario não localizado\"\n }\n \n \n@app.patch('/usuarios/{email}') \nasync def update_usuarios(email:str, usuario:Usuarios):\n data = con.execute(tab_users.update().values(\n nome=usuario.nome,\n email=usuario.email,\n senha=usuario.senha,\n departamento=usuario.departamento\n ).where(tab_users.c.email == email))\n \n if data: \n return {\n \"sucesso\": True,\n \"usuario\": data\n }\n else:\n return {\n \"sucesso\": False,\n \"msg\": \"Usuario não localizado\"\n }\n \n\n@app.delete('/usuarios/{email}') \nasync def delete_usuarios(email: str):\n data = con.execute(tab_users.delete().where(tab_users.c.email == email))\n if data: \n return {\n \"sucesso\": True,\n \"usuario\": data\n }\n else:\n return {\n \"sucesso\": False,\n \"msg\": \"Usuario não localizado\"\n }\n \n\n## Api's database\n## Historicos \n@app.post('/historicos')\nasync def create_historic(hiscoricos: Historicos):\n data=con.execute(tab_historic.insert().values( \n criado=hiscoricos.criado,\n outlier=hiscoricos.outlier,\n nome_arquivo=hiscoricos.nome_arquivo,\n usuario=hiscoricos.usuario,\n ))\n if data.is_insert: \n return {\n \"sucesso\": True,\n \"historicos\": data\n }\n else:\n return {\n \"sucesso\": False,\n \"msg\": data\n }\n \n \n@app.get('/historicos') \nasync def get_histories():\n data = con.execute(tab_historic.select()).fetchall() \n if data: \n return {\n \"sucesso\": True,\n \"histories\": data\n }\n else:\n return {\n \"sucesso\": False,\n \"msg\": \"Históricos não localizado\"\n }\n \n \n# Api's calculos\n@app.post('/outlier/boxplot')\nasync def calculate_boxplot(ft, file: UploadFile = File(...)):\n try:\n result = boxplot.get_result_boxplot(ft, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/mad')\nasync def calculate_mad(ft, file: UploadFile = File(...)):\n try:\n result = mad.get_mediana_valor(ft, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/dbscan')\nasync def calculate_dbscan(ft, ft2, file: UploadFile = File(...)):\n try:\n result = dbscan.get_result_dbscan(ft, ft2, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n# Api download csv\n@app.post('/outlier/csv_boxplot')\nasync def get_csv_boxplot(ft, file: UploadFile = File(...)):\n try:\n boxplot.download_csv_boxplot(ft, file)\n return True\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/csv_mad')\nasync def get_csv_mad(ft, file: UploadFile = File(...)):\n try:\n mad.download_csv_mad(ft, file)\n return True\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/csv_dbscan')\nasync def get_csv_dbscan(ft, ft2, file: UploadFile = File(...)):\n try:\n dbscan.download_csv_dbscan(ft, ft2, file)\n return True\n except:\n raise 
HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n# Api Graficos\n@app.post('/outlier/grafico_barras')\nasync def get_grafico_barras(ft, ft2, file: UploadFile = File(...)):\n try:\n result = graficos.graficos_barras(ft, ft2, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/grafico_dispersao')\nasync def get_grafico_dispersao(ft, ft2, file: UploadFile = File(...)):\n try:\n result = graficos.grafico_dispersao(ft, ft2, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/grafico_boxplot')\nasync def get_grafico_boxplot(ft, ft2, file: UploadFile = File(...)):\n try:\n result = graficos.grafico_boxplot(ft, ft2, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n\n\n@app.post('/outlier/grafico_dbscan')\nasync def get_grafico_dbscan(ft, ft2, file: UploadFile = File(...)):\n try:\n result = graficos.grafico_dbscan(ft, ft2, file)\n return result\n except:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=f\"Ocorreu um erro no cálculo\")\n","repo_name":"Elen0207/projeto_outlier","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33364648695","text":"from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\nclass Essay(models.Model):\n author = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE) \n #on_delete : 게시물지우면 모델도 같이 지워진다.\n title = models.CharField(max_length=30)\n body = models.TextField()\n\n# 사용자들이 직접 업로드 할 수 있는 사진과 파일을 다루는 모델을 올려줄거야!!\n\nclass Album(models.Model):\n author = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE) \n image = models.ImageField(upload_to =\"images\")\n desc = models.CharField(max_length=100)\nclass Files(models.Model):\n author= models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE) \n myfile=models.FileField(blank=False, null=False, upload_to=\"files\")\n desc=models.CharField(max_length=100)","repo_name":"yesjiyoung/django-api-project","sub_path":"mystorage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24164609385","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport utils\nimport debug\n\nd = debug.Debug('course')\n\nclass Course:\n def __init__(self, cid, name, credit, groups, teachers, week, time):\n \"\"\"课程\n\n Attributes:\n cid -- 每个课程有唯一的一个 id\n name -- 课程名字\n credit -- 学分\n group -- 用于年级名称或者班的名称\n teachers -- 这门课的老师,可能多个\n start_time -- 这门课的上课时间。\n 如果没有预置上课,这里是 None\n 如果预置上课时间,是(时间,星期)的 tuple\n preference -- 课程所放置的位置的偏好,为空则表示没有偏好\n factor -- 决定本门课程何时开始放置的因子,factor 越小就越\n 要最先摆放\n\n \"\"\"\n self.cid = cid\n self.name = name\n self.credit = int(credit)\n self.groups = groups\n self.teachers = teachers\n #将 week, time 转化为二维数组的坐标[time, week]\n self.start_time = utils.to_pos(week, time)\n # 每门课的影响因子,用于之后课程之间的排序用\n self.factor = 0\n # 每门课的特殊要求(要求安排在周几,第几节课)\n # preference 为空意味着没有偏好\n self.preference = []\n # compact 
也是课程的特殊需求,compact=1则表示该课的老师希望他\n # 的课能够在一天上完\n self.compact = 0\n\n def set_prefs(self, prefs):\n #TODO判断是否是第二次进行设置,要进行prefs的合并\n self.preference = prefs[0]\n # 合并几个老师的 compact\n if self.compact == 0 or \\\n (self.compact != 0 and \\\n self.compact > prefs[1]):\n self.compact = prefs[1]\n\n\n def calc_factor(self):\n \"\"\"计算课程的影响因子。影响因子会影响课程排序的先后。\n 如果一门课影响因子越小,这门课应该越先排\n\n 对于有要求的课\n 1. 这门课有多少可放的位置作为其影响因素\n \"\"\"\n # 计算可以放置的位置\n if self.preference == []:\n self.factor = 100\n else:\n for p in self.preference:\n self.factor += len(p.time)\n\n def __str__(self):\n return \"Name:%s,T:%s,Groups:%s,credit:%s,COMPACT:%s\" % (self.name,\n self.teachers,\n self.groups,\n self.credit,\n self.compact)\n def need_allocate_p(self):\n return self.start_time == None\n\n def conflict_pref_day_p(self, day):\n \"\"\"判断给定的天数是否会和自己的 preference 冲突\"\"\"\n d.p('判断day %d 是否符合 %s 的偏好?' % (day, self.name))\n if self.preference == []:\n d.p('无偏好,不冲突')\n return False\n\n prefs_days = [p.day for p in self.preference]\n d.p('偏好是:%s' % prefs_days)\n if day in prefs_days:\n d.p('不冲突')\n return False\n else:\n d.p('冲突')\n return True\n\n def conflict_pref_time_p(self, day, time):\n \"\"\"判断给的时间是否会和自己的 preference 冲突\"\"\"\n d.p('判断time %d 是否符合 %s 的偏好?' % (time, self.name))\n\n if self.preference == []:\n d.p('本课程无偏好,不冲突')\n return False\n\n prefs_time = self.get_time_preference(day)\n if prefs_time == []:\n d.p('老师的preference中不包含这一天 %d,冲突' % day)\n return True\n d.p('偏好是:%s' % prefs_time)\n if time in prefs_time:\n d.p('不冲突')\n return False\n else:\n d.p('冲突')\n return True\n\n def has_day_preference_p(self):\n \"\"\"针对天而言,判断老师周一到周五是否有特殊的偏好,\n 例如周一不上课,或者周N不上课\n\n 至于每天时间的偏好不算在内\"\"\"\n if self.preference == []:\n return False\n days = [p.day for p in self.preference]\n if set(days) == set([0,1,2,3,4]):\n return False\n else:\n return True\n\n def has_time_preference_p(self, day):\n \"对于某天而言,判断这一天时间上有没有任何的 preference\"\n if self.preference == []:\n return False\n\n pref = filter(lambda x: x.day == day, self.preference)\n if pref == []:\n return True #如果preference 里面没有 day 这一天,说明这一\n #天不能排,意味着有 time preference\n else:\n time = pref[0].time\n if set(time) != set([0,1,2,3,4,5,6,7,8,9,10,11,12]):\n return True\n else:\n return False\n\n def has_preference_p(self):\n \"判断这个课程是否有任何的 preference\"\n if self.preference == []:\n return False\n\n if self.has_day_preference_p():\n return True\n for d in [0,1,2,3,4]:\n if self.has_time_preference_p(d):\n return True\n return False\n\n def get_time_preference(self, day):\n \"\"\"提取某一天的 time preference,如果没有的话,\n time preference 是 []\"\"\"\n pref = filter(lambda x: x.day == day, self.preference)\n if pref == []:\n return []\n else:\n return pref[0].time\n","repo_name":"lijunsong/course-table-genertor","sub_path":"course_table_generator/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"36185718874","text":"import os\nimport tensorflow as tf\nimport numpy as np\n\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\n\nmodel = tf.keras.models.Sequential([\n # Note the input shape is the desired size of the image 300x300 with 3 bytes color\n # This is the first convolution\n tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The 
second convolution\n    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # The third convolution\n    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # The fourth convolution\n    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # The fifth convolution\n    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # Flatten the results to feed into a DNN\n    tf.keras.layers.Flatten(),\n    # 1024 neuron hidden layer\n    tf.keras.layers.Dense(1024, activation='relu'),\n    # 512 neuron hidden layer\n    tf.keras.layers.Dense(512, activation='relu'),\n    # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')\n    tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.summary()\nmodel.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(0.001), metrics=['accuracy'])\n\n# All images will be rescaled by 1./255\ntrain_datagen = ImageDataGenerator(rescale=1 / 255)\n\n# Flow training images in batches of 128 using train_datagen generator\n# This is the source directory for training images\ntrain_generator = train_datagen.flow_from_directory(\n    'horse-or-human',\n    target_size=(300, 300),  # All images will be resized to 300x300\n    batch_size=128,\n    # Since we use binary_crossentropy loss, we need binary labels\n    class_mode='binary')\n\nhistory = model.fit(train_generator, steps_per_epoch=8, epochs=10, verbose=1)\n\nmodel.save('saved_model/tf_model')\n\n# predicting images\npath = 'content/girl.jpg'\nimg = image.load_img(path, target_size=(300, 300))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\n\nimages = np.vstack([x])\nclasses = model.predict(images, batch_size=10)\nprint(classes[0])\nif classes[0] > 0.5:\n    print(\"is a human\")\nelse:\n    print(\"is a horse\")\n","repo_name":"debjyotiC/Coursera-TF","sub_path":"intro-to-TF/week-4.py","file_name":"week-4.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25674823801","text":"import streamlit as st\nimport pandas as pd \nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nimport pickle\n\nst.title('Ames House Price Prediction')\n\nst.write(\"\"\"\n\n# Ames Housing Prices\n------------\nThis app predicts Ames Housing Prices using a machine learning model powered by [Scikit Learn](https://scikit-learn.org/).\nThe data for the model is the famous [Ames Housing Prices](https://www.kaggle.com/competitions/dsi-us-11-project-2-regression-challenge/data) Dataset.\nPlay with the values via the sliders on the left panel to generate new predictions.\n\"\"\")\n\n\nst.write(\"---\")\n\ntrain_df = pd.read_csv('C:/Users/Mingzi/Documents/DSIF_edit/my_materials/my_materials/project_2/datasets/clean/df_final.csv')\ntrain_df = train_df.drop('Unnamed: 0', axis=1)\ntest_df = pd.read_csv('C:/Users/Mingzi/Documents/DSIF_edit/my_materials/my_materials/project_2/datasets/clean/test_df_final.csv')\ntest_df = test_df.drop('Unnamed: 0', axis=1)\n\nfeature_list = list(train_df.columns)\nfeature_list.remove('SalePrice')\n\nX = train_df[feature_list]\ny = train_df['SalePrice']\n\nst.sidebar.header('Specify Input Parameters - these will determine the predicted value.')\n\ndef features_from_user():\n    house_age = st.sidebar.slider('House Age', int(train_df['Age_built'].min()), int(train_df['Age_built'].max()), int(train_df['Age_built'].mean()))\n    
gr_liv_area = st.sidebar.slider('Living Area', int(train_df['Gr Liv Area'].min()), int(train_df['Gr Liv Area'].max()), int(train_df['Gr Liv Area'].mean()))\n overall_qual = st.sidebar.slider('Overall Quality', int(train_df['Overall Qual'].min()), int(train_df['Overall Qual'].max()), int(train_df['Overall Qual'].mean()))\n totrms = st.sidebar.slider('Total room above grade', int(train_df['TotRms AbvGrd'].min()), int(train_df['TotRms AbvGrd'].max()), int(train_df['TotRms AbvGrd'].mean()))\n fullbath = st.sidebar.slider('Full bathrooms above grade', int(train_df['Full Bath'].min()), int(train_df['Full Bath'].max()), int(train_df['Full Bath'].mean()))\n garage_age = st.sidebar.slider('Garage Age', int(train_df['Age_garage'].min()), int(train_df['Age_garage'].max()), int(train_df['Age_garage'].mean()))\n garage_area = st.sidebar.slider('Garage Area', int(train_df['Garage Area'].min()), int(train_df['Garage Area'].max()), int(train_df['Garage Area'].mean()))\n\n \n data = {'Id': train_df['Id'].loc[0],\n 'Age_built': house_age,\n 'Garage Area': garage_area,\n '1st Flr SF': train_df['1st Flr SF'].loc[0],\n 'Overall Qual': overall_qual,\n 'TotRms AbvGrd': totrms,\n 'Full Bath': fullbath,\n 'Age_garage': garage_age,\n 'Gr Liv Area': gr_liv_area,\n 'Total Bsmt SF': train_df['Total Bsmt SF'].loc[0],\n 'Garage Cars': train_df['Garage Cars'].loc[0],\n 'Mas Vnr Area': train_df['Mas Vnr Area'].loc[0]}\n \n input_data = pd.DataFrame(data, index=[0])\n \n return input_data\n \n \ndf = features_from_user()\nss_list = ['Id','Age_built','Garage Area','1st Flr SF','Overall Qual','TotRms AbvGrd','Full Bath','Age_garage','Gr Liv Area','Total Bsmt SF','Garage Cars','Mas Vnr Area']\n\nss = StandardScaler()\nX_train_sc = ss.fit_transform(X[ss_list])\ndf1 = ss.transform(df[ss_list])\n\nst.write('---')\n\n# Load the saved model\nloaded_model = pickle.load(open('C:/Users/Mingzi/Documents/DSIF_edit/my_materials/my_materials/project_2/ridge_model.sav', 'rb'))\n\n# Apply Model to Make Prediction\nprediction = int(loaded_model.predict(df1))\nprediction_nice = f\"{prediction:,d}\"\n\n\nst.header('Prediction of House Price in Ames:')\nst.write('Based on your selections, the model predicts a value of $%s.'%prediction_nice)\nst.write('---')\n \n \n\n","repo_name":"yangmz0528/Project_2_Ames_House_Prediction","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28468466814","text":"#-------------------------------------------------------------------------\n# AUTHOR: Chi Le\n# FILENAME: crawler.py\n# SPECIFICATION: The program retrieves and parses HTML content from linked pages of the CS department\n# website at CPP, searching for the page with the \"Permanent Faculty\" heading.\n# Extracted data, including URLs, titles, and content,\n# is stored in a MongoDB collection named \"pages\" for persistence.\n# FOR: CS 4250- Assignment #3\n# TIME SPENT: 5h\n#-----------------------------------------------------------*/\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nimport re\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom urllib.error import URLError\nfrom urllib.parse import urljoin\n\n#Connect to database\ndef connectDataBase():\n DB_NAME = \"CPP\"\n DB_HOST = \"localhost\"\n DB_PORT = 27017\n try:\n client = MongoClient(host=DB_HOST, port=DB_PORT)\n db = client[DB_NAME]\n return db\n except:\n print(\"Database not connected 
successfully\")\n\n# Class to manage the list of URLs to visit\nclass Frontier:\n    def __init__(self, initial_url):\n        self.urls = [initial_url]\n        self.visited = set()\n\n    def add_url(self, url):\n        if url not in self.visited and url not in self.urls:\n            self.urls.append(url)\n\n    def next_url(self):\n        if not self.urls:\n            return None\n        url = self.urls.pop(0)\n        self.visited.add(url)\n        return url\n\n    def done(self):\n        return not self.urls\n\n    def clear_frontier(self):\n        self.urls = []\n\n# Retrieve HTML content from a URL\ndef retrieve_url(url):\n    try:\n        html = urlopen(url)\n        return html.read().decode(encoding=\"iso-8859-1\")\n    except HTTPError as e:\n        # print(e)\n        return None\n    except URLError as e:\n        # print('The server could not be found!')\n        return None\n\n# Store page data in MongoDB\ndef store_page(collection, url, html, is_target):\n    data = {\n        '_id': collection.count_documents({}) + 1,\n        'url': url,\n        'html': html,\n        'isTarget': is_target\n    }\n    collection.insert_one(data)\n\n# Check if the page contains the target heading\ndef target_page(html):\n    bs = BeautifulSoup(html, 'html.parser')\n    heading = bs.find('h1', string='Permanent Faculty')\n    return heading\n\n# Extract links from HTML\ndef parse(html):\n    bs = BeautifulSoup(html, 'html.parser')\n    links = bs.find_all('a', href=True)\n    return [link['href'] for link in links]\n\n# Crawler main function\ndef crawler_thread(collection, frontier):\n    crawled_pages = []\n    while not frontier.done():\n        is_target = False\n        url = frontier.next_url()\n        crawled_pages.append(url)\n        html = retrieve_url(url)\n\n        if html:\n            if target_page(html):\n                print(f\"Target page found: {url}\")\n                is_target = True\n                frontier.clear_frontier()\n            else:\n                for new_url in parse(html):\n                    new_url = new_url.strip()\n                    is_absolute = re.search('^http', new_url)\n                    if is_absolute:\n                        full_url = new_url\n                    else:\n                        full_url = urljoin(url, new_url)\n                    if full_url not in frontier.urls and full_url not in crawled_pages:\n                        frontier.add_url(full_url)\n            store_page(collection, url, html, is_target)\n\ndef main():\n    db = connectDataBase()\n    pages = db.pages\n    base_url = \"https://www.cpp.edu/sci/computer-science/\"\n    frontier = Frontier(base_url)\n    frontier.add_url(base_url)\n    crawler_thread(pages, frontier)\n\nif __name__ == '__main__':\n    main()","repo_name":"chile20/CS4250-SearchEngineProject","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1341043360","text":"from pyqtgraph.graphicsItems.LegendItem import LegendItem\n\nclass MatveyevLegend(LegendItem):\n\n    def __init__(self, size=None, offset=None):\n        super(MatveyevLegend, self).__init__(size, offset)\n\n    def removeItemByAddress(self, item):\n        for sample, label in self.items:\n            if sample.item == item:  # !\n                self.items.remove((sample, label))\n                self.layout.removeItem(sample)\n                sample.close()\n                self.layout.removeItem(label)\n                label.close()\n                self.updateSize()\n\ndef addLegend(plot_item, size=None, offset=(30, 30)):\n\n    plot_item.legend = MatveyevLegend(size, offset)\n    plot_item.legend.setParentItem(plot_item.vb)\n    return plot_item.legend\n","repo_name":"yamedvedya/data_viewer","sub_path":"petra_viewer/utils/legend_item.py","file_name":"legend_item.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18180233533","text":"#Caixa de supermercado - Ler os valores e as quantidades de produtos que\n# se encerra 
se 0 for digitado\n\ntotalCompra = 0\n\nwhile True:\n preco = float(input(\"Preço do produto >> \"))\n\n if preco == 0:\n break\n else:\n qtProd = int(input(\"Quantidade do produto >> \"))\n totalCompra += preco * qtProd\nprint(f\"\\nO total da compra é {totalCompra}\")\n\n\n","repo_name":"PabloHenrique/AulasPython-Fatec","sub_path":"Exercícios/Algoritmo/Lista 07/exe03.py","file_name":"exe03.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"450370837","text":"import argparse\nimport os\nfrom glob import glob\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nfrom pose import KEYPOINTS, BOXES, VIDEO\n\n\ndef calculate_statistics(results_dir):\n assert is_output_dir(results_dir)\n\n print(f'Looking for results in {results_dir}')\n np_files = glob(str(results_dir / KEYPOINTS / '*.npy'))\n print(f'{len(np_files)} results found')\n\n keypoints = []\n n_people = np.zeros(len(np_files))\n\n for i in range(len(np_files)):\n array = np.load(np_files[i])\n keypoints.append(array)\n n_people[i] = array.shape[1]\n\n people_labels, people_counts = np.unique(n_people, return_counts=True)\n\n sns.set_theme()\n sns.set_style('whitegrid')\n sns.set_context('paper')\n plt.title('Max number of people found in any frame', fontsize=15)\n plt.pie(people_counts,\n labels=[int(label) for label in people_labels],\n autopct='%1.1f%%')\n plt.show()\n\n\ndef is_output_dir(path):\n if not os.path.exists(path):\n return False\n\n subdirs = [BOXES, KEYPOINTS, VIDEO]\n\n for subdir in subdirs:\n if not os.path.exists(path / subdir):\n return False\n\n return True\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('results_dir', metavar='results-dir', type=Path)\n args = parser.parse_args()\n\n calculate_statistics(args.results_dir)\n","repo_name":"Casvanrijbroek/Sign-Language-Thesis","sub_path":"pose/analyze_results.py","file_name":"analyze_results.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73701110760","text":"import json\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.conf import settings\nfrom django.contrib import admin, messages\nfrom django.contrib.admin.models import LogEntry, ADDITION, CHANGE\nfrom django.contrib.admin.options import get_content_type_for_model\nfrom django.contrib.admin.views.main import ChangeList\nfrom django.db import IntegrityError, models, transaction\nfrom django.urls import reverse, path, re_path\nfrom django.utils.encoding import force_str\nfrom django.utils.html import escape, format_html\nfrom django.utils.translation import gettext_lazy as gettext, ngettext\n\nfrom .forms import (\n ClerkGenerationForm,\n UITextForm,\n ClerkEditForm,\n ClerkSSOForm,\n VendorSetSelfForm,\n)\n\nfrom .models import (\n AccessSignup,\n Account,\n Clerk,\n Event,\n EventPermission,\n Item,\n ItemType,\n Vendor,\n Counter,\n Person,\n Receipt,\n ReceiptExtraRow,\n ReceiptItem,\n ReceiptNote,\n UIText,\n ItemStateLog,\n Box,\n TemporaryAccessPermit,\n TemporaryAccessPermitLog,\n)\n\nfrom .util import get_form\nfrom .utils import datetime_iso_human\n\n__author__ = 'jyrkila'\n\n\ndef with_description(short_description):\n def decorator(action_function):\n action_function.short_description = short_description\n return action_function\n return decorator\n\n\nclass 
FieldAccessor(object):\n \"\"\"\n Abstract base class for field-links to be used in Admin.list_display.\n Sub-classes must implement __call__ that is used to generate the field text / link.\n \"\"\"\n def __init__(self, field_name, description):\n \"\"\"\n :param field_name: Field to link to.\n :type field_name: str\n :param description: Column description.\n :type description: str\n \"\"\"\n self._field_name = field_name\n self._description = description\n\n def __call__(self, obj):\n \"\"\"\n :param obj: Model object from the query.\n :rtype: str\n :return: Unsafe string containing the field value.\n \"\"\"\n raise NotImplementedError\n\n @property\n def short_description(self):\n return self._description\n\n def __str__(self):\n # Django 1.9 converts the field to string for id.\n return self._field_name\n\n @property\n def __name__(self):\n # Django 1.10 lookups the field name via __name__.\n return self._field_name\n\n\nclass RefLinkAccessor(FieldAccessor):\n \"\"\"\n Accessor function that returns a link to given FK-field admin.\n \"\"\"\n def __call__(self, obj):\n field = getattr(obj, self._field_name)\n if field is None:\n return u\"(None)\"\n if callable(field):\n field = field()\n # noinspection PyProtectedMember\n info = field._meta.app_label, field._meta.model_name\n return format_html(\n u'{1}',\n reverse(\"admin:%s_%s_change\" % info, args=(field.id,)),\n escape(field)\n )\n\n\n@admin.register(Event)\nclass EventAdmin(admin.ModelAdmin):\n ordering = (\"-start_date\", \"name\")\n\n def get_list_display(self, request):\n list_display = [\"name\", \"slug\", \"start_date\", \"end_date\", \"registration_end\", \"checkout_active\"]\n if settings.KIRPPU_EXTRA_DATABASES or settings.KIRPPU_EXTRA_EVENTS:\n return list_display + [\"source_db\"]\n return list_display\n\n def get_readonly_fields(self, request, obj=None):\n if obj is not None:\n return (\"source_db\",)\n return ()\n\n def get_form(self, request, obj=None, change=False, **kwargs):\n form = super().get_form(request, obj, change, **kwargs)\n field = form.base_fields[\"provision_function\"]\n text = field.help_text\n text += format_html(' ' + escape(\n force_str(gettext(\"View help.\"))) + '')\n field.help_text = text\n\n if obj is None:\n from django import forms\n source_dbs = [(e, e) for e in Event.get_source_event_list()]\n source_dbs.insert(0, (None, \"(default)\"))\n form.base_fields[\"source_db\"] = forms.ChoiceField(\n choices=source_dbs,\n required=False,\n help_text=gettext(\"Setting a value other than default enables mobile view\"\n \" and this event is otherwise made read-only.\"))\n return form\n\n def save_form(self, request, form, change):\n i = form.instance # type: Event\n if i.source_db == \"\":\n i.source_db = None\n elif i.source_db is not None:\n i.mobile_view_visible = True\n i.checkout_active = False\n\n return super().save_form(request, form, change)\n\n def get_urls(self):\n urls = super().get_urls()\n from django.views.generic import TemplateView\n urls.append(path('help/lisp', TemplateView.as_view(template_name=\"kirppu/help_lisp.html\"),\n name=\"kirppu_lisp_help\"))\n return urls\n\n\n\"\"\"\nAdmin UI list column that displays user name with link to the user model itself.\n\n:param obj: Object being listed, such as Clerk or Vendor.\n:type obj: Clerk | Vendor | T\n:return: Contents for the field.\n:rtype: unicode\n\"\"\"\n_user_link = RefLinkAccessor(\"user\", gettext(u\"User\"))\n\n_person_link = RefLinkAccessor(\"person\", gettext(u\"Person\"))\n\n_event_link = RefLinkAccessor(\"event\", 
gettext(\"Event\"))\n\n\n@admin.register(EventPermission)\nclass EventPermissionAdmin(admin.ModelAdmin):\n list_display = (\"id\", _event_link, _user_link, \"combination\")\n list_display_links = (\"id\", \"combination\")\n list_filter = (\n \"event\",\n )\n list_select_related = (\n \"event\",\n \"user\",\n )\n autocomplete_fields = [\n \"user\",\n ]\n\n\n@admin.register(Vendor)\nclass VendorAdmin(admin.ModelAdmin):\n ordering = ('user__first_name', 'user__last_name')\n search_fields = ['id', 'user__first_name', 'user__last_name', 'user__username',\n 'person__first_name', 'person__last_name']\n list_display = ['id', _user_link, _person_link, \"terms_accepted\", \"event\"]\n list_filter = (\n \"event\",\n )\n list_select_related = (\n \"event\",\n \"user\",\n \"person\",\n )\n\n @staticmethod\n def _can_set_user(request, obj):\n return obj is not None and\\\n request.user.is_superuser and\\\n not obj.user.is_superuser and\\\n settings.KIRPPU_SU_AS_USER\n\n def get_form(self, request, obj=None, **kwargs):\n if self._can_set_user(request, obj):\n kwargs[\"form\"] = VendorSetSelfForm\n return super(VendorAdmin, self).get_form(request, obj, **kwargs)\n\n def get_readonly_fields(self, request, obj=None):\n fields = [\"user\"] if obj is not None and not self._can_set_user(request, obj) else []\n fields.append(\"terms_accepted\")\n if obj is not None:\n fields.append(\"event\")\n fields.append(\"person\")\n return fields\n\n\nadmin.site.register(Person)\nadmin.site.register(Account)\n\n\nclass ClerkEditLink(FieldAccessor):\n def __call__(self, obj):\n \"\"\"\n :type obj: Clerk\n :return:\n \"\"\"\n value = getattr(obj, self._field_name)\n info = obj._meta.app_label, obj._meta.model_name\n if obj.user is None:\n return escape(value)\n else:\n return format_html(\n u'{1}',\n reverse(\"admin:%s_%s_change\" % info, args=(obj.id,)),\n escape(value)\n )\n\n\n_clerk_id_link = ClerkEditLink(\"id\", gettext(\"ID\"))\n_clerk_access_code_link = ClerkEditLink(\"access_code_str\", gettext(\"Access code\"))\n\n\n# noinspection PyMethodMayBeStatic\n@admin.register(Clerk)\nclass ClerkAdmin(admin.ModelAdmin):\n uses_sso = settings.KIRPPU_USE_SSO # Used by the overridden template.\n actions = [\"_gen_clerk_code\", \"_del_clerk_code\", \"_move_clerk_code\"]\n ordering = (\"event\", 'user__first_name', 'user__last_name')\n search_fields = ['user__first_name', 'user__last_name', 'user__username']\n exclude = ['access_key']\n list_filter = (\"event\",)\n list_display_links = None\n autocomplete_fields = [\n \"user\",\n ]\n\n def get_list_display(self, request):\n if settings.DEBUG:\n return _clerk_id_link, _user_link, _clerk_access_code_link, 'access_key', 'is_enabled', 'event'\n else:\n return _clerk_id_link, _user_link, _clerk_access_code_link, 'is_enabled', 'event'\n\n @with_description(gettext(u\"Generate missing Clerk access codes\"))\n def _gen_clerk_code(self, request, queryset):\n for clerk in queryset:\n if not clerk.is_valid_code:\n clerk.generate_access_key()\n clerk.save(update_fields=[\"access_key\"])\n\n @with_description(gettext(u\"Delete Clerk access codes\"))\n def _del_clerk_code(self, request, queryset):\n for clerk in queryset:\n while True:\n clerk.generate_access_key(disabled=True)\n try:\n clerk.save(update_fields=[\"access_key\"])\n except IntegrityError:\n continue\n else:\n break\n\n def _move_error(self, request, error):\n if error == \"count\":\n msg = gettext(u\"Must select exactly one 'unbound' and one 'bound' Clerk for this operation\")\n elif error == \"event\":\n msg = gettext(\"Must 
select unbound and bound rows from same Event\")\n else:\n msg = \"Unknown error key: \" + error\n self.message_user(request, msg, messages.ERROR)\n\n @with_description(gettext(u\"Move unused access code to existing Clerk.\"))\n @transaction.atomic\n def _move_clerk_code(self, request, queryset):\n if len(queryset) != 2:\n self._move_error(request, \"count\")\n return\n\n # Guess the order.\n unbound = queryset[0]\n bound = queryset[1]\n if queryset[1].user is None:\n # Was wrong, swap around.\n bound, unbound = unbound, bound\n\n if unbound.user is not None or bound.user is None:\n # Selected wrong rows.\n self._move_error(request, \"count\")\n return\n\n if unbound.event != bound.event:\n # Must move inside same event.\n self._move_error(request, \"event\")\n return\n\n # Assign the new code to be used. Remove the unbound item first, so unique-check doesn't break.\n bound.access_key = unbound.access_key\n\n self.log_access_key_move(request, unbound, bound)\n unbound.delete()\n bound.save(update_fields=[\"access_key\"])\n\n self.message_user(request, gettext(u\"Access code set for '{0}' in '{1}'\").format(bound.user, bound.event.name))\n\n def get_form(self, request, obj=None, **kwargs):\n # Custom form for editing already created Clerks.\n if obj is not None:\n return ClerkEditForm\n\n return super(ClerkAdmin, self).get_form(request, obj, **kwargs)\n\n def has_change_permission(self, request, obj=None):\n # Don't allow changing unbound Clerks. That might create unusable codes (because they are not printed).\n if obj is not None and obj.user is None:\n return False\n return True\n\n def save_related(self, request, form, formsets, change):\n if isinstance(form, (ClerkEditForm, ClerkSSOForm)):\n # No related fields...\n return\n return super(ClerkAdmin, self).save_related(request, form, formsets, change)\n\n def save_model(self, request, obj, form, change):\n if change and isinstance(form, ClerkEditForm):\n # Need to save the form instead of obj.\n form.save()\n else:\n super(ClerkAdmin, self).save_model(request, obj, form, change)\n\n def get_urls(self):\n info = self.opts.app_label, self.opts.model_name\n return super(ClerkAdmin, self).get_urls() + [\n re_path(r'^add/bulk_unbound$', self.bulk_add_unbound, name=\"%s_%s_bulk\" % info),\n re_path(r'^add/sso$', self.add_from_sso, name=\"%s_%s_sso\" % info),\n ]\n\n def add_from_sso(self, request):\n if not self.has_add_permission(request) or not self.uses_sso:\n raise PermissionDenied\n\n form = get_form(ClerkSSOForm, request) # type: ClerkSSOForm\n\n if request.method == 'POST' and form.is_valid():\n clerk = form.save()\n self.log_addition(request, clerk, {\"added\": {}})\n\n msg = format_html(\n gettext(\"Clerk {name} added into {event}.\"),\n name=form.cleaned_data[\"user\"],\n event=form.cleaned_data[\"event\"],\n )\n self.message_user(request, msg, messages.SUCCESS)\n\n from django.http import HttpResponseRedirect\n return HttpResponseRedirect(reverse(\"admin:%s_%s_changelist\" % (self.opts.app_label, self.opts.model_name)))\n\n return self._get_custom_form(request, form, gettext('Add clerk from SSO provider'))\n\n def bulk_add_unbound(self, request):\n if not self.has_add_permission(request):\n raise PermissionDenied\n\n form = get_form(ClerkGenerationForm, request)\n\n if request.method == 'POST' and form.is_valid():\n objs = form.generate()\n self.log_bulk_addition(request, objs)\n\n msg = format_html(\n ngettext('One unbound clerk added.', '{count} unbound clerks added.', form.get_count()),\n count=form.get_count()\n )\n 
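# Confirm the bulk creation in the admin UI before redirecting back to the change list.\n            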
self.message_user(request, msg, messages.SUCCESS)\n\n from django.http import HttpResponseRedirect\n return HttpResponseRedirect(reverse(\"admin:%s_%s_changelist\" % (self.opts.app_label, self.opts.model_name)))\n\n return self._get_custom_form(request, form, gettext('Add unbound clerk'))\n\n def _get_custom_form(self, request, form, title):\n from django.contrib.admin.helpers import AdminForm, AdminErrorList\n admin_form = AdminForm(\n form,\n form.get_fieldsets(),\n {},\n model_admin=self)\n media = self.media + admin_form.media\n\n inline_formsets = []\n context = dict(\n self.admin_site.each_context(request),\n title=force_str(title),\n media=media,\n adminform=admin_form,\n is_popup=False,\n show_save_and_continue=False,\n inline_admin_formsets=inline_formsets,\n errors=AdminErrorList(form, inline_formsets),\n )\n\n return self.render_change_form(request, context, add=True)\n\n def log_bulk_addition(self, request, objects):\n # noinspection PyProtectedMember\n change_message = json.dumps([{\n 'added': {\n 'name': force_str(added_object._meta.verbose_name),\n 'object': force_str(added_object),\n }\n } for added_object in objects])\n\n from .util import shorten_text\n object_repr = \", \".join([shorten_text(force_str(added_object), 5) for added_object in objects])\n\n return LogEntry.objects.create(\n user_id=request.user.pk,\n content_type_id=get_content_type_for_model(objects[0]).pk,\n object_repr=object_repr[:200],\n action_flag=ADDITION,\n change_message=change_message,\n )\n\n def log_access_key_move(self, request, unbound, target):\n # noinspection PyProtectedMember\n change_message = [{\n 'changed': {\n 'name': force_str(target._meta.verbose_name),\n 'object': force_str(target),\n 'fields': [\"access_key\"],\n },\n 'deleted': {\n 'name': force_str(unbound._meta.verbose_name),\n 'object': force_str(unbound)\n }\n }]\n return LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=get_content_type_for_model(target).pk,\n object_id=target.pk,\n object_repr=force_str(target),\n action_flag=CHANGE,\n change_message=change_message,\n )\n\n\n@admin.register(Counter)\nclass CounterAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"identifier\", \"event\", \"is_in_use\", \"is_locked\")\n list_filter = (\"event\",)\n actions = (\"lock_counter\", \"reset_use\")\n\n @with_description(gettext(\"Lock Counter\"))\n def lock_counter(self, request, queryset):\n for counter in queryset:\n counter.assign_private_key(for_lock=True)\n\n @with_description(gettext(\"Reset Counter usage status\"))\n def reset_use(self, request, queryset):\n queryset.update(private_key=None)\n\n\nadmin.site.register(ReceiptExtraRow)\n\n\n@admin.register(UIText)\nclass UITextAdmin(admin.ModelAdmin):\n model = UIText\n ordering = [\"event\", \"identifier\"]\n form = UITextForm\n list_display = [\"identifier\", \"text_excerpt\", \"event\"]\n list_filter = (\"event\",)\n\n\n@admin.register(ItemType)\nclass ItemTypeAdmin(admin.ModelAdmin):\n ordering = [\"event\", \"order\"]\n list_display = [\"title\", \"order\", \"event\"]\n list_editable = [\"order\"]\n list_filter = (\"event\",)\n list_display_links = [\"title\"]\n\n\n@admin.register(Item)\nclass ItemAdmin(admin.ModelAdmin):\n @with_description(gettext(u\"Re-generate bar codes for items\"))\n def _regen_barcode(self, request, queryset):\n for item in queryset:\n item.code = Item.gen_barcode()\n item.save(update_fields=[\"code\"])\n\n def get_actions(self, request):\n s = super().get_actions(request)\n if settings.DEBUG:\n for f in 
[ItemAdmin._regen_barcode]:\n (func, name, desc) = self.get_action(f)\n s[name] = (func, name, desc)\n return s\n\n list_display = ('name', 'code', 'price', 'state', RefLinkAccessor('vendor', gettext(\"Vendor\")))\n ordering = ('vendor', 'name')\n search_fields = ['name', 'code']\n list_select_related = (\"vendor\", \"vendor__user\")\n list_filter = (\n \"vendor__event\",\n \"state\",\n )\n autocomplete_fields = [\n \"box\",\n \"vendor\",\n ]\n\n\n_receipt_item_link = RefLinkAccessor(\"item\", gettext(\"Item\"))\n\n\nclass ReceiptItemAdmin(admin.TabularInline):\n model = ReceiptItem\n ordering = [\"add_time\"]\n exclude = [\"item\"]\n readonly_fields = [_receipt_item_link, \"action\", \"price_str\", \"add_time_str\"]\n\n @with_description(Item._meta.get_field(\"price\").verbose_name)\n def price_str(self, instance: ReceiptItem):\n return instance.item.price\n\n @with_description(ReceiptItem._meta.get_field(\"add_time\").verbose_name)\n def add_time_str(self, instance: ReceiptItem):\n return instance.add_time.isoformat(sep=\" \", timespec=\"milliseconds\")\n\n\nclass ReceiptExtraAdmin(admin.TabularInline):\n model = ReceiptExtraRow\n\n\nclass ReceiptNoteAdmin(admin.TabularInline):\n model = ReceiptNote\n ordering = [\"timestamp\"]\n readonly_fields = [\"clerk\", \"text\", \"timestamp\"]\n\n\n@admin.register(Receipt)\nclass ReceiptAdmin(admin.ModelAdmin):\n inlines = [\n ReceiptItemAdmin,\n ReceiptExtraAdmin,\n ReceiptNoteAdmin,\n ]\n ordering = [\"-start_time\"]\n list_display = [\"__str__\", \"status\", \"total\", \"counter\", \"end_time_str\"]\n list_filter = [\n (\"type\", admin.ChoicesFieldListFilter),\n \"clerk\",\n \"counter\",\n \"status\",\n ]\n search_fields = [\"items__code\", \"items__name\"]\n actions = [\"re_calculate_total\"]\n exclude = [\"end_time\"]\n readonly_fields = [\"start_time_str\", \"end_time_str\"]\n list_select_related = [\"clerk\", \"clerk__user\", \"counter\"]\n\n @with_description(\"Re-calculate total sum of receipt\")\n def re_calculate_total(self, request, queryset):\n for i in queryset: # type: Receipt\n i.calculate_total()\n i.save(update_fields=[\"total\"])\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n @with_description(Receipt._meta.get_field(\"start_time\").verbose_name)\n def start_time_str(self, instance: Receipt):\n return datetime_iso_human(instance.start_time)\n\n @with_description(Receipt._meta.get_field(\"end_time\").verbose_name)\n def end_time_str(self, instance: Receipt):\n return datetime_iso_human(instance.end_time)\n\n\n@admin.register(ItemStateLog)\nclass ItemStateLogAdmin(admin.ModelAdmin):\n model = ItemStateLog\n ordering = [\"-id\"]\n search_fields = ['item__code', 'clerk__user__username']\n list_display = ['id', 'time_str',\n RefLinkAccessor(\"item\", gettext(\"Item\")),\n 'old_state', 'new_state',\n RefLinkAccessor(\"clerk\", gettext(\"Clerk\")),\n 'counter']\n list_select_related = (\n \"clerk\", \"counter\", \"item\", \"clerk__user\",\n )\n readonly_fields = [\"time_str\"]\n list_filter = (\n \"old_state\", \"new_state\", \"clerk\", \"counter\",\n )\n autocomplete_fields = [\n \"item\",\n ]\n\n @with_description(ItemStateLog._meta.get_field(\"time\").verbose_name)\n def time_str(self, instance: ItemStateLog):\n return datetime_iso_human(instance.time)\n\n\nclass BoxItemAdmin(admin.TabularInline):\n model = Item\n exclude = [\"name\", \"price\", \"type\", \"itemtype\", \"adult\", \"vendor\"]\n readonly_fields = [\"code\", \"state\", \"printed\", \"hidden\", \"abandoned\", \"lost_property\"]\n can_delete = 
False\n    extra = 0\n    show_change_link = True\n\n    def has_add_permission(self, request, obj=None):\n        return False\n\n\n@admin.register(Box)\nclass BoxAdmin(admin.ModelAdmin):\n    model = Box\n    inlines = [\n        BoxItemAdmin,\n    ]\n    readonly_fields = [\n        'representative_item',\n        'get_item_type_for_display',\n        'get_item_adult_for_display',\n        'bundle_size',\n        'get_item_count',\n    ]\n    search_fields = ['box_number', 'description', 'representative_item__code']\n    ordering = ['box_number']\n    list_display = [\n        'box_number',\n        'description',\n        'code',\n        'get_price',\n        '_list_item_count',\n        'bundle_size',\n        RefLinkAccessor(\"get_vendor\", gettext(\"Vendor\")),\n    ]\n    list_display_links = ['box_number', 'description']\n    list_select_related = (\n        \"representative_item\",\n        \"representative_item__vendor\",\n        \"representative_item__vendor__event\",\n        \"representative_item__vendor__user\"\n    )\n    list_filter = (\n        \"representative_item__vendor__event\",\n    )\n\n    class BoxChangeList(ChangeList):\n        def get_queryset(self, request):\n            qs = super().get_queryset(request)\n            # Pre-calculate item count so it is not needed to be calculated per row afterwards.\n            # _list_item_count is used to access this, as list_display must point to concrete fields or functions.\n            return qs.annotate(item_count=models.Count(\"item\", models.Q(item__hidden=False)))\n\n    def get_changelist(self, request, **kwargs):\n        return self.BoxChangeList\n\n    @with_description(gettext(\"Item count\"))\n    def _list_item_count(self, instance):\n        return instance.item_count\n\n\n@admin.register(TemporaryAccessPermit)\nclass TemporaryAccessPermitAdmin(admin.ModelAdmin):\n    model = TemporaryAccessPermit\n    readonly_fields = (\"vendor\", \"creator\", \"short_code\")\n    list_display = (\n        \"__str__\",\n        RefLinkAccessor(\"vendor\", gettext(\"Vendor\")),\n    )\n\n\n@admin.register(TemporaryAccessPermitLog)\nclass TemporaryAccessPermitLogAdmin(admin.ModelAdmin):\n    model = TemporaryAccessPermitLog\n    readonly_fields = (\"permit\", \"timestamp\", \"action\", \"address\", \"peer\")\n    list_display = (\n        \"__str__\",\n        RefLinkAccessor(\"permit\", gettext(\"Permit\")),\n        \"timestamp\",\n        \"action\",\n    )\n\n\n@admin.register(AccessSignup)\nclass AccessSignupAdmin(admin.ModelAdmin):\n    list_display = (\n        \"id\",\n        \"event\",\n        \"user\",\n        \"update_time\",\n    )\n    list_filter = (\"event\",)\n","repo_name":"jlaunonen/kirppu","sub_path":"kirppu/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":23990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"27014018783","text":"#!/usr/bin/python\r\n\r\nimport socket, sys\r\nfrom time import sleep\r\n\r\nbuffer = \"A\" * 100\r\n\r\nwhile True:\r\n\ttry:\r\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ts.connect(('192.168.56.106', 9999))\r\n\t\t# Python 3 sockets send bytes, so the payload must be encoded.\r\n\t\ts.send(('string' + buffer).encode())\r\n\t\ts.close()\r\n\t\tsleep(1)\r\n\r\n\t\tbuffer = buffer + 'A' * 100\r\n\r\n\texcept:\r\n\t\tprint(\"Fuzzing crashed at:\", len(buffer))\r\n\t\tsys.exit()","repo_name":"Xcatolin/general-tooling","sub_path":"BufferOverflow-Utilities/fuzz.py","file_name":"fuzz.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
{"seq_id":"15835341703","text":"from django.db import models\nfrom django.core.validators import validate_image_file_extension\n\n# A list keeps the choice order stable; the original set literal would reorder the choices on every run.\ncategories = [\n    (\"Laptop\",\"Laptop\"),\n    (\"Printer\",\"Printer\"),\n    (\"Desktop\",\"Desktop\"),\n    (\"Accessories\",\"Accessories\")\n]\n\nclass Product(models.Model):\n
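    # A sellable item listed in the storefront catalog.\n    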
category=models.CharField(choices=categories,max_length=50)\n model_name=models.CharField(max_length=100)\n image = models.ImageField(upload_to='Images',validators=[validate_image_file_extension])\n specification=models.TextField()\n price = models.PositiveIntegerField()\n published_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\" {self.category} - {self.model_name}\"\n\n \n class Meta:\n ordering = ['-published_at']\n\nclass Order(models.Model):\n product = models.ForeignKey(Product,on_delete=models.CASCADE)\n name = models.CharField(max_length=100)\n address = models.CharField(max_length=100)\n phone_no = models.CharField(max_length=15)\n email = models.EmailField(max_length=100)\n ordered_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\" {self.product} - {self.name} - {self.ordered_at.year}-{self.ordered_at.month}-{self.ordered_at.day}\"\n\n class Meta:\n ordering = ['-ordered_at']\n\nclass Contact(models.Model):\n name =models.CharField(max_length=500)\n number = models.CharField(max_length=15)\n query =models.CharField(max_length=500)\n reported_on =models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"{self.name} - {self.reported_on.year}-{self.reported_on.month}-{self.reported_on.day}\"\n \n class Meta:\n ordering = ['-reported_on']","repo_name":"CPTCIC5/cgkatni","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9479815901","text":"import usb1\nimport threading\nimport queue\nimport hid\nimport asyncio\n\n\nclass BaseUSBDeviceHandler:\n def __init__(self):\n context = usb1.USBContext()\n context.open()\n\n loop = asyncio.get_event_loop()\n\n def added_cb(fd, events):\n if events & 1:\n loop.add_reader(fd, context.handleEventsTimeout)\n if events & 4:\n loop.add_writer(fd, context.handleEventsTimeout)\n\n def removed_cb(fd):\n loop.remove_reader(fd)\n loop.remove_writer(fd)\n\n for fd, events in context.getPollFDList():\n added_cb(fd, events)\n\n context._USBContext__has_pollfd_finalizer = True\n context.setPollFDNotifiers(added_cb=added_cb, removed_cb=removed_cb)\n context.setDebug(usb1.LOG_LEVEL_DEBUG)\n\n def hotplug_callback(context, device, event):\n print(\"event\", repr(device), event)\n if event == usb1.HOTPLUG_EVENT_DEVICE_ARRIVED:\n loop.create_task(self._handle_new_device(device))\n\n context.hotplugRegisterCallback(callback=hotplug_callback, vendor_id=0x5ac)\n self._context = context\n\n def close(self):\n self._context.setPollFDNotifiers(None, None)\n self._context.close()\n\n\nclass USBRoleSwitchHandler(BaseUSBDeviceHandler):\n def __init__(self, after_role_switch, car_play=False):\n super().__init__()\n self._after_role_switch = after_role_switch\n self._car_play = car_play\n\n async def _handle_new_device(self, device):\n open_device = device.open()\n\n await _usb_control_transfer(\n open_device, usb1.RECIPIENT_DEVICE | usb1.LIBUSB_REQUEST_TYPE_VENDOR,\n 0x51, 1 if self._car_play else 0, 0, 0)\n self._after_role_switch()\n\n\nclass USBDeviceTransport(BaseUSBDeviceHandler):\n def __init__(self, on_connection):\n super().__init__()\n self._on_connection = on_connection\n\n async def _handle_new_device(self, device):\n CONFIGURATION_VALUE = 2\n configs = [\n c for c in device.iterConfigurations()\n if c.getConfigurationValue() == CONFIGURATION_VALUE\n ]\n config = configs[0]\n\n interfaces = [\n s for i in config.iterInterfaces() for s 
in i.iterSettings()\n if s.getClassTuple() == (3, 0)\n ]\n interface_setting = interfaces[0]\n interface_num = interface_setting.getNumber()\n endpoints = list(interface_setting.iterEndpoints())\n endpoint = endpoints[0]\n print(interface_num)\n\n open_device = device.open()\n open_device.setConfiguration(CONFIGURATION_VALUE)\n\n report_descriptor = await _usb_control_transfer(\n open_device, usb1.ENDPOINT_IN | usb1.RECIPIENT_INTERFACE,\n usb1.REQUEST_GET_DESCRIPTOR, (usb1.DT_REPORT << 8), interface_num,\n 2000)\n print(report_descriptor)\n output_report_ids = []\n input_report_ids = dict()\n report_id = None\n report_count = None\n for tag, item in get_descriptor_items(report_descriptor):\n if tag == 0x84:\n report_id = item[0]\n elif tag == 0x94:\n report_count = int(item[0]) if len(\n item) == 1 else int(item[1]) << 8 | int(item[0])\n elif tag == 0x90:\n output_report_ids.append((report_id, report_count))\n elif tag == 0x80:\n input_report_ids[report_id] = report_count\n\n output_report_ids.sort(key=lambda a:a[0])\n\n hid_device = hid.Device(vid=device.getVendorID(),\n pid=device.getProductID(),\n serial=device.getSerialNumber())\n w = HIDWriter(hid_device, output_report_ids)\n r = HIDReader(hid_device, input_report_ids)\n self._on_connection(w, r)\n\n\ndef _usb_control_transfer(device, request_type, request, value, index, length):\n transfer = device.getTransfer()\n future = asyncio.get_event_loop().create_future()\n\n def cb(transfer):\n status = transfer.getStatus()\n if status == usb1.LIBUSB_TRANSFER_COMPLETED:\n future.set_result(transfer.getBuffer()[:transfer.getActualLength()])\n else:\n future.set_exception(IOError(f\"USB error {status}\"))\n\n transfer.setControl(request_type, request, value, index, length, callback=cb)\n transfer.submit()\n return future\n\n\ndef get_descriptor_items(descriptor):\n i = 0\n while i < len(descriptor):\n tag = descriptor[i]\n if tag == 0xFE:\n size = descriptor[i + 1]\n tag = descriptor[i + 2]\n i += 3\n data = descriptor[i:i + size]\n i += size\n else:\n size = (1 << (tag & 3)) >> 1\n i += 1\n data = descriptor[i:i + size]\n tag &= 0xFC\n i += size\n yield (tag, data)\n\n\nLCB_CONTINUATION = 1\nLCB_MORE_TO_FOLLOW = 2\n\n\nclass HIDReader:\n def __init__(self, hid_device, input_report_ids):\n self._loop = asyncio.get_event_loop()\n self._hid_device = hid_device\n self._input_report_ids = input_report_ids\n self._read_buffer_semaphore = threading.Semaphore(value=3)\n self._read_buffer_queue = asyncio.Queue()\n self._max_len = max(input_report_ids.values())\n self.eof = False\n self._read_buffer = None\n threading.Thread(target=self._read_loop).start()\n\n async def readexactly(self, nbytes):\n if not self._read_buffer or len(self._read_buffer) == 0:\n self._read_buffer_semaphore.release()\n if self.eof:\n raise asyncio.exceptions.IncompleteReadError(partial=self._read_buffer, expected=nbytes)\n self._read_buffer = await self._read_buffer_queue.get()\n\n if len(self._read_buffer) >= nbytes:\n b = self._read_buffer[:nbytes]\n self._read_buffer = self._read_buffer[nbytes:]\n return b\n else:\n b = bytearray(self._read_buffer)\n while len(b) <= nbytes:\n self._read_buffer_semaphore.release()\n if self.eof:\n raise asyncio.exceptions.IncompleteReadError(partial=self._read_buffer, expected=nbytes)\n b.extend(await self._read_buffer_queue.get())\n self._read_buffer = b[nbytes:]\n return b[:nbytes]\n\n def reset(self):\n self._read_buffer = None\n\n def _read_loop(self):\n buf = bytearray()\n try:\n while not self.eof:\n 
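# Wait for a consumer request; the semaphore (initial value 3) caps read-ahead at three packets.\n                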
self._read_buffer_semaphore.acquire()\n                while not self.eof:\n                    report = self._hid_device.read(self._max_len + 2)\n                    if len(report) <= 2:\n                        continue\n                    lcb = report[1]\n                    payload = report[2:]\n                    if (lcb & LCB_CONTINUATION) == 0:\n                        buf.clear()\n                    if (lcb & LCB_MORE_TO_FOLLOW) != 0:\n                        buf.extend(payload)\n                    else:\n                        if len(buf) > 0:\n                            buf.extend(payload)\n                            packet = bytes(buf)\n                            buf.clear()\n                        else:\n                            packet = payload\n                        self._loop.call_soon_threadsafe(\n                            lambda: self._read_buffer_queue.put_nowait(packet))\n                        break\n        except:\n            self.feed_eof()\n\n    def feed_eof(self):\n        self.eof = True\n        self._read_buffer_semaphore.acquire()\n\n\nclass HIDWriter:\n    def __init__(self, hid_device, output_report_ids):\n        self.closed = False\n        self._hid_device = hid_device\n        self._output_report_ids = output_report_ids\n        self._write_buffer_queue = queue.Queue()\n        threading.Thread(target=self._write_loop).start()\n\n    def write(self, buffer):\n        if self.closed:\n            raise IOError(\"closed\")\n        self._write_buffer_queue.put_nowait(buffer)\n\n    def _write_loop(self):\n        while True:\n            first = True\n            buf = self._write_buffer_queue.get()\n            if buf is None or self.closed:\n                return\n            while len(buf) > 0:\n                report_id = None\n                report_count = None\n                for id, count in self._output_report_ids:\n                    count -= 1 # take lcb into account\n                    report_id = id\n                    report_count = count\n                    if count > len(buf):\n                        break\n                lcb = 0\n                if first:\n                    first = False\n                else:\n                    lcb |= LCB_CONTINUATION\n                if report_count < len(buf):\n                    lcb |= LCB_MORE_TO_FOLLOW\n                padding = b'\\0' * max(report_count - len(buf), 0)\n                self._hid_device.write(\n                    bytes([report_id, lcb]) + buf[:report_count] + padding)\n                buf = buf[report_count:]\n\n    def close(self):\n        self.closed = True\n        self._write_buffer_queue.put_nowait(None)\n","repo_name":"wiomoc/iap2","sub_path":"iap2/transport/usb_device.py","file_name":"usb_device.py","file_ext":"py","file_size_in_byte":9188,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"}
{"seq_id":"73202034601","text":"\"2) Develop an algorithm that reads the heights of 15 people. This program must compute and display\"\n\"a. The smallest height in the group;\"\n
\"b. The largest height in the group;\"\nmaiorAltura = 0\nmenorAltura = 1000\nfor c in range(1, 16):\n    altura = float(input('Enter height value {}: '.format(c)))\n    if altura > maiorAltura:\n        maiorAltura = altura\n    if altura < menorAltura:\n        menorAltura = altura\nprint('Largest height: {}'.format(maiorAltura))\nprint('Smallest height: {}'.format(menorAltura))\n\n","repo_name":"fhvol/exercicios-python","sub_path":"exec_2.py","file_name":"exec_2.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"10008702176","text":"from flask import render_template, flash, redirect, url_for, request, abort, Blueprint\nfrom flask_lib import db\nfrom flask_lib.main.forms import InviteForm\nfrom flask_lib.models import User, Library, BCopy\nfrom flask_login import current_user, login_required\n\nmain = Blueprint('main', __name__)\n\n\n@main.route('/')\n@main.route('/home')\n@login_required\ndef home():\n    page = request.args.get('page', 1, type=int)\n    main_lib = Library.query.filter_by(owner_id=current_user.id).first()\n    # libs = Library.query.filter_by(member=current_user).paginate(page=page, per_page=5)\n    libs = current_user.Libraries\n    return render_template('home.html', libs=libs, main_lib=main_lib)\n\n\n@main.route('/library/<int:lib_id>')\n@login_required\ndef library(lib_id):\n    page = request.args.get('page', 1, type=int)\n    lib = Library.query.get_or_404(lib_id)\n    user = lib.owner\n    if user != current_user and lib not in current_user.Libraries:\n        flash(\"You can't view this library.\", 'danger')\n        return redirect(url_for('main.home'))\n    books = BCopy.query.filter_by(part=lib).paginate(page=page, per_page=5)\n    data = {'Reading': 0, 'Finished': 0, 'Lend': 0, 'New': 0, 'PD': 0}\n    for book in books.items:\n        if book.lend:\n            data['Lend'] += 1\n        else:\n            data[book.status] += 1\n    if user.days > 0:\n        data['PD'] = user.pages / user.days\n    return render_template('library.html', books=books, lib=lib, data=data)\n\n\n@main.route('/library/<int:lib_id>/invite', methods=['GET', 'POST'])\n@login_required\ndef invite(lib_id):\n    form = InviteForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(username=form.username.data).first()\n        lib = Library.query.filter_by(id=lib_id).first()\n        if lib.owner != current_user:\n            abort(403)\n        user.Libraries.append(lib)\n        db.session.commit()\n        return redirect(url_for('main.home'))\n    return render_template('invite.html', title='Invite', form=form)\n","repo_name":"Wbrzozowski95/Library","sub_path":"flask_lib/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"40170598871","text":"# coding: utf-8\n\nans = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n\nn = int(input())\n\nresult = []\nfor i in range(n):\n    exam = list(map(int, input().split()))\n    if ans == exam:\n        result.append(i+1)\n\nfor r in result:\n    print(r)\n","repo_name":"lee-seul/baekjoon","sub_path":"10874.py","file_name":"10874.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
{"seq_id":"27613958627","text":"#!/usr/bin/env python3\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport math\nimport os\nimport csv\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom util import *\n\nimport ssl\n# deal with : ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: 
...\nssl._create_default_https_context = ssl._create_unverified_context\n\nnp.set_printoptions(suppress=True)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nnp.random.seed(0)\n\nEPOCHES = 100\nBATCH_SIZE = 256\nlearning_rate = 0.001\n\n# real data\n# ########################################\n# Note: We set one_hot so that the 10 labels are vectorized as 0 or 1.\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\ntrain_images = mnist.train.images\ntrain_labels = mnist.train.labels\ntest_images = mnist.test.images\ntest_labels = mnist.test.labels\n\nprint(\"train_images_shape:\", train_images.shape)\nprint(\"train_labels_shape:\", train_labels.shape)\nprint(\"test_images_shape:\", test_images.shape)\nprint(\"test_labels_shape:\", test_labels.shape)\nreal_X, real_Y = train_images, train_labels\nseed = 113\nnp.random.seed(seed)\nnp.random.shuffle(real_X)\nnp.random.seed(seed)\nnp.random.shuffle(real_Y)\n\n# We only use the first one-hot encoded label, such that this is a binary classification task on the first label.\nreal_y = np.ones([real_Y.shape[0], 1])\nfor i in range(len(real_Y)):\n if real_Y[i][0] > 0.5:\n real_y[i] = 0\nreal_Y = real_y\n\n# ########################################\nDIM_NUM = real_X.shape[1]\n\nX = tf.placeholder(tf.float64, [None, DIM_NUM])\nY = tf.placeholder(tf.float64, [None, 1])\n# print(X)\n# print(Y)\n\n# initialize W & b\nW = tf.Variable(tf.zeros([DIM_NUM, 1], dtype=tf.float64), name='w')\nb = tf.Variable(tf.zeros([1], dtype=tf.float64), name='b')\n# print(W)\n# print(b)\n\n# predict\npred_Y = tf.sigmoid(tf.matmul(X, W) + b)\n# print(pred_Y)\n\n# loss\nlogits = tf.matmul(X, W) + b\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=logits)\nloss = tf.reduce_mean(loss)\n# print(loss)\n\n# optimizer\ntrain = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n# print(train)\n\ninit = tf.global_variables_initializer()\n# print(init)\n\nsaver = tf.train.Saver(var_list=None, max_to_keep=5, name='v2')\nos.makedirs(\"./log/ckpt0\", exist_ok=True)\n\nwith tf.Session() as sess:\n sess.run(init)\n # train\n BATCHES = math.ceil(len(real_X) / BATCH_SIZE)\n print(\"Begin training...\")\n for e in range(EPOCHES):\n for i in range(BATCHES):\n bX = real_X[(i * BATCH_SIZE): (i + 1) * BATCH_SIZE]\n bY = real_Y[(i * BATCH_SIZE): (i + 1) * BATCH_SIZE]\n sess.run(train, feed_dict={X: bX, Y: bY})\n saver.save(sess, './log/ckpt0/model')\n print(\"Training done, and the model is saved in ./log/ckpt0/model .\")","repo_name":"LatticeX-Foundation/Rosetta","sub_path":"example/zkp_lr/tf-logistic_regression_train.py","file_name":"tf-logistic_regression_train.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":544,"dataset":"github-code","pt":"18"} +{"seq_id":"38063562142","text":"import numpy as np\nimport time\nimport heapq\nimport linecache\n\ndef dijkstra(WG, src_node, dest_node, const_weights = True, return_path = True, max_steps = None, debug = False):\n \"\"\"\n Parameters\n ----------\n WG : WikiGraph\n Instance of WikiGraph used for Dijkstra's algorithm.\n src_node : string\n Source node of the edge: either id or title;\n dest_node : string\n Destination node of the edge.\n return_path : bool\n True if path should be returned; False otherwise.\n debug : bool\n True if debug mode is activated; False otherwise.\n Takes two string: id or title of source and destination nodes: either id or title;\n Returns\n -------\n cost : float\n A float representing the cost of the path from src_node to 
dest_node\n        It is either a finite positive integer or -1 if there is no path between the 2 nodes.\n        A list of strings representing the path is also returned if return_path is True.\n    \"\"\"\n    if not isinstance(debug, bool):\n        raise Exception(\"Debug must be of type bool!\")\n    if debug:\n        t0 = time.time()\n    # if not isinstance(WG, WikiGraph):\n    #     raise Exception(\"WG must be of type WikiGraph!\")\n    if not isinstance(src_node, str) or not isinstance(dest_node, str):\n        raise Exception(\"The two nodes must be both strings!\")\n    if not WG.__contains__(src_node) or not WG.__contains__(dest_node):\n        raise Exception(\"The two nodes must belong to the graph!\")\n    \n    if src_node == dest_node:\n        if debug:\n            t1 = time.time()\n            print(f\"Successfully completed task in {t1-t0} seconds.\")\n            print(\"Visited 0 nodes.\")\n        if return_path:\n            return (0., [src_node])\n        else:\n            return 0.\n    \n    if WG.get_outward_weight(src_node, dest_node) < np.inf:\n        if debug:\n            t1 = time.time()\n            print(f\"Successfully completed task in {t1-t0} seconds.\")\n            print(\"Visited 1 node.\")\n        if return_path:\n            # A direct edge costs one step, matching the scalar return below.\n            return (1., [src_node, dest_node])\n        else:\n            return 1. \n    \n    if src_node in WG.article_id:\n        src_node = WG.article_id[src_node]\n    if dest_node in WG.article_id:\n        dest_node = WG.article_id[dest_node]\n    \n    #distances = {vertex: np.inf for vertex in WG.title}\n    distances = dict()\n    distances[src_node] = 0\n    \n    pred = {src_node: None}\n    num_visited_nodes = 0\n    \n    pq = [(0, src_node)]\n    while len(pq) > 0:\n        \n        current_distance, current_vertex = heapq.heappop(pq)\n        num_visited_nodes += 1\n        \n        if max_steps is not None and num_visited_nodes > max_steps:\n            return 4.0\n        \n        if current_vertex not in distances:\n            distances[current_vertex] = np.inf\n        \n        if current_distance > distances[current_vertex]:\n            continue\n        \n        #neigh = WG.__getitem__(current_vertex)\n        neigh = WG.get_outward_neighbours(current_vertex)\n        if not const_weights:\n            neigh_set = set(neigh)\n        \n        for neighbour in neigh:\n            \n            if neighbour == '':\n                continue\n            \n            if const_weights:\n                weight = 1.0\n            else:\n                weight = WG.get_outward_weight(current_vertex, neighbour, neigh_set, check_input = False, input_ids = True)\n            distance = current_distance + weight\n            \n            if neighbour not in distances:\n                distances[neighbour] = np.inf\n            \n            if distance < distances[neighbour]:\n                distances[neighbour] = distance\n                heapq.heappush(pq, (distance,neighbour))\n                pred[neighbour] = current_vertex\n\n                if dest_node == neighbour:\n                    \n                    if debug:\n                        t1 = time.time()\n                        print(f\"Successfully completed task in {t1-t0} seconds.\")\n                        print(f\"Visited {num_visited_nodes} nodes.\")\n                    \n                    path = [WG.get_title(dest_node)]\n                    node = dest_node\n                    while pred[node] is not None:\n                        node = pred[node]\n                        path.append(WG.get_title(node))\n                    path = path[::-1]\n                    \n                    if return_path:\n                        return (distance, path)\n                    else:\n                        return distance\n    \n    if debug:\n        t2 = time.time()\n
        print(f\"No path found. Task completed in {t2-t0} seconds.\")\n        print(f\"Visited {num_visited_nodes} nodes.\")\n    if return_path:\n        return (-1, [])\n    else:\n        return -1 \n    \ndef bi_dijkstra(WG, src_node, dest_node, const_weights = True, debug = True):\n    \"\"\"\n    Parameters\n    ----------\n    WG : WikiGraph\n        Instance of WikiGraph used for Dijkstra's algorithm.\n    src_node : string\n        Source node of the edge: either id or title;\n    dest_node : string\n        Destination node of the edge.\n    debug : bool\n        True if debug mode is activated; False otherwise.\n    Takes two strings: the id or title of the source and destination nodes.\n    Returns\n    -------\n    (weight, path) : tuple\n        The weight of the shortest path from src_node to dest_node and the list of nodes along it.\n        The weight is a finite positive number when a path exists; an exception is raised otherwise.\n    \"\"\"\n    t0 = time.time()\n    \n    num_visited_nodes = 0\n    \n    if not WG.__contains__(src_node) or not WG.__contains__(dest_node):\n        raise Exception(f\"Either source {src_node} or target {dest_node} is not in WG\")\n    \n    if src_node in WG.article_id:\n        src_node = WG.article_id[src_node]\n    if dest_node in WG.article_id:\n        dest_node = WG.article_id[dest_node]\n\n    if src_node == dest_node:\n        if debug:\n            t1 = time.time()\n            print(f\"Visited {num_visited_nodes}. Path found in {t1 - t0} seconds.\")\n        return (0, [src_node])\n    \n    if WG.get_outward_weight(src_node, dest_node) < np.inf:\n        if debug:\n            t1 = time.time()\n            num_visited_nodes += 1\n            print(f\"Visited {num_visited_nodes}. Path found in {t1 - t0} seconds.\")\n        return (1, [src_node, dest_node])\n\n    push = heapq.heappush\n    pop = heapq.heappop\n    # Init: [Forward, Backward]\n    dists = [{}, {}] # dictionary of final distances\n    paths = [{src_node: [src_node]}, {dest_node: [dest_node]}] # dictionary of paths\n    fringe = [[], []] # heap of (distance, node) for choosing node to expand\n    seen = [{src_node: 0}, {dest_node: 0}] # dict of distances to seen nodes\n    # initialize fringe heap\n    push(fringe[0], (0, src_node))\n    push(fringe[1], (0, dest_node))\n    # variables to hold shortest discovered path\n    finaldist = np.inf\n    finalpath = []\n    direction = 1\n    while fringe[0] and fringe[1]:\n        #print(fringe)\n        # choose direction\n        # direction == 0 is forward direction and direction == 1 is back\n        direction = 1 - direction\n        # extract closest to expand\n        (dist, v) = pop(fringe[direction])\n        \n        num_visited_nodes += 1 #keep track of visited nodes\n        \n        if v in dists[direction]:\n            # Shortest path to v has already been found\n            continue\n        # update distance\n        dists[direction][v] = dist # equal to seen[dir][v]\n        if v in dists[1 - direction]:\n            # if we have scanned v in both directions we are done\n            # we have now discovered the shortest path\n            return (finaldist, finalpath)\n\n        if direction == 0:\n            neighs = WG.get_outward_neighbours(v)\n        else:\n            neighs = WG.get_inward_neighbours(v)\n        \n        if not const_weights:\n            neighs_set = set(neighs)\n        \n        for w in neighs:\n            if w == '':\n                continue\n            # weight(v, w) for forward and weight(w, v) for back direction\n            if const_weights:\n                weight = 1.0\n            else:\n                if direction == 0:\n                    weight = WG.get_outward_weight(v, w, src_neighs = neighs_set, check_input = False, input_ids = True) \n                else:\n                    weight = WG.get_inward_weight(w, v, src_neighs = neighs_set, check_input = False, input_ids = True)\n            \n            if weight == np.inf:\n                print(v, w, w in neighs_set, direction)\n                return -1\n            \n            vwLength = dists[direction][v] + weight\n            if w in dists[direction]:\n                if vwLength < dists[direction][w]:\n                    raise ValueError(\"Contradictory paths found: negative weights?\")\n            elif w not in 
seen[direction] or vwLength < seen[direction][w]:\n # relaxing\n seen[direction][w] = vwLength\n push(fringe[direction], (vwLength, w))\n paths[direction][w] = paths[direction][v] + [w]\n if w in seen[0] and w in seen[1]:\n # see if this path is better than the already\n # discovered shortest path\n totaldist = seen[0][w] + seen[1][w]\n if finalpath == [] or finaldist > totaldist:\n finaldist = totaldist\n revpath = paths[1][w][:]\n revpath.reverse()\n finalpath = paths[0][w] + revpath[1:]\n #if num_visited_nodes > 50:\n #print(len(seen[0]), len(seen[1]))\n #return -1\n raise Exception(f\"No path between {src_node} and {dest_node}.\")","repo_name":"MateiCosa/BAINSA-Knowledge-Graph","sub_path":"PathFinding.py","file_name":"PathFinding.py","file_ext":"py","file_size_in_byte":9627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"29920136074","text":"import re\nfrom wordcloud import WordCloud\nfrom PIL import Image\nfrom nltk import pos_tag as pt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport spacy\nfrom nltk.stem.porter import *\nimport string\n\n\nclass Tokenizer:\n def __init__(self):\n self.stammer = PorterStemmer()\n self.nlp = spacy.load('en_core_web_sm')\n self.add_missing_stopwords(self.nlp)\n\n @staticmethod\n def row_gen():\n with open('crime_and_punishment.txt', encoding='utf-8-sig') as book:\n for row in book:\n yield row\n\n @staticmethod\n def add_missing_stopwords(nlp):\n extended_stopwords_list = [\"a\", \"about\", \"above\", \"after\", \"again\", \"against\", \"ain\", \"all\", \"am\", \"an\", \"and\",\n \"any\", \"are\", \"aren\", \"aren't\", \"as\", \"at\", \"be\", \"because\", \"been\", \"before\",\n \"being\",\n \"below\", \"between\", \"both\", \"but\", \"by\", \"can\", \"couldn\", \"couldn't\", \"d\", \"de\",\n \"dr\",\n \"did\", \"didn\",\n \"didn't\", \"do\", \"does\", \"doesn\", \"doesn't\", \"doing\", \"don\", \"don't\", \"down\",\n \"during\",\n \"each\", \"few\", \"for\", \"from\", \"further\", \"had\", \"hadn\", \"hadn't\", \"has\", \"hasn\",\n \"hasn't\", \"have\", \"haven\", \"haven't\", \"having\", \"he\", \"her\", \"here\", \"hers\",\n \"herself\",\n \"him\", \"himself\", \"his\", \"how\", \"i\", \"if\", \"in\", \"into\", \"is\", \"isn\", \"isn't\", \"it\",\n \"it's\", \"its\", \"itself\", \"just\", \"ll\", \"m\", \"ma\", \"me\", \"mightn\", \"mightn't\", \"more\",\n \"most\", \"mustn\", \"mustn't\", \"my\", \"myself\", \"needn\", \"needn't\", \"no\", \"nor\", \"not\",\n \"now\", \"o\", \"of\", \"off\", \"on\", \"once\", \"only\", \"or\", \"other\", \"our\", \"ours\",\n \"ourselves\",\n \"out\", \"over\", \"own\", \"re\", \"s\", \"same\", \"shan\", \"shan't\", \"she\", \"she's\", \"should\",\n \"should've\", \"shouldn\", \"shouldn't\", \"so\", \"some\", \"such\", \"t\", \"than\", \"that\",\n \"that'll\", \"the\", \"their\", \"theirs\", \"them\", \"themselves\", \"then\", \"there\", \"these\",\n \"they\", \"this\", \"those\", \"through\", \"to\", \"too\", \"under\", \"until\", \"up\", \"ve\",\n \"very\",\n \"was\", \"wasn\", \"wasn't\", \"we\", \"were\", \"weren\", \"weren't\", \"what\", \"when\", \"where\",\n \"which\", \"while\", \"who\", \"whom\", \"why\", \"will\", \"with\", \"won\", \"won't\", \"wouldn\",\n \"wouldn't\", \"y\", \"you\", \"you'd\", \"you'll\", \"you're\", \"you've\", \"your\", \"yours\",\n \"yourself\", \"yourselves\", \"could\", \"he'd\", \"he'll\", \"he's\", \"here's\", \"how's\", \"i'd\",\n \"i'll\", \"i'm\", \"i've\", \"let's\", \"ought\", \"she'd\", \"she'll\", 
\"that's\", \"there's\",\n \"they'd\", \"they'll\", \"they're\", \"they've\", \"we'd\", \"we'll\", \"we're\", \"we've\",\n \"what's\",\n \"when's\", \"where's\", \"who's\", \"why's\", \"would\", \"able\", \"abst\", \"accordance\",\n \"according\", \"accordingly\", \"across\", \"act\", \"actually\", \"added\", \"adj\", \"affected\",\n \"affecting\", \"affects\", \"afterwards\", \"ah\", \"almost\", \"alone\", \"along\", \"already\",\n \"also\", \"although\", \"always\", \"among\", \"amongst\", \"announce\", \"another\", \"anybody\",\n \"anyhow\", \"anymore\", \"anyone\", \"anything\", \"anyway\", \"anyways\", \"anywhere\",\n \"apparently\",\n \"approximately\", \"arent\", \"arise\", \"around\", \"aside\", \"ask\", \"asking\", \"auth\",\n \"available\", \"away\", \"awfully\", \"b\", \"back\", \"became\", \"become\", \"becomes\",\n \"becoming\",\n \"beforehand\", \"begin\", \"beginning\", \"beginnings\", \"begins\", \"behind\", \"believe\",\n \"beside\", \"besides\", \"beyond\", \"biol\", \"brief\", \"briefly\", \"c\", \"ca\", \"came\",\n \"cannot\",\n \"can't\", \"cause\", \"causes\", \"certain\", \"certainly\", \"co\", \"com\", \"come\", \"comes\",\n \"contain\", \"containing\", \"contains\", \"couldnt\", \"date\", \"different\", \"done\",\n \"downwards\",\n \"due\", \"e\", \"eh\", \"ed\", \"edu\", \"effect\", \"eg\", \"eight\", \"eighty\", \"either\", \"else\",\n \"elsewhere\", \"end\", \"ending\", \"enough\", \"especially\", \"et\", \"etc\", \"even\", \"ever\",\n \"every\", \"everybody\", \"everyone\", \"everything\", \"everywhere\", \"ex\", \"except\", \"f\",\n \"far\",\n \"ff\", \"fifth\", \"first\", \"five\", \"fix\", \"followed\", \"following\", \"follows\", \"former\",\n \"formerly\", \"forth\", \"found\", \"four\", \"furthermore\", \"g\", \"gave\", \"get\", \"gets\",\n \"getting\", \"give\", \"given\", \"gives\", \"giving\", \"go\", \"goes\", \"gone\", \"got\", \"gotten\",\n \"h\", \"hm\", \"ha\", \"happens\", \"hardly\", \"hed\", \"hence\", \"hereafter\", \"hereby\",\n \"herein\",\n \"heres\",\n \"hereupon\", \"hes\", \"hi\", \"hid\", \"hither\", \"home\", \"howbeit\", \"however\", \"hundred\",\n \"id\",\n \"ie\", \"im\", \"immediate\", \"immediately\", \"importance\", \"important\", \"inc\", \"indeed\",\n \"index\", \"information\", \"instead\", \"invention\", \"inward\", \"itd\", \"it'll\", \"j\", \"k\",\n \"keep\", \"keeps\", \"kept\", \"kg\", \"km\", \"know\", \"known\", \"knows\", \"l\", \"la\", \"largely\",\n \"last\",\n \"lately\", \"later\", \"latter\", \"latterly\", \"least\", \"less\", \"lest\", \"let\", \"lets\",\n \"like\",\n \"liked\", \"likely\", \"line\", \"little\", \"'ll\", \"look\", \"looking\", \"looks\", \"ltd\",\n \"made\",\n \"mainly\", \"make\", \"makes\", \"many\", \"may\", \"maybe\", \"mean\", \"means\", \"meantime\",\n \"meanwhile\", \"merely\", \"mg\", \"might\", \"million\", \"miss\", \"ml\", \"moreover\", \"mostly\",\n \"mr\", \"mrs\", \"much\", \"mug\", \"must\", \"n\", \"na\", \"nt\", \"name\", \"namely\", \"nay\", \"nd\",\n \"near\",\n \"nearly\", \"necessarily\", \"necessary\", \"need\", \"needs\", \"neither\", \"never\",\n \"nevertheless\", \"new\", \"next\", \"nine\", \"ninety\", \"nobody\", \"non\", \"none\",\n \"nonetheless\",\n \"noone\", \"normally\", \"nos\", \"noted\", \"nothing\", \"nowhere\", \"obtain\", \"obtained\",\n \"obviously\", \"often\", \"oh\", \"ok\", \"okay\", \"old\", \"omitted\", \"one\", \"ones\", \"onto\",\n \"ord\",\n \"others\", \"otherwise\", \"outside\", \"overall\", \"owing\", \"p\", \"page\", \"pages\", \"part\",\n 
\"particular\", \"particularly\", \"past\", \"per\", \"perhaps\", \"placed\", \"please\", \"plus\",\n \"poorly\", \"possible\", \"possibly\", \"potentially\", \"pp\", \"predominantly\", \"present\",\n \"previously\", \"primarily\", \"probably\", \"promptly\", \"proud\", \"provides\", \"put\", \"q\",\n \"que\", \"quickly\", \"quite\", \"qv\", \"r\", \"ran\", \"rather\", \"rd\", \"readily\", \"really\",\n \"recent\", \"recently\", \"ref\", \"refs\", \"regarding\", \"regardless\", \"regards\", \"related\",\n \"relatively\", \"research\", \"respectively\", \"resulted\", \"resulting\", \"results\",\n \"right\",\n \"run\", \"said\", \"saw\", \"say\", \"saying\", \"says\", \"sec\", \"section\", \"see\", \"seeing\",\n \"seem\",\n \"seemed\", \"seeming\", \"seems\", \"seen\", \"self\", \"selves\", \"sent\", \"seven\", \"several\",\n \"shall\", \"shed\", \"shes\", \"show\", \"showed\", \"shown\", \"showns\", \"shows\", \"significant\",\n \"significantly\", \"similar\", \"similarly\", \"since\", \"six\", \"slightly\", \"somebody\",\n \"somehow\", \"someone\", \"somethan\", \"something\", \"sometime\", \"sometimes\", \"somewhat\",\n \"somewhere\", \"soon\", \"sorry\", \"specifically\", \"specified\", \"specify\", \"specifying\",\n \"still\", \"stop\", \"strongly\", \"sub\", \"substantially\", \"successfully\", \"sufficiently\",\n \"suggest\", \"sup\", \"sure\", \"take\", \"taken\", \"taking\", \"tell\", \"tends\", \"th\", \"thank\",\n \"thanks\", \"thanx\", \"thats\", \"that've\", \"thence\", \"thereafter\", \"thereby\", \"thered\",\n \"therefore\", \"therein\", \"there'll\", \"thereof\", \"therere\", \"theres\", \"thereto\",\n \"thereupon\", \"there've\", \"theyd\", \"theyre\", \"think\", \"thou\", \"though\", \"thoughh\",\n \"thousand\", \"throug\", \"throughout\", \"thru\", \"thus\", \"til\", \"tip\", \"together\", \"took\",\n \"toward\", \"towards\", \"tried\", \"tries\", \"truly\", \"try\", \"trying\", \"ts\", \"twice\",\n \"two\",\n \"u\", \"un\", \"unfortunately\", \"unless\", \"unlike\", \"unlikely\", \"unto\", \"upon\", \"ups\",\n \"us\",\n \"use\", \"used\", \"useful\", \"usefully\", \"usefulness\", \"uses\", \"using\", \"usually\", \"v\",\n \"value\", \"various\", \"'ve\", \"via\", \"viz\", \"vol\", \"vols\", \"vs\", \"w\", \"wo\", \"want\",\n \"wants\",\n \"wasnt\", \"way\", \"wed\", \"welcome\", \"went\", \"werent\", \"whatever\", \"what'll\", \"whats\",\n \"whence\", \"whenever\", \"whereafter\", \"whereas\", \"whereby\", \"wherein\", \"wheres\",\n \"whereupon\", \"wherever\", \"whether\", \"whim\", \"whither\", \"whod\", \"whoever\", \"whole\",\n \"who'll\", \"whomever\", \"whos\", \"whose\", \"widely\", \"willing\", \"wish\", \"within\",\n \"without\",\n \"wont\", \"words\", \"world\", \"wouldnt\", \"www\", \"x\", \"yes\", \"yet\", \"ye\", \"youd\", \"youre\",\n \"z\",\n \"zero\", \"a's\", \"ain't\", \"allow\", \"allows\", \"apart\", \"appear\", \"appreciate\",\n \"appropriate\", \"associated\", \"best\", \"better\", \"c'mon\", \"c's\", \"cant\", \"changes\",\n \"clearly\", \"concerning\", \"consequently\", \"consider\", \"considering\", \"corresponding\",\n \"course\", \"currently\", \"definitely\", \"described\", \"despite\", \"entirely\", \"exactly\",\n \"example\", \"going\", \"greetings\", \"hello\", \"help\", \"hopefully\", \"ignored\", \"inasmuch\",\n \"indicate\", \"indicated\", \"indicates\", \"inner\", \"insofar\", \"it'd\", \"keep\", \"keeps\",\n \"novel\", \"presumably\", \"reasonably\", \"second\", \"secondly\", \"sensible\", \"serious\",\n \"seriously\", \"sure\", 
\"t's\", \"third\", \"thorough\", \"thoroughly\", \"three\", \"well\",\n \"wonder\"]\n for stop_word in extended_stopwords_list:\n nlp.vocab[stop_word].is_stop = True\n\n # @staticmethod\n # def remove_punctuation(row):\n # return row.translate(str.maketrans('', '', string.punctuation + '’‘' + '“' + '”'))\n\n @staticmethod\n def process_token(token):\n if token.is_punct:\n return None\n stripped_tok = token.string.strip().lower()\n return stripped_tok if len(stripped_tok) > 1 else None\n\n def tokenize(self, remove_stop_words=False, stem_words=False):\n occurrences = {}\n for row in self.row_gen():\n for token in self.nlp.tokenizer(row):\n word = self.process_token(token)\n if word and word:\n if remove_stop_words and self.nlp.vocab[word].is_stop:\n continue\n if stem_words:\n word = self.stammer.stem(word)\n if word not in occurrences:\n occurrences[word] = 0\n occurrences[word] += 1\n return occurrences\n\n\ndef print_top_20_tokens(occur_dict):\n sorted_dict = {k: v for k, v in sorted(occur_dict.items(), key=lambda item: item[1])[::-1]}\n print([token for token in sorted_dict.keys()][:20])\n\n\ndef plot_results(occur_dict, title='results'):\n plt.figure(title)\n plt.title(title)\n plt.xlabel('log(rank)')\n plt.ylabel('log(frequency)')\n occurrences = list(occur_dict.values())\n occurrences.sort()\n log_rank = np.log(np.arange(1, len(occurrences) + 1))\n log_freq = np.log(occurrences[::-1])\n plt.plot(log_rank, log_freq)\n plt.show()\n\n\ndef get_all_words():\n \"\"\"\n breaks rows of text (using a generator) into structually significant piece\n example - 'I, don't think it's safe breaks' to ['I', ',', 'do', 'n\\'t', 'think', 'it', '\\'s', 'safe']\n :returns: a numpy array of strings\n \"\"\"\n res = []\n sp = spacy.load('en_core_web_sm')\n for row in Tokenizer.row_gen():\n line = row.strip()\n for token in sp.tokenizer(line):\n w = token.string.strip(' ')\n if not w:\n continue\n res.append(w)\n return res\n\n\ndef process_word(token):\n if token.is_punct:\n return None\n stripped_tok = token.string.strip()\n return stripped_tok if len(stripped_tok) > 1 else None\n\n\ndef get_POS_tags(words):\n \"\"\"\n uses the NLTK library to take a list of strings and give them PoS tags\n :param words: np array of shape (N,) of strings\n :returns: np array of shame (2,N) where res[0, :] is the original list of strings and res[1, :] is the list of tags\n \"\"\"\n sp = spacy.load('en_core_web_sm')\n res = pt(words)\n return remove_punct(np.array(res).T)\n\n\ndef remove_punct(pos_tags):\n \"\"\"\n removes punctuation for pos tagging\n \"\"\"\n index = np.char.strip(pos_tags[0, :], string.punctuation + '”' + '’' + '“' + '‘')\n # these single punctuations aren't the typical \" and ' seen on the keyboard and appear in the pdf\n # they are removed \"manually\"\n locs = []\n for i in range(index.shape[0]):\n if len(index[i]) != 0:\n locs.append(i)\n res = np.row_stack((pos_tags[0, :][locs], pos_tags[1, :][locs]))\n return res\n\n\ndef make_dict(pos_tags):\n \"\"\"\n given a list of words and positional arguments returns a dict of each words use\n :param pos_tags: a np array of shape (2,N) where 1st row is strings and 2nd row is their PoS tag\n :returns: a dict with keys being words (lower case) and values are sets of PoS tags of the key word\n \"\"\"\n res = {}\n for i in np.arange(pos_tags.shape[1]):\n key = pos_tags[0, i].strip()\n if not key:\n continue\n res.setdefault(key.lower(), set())\n res[key.lower()].add(pos_tags[1, i])\n return res\n\n\ndef tag_adj(pos_tags):\n \"\"\"\n :param pos_tags: a np array 
def tag_adj(pos_tags):\n    \"\"\"\n    :param pos_tags: a np array of shape (2,N) where 1st row is strings and 2nd row is their PoS tag\n    :returns: an np array of shape (M,) of the indexes of adjectives\n    \"\"\"\n    data = pos_tags[1, :]\n    index = np.where(np.char.startswith(data, 'JJ') == 1)[0]\n    return np.array(index)\n\n\ndef find_adj_noun(pos_tags):\n    \"\"\"\n    :param pos_tags: a np array of shape (2,N) where 1st row is strings and 2nd row is their PoS tag\n    :returns: a set of all adjective + noun phrases (multiple adjectives followed by multiple nouns)\n    \"\"\"\n    i = 0\n    res = set()\n    adj_ind = tag_adj(pos_tags)\n    while i < adj_ind.shape[0]:\n        j = 1\n        flag = False\n        start = adj_ind[i]\n        if start == pos_tags.shape[0] - 1:\n            break\n        while True:\n            while pos_tags[1, adj_ind[i] + 1].startswith('JJ'):\n                i += 1\n                if adj_ind[i] + j == pos_tags.shape[0] - 1:\n                    flag = True\n                    break\n            if flag:\n                break\n            while pos_tags[1, adj_ind[i] + j].startswith('NN'):\n                j += 1\n                if adj_ind[i] + j >= pos_tags.shape[0] - 1:\n                    break\n            if j > 1:\n                txt = \"\"\n                for k in range(start, adj_ind[i] + j):\n                    txt += pos_tags[0, k] + \" \"\n                res.add(txt[:len(txt) - 1])\n            i += 1\n            break\n    return res, count_uniques(res)\n\n\ndef count_uniques(str_list):\n    \"\"\"\n    counts occurrences of item in a list and returns a dict of the results\n    \"\"\"\n    res = {}\n    for w in str_list:\n        res.setdefault(w.lower(), 0)\n        res[w.lower()] += 1\n    return res\n\n\ndef get_homographs(pos_dict):\n    \"\"\"\n    Q4- p.g\n    :param pos_dict: a dictionary of the text, where keys are the words and\n    the pos tags are the values.\n    :return: Outputs the text's 10 top and 10 bottom homographs as 2\n    lists. Each list contains sub lists where the first element is the word,\n    and the second element is its POS list.\n    \"\"\"\n    pd = pos_dict.copy()\n    highest, lowest = [], []\n    for i in range(10):\n        high = max(pd.keys(), key=(lambda k: len(pd[k])))\n        highest.append([high, pd[high]])\n        del pd[high]\n        low = min(pd.keys(), key=(lambda k: len(pd[k])))\n        lowest.append([low, pd[low]])\n        del pd[low]\n    return [highest, lowest]\n\n\ndef tag_proper_noun(pos_tags):\n    \"\"\"\n    :param pos_tags: a np array of shape (2,N) where 1st row is strings and 2nd row is their PoS tag\n    :returns: an np array of shape (M,) of the indexes of proper nouns\n    \"\"\"\n    data = pos_tags[1, :]\n    index = np.where(np.char.startswith(data, 'NNP') == 1)[0]\n    return np.array(index)\n\n\ndef make_nnp_dict(pos_tags):\n    \"\"\"\n    :param pos_tags: a np array of shape (2,N) where 1st row is strings and 2nd row is their PoS tag\n    :returns: dict with keys being words (in lower case) and values are number of occurrences as proper nouns\n    \"\"\"\n    index = tag_proper_noun(pos_tags)\n    words = pos_tags[0, :][index]\n    res = {}\n    for w in words:\n        res.setdefault(w.lower(), 0)\n        res[w.lower()] += 1\n    return res\n\n\ndef word_cloud(pos_tags, mask_path):\n    \"\"\"\n    Q4- p.h\n    creates & saves a word cloud out of the text's NNP or NNPS words.\n    :param pos_tags: a np array of shape (2,N) of words and their PoS tags; the NNP/NNPS\n    occurrences are counted internally.\n    :param mask_path: the mask's path\n    \"\"\"\n    new_dict = make_nnp_dict(pos_tags)\n    mask_im = np.array(Image.open(mask_path))\n    cloud = WordCloud(background_color=\"black\", relative_scaling=0.5,\n                      mask=mask_im).generate_from_frequencies(new_dict)\n    cloud.to_file(\"wordCloud.png\")\n\n\ndef find_cons_rep_words():\n    \"\"\"\n    Q4- p.i:\n    finds all pairs of consecutively repeated words in the text,\n    and the row from which they came.\n    :return: returns a list of lists, where every inner list holds\n
def find_cons_rep_words():\n    \"\"\"\n    Q4- p.i:\n    finds all pairs of two consecutive repeated words in the text,\n    and the row from which they came.\n    :return: returns a list of lists, where every inner list holds\n    the repeated word (string) at idx 0, and the row (as string) at idx 1.\n    \"\"\"\n    res = []\n    last_word = \"\"\n    for row in Tokenizer.row_gen():\n        row = last_word + \" \" + row\n        a = re.findall(r'\\b([A-Za-z]+)\\W?\\s+\\1\\b', row)\n        if a:\n            res.append([a[0], row])\n\n        # maintain last word (so repetitions across row boundaries are caught too):\n        s_row = row.split()\n        if s_row:\n            last_word = s_row[-1]\n        else:\n            last_word = \"\"\n\n    return res\n\n\nif __name__ == \"__main__\":\n    tok = Tokenizer()\n    occur_dict = tok.tokenize()\n    plot_results(occur_dict, 'Tokenizing results, with stopwords, no stemming.')\n    print_top_20_tokens(occur_dict)\n    occur_dict = tok.tokenize(remove_stop_words=True)\n    plot_results(occur_dict, 'Tokenizing results, no stopwords, no stemming.')\n    print_top_20_tokens(occur_dict)\n    occur_dict = tok.tokenize(remove_stop_words=True, stem_words=True)\n    plot_results(occur_dict, 'Tokenizing results, no stopwords, with stemming.')\n    print_top_20_tokens(occur_dict)\n\n\n    words = get_all_words()\n    tags = get_POS_tags(words)\n    adj = tag_adj(tags)\n    an_list, an_dict = find_adj_noun(tags)\n    print_top_20_tokens(an_dict)\n    plot_results(an_dict, 'Tokenizing results for adjective and noun combinations')\n\n    pos_dict = make_dict(tags)\n\n    # Q4 p.i:\n    w = find_cons_rep_words()\n    print(\"rep words:\\n\")\n    print(np.array(w)[:, 0])\n    for i in w:\n        print(i)\n    print(len(w))\n\n    # Q4 p.g:\n    h, l = get_homographs(pos_dict)\n    print(\"\\nhighest homo:\")\n    for i in h:\n        print(i)\n    print(\"\\nlowest homo:\")\n    for i in l:\n        print(i)\n\n    # Q4 p.h:\n    img_path = \"man_mask.png\"\n    word_cloud(tags, img_path)\n","repo_name":"baraloni/NeedleInDataHaystack","sub_path":"ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":21747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33748883014","text":"# Binary search problem - making tteokbokki rice cakes\n# Parametric search: solve an optimization problem by recasting it as a decision problem\n# Parametric search problems are usually solved with binary search\n# For parametric search, the binary search is more concise when implemented with a loop\n\n# Read the number of rice cakes and the requested total length\nn, m = map(int, input().split())\n\n# Read the individual height of each rice cake\narray = list(map(int, input().split()))\n\n# Set the start and end points for the binary search\nstart, end = 0, max(array)\n\n# Perform the binary search (iteratively)\nresult = 0\nwhile start <= end:\n\n    # Set the midpoint (candidate cutting height)\n    mid = (start + end) // 2\n    total = 0\n\n    for ddeok in array:\n        # Compute the length left over after cutting\n        if ddeok > mid:\n            total += ddeok - mid\n\n    # If the total leftover is less than the requested length,\n    # lower the cutting height (search the left half)\n    if total < m:\n        end = mid - 1\n    # If the total leftover is at least the requested length,\n    # raise the cutting height (search the right half)\n    else:\n        result = mid # cutting as little as possible is the answer, so record it\n        start = mid + 1\n\n# Print the result\nprint(result)\n","repo_name":"bokkuembab/For-coding-practice","sub_path":"Book-이것이코테다/5. 이진탐색/7-8 떡볶이 떡 만들기.py","file_name":"7-8 떡볶이 떡 만들기.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
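A minimal sketch (not from the repository above) of the parametric-search pattern the comments describe: binary-search the answer space against a monotone decision predicate. The names `parametric_max` and `feasible` are illustrative, not from the source.

def parametric_max(lo, hi, feasible):
    # Largest x in [lo, hi] with feasible(x) True, assuming feasible is
    # monotone (True up to some threshold, False afterwards).
    best = lo
    while lo <= hi:
        mid = (lo + hi) // 2
        if feasible(mid):
            best = mid
            lo = mid + 1
        else:
            hi = mid - 1
    return best

For the rice-cake task above, feasible(h) would be sum(max(d - h, 0) for d in array) >= m, and parametric_max(0, max(array), feasible) returns the same answer as the loop.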
+{"seq_id":"70954660520","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass ScannedDialog(object):\r\n    # Creates the Scanned dialog\r\n    def setupScanned(self, Scanned, Material, Recycle, Rinse):\r\n        # Dialog Window\r\n        Scanned.setObjectName(\"Scanned\")\r\n        Scanned.resize(400, 300)\r\n        self.verticalLayout = QtWidgets.QVBoxLayout(Scanned)\r\n        self.verticalLayout.setObjectName(\"verticalLayout\")\r\n        Scanned.setWindowTitle(\"Scanned\")\r\n\r\n        # Material Label\r\n        self.lbl_material = QtWidgets.QLabel(Scanned)\r\n        font = QtGui.QFont()\r\n        font.setPointSize(20)\r\n        self.lbl_material.setFont(font)\r\n        self.lbl_material.setAlignment(QtCore.Qt.AlignCenter)\r\n        self.lbl_material.setObjectName(\"lbl_material\")\r\n        self.verticalLayout.addWidget(self.lbl_material)\r\n        self.lbl_material.setText(Material)\r\n\r\n        # Recycle Label\r\n        self.lbl_recycle = QtWidgets.QLabel(Scanned)\r\n        font = QtGui.QFont()\r\n        font.setPointSize(20)\r\n        self.lbl_recycle.setFont(font)\r\n        self.lbl_recycle.setAlignment(QtCore.Qt.AlignCenter)\r\n        self.lbl_recycle.setObjectName(\"lbl_recycle\")\r\n        self.verticalLayout.addWidget(self.lbl_recycle)\r\n        self.lbl_recycle.setText(Recycle)\r\n\r\n        # Rinse Label\r\n        self.lbl_rinse = QtWidgets.QLabel(Scanned)\r\n        font = QtGui.QFont()\r\n        font.setPointSize(20)\r\n        self.lbl_rinse.setFont(font)\r\n        self.lbl_rinse.setAlignment(QtCore.Qt.AlignCenter)\r\n        self.lbl_rinse.setObjectName(\"lbl_rinse\")\r\n        self.verticalLayout.addWidget(self.lbl_rinse)\r\n        self.lbl_rinse.setText(Rinse)\r\n\r\n        # Close Dialog Button\r\n        self.btn_close = QtWidgets.QDialogButtonBox(Scanned)\r\n        self.btn_close.setOrientation(QtCore.Qt.Horizontal)\r\n        self.btn_close.setStandardButtons(QtWidgets.QDialogButtonBox.Close)\r\n        self.btn_close.setCenterButtons(True)\r\n        self.btn_close.setObjectName(\"btn_close\")\r\n        self.verticalLayout.addWidget(self.btn_close)\r\n        self.btn_close.accepted.connect(Scanned.accept)\r\n        self.btn_close.rejected.connect(Scanned.reject)\r\n\r\n\r\n# Only needed to test the window on its own\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    app = QtWidgets.QApplication(sys.argv)\r\n    Scanned = QtWidgets.QDialog()\r\n    ui = ScannedDialog()\r\n    ui.setupScanned(Scanned,'Material', 'Recycle', 'Rinse')\r\n    Scanned.show()\r\n    sys.exit(app.exec_())\r\n","repo_name":"brettsherwood/EMMA","sub_path":"ScannedDialog.py","file_name":"ScannedDialog.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
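A small usage sketch (not in the EMMA repository itself) showing how a parent window might launch the dialog modally; `parent` and the three label strings are placeholders.

from PyQt5 import QtWidgets

def show_scanned(parent, material, recycle, rinse):
    dialog = QtWidgets.QDialog(parent)
    ui = ScannedDialog()
    ui.setupScanned(dialog, material, recycle, rinse)
    dialog.exec_()  # blocks until the user closes the dialog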
+{"seq_id":"21394471497","text":"# The screenshots of this are included in the screenshots folder\r\nimport sys\r\nfrom string import printable\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QLineEdit, QPushButton, QMessageBox\r\n# imports the code below relies on (missing in the original file)\r\nfrom tensorflow.keras.models import model_from_json\r\nfrom tensorflow.keras.preprocessing import sequence\r\n# load json and create model\r\njson_file = open('model3conv_200.json', 'r')\r\nloaded_model_json = json_file.read()\r\njson_file.close()\r\nloaded_model = model_from_json(loaded_model_json)\r\n# load weights into new model\r\nloaded_model.load_weights(\"model3conv_200.h5\")\r\nprint(\"Loaded model from disk\")\r\nclass App(QMainWindow):\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.title = 'Phishing Website Detection'\r\n        self.left = 10\r\n        self.top = 10\r\n        self.width = 600\r\n        self.height = 600\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n        self.setWindowTitle(self.title)\r\n        self.setGeometry(self.left, self.top, self.width, self.height)\r\n\r\n        # Create textbox\r\n        self.textbox = QLineEdit(self)\r\n        self.textbox.move(20, 20)\r\n        self.textbox.resize(280,40)\r\n\r\n        # Create a button in the window\r\n        self.button = QPushButton('Enter', self)\r\n        self.button.move(20,80)\r\n\r\n        # connect button to function on_click\r\n        self.button.clicked.connect(self.on_click)\r\n        self.show()\r\n\r\n    @pyqtSlot()\r\n    def on_click(self):\r\n        url = self.textbox.text()\r\n        # Step 1: Convert the raw URL string into a list of lists where each character\r\n        # contained in \"printable\" is encoded as an integer\r\n        url_int_tokens = [[printable.index(x) + 1 for x in url if x in printable]]\r\n        # Step 2: Cut the URL string at max_len or pad with zeros if shorter\r\n        max_len=75\r\n        X = sequence.pad_sequences(url_int_tokens, maxlen=max_len)\r\n        y_prob = loaded_model.predict(X,batch_size=1)\r\n        # print_result (defined elsewhere in this project) presumably maps the\r\n        # predicted probability to a human-readable verdict\r\n        QMessageBox.question(self, \"Results\",\"The URL is:\" + print_result(y_prob), QMessageBox.Ok, QMessageBox.Ok)\r\n        self.textbox.setText(\"\")\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    app = QtCore.QCoreApplication.instance()\r\n    if app is None:\r\n        app = QtWidgets.QApplication(sys.argv)\r\n    AppWindow = QtWidgets.QMainWindow()\r\n    ui = App()\r\n    sys.exit(app.exec_())","repo_name":"YazhVM/AntiPhishing-system","sub_path":"demoApp.py","file_name":"demoApp.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26398005996","text":"import sys\nsys.stdin = open('전자키드.txt','r')\ndef solve(start,v,cnt):\n    global minV\n    if cnt == N and not check[-1] and minV > v:\n        minV = v\n        return\n    else:\n        # backtrack as soon as v grows past the current minimum\n        if v > minV:\n            return\n        # scan from the first row\n        for i in range(N):\n            # if the value is nonzero and zone i has not been visited yet\n            if data[start][i] and i not in check:\n                # mark as visited\n                check.append(i)\n                # move on to the next row\n                solve(i,v+data[start][i],cnt+1)\n                # undo the visit (backtrack)\n                check.pop()\n\n# find the minimum battery usage for visiting every zone exactly once and returning\n# test cases\nT = int(input())\nfor tc in range(1,T+1):\n    N = int(input())\n    data = [list(map(int,input().split())) for _ in range(N)]\n    # variable holding the minimum\n    minV = 999999\n    # list tracking which zones have been visited\n    check = []\n    solve(0,0,0)\n    print(f'#{tc} {minV}')\n","repo_name":"gangnamssal/honey_man_space","sub_path":"개인 공부/SWEA/알고리즘 수업 hw/220922/전자키트.py","file_name":"전자키트.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71426654440","text":"import numpy as np\nimport pandas as pd\n\nimport random\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, Input, concatenate, Activation, BatchNormalization, Softmax, Conv2DTranspose\nimport tensorflow.keras.layers as layers\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.utils import plot_model\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport os\nimport json\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport seaborn as sns\n\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n    print(dirname)\n\ndata_path = 
Path('/kaggle/input/abstraction-and-reasoning-challenge/')\ntraining_path = data_path / 'training'\nevaluation_path = data_path / 'evaluation'\ntest_path = data_path / 'test'\ndef plot_one(task, ax, i,train_or_test,input_or_output):\n cmap = colors.ListedColormap(\n ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00',\n '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])\n norm = colors.Normalize(vmin=0, vmax=9)\n \n input_matrix = task[train_or_test][i][input_or_output]\n ax.imshow(input_matrix, cmap=cmap, norm=norm)\n ax.grid(True,which='both',color='lightgrey', linewidth=0.5) \n ax.set_yticks([x-0.5 for x in range(1+len(input_matrix))])\n ax.set_xticks([x-0.5 for x in range(1+len(input_matrix[0]))]) \n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_title(train_or_test + ' '+input_or_output)\n\ndef plot(img):\n cmap = colors.ListedColormap(\n ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00',\n '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])\n norm = colors.Normalize(vmin=0, vmax=9)\n fig, ax = plt.subplots()\n ax.imshow(img, cmap=cmap, norm=norm)\n ax.grid(True,which='both',color='lightgrey', linewidth=0.5) \n ax.set_yticks([x-0.5 for x in range(1+len(img))])\n ax.set_xticks([x-0.5 for x in range(1+len(img[0]))]) \n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n\ndef plot_task(task):\n \"\"\"\n Plots the first train and test pairs of a specified task,\n using same color scheme as the ARC app\n \"\"\" \n num_train = len(task['train'])\n fig, axs = plt.subplots(2, num_train, figsize=(3*num_train,3*2))\n for i in range(num_train): \n plot_one(task, axs[0,i],i,'train','input')\n plot_one(task, axs[1,i],i,'train','output') \n plt.tight_layout()\n plt.show() \n \n num_test = len(task['test'])\n fig, axs = plt.subplots(2, num_test, figsize=(3*num_test,3*2))\n if num_test==1: \n plot_one(task, axs[0],0,'test','input')\n plot_one(task, axs[1],0,'test','output') \n else:\n for i in range(num_test): \n plot_one(task, axs[0,i],i,'test','input')\n plot_one(task, axs[1,i],i,'test','output') \n plt.tight_layout()\n plt.show() \nskill_series = pd.Series(\n [[] for name in sorted(os.listdir(training_path))],\n index = sorted(os.listdir(training_path))\n)\n\n# 0-5\nskill_series['007bbfb7.json'] = ['image_repetition', 'fractal_repetition']\nskill_series['00d62c1b.json'] = ['loop_filling']\nskill_series['017c7c7b.json'] = ['recoloring', 'pattern_expansion', 'pattern_repetition', 'image_expansion']\nskill_series['025d127b.json'] = ['pattern_modification']\nskill_series['045e512c.json'] = ['pattern_expansion', 'direction_guessing']\nskill_series['0520fde7.json'] = ['detect_wall', 'separate_images', 'pattern_intersection']\n\n# 6-10\nskill_series['05269061.json'] = ['image_filling', 'pattern_expansion', 'diagonals']\nskill_series['05f2a901.json'] = ['pattern_moving', 'direction_guessing', 'bring_patterns_close']\nskill_series['06df4c85.json'] = ['detect_grid', 'connect_the_dots', 'grid_coloring']\nskill_series['08ed6ac7.json'] = ['measure_length', 'order_numbers', 'associate_colors_to_ranks', 'recoloring']\nskill_series['09629e4f.json'] = ['detect_grid', 'separate_images', 'count_tiles', 'take_minimum', 'enlarge_image', 'create_grid', 'adapt_image_to_grid']\n\n# 11-15\nskill_series['0962bcdd.json'] = ['pattern_expansion']\nskill_series['0a938d79.json'] = ['direction_guessing', 'draw_line_from_point', 'pattern_expansion']\nskill_series['0b148d64.json'] = ['detect_grid', 'separate_images', 'find_the_intruder', 'crop']\nskill_series['0ca9ddb6.json'] = ['pattern_expansion', 
'associate_patterns_to_colors']\nskill_series['0d3d703e.json'] = ['associate_colors_to_colors']\n\n# 16-20\nskill_series['0dfd9992.json'] = ['image_filling', 'pattern_expansion']\nskill_series['0e206a2e.json'] = ['associate_patterns_to_patterns', 'pattern_repetition', 'pattern_rotation', 'pattern_reflection', 'pattern_juxtaposition']\nskill_series['10fcaaa3.json'] = ['pattern_expansion', 'image_repetition']\nskill_series['11852cab.json'] = ['pattern_expansion']\nskill_series['1190e5a7.json'] = ['detect_grid', 'count_hor_lines', 'count_ver_lines', 'detect_background_color', 'color_guessing', 'create_image_from_info']\n\n# 21-25\nskill_series['137eaa0f.json'] = ['pattern_juxtaposition']\nskill_series['150deff5.json'] = ['pattern_coloring', 'pattern_deconstruction', 'associate_colors_to_patterns']\nskill_series['178fcbfb.json'] = ['direction_guessing', 'draw_line_from_point']\nskill_series['1a07d186.json'] = ['bring_patterns_close', 'find_the_intruder']\nskill_series['1b2d62fb.json'] = ['detect_wall', 'separate_images', 'pattern_intersection']\n\n# 26-30\nskill_series['1b60fb0c.json'] = ['pattern_deconstruction', 'pattern_rotation', 'pattern_expansion']\nskill_series['1bfc4729.json'] = ['pattern_expansion']\nskill_series['1c786137.json'] = ['detect_enclosure', 'crop']\nskill_series['1caeab9d.json'] = ['pattern_moving', 'pattern_alignment']\nskill_series['1cf80156.json'] = ['crop']\n\n# 31-35\nskill_series['1e0a9b12.json'] = ['pattern_moving', 'gravity']\nskill_series['1e32b0e9.json'] = ['detect_grid', 'separate_images', 'image_repetition', 'pattern_completion']\nskill_series['1f0c79e5.json'] = ['pattern_expansion', 'diagonals', 'direction_guessing']\nskill_series['1f642eb9.json'] = ['image_within_image', 'projection_unto_rectangle']\nskill_series['1f85a75f.json'] = ['crop', 'find_the_intruder']\n\n# 36-40\nskill_series['1f876c06.json'] = ['connect_the_dots', 'diagonals']\nskill_series['1fad071e.json'] = ['count_patterns', 'associate_images_to_numbers']\nskill_series['2013d3e2.json'] = ['pattern_deconstruction', 'crop']\nskill_series['2204b7a8.json'] = ['proximity_guessing', 'recoloring']\nskill_series['22168020.json'] = ['pattern_expansion']\n\n# 41-45\nskill_series['22233c11.json'] = ['pattern_expansion', 'size_guessing']\nskill_series['2281f1f4.json'] = ['direction_guessing', 'draw_line_from_point', 'pattern_intersection']\nskill_series['228f6490.json'] = ['pattern_moving', 'loop_filling', 'shape_guessing', 'x_marks_the_spot']\nskill_series['22eb0ac0.json'] = ['connect_the_dots', 'color_matching']\nskill_series['234bbc79.json'] = ['recoloring', 'bring_patterns_close', 'crop']\n\n# 46-50\nskill_series['23581191.json'] = ['draw_line_from_point', 'pattern_intersection']\nskill_series['239be575.json'] = ['detect_connectedness', 'associate_images_to_bools']\nskill_series['23b5c85d.json'] = ['measure_area', 'take_minimum', 'crop']\nskill_series['253bf280.json'] = ['connect_the_dots', 'direction_guessing']\nskill_series['25d487eb.json'] = ['draw_line_from_point', 'direction_guessing', 'color_guessing']\n\n# 51-55\nskill_series['25d8a9c8.json'] = ['detect_hor_lines', 'recoloring', 'remove_noise']\nskill_series['25ff71a9.json'] = ['pattern_moving']\nskill_series['264363fd.json'] = ['pattern_repetition', 'pattern_juxtaposition', 'draw_line_from_point']\nskill_series['272f95fa.json'] = ['detect_grid', 'mimic_pattern', 'grid_coloring']\nskill_series['27a28665.json'] = ['associate_colors_to_patterns', 'take_negative', 'associate_images_to_patterns']\n\n# 56-60\nskill_series['28bf18c6.json'] = ['crop', 
'pattern_repetition']\nskill_series['28e73c20.json'] = ['ex_nihilo', 'mimic_pattern']\nskill_series['29623171.json'] = ['detect_grid', 'separate_images', 'count_tiles', 'take_maximum', 'grid_coloring']\nskill_series['29c11459.json'] = ['draw_line_from_point', 'count_tiles']\nskill_series['29ec7d0e.json'] = ['image_filling', 'pattern_expansion', 'detect_grid', 'pattern_repetition']\n\n# 61-65\nskill_series['2bcee788.json'] = ['pattern_reflection', 'direction_guessing', 'image_filling', 'background_filling']\nskill_series['2bee17df.json'] = ['draw_line_from_border', 'count_tiles', 'take_maximum']\nskill_series['2c608aff.json'] = ['draw_line_from_point', 'projection_unto_rectangle']\nskill_series['2dc579da.json'] = ['detect_grid', 'find_the_intruder', 'crop']\nskill_series['2dd70a9a.json'] = ['draw_line_from_point', 'direction_guessing', 'maze']\n\n# 66-70\nskill_series['2dee498d.json'] = ['detect_repetition', 'crop', 'divide_by_n']\nskill_series['31aa019c.json'] = ['find_the_intruder', 'remove_noise', 'contouring']\nskill_series['321b1fc6.json'] = ['pattern_repetition', 'pattern_juxtaposition']\nskill_series['32597951.json'] = ['find_the_intruder', 'recoloring']\nskill_series['3345333e.json'] = ['pattern_completion', 'pattern_reflection', 'remove_noise']\n\n# 71-75\nskill_series['3428a4f5.json'] = ['detect_wall', 'separate_images', 'pattern_differences']\nskill_series['3618c87e.json'] = ['gravity']\nskill_series['3631a71a.json'] = ['image_filling', 'pattern_expansion', 'pattern_rotation']\nskill_series['363442ee.json'] = ['detect_wall', 'pattern_repetition', 'pattern_juxtaposition']\nskill_series['36d67576.json'] = ['pattern_repetition', 'pattern_juxtaposition', 'pattern_reflection', 'pattern_rotation']\n\n# 76-80\nskill_series['36fdfd69.json'] = ['recoloring', 'rectangle_guessing']\nskill_series['3906de3d.json'] = ['gravity']\nskill_series['39a8645d.json'] = ['count_patterns', 'take_maximum', 'crop']\nskill_series['39e1d7f9.json'] = ['detect_grid', 'pattern_repetition', 'grid_coloring']\nskill_series['3aa6fb7a.json'] = ['pattern_completion', 'pattern_rotation']\n\n# 81-85\nskill_series['3ac3eb23.json'] = ['draw_pattern_from_point', 'pattern_repetition']\nskill_series['3af2c5a8.json'] = ['image_repetition', 'image_reflection', 'image_rotation']\nskill_series['3bd67248.json'] = ['draw_line_from_border', 'diagonals','pattern_repetition']\nskill_series['3bdb4ada.json'] = ['recoloring','pattern_repetition', 'holes']\nskill_series['3befdf3e.json'] = ['take_negative', 'pattern_expansion']\n\n# 86-90\nskill_series['3c9b0459.json'] = ['image_rotation']\nskill_series['3de23699.json'] = ['take_negative', 'crop', 'rectangle_guessing']\nskill_series['3e980e27.json'] = ['pattern_repetition', 'pattern_juxtaposition', 'direction_guessing', 'pattern_reflection']\nskill_series['3eda0437.json'] = ['rectangle_guessing', 'recoloring', 'measure_area', 'take_maximum']\nskill_series['3f7978a0.json'] = ['crop', 'rectangle_guessing', 'find_the_intruder']\n\n# 91-95\nskill_series['40853293.json'] = ['connect_the_dots']\nskill_series['4093f84a.json'] = ['gravity', 'recoloring', 'projection_unto_rectangle']\nskill_series['41e4d17e.json'] = ['draw_line_from_point', 'pattern_repetition']\nskill_series['4258a5f9.json'] = ['pattern_repetition', 'contouring']\nskill_series['4290ef0e.json'] = ['pattern_moving', 'concentric', 'crop']\n\n# 96-100\nskill_series['42a50994.json'] = ['remove_noise', 'count_tiles']\nskill_series['4347f46a.json'] = ['loop_filling', 'color_guessing']\nskill_series['444801d8.json'] = 
['pattern_repetition', 'pattern_expansion', 'rectangle_guessing']\nskill_series['445eab21.json'] = ['measure_area', 'take_maximum']\nskill_series['447fd412.json'] = ['pattern_repetition', 'draw_pattern_from_point', 'pattern_resizing']\n\n# 101-105\nskill_series['44d8ac46.json'] = ['loop_filling', 'rectangle_guessing']\nskill_series['44f52bb0.json'] = ['detect_symmetry', 'associate_images_to_bools']\nskill_series['4522001f.json'] = ['image_rotation', 'pairwise_analogy']\nskill_series['4612dd53.json'] = ['pattern_completion', 'rectangle_guessing']\nskill_series['46442a0e.json'] = ['image_repetition', 'image_reflection']\n\n# 106-110\nskill_series['469497ad.json'] = ['image_resizing', 'draw_line_from_point', 'diagonals']\nskill_series['46f33fce.json'] = ['pattern_resizing', 'image_resizing']\nskill_series['47c1f68c.json'] = ['detect_grid', 'find_the_intruder', 'crop', 'recolor', 'color_guessing', 'image_repetition', 'image_reflection']\nskill_series['484b58aa.json'] = ['image_filling', 'pattern_expansion', 'pattern_repetition']\nskill_series['48d8fb45.json'] = ['find_the_intruder', 'crop']\n\n# 111-115\nskill_series['4938f0c2.json'] = ['pattern_expansion', 'pattern_rotation', 'pattern_reflection']\nskill_series['496994bd.json'] = ['pattern_reflection']\nskill_series['49d1d64f.json'] = ['pattern_expansion', 'image_expansion']\nskill_series['4be741c5.json'] = ['summarize']\nskill_series['4c4377d9.json'] = ['image_repetition', 'image_reflection']\n\n# 116-120\nskill_series['4c5c2cf0.json'] = ['pattern_expansion', 'pattern_rotation', 'pattern_reflection']\nskill_series['50846271.json'] = ['pattern_completion', 'recoloring']\nskill_series['508bd3b6.json'] = ['draw_line_from_point', 'direction_guessing', 'pattern_reflection']\nskill_series['50cb2852.json'] = ['holes', 'rectangle_guessing']\nskill_series['5117e062.json'] = ['find_the_intruder', 'crop', 'recoloring']\n\n# 121-125\nskill_series['5168d44c.json'] = ['direction_guessing', 'recoloring', 'contouring', 'pattern_moving']\nskill_series['539a4f51.json'] = ['pattern_expansion', 'image_expansion']\nskill_series['53b68214.json'] = ['pattern_expansion', 'image_expansion']\nskill_series['543a7ed5.json'] = ['contouring', 'loop_filling']\nskill_series['54d82841.json'] = ['pattern_expansion', 'gravity']\n\n# 126-130\nskill_series['54d9e175.json'] = ['detect_grid', 'separate_images', 'associate_images_to_images']\nskill_series['5521c0d9.json'] = ['pattern_moving', 'measure_length']\nskill_series['5582e5ca.json'] = ['count_tiles', 'dominant_color']\nskill_series['5614dbcf.json'] = ['remove_noise', 'image_resizing']\nskill_series['56dc2b01.json'] = ['gravity', 'direction_guessing', 'pattern_expansion']\n\n# 131-135\nskill_series['56ff96f3.json'] = ['pattern_completion', 'rectangle_guessing']\nskill_series['57aa92db.json'] = ['draw_pattern_from_point', 'pattern_repetition', 'pattern_resizing']\nskill_series['5ad4f10b.json'] = ['color_guessing', 'remove_noise', 'recoloring', 'crop', 'image_resizing']\nskill_series['5bd6f4ac.json'] = ['rectangle_guessing', 'crop']\nskill_series['5c0a986e.json'] = ['draw_line_from_point', 'diagonals', 'direction_guessing']\n\n# 136-140\nskill_series['5c2c9af4.json'] = ['rectangle_guessing', 'pattern_expansion']\nskill_series['5daaa586.json'] = ['detect_grid', 'crop', 'draw_line_from_point', 'direction_guessing']\nskill_series['60b61512.json'] = ['pattern_completion']\nskill_series['6150a2bd.json'] = ['image_rotation']\nskill_series['623ea044.json'] = ['draw_line_from_point', 'diagonals']\n\n# 
141-145\nskill_series['62c24649.json'] = ['image_repetition', 'image_reflection', 'image_rotation']\nskill_series['63613498.json'] = ['recoloring', 'compare_image', 'detect_wall']\nskill_series['6430c8c4.json'] = ['detect_wall', 'separate_images', 'take_complement', 'pattern_intersection']\nskill_series['6455b5f5.json'] = ['measure_area', 'take_maximum', 'take_minimum', 'loop_filling', 'associate_colors_to_ranks']\nskill_series['662c240a.json'] = ['separate_images', 'detect_symmetry', 'find_the_intruder', 'crop']\n\n# 146-150\nskill_series['67385a82.json'] = ['recoloring', 'measure_area', 'associate_colors_to_bools']\nskill_series['673ef223.json'] = ['recoloring', 'draw_line_from_point', 'portals']\nskill_series['6773b310.json'] = ['detect_grid', 'separate_images', 'count_tiles', 'associate_colors_to_numbers']\nskill_series['67a3c6ac.json'] = ['image_reflection']\nskill_series['67a423a3.json'] = ['pattern_intersection', 'contouring']\n\n# 151-155\nskill_series['67e8384a.json'] = ['image_repetition', 'image_reflection', 'image_rotation']\nskill_series['681b3aeb.json'] = ['pattern_moving', 'jigsaw', 'crop', 'bring_patterns_close']\nskill_series['6855a6e4.json'] = ['pattern_moving', 'direction_guessing', 'x_marks_the_spot']\nskill_series['68b16354.json'] = ['image_reflection']\nskill_series['694f12f3.json'] = ['rectangle_guessing', 'loop_filling', 'measure_area', 'associate_colors_to_ranks']\n\n# 156-160\nskill_series['6a1e5592.json'] = ['pattern_moving', 'jigsaw', 'recoloring']\nskill_series['6aa20dc0.json'] = ['pattern_repetition', 'pattern_juxtaposition', 'pattern_resizing']\nskill_series['6b9890af.json'] = ['pattern_moving', 'pattern_resizing', 'crop', 'x_marks_the_spot']\nskill_series['6c434453.json'] = ['replace_pattern']\nskill_series['6cdd2623.json'] = ['connect_the_dots', 'find_the_intruder', 'remove_noise']\n\n# 161-165\nskill_series['6cf79266.json'] = ['rectangle_guessing', 'recoloring']\nskill_series['6d0160f0.json'] = ['detect_grid', 'separate_image', 'find_the_intruder', 'pattern_moving']\nskill_series['6d0aefbc.json'] = ['image_repetition', 'image_reflection']\nskill_series['6d58a25d.json'] = ['draw_line_from_point']\nskill_series['6d75e8bb.json'] = ['rectangle_guessing', 'pattern_completion']\n\n# 166-170\nskill_series['6e02f1e3.json'] = ['count_different_colors', 'associate_images_to_numbers']\nskill_series['6e19193c.json'] = ['draw_line_from_point', 'direction_guessing', 'diagonals']\nskill_series['6e82a1ae.json'] = ['recoloring', 'count_tiles', 'associate_colors_to_numbers']\nskill_series['6ecd11f4.json'] = ['color_palette', 'recoloring', 'pattern_resizing', 'crop']\nskill_series['6f8cd79b.json'] = ['ex_nihilo', 'contouring']\n\n# 171-175\nskill_series['6fa7a44f.json'] = ['image_repetition', 'image_reflection']\nskill_series['72322fa7.json'] = ['pattern_repetition', 'pattern_juxtaposition']\nskill_series['72ca375d.json'] = ['find_the_intruder', 'detect_symmetry', 'crop']\nskill_series['73251a56.json'] = ['image_filling', 'diagonal_symmetry']\nskill_series['7447852a.json'] = ['pattern_expansion', 'pairwise_analogy']\n\n# 176-180\nskill_series['7468f01a.json'] = ['crop', 'image_reflection']\nskill_series['746b3537.json'] = ['crop', 'direction_guessing']\nskill_series['74dd1130.json'] = ['image_reflection', 'diagonal_symmetry']\nskill_series['75b8110e.json'] = ['separate_images', 'image_juxtaposition']\nskill_series['760b3cac.json'] = ['pattern_reflection', 'direction_guessing']\n\n# 181-185\nskill_series['776ffc46.json'] = ['recoloring', 'associate_colors_to_patterns', 
'detect_enclosure', 'find_the_intruder']\nskill_series['77fdfe62.json'] = ['recoloring', 'color_guessing', 'detect_grid', 'crop']\nskill_series['780d0b14.json'] = ['detect_grid', 'summarize']\nskill_series['7837ac64.json'] = ['detect_grid', 'color_guessing', 'grid_coloring', 'crop', 'extrapolate_image_from_grid']\nskill_series['794b24be.json'] = ['count_tiles', 'associate_images_to_numbers']\n\n# 186-190\nskill_series['7b6016b9.json'] = ['loop_filling', 'background_filling', 'color_guessing']\nskill_series['7b7f7511.json'] = ['separate_images', 'detect_repetition', 'crop']\nskill_series['7c008303.json'] = ['color_palette', 'detect_grid', 'recoloring', 'color_guessing', 'separate_images', 'crop']\nskill_series['7ddcd7ec.json'] = ['draw_line_from_point', 'direction_guessing', 'diagonals']\nskill_series['7df24a62.json'] = ['pattern_repetition', 'pattern_rotation', 'pattern_juxtaposition', 'out_of_boundary']\n\n# 191-195\nskill_series['7e0986d6.json'] = ['color_guessing', 'remove_noise']\nskill_series['7f4411dc.json'] = ['rectangle_guessing', 'remove_noise']\nskill_series['7fe24cdd.json'] = ['image_repetition', 'image_rotation']\nskill_series['80af3007.json'] = ['crop', 'pattern_resizing', 'image_resizing', 'fractal_repetition']\nskill_series['810b9b61.json'] = ['recoloring', 'detect_closed_curves']\n\n# 196-200\nskill_series['82819916.json'] = ['pattern_repetition', 'color_guessing', 'draw_line_from_point', 'associate_colors_to_colors']\nskill_series['83302e8f.json'] = ['detect_grid', 'detect_closed_curves', 'rectangle_guessing', 'associate_colors_to_bools', 'loop_filling']\nskill_series['834ec97d.json'] = ['draw_line_from_border', 'pattern_repetition', 'spacing', 'measure_distance_from_side']\nskill_series['8403a5d5.json'] = ['draw_line_from_point', 'pattern_repetition', 'direction_guessing']\nskill_series['846bdb03.json'] = ['pattern_moving', 'pattern_reflection', 'crop', 'color_matching', 'x_marks_the_spot']\n\n# 201-205\nskill_series['855e0971.json'] = ['draw_line_from_point', 'direction_guessing', 'separate_images', 'holes']\nskill_series['85c4e7cd.json'] = ['color_guessing', 'recoloring', 'color_permutation']\nskill_series['868de0fa.json'] = ['loop_filling', 'color_guessing', 'measure_area', 'even_or_odd', 'associate_colors_to_bools']\nskill_series['8731374e.json'] = ['rectangle_guessing', 'crop', 'draw_line_from_point']\nskill_series['88a10436.json'] = ['pattern_repetition', 'pattern_juxtaposition']\n\n# 206-210\nskill_series['88a62173.json'] = ['detect_grid', 'separate_images', 'find_the_intruder', 'crop']\nskill_series['890034e9.json'] = ['pattern_repetition', 'rectangle_guessing', 'contouring']\nskill_series['8a004b2b.json'] = ['pattern_repetition', 'pattern_resizing', 'pattern_juxtaposition', 'rectangle_guessing', 'crop']\nskill_series['8be77c9e.json'] = ['image_repetition', 'image_reflection']\nskill_series['8d5021e8.json'] = ['image_repetition', 'image_reflection']\n\n# 211-215\nskill_series['8d510a79.json'] = ['draw_line_from_point', 'detect_wall', 'direction_guessing', 'associate_colors_to_bools']\nskill_series['8e1813be.json'] = ['recoloring', 'color_guessing', 'direction_guessing', 'crop', 'image_within_image']\nskill_series['8e5a5113.json'] = ['detect_wall', 'separate_images', 'image_repetition', 'image_rotation']\nskill_series['8eb1be9a.json'] = ['pattern_repetition', 'image_filling']\nskill_series['8efcae92.json'] = ['separate_images', 'rectangle_guessing', 'count_tiles', 'take_maximum', 'crop']\n\n# 216-220\nskill_series['8f2ea7aa.json'] = ['crop', 
'fractal_repetition']\nskill_series['90c28cc7.json'] = ['crop', 'rectangle_guessing', 'summarize']\nskill_series['90f3ed37.json'] = ['pattern_repetition', 'recoloring']\nskill_series['913fb3ed.json'] = ['contouring', 'associate_colors_to_colors']\nskill_series['91413438.json'] = ['count_tiles', 'algebra', 'image_repetition']\n\n# 221-225\nskill_series['91714a58.json'] = ['find_the_intruder', 'remove_noise']\nskill_series['9172f3a0.json'] = ['image_resizing']\nskill_series['928ad970.json'] = ['rectangle_guessing', 'color_guessing', 'draw_rectangle']\nskill_series['93b581b8.json'] = ['pattern_expansion', 'color_guessing', 'out_of_boundary']\nskill_series['941d9a10.json'] = ['detect_grid', 'loop_filling', 'pairwise_analogy']\n\n# 226-230\nskill_series['94f9d214.json'] = ['separate_images', 'take_complement', 'pattern_intersection']\nskill_series['952a094c.json'] = ['rectangle_guessing', 'inside_out']\nskill_series['9565186b.json'] = ['separate_shapes', 'count_tiles', 'recoloring', 'take_maximum', 'associate_color_to_bools']\nskill_series['95990924.json'] = ['pattern_expansion']\nskill_series['963e52fc.json'] = ['image_expansion', 'pattern_expansion']\n\n# 231-235\nskill_series['97999447.json'] = ['draw_line_from_point', 'pattern_expansion']\nskill_series['97a05b5b.json'] = ['pattern_moving', 'pattern_juxtaposition', 'crop', 'shape_guessing']\nskill_series['98cf29f8.json'] = ['pattern_moving', 'bring_patterns_close']\nskill_series['995c5fa3.json'] = ['take_complement', 'detect_wall', 'separate_images', 'associate_colors_to_images', 'summarize']\nskill_series['99b1bc43.json'] = ['take_complement', 'detect_wall', 'separate_images', 'pattern_intersection']\n\n# 236-240\nskill_series['99fa7670.json'] = ['draw_line_from_point', 'pattern_expansion']\nskill_series['9aec4887.json'] = ['pattern_moving', 'x_marks_the_spot', 'crop', 'recoloring', 'color_guessing']\nskill_series['9af7a82c.json'] = ['separate_images', 'count_tiles', 'summarize', 'order_numbers']\nskill_series['9d9215db.json'] = ['pattern_expansion', 'pattern_reflection', 'pattern_rotation']\nskill_series['9dfd6313.json'] = ['image_reflection', 'diagonal_symmetry']\n\n# 241-245\nskill_series['9ecd008a.json'] = ['image_filling', 'pattern_expansion', 'pattern_reflection', 'pattern_rotation', 'crop']\nskill_series['9edfc990.json'] = ['background_filling', 'holes']\nskill_series['9f236235.json'] = ['detect_grid', 'summarize', 'image_reflection']\nskill_series['a1570a43.json'] = ['pattern_moving', 'rectangle_guessing', 'x_marks_the_spot']\nskill_series['a2fd1cf0.json'] = ['connect_the_dots']\n\n# 246-250\nskill_series['a3325580.json'] = ['separate_shapes', 'count_tiles', 'take_maximum', 'summarize', 'remove_intruders']\nskill_series['a3df8b1e.json'] = ['pattern_expansion', 'draw_line_from_point', 'diagonals', 'bounce']\nskill_series['a416b8f3.json'] = ['image_repetition']\nskill_series['a48eeaf7.json'] = ['pattern_moving', 'bring_patterns_close', 'gravity', 'direction_guessing']\nskill_series['a5313dff.json'] = ['loop_filling']\n\n# 251-255\nskill_series['a5f85a15.json'] = ['recoloring', 'pattern_modification', 'pairwise_analogy']\nskill_series['a61ba2ce.json'] = ['pattern_moving', 'bring_patterns_close', 'crop', 'jigsaw']\nskill_series['a61f2674.json'] = ['separate_shapes', 'count_tiles', 'take_maximum', 'take_minimum', 'recoloring', 'associate_colors_to_ranks', 'remove_intruders']\nskill_series['a64e4611.json'] = ['background_filling', 'rectangle_guessing']\nskill_series['a65b410d.json'] = ['pattern_expansion', 'count_tiles', 
'associate_colors_to_ranks']\n\n# 256-260\nskill_series['a68b268e.json'] = ['detect_grid', 'separate_images', 'pattern_juxtaposition']\nskill_series['a699fb00.json'] = ['pattern_expansion', 'connect_the_dots']\nskill_series['a740d043.json'] = ['crop', 'detect_background_color', 'recoloring']\nskill_series['a78176bb.json'] = ['draw_parallel_line', 'direction_guessing', 'remove_intruders']\nskill_series['a79310a0.json'] = ['pattern_moving', 'recoloring', 'pairwise_analogy']\n\n# 261-265\nskill_series['a85d4709.json'] = ['separate_images', 'associate_colors_to_images', 'summarize']\nskill_series['a87f7484.json'] = ['separate_images', 'find_the_intruder', 'crop']\nskill_series['a8c38be5.json'] = ['pattern_moving', 'jigsaw', 'crop']\nskill_series['a8d7556c.json'] = ['recoloring', 'rectangle_guessing']\nskill_series['a9f96cdd.json'] = ['replace_pattern', 'out_of_boundary']\n\n# 266-270\nskill_series['aabf363d.json'] = ['recoloring', 'color_guessing', 'remove_intruders']\nskill_series['aba27056.json'] = ['pattern_expansion', 'draw_line_from_point', 'diagonals']\nskill_series['ac0a08a4.json'] = ['image_resizing', 'count_tiles', 'size_guessing']\nskill_series['ae3edfdc.json'] = ['bring_patterns_close', 'gravity']\nskill_series['ae4f1146.json'] = ['separate_images', 'count_tiles', 'crop']\n\n# 271-275\nskill_series['aedd82e4.json'] = ['recoloring', 'separate_shapes', 'count_tiles', 'take_minimum', 'associate_colors_to_bools']\nskill_series['af902bf9.json'] = ['ex_nihilo', 'x_marks_the_spot']\nskill_series['b0c4d837.json'] = ['measure_length', 'associate_images_to_numbers']\nskill_series['b190f7f5.json'] = ['separate_images', 'image_expasion', 'color_palette', 'image_resizing', 'replace_pattern']\nskill_series['b1948b0a.json'] = ['recoloring', 'associate_colors_to_colors']\n\n# 276-280\nskill_series['b230c067.json'] = ['recoloring', 'separate_shapes', 'find_the_intruder', 'associate_colors_to_bools']\nskill_series['b27ca6d3.json'] = ['find_the_intruder', 'count_tiles', 'contouring']\nskill_series['b2862040.json'] = ['recoloring', 'detect_closed_curves', 'associate_colors_to_bools']\nskill_series['b527c5c6.json'] = ['pattern_expansion', 'draw_line_from_point', 'contouring', 'direction_guessing', 'size_guessing']\nskill_series['b548a754.json'] = ['pattern_expansion', 'pattern_modification', 'x_marks_the_spot']\n\n# 281-285\nskill_series['b60334d2.json'] = ['replace_pattern']\nskill_series['b6afb2da.json'] = ['recoloring', 'replace_pattern', 'rectangle_guessing']\nskill_series['b7249182.json'] = ['pattern_expansion']\nskill_series['b775ac94.json'] = ['pattern_expansion', 'pattern_repetition', 'recoloring', 'pattern_rotation', 'pattern_reflection', 'direction_guessing', 'pattern_juxtaposition']\nskill_series['b782dc8a.json'] = ['pattern_expansion', 'maze']\n\n# 286-290\nskill_series['b8825c91.json'] = ['pattern_completion', 'pattern_rotation', 'pattern_reflection']\nskill_series['b8cdaf2b.json'] = ['pattern_expansion', 'draw_line_from_point', 'diagonals', 'pairwise_analogy']\nskill_series['b91ae062.json'] = ['image_resizing', 'size_guessing', 'count_different_colors']\nskill_series['b94a9452.json'] = ['crop', 'take_negative']\nskill_series['b9b7f026.json'] = ['find_the_intruder', 'summarize']\n\n# 291-295\nskill_series['ba26e723.json'] = ['pattern_modification', 'pairwise_analogy', 'recoloring']\nskill_series['ba97ae07.json'] = ['pattern_modification', 'pairwise_analogy', 'rettangle_guessing', 'recoloring']\nskill_series['bb43febb.json'] = ['loop_filling', 
'rettangle_guessing']\nskill_series['bbc9ae5d.json'] = ['pattern_expansion', 'image_expansion']\nskill_series['bc1d5164.json'] = ['pattern_moving', 'pattern_juxtaposition', 'crop', 'pairwise_analogy']\n\n# 296-300\nskill_series['bd4472b8.json'] = ['detect_wall', 'pattern_expansion', 'ex_nihilo', 'color_guessing', 'color_palette']\nskill_series['bda2d7a6.json'] = ['recoloring', 'pairwise_analogy', 'pattern_modification', 'color_permutation']\nskill_series['bdad9b1f.json'] = ['draw_line_from_point', 'direction_guessing', 'recoloring', 'take_intersection']\nskill_series['be94b721.json'] = ['separate_shapes', 'count_tiles', 'take_maximum', 'crop']\nskill_series['beb8660c.json'] = ['pattern_moving', 'count_tiles', 'order_numbers']\n\n# 301-305\nskill_series['c0f76784.json'] = ['loop_filling', 'measure_area', 'associate_colors_to_numbers']\nskill_series['c1d99e64.json'] = ['draw_line_from_border', 'detect_grid']\nskill_series['c3e719e8.json'] = ['image_repetition', 'image_expansion', 'count_different_colors', 'take_maximum']\nskill_series['c3f564a4.json'] = ['pattern_expansion', 'image_filling']\nskill_series['c444b776.json'] = ['detect_grid', 'separate_images', 'find_the_intruder', 'image_repetition']\n\n# 306-310\nskill_series['c59eb873.json'] = ['image_resizing']\nskill_series['c8cbb738.json'] = ['pattern_moving', 'jigsaw', 'crop']\nskill_series['c8f0f002.json'] = ['recoloring', 'associate_colors_to_colors']\nskill_series['c909285e.json'] = ['find_the_intruder', 'crop', 'rectangle_guessing']\nskill_series['c9e6f938.json'] = ['image_repetition', 'image_reflection']\n\n# 311-315\nskill_series['c9f8e694.json'] = ['recoloring', 'pattern_repetition', 'color_palette']\nskill_series['caa06a1f.json'] = ['pattern_expansion', 'image_filling']\nskill_series['cbded52d.json'] = ['detect_grid', 'separate_images', 'pattern_modification', 'pattern_repetition', 'pattern_juxtaposition', 'connect_the_dots']\nskill_series['cce03e0d.json'] = ['image_repetition', 'image_expansion', 'pairwise_analogy']\nskill_series['cdecee7f.json'] = ['summarize', 'pairwise_analogy']\n\n# 316-320\nskill_series['ce22a75a.json'] = ['replace_pattern']\nskill_series['ce4f8723.json'] = ['detect_wall', 'separate_images', 'take_complement', 'take_intersection']\nskill_series['ce602527.json'] = ['crop', 'size_guessing', 'shape_guessing', 'find_the_intruder', 'remove_intruder']\nskill_series['ce9e57f2.json'] = ['recoloring', 'count_tiles', 'take_half']\nskill_series['cf98881b.json'] = ['detect_wall', 'separate_images', 'pattern_juxtaposition']\n\n# 321-325\nskill_series['d037b0a7.json'] = ['pattern_expansion', 'draw_line_from_point']\nskill_series['d06dbe63.json'] = ['pattern_expansion', 'pairwise_analogy']\nskill_series['d07ae81c.json'] = ['draw_line_from_point', 'diagonals', 'color_guessing']\nskill_series['d0f5fe59.json'] = ['separate_shapes', 'count_shapes', 'associate_images_to_numbers', 'pairwise_analogy']\nskill_series['d10ecb37.json'] = ['crop']\n\n# 326-330\nskill_series['d13f3404.json'] = ['image_expansion', 'draw_line_from_point', 'diagonals']\nskill_series['d22278a0.json'] = ['pattern_expansion', 'pairwise_analogy']\nskill_series['d23f8c26.json'] = ['crop', 'image_expansion']\nskill_series['d2abd087.json'] = ['separate_shapes', 'count_tiles', 'associate_colors_to_numbers', 'recoloring']\nskill_series['d364b489.json'] = ['pattern_expansion']\n\n# 331-335\nskill_series['d406998b.json'] = ['recoloring', 'one_yes_one_no', 'cylindrical']\nskill_series['d43fd935.json'] = ['draw_line_from_point', 'direction_guessing', 
'projection_unto_rectangle']\nskill_series['d4469b4b.json'] = ['dominant_color', 'associate_images_to_colors']\nskill_series['d4a91cb9.json'] = ['connect_the_dots', 'direction_guessing']\nskill_series['d4f3cd78.json'] = ['rectangle_guessing', 'recoloring', 'draw_line_from_point']\n\n# 336-340\nskill_series['d511f180.json'] = ['associate_colors_to_colors']\nskill_series['d5d6de2d.json'] = ['loop_filling', 'replace_pattern', 'remove_intruders']\nskill_series['d631b094.json'] = ['count_tiles', 'dominant_color', 'summarize']\nskill_series['d687bc17.json'] = ['bring_patterns_close', 'gravity', 'direction_guessing', 'find_the_intruder', 'remove_intruders']\nskill_series['d6ad076f.json'] = ['bridges', 'connect_the_dots', 'draw_line_from_point']\n\n# 341-345\nskill_series['d89b689b.json'] = ['pattern_juxtaposition', 'summarize', 'direction_guessing']\nskill_series['d8c310e9.json'] = ['pattern_expansion', 'pattern_repetition', 'pattern_completion']\nskill_series['d90796e8.json'] = ['replace_pattern']\nskill_series['d9f24cd1.json'] = ['draw_line_from_point', 'gravity', 'obstacles']\nskill_series['d9fac9be.json'] = ['find_the_intruder', 'summarize', 'x_marks_the_spot']\n\n# 346-350\nskill_series['dae9d2b5.json'] = ['pattern_juxtaposition', 'separate_images', 'recoloring']\nskill_series['db3e9e38.json'] = ['pattern_expansion', 'out_of_boundary']\nskill_series['db93a21d.json'] = ['contouring', 'draw_line_from_point', 'measure_area', 'measure_length', 'algebra']\nskill_series['dbc1a6ce.json'] = ['connect_the_dots']\nskill_series['dc0a314f.json'] = ['pattern_completion', 'crop']\n\n# 351-355\nskill_series['dc1df850.json'] = ['contouring', 'pattern_expansion', 'out_of_boundary']\nskill_series['dc433765.json'] = ['pattern_moving', 'direction_guessing', 'only_one']\nskill_series['ddf7fa4f.json'] = ['color_palette', 'recoloring']\nskill_series['de1cd16c.json'] = ['separate_images', 'count_tiles', 'take_maximum', 'summarize']\nskill_series['ded97339.json'] = ['connect_the_dots']\n\n# 356-360\nskill_series['e179c5f4.json'] = ['pattern_expansion', 'bouncing']\nskill_series['e21d9049.json'] = ['pattern_expansion', 'draw_line_from_point', 'color_palette']\nskill_series['e26a3af2.json'] = ['remove_noise', 'separate_images']\nskill_series['e3497940.json'] = ['detect_wall', 'separate_images', 'image_reflection', 'image_juxtaposition']\nskill_series['e40b9e2f.json'] = ['pattern_expansion', 'pattern_reflection', 'pattern_rotation']\n\n# 361-365\nskill_series['e48d4e1a.json'] = ['count_tiles', 'pattern_moving', 'detect_grid', 'out_of_boundary']\nskill_series['e5062a87.json'] = ['pattern_repetition', 'pattern_juxtaposition']\nskill_series['e509e548.json'] = ['recoloring', 'associate_colors_to_shapes', 'homeomorphism']\nskill_series['e50d258f.json'] = ['separate_images', 'detect_background_color', 'crop', 'count_tiles', 'take_maximum']\nskill_series['e6721834.json'] = ['pattern_moving', 'pattern_juxtaposition', 'crop']\n\n# 366-370\nskill_series['e73095fd.json'] = ['loop_filling', 'rectangle_guessing']\nskill_series['e76a88a6.json'] = ['pattern_repetition', 'pattern_juxtaposition']\nskill_series['e8593010.json'] = ['holes', 'count_tiles', 'loop_filling', 'associate_colors_to_numbers']\nskill_series['e8dc4411.json'] = ['pattern_expansion', 'direction_guessing']\nskill_series['e9614598.json'] = ['pattern_expansion', 'direction_guessing', 'measure_length']\n\n# 371-375\nskill_series['e98196ab.json'] = ['detect_wall', 'separate_images', 'image_juxtaposition']\nskill_series['e9afcf9a.json'] = 
['pattern_modification']\nskill_series['ea32f347.json'] = ['separate_shapes', 'count_tiles', 'recoloring', 'associate_colors_to_ranks']\nskill_series['ea786f4a.json'] = ['pattern_modification', 'draw_line_from_point', 'diagonals']\nskill_series['eb281b96.json'] = ['image_repetition', 'image_reflection']\n\n# 376-380\nskill_series['eb5a1d5d.json'] = ['summarize']\nskill_series['ec883f72.json'] = ['pattern_expansion', 'draw_line_from_point', 'diagonals']\nskill_series['ecdecbb3.json'] = ['pattern_modification', 'draw_line_from_point']\nskill_series['ed36ccf7.json'] = ['image_rotation']\nskill_series['ef135b50.json'] = ['draw_line_from_point', 'bridges', 'connect_the_dots']\n\n# 381-385\nskill_series['f15e1fac.json'] = ['draw_line_from_point', 'gravity', 'obstacles', 'direction_guessing']\nskill_series['f1cefba8.json'] = ['draw_line_from_point', 'pattern_modification']\nskill_series['f25fbde4.json'] = ['crop', 'image_resizing']\nskill_series['f25ffba3.json'] = ['pattern_repetition', 'pattern_reflection']\nskill_series['f2829549.json'] = ['detect_wall', 'separate_images', 'take_complement', 'pattern_intersection']\n\n# 386-390\nskill_series['f35d900a.json'] = ['pattern_expansion']\nskill_series['f5b8619d.json'] = ['pattern_expansion', 'draw_line_from_point', 'image_repetition']\nskill_series['f76d97a5.json'] = ['take_negative', 'recoloring', 'associate_colors_to_colors']\nskill_series['f8a8fe49.json'] = ['pattern_moving', 'pattern_reflection']\nskill_series['f8b3ba0a.json'] = ['detect_grid', 'find_the_intruder', 'dominant_color', 'count_tiles', 'summarize', 'order_numbers']\n\n# 391-395\nskill_series['f8c80d96.json'] = ['pattern_expansion', 'background_filling']\nskill_series['f8ff0b80.json'] = ['separate_shapes', 'count_tiles', 'summarize', 'order_numbers']\nskill_series['f9012d9b.json'] = ['pattern_expansion', 'pattern_completion', 'crop']\nskill_series['fafffa47.json'] = ['separate_images', 'take_complement', 'pattern_intersection']\nskill_series['fcb5c309.json'] = ['rectangle_guessing', 'separate_images', 'count_tiles', 'take_maximum', 'crop', 'recoloring']\n\n# 396-399\nskill_series['fcc82909.json'] = ['pattern_expansion', 'separate_images', 'count_different_colors']\nskill_series['feca6190.json'] = ['pattern_expansion', 'image_expansion', 'draw_line_from_point', 'diagonals']\nskill_series['ff28f65a.json'] = ['count_shapes', 'associate_images_to_numbers']\nskill_series['ff805c23.json'] = ['pattern_expansion', 'pattern_completion', 'crop']\n\nskill_series.head(10)\n# Shape predictor\n\ndef shape_finder(train, test):\n    # Predict the output shape of each test pair from the train pairs\n    \n    inputs = []\n    outputs = []\n    for task in train:\n        inputs.append(np.array(task[\"input\"]))\n        outputs.append(np.array(task[\"output\"]))\n    \n    shape_ratio = [[np.array(train[i][\"input\"]).shape[j]/np.array(train[i][\"output\"]).shape[j] for j in range(len(np.array(train[i][\"input\"]).shape))] for i in range(len(train))]\n    \n    # every train pair shares the same input/output shape ratio: scale the test inputs\n    # (use the test input's own shape here; indexing the train inputs with a test\n    # index, as the original did, can raise IndexError when len(test) > len(train))\n    if all(shape_ratio[0] == shape_ratio[i] for i in range(len(shape_ratio))):\n        return [tuple([np.array(test[i][\"input\"]).shape[j]/shape_ratio[0][j] for j in range(len(np.array(test[i][\"input\"]).shape))]) for i in range(len(test))]\n    \n    # all train outputs share the same shape: predict that shape\n    elif all(item.shape == outputs[0].shape for item in outputs):\n        return [outputs[0].shape for i in range(len(test))]\n    \n    # fallback when neither rule applies\n    return([(0,0),(0,0),(0,0)])\n\n
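# A hypothetical illustration (not from the notebook) of shape_finder's first rule:\n# when every train pair scales by the same input/output ratio, the predicted test\n# output shape is the test input shape divided by that ratio. The _demo_* data is made up.\n_demo_train = [{\"input\": [[0] * 3] * 3, \"output\": [[0] * 6] * 6}]  # 3x3 -> 6x6, ratio [0.5, 0.5]\n_demo_test = [{\"input\": [[0] * 5] * 5}]\n# shape_finder(_demo_train, _demo_test) would return [(10.0, 10.0)]\n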
# quick evaluation of shape_finder on the training tasks\nscore = 0\ntraining_path = data_path / 'training'\ntraining_tasks = sorted(os.listdir(training_path))\nfailed = []\nfor i in range(len(training_tasks)):\n\n    task_file = str(training_path / training_tasks[i])\n\n    with open(task_file, 'r') as f:\n        task = json.load(f)\n    \n    out_shape = shape_finder(task[\"train\"], task[\"test\"])\n    for j in range(len(task[\"test\"])):\n        if out_shape[j] == np.array(task[\"test\"][j][\"output\"]).shape:\n            score+=1\n        else:\n            failed.append(i)\nprint(f\"Score : {score}/{len(training_tasks)} \\n{score*100/len(training_tasks)}%\")\ndef load_task_with_logic(logic):\n    # retrieve the names of the tasks whose single skill label equals `logic`\n    tasks = []\n    for task in one_logic_task.keys():\n        if one_logic_task[task][0] == logic:\n            tasks.append(task)\n    # load them from disk\n    loaded_tasks = []\n    for task in tasks:\n        with open(str(training_path / task), \"r\") as f:\n            loaded_tasks.append(json.load(f))\n    return loaded_tasks\n\n# get all tasks that have exactly one skill label associated\n\none_logic_task = pd.Series(dtype=object)  # explicit dtype avoids the empty-Series warning\nfor key in skill_series.keys():\n    if len(skill_series[key]) == 1:\n        one_logic_task[key] = skill_series[key]\nvalues, count = np.unique(one_logic_task.values, return_counts=True)\nfor i,value in enumerate(values):\n    print(value, \":\", count[i])\nunique_skill = []\nfor skills in skill_series:\n    for skill in skills:\n        if skill not in unique_skill:\n            unique_skill.append(skill)\nprint(len(unique_skill))\ndef preprocess_img(img, preprocess_type, shift= (0,0)):\n    # preprocess one image by embedding it in a fixed 32x32 canvas\n    output_shape = (32,32)\n    img = np.asarray(img)\n\n    if preprocess_type == \"CENTER\":\n        # TODO\n        raise NotImplementedError\n    elif preprocess_type == \"CORNER\":\n        # TODO improve by having another color for the out-of-map area\n        background = np.zeros(output_shape)\n        background[shift[0]:shift[0]+img.shape[0],shift[1]:shift[1]+img.shape[1]] = img\n        return background\n    return img.tolist()\n\ndef preprocess_in_out(in_out, preprocess_type, data_augmentation=False):\n    # preprocess input and output the same way (shared random shift)\n    in_out_dict = in_out.copy()\n    shift = (0,0)\n    if data_augmentation:\n        max_shape = (32 - max([(len(in_out_dict[key])) for key in in_out_dict.keys()]),32 - max([(len(in_out_dict[key][0])) for key in in_out_dict.keys()]))\n        shift = (random.Random().choice(range(max_shape[0])), random.Random().choice(range(max_shape[1])))\n    for keys in in_out_dict.keys():\n        in_out_dict[keys] = preprocess_img(in_out_dict[keys], preprocess_type, shift)\n    return in_out_dict\n\n\ndef preprocess(task, preprocess_type, data_augmentation=False):\n    # preprocess a whole task\n    for task_keys in task.keys():\n        for index,_ in enumerate(task[task_keys]):\n            task[task_keys][index] = preprocess_in_out(task[task_keys][index], preprocess_type, data_augmentation)\n    return task\n# exploratory pass over grid sizes (max_shape is computed but not used further)\nfor task in skill_series.keys():\n    with open(str(training_path / task), \"r\") as f:\n        task = json.load(f)\n    for task_keys in task:\n        for index,_ in enumerate(task[task_keys]):\n            for sub_key in task[task_keys][index].keys():\n                max_shape = (max([(len(task[task_keys][index][key])) for key in task[task_keys][index].keys()]),max([(len(task[task_keys][index][key][0])) for key in task[task_keys][index].keys()]))\n\n# build the sorted skill vocabulary used for the multi-hot label vectors\ntotal_skills = []\nfor skills in skill_series.values:\n    for skill in skills:\n        if skill not in total_skills:\n            total_skills.append(skill)\ntotal_skills = sorted(total_skills)\n\ntrain_input_example = []\ntrain_output_example = []\ntrain_label = []\n\ntest_input_example = []\ntest_output_example = []\ntest_label = []\n\nfor task in skill_series.keys():\n    for i in range(20):\n        # make 20 augmented copies of each task\n        with open(str(training_path / task), \"r\") as f:\n            loaded_task = preprocess(json.load(f), 
\"CORNER\", True) \n\n indexes = []\n for skill in skill_series[task]:\n indexes.append(total_skills.index(skill))\n label = sum(tf.one_hot(indexes, len(total_skills)))\n\n for train_task in loaded_task[\"train\"]:\n train_input_example.append(tf.convert_to_tensor(train_task[\"input\"]))\n train_output_example.append(tf.convert_to_tensor(train_task[\"output\"]))\n train_label.append(label)\n for test_task in loaded_task[\"test\"]:\n test_input_example.append(tf.convert_to_tensor(test_task[\"input\"]))\n test_output_example.append(tf.convert_to_tensor(test_task[\"output\"]))\n test_label.append(label)\n\nprint(train_input_example[0],train_output_example[0], train_label[0])\n\ntrain_example_dataset = tf.data.Dataset.from_tensor_slices((train_input_example, train_output_example))\ntrain_example_dataset = train_example_dataset.map(lambda x, y: (tf.math.divide(x,9),tf.math.divide(y,9)))\ntrain_example_dataset = train_example_dataset.map(lambda x, y : (tf.expand_dims(x,-1), tf.expand_dims(y,-1)))\n\ntrain_label_dataset = tf.data.Dataset.from_tensor_slices((train_label))\ntrain_dataset = tf.data.Dataset.zip((train_example_dataset, train_label_dataset)).batch(32).repeat().shuffle(1301)\n\n\ntest_example_dataset = tf.data.Dataset.from_tensor_slices((test_input_example, test_output_example))\ntest_example_dataset = test_example_dataset.map(lambda x, y: (tf.math.divide(x,9),tf.math.divide(y,9)))\ntest_example_dataset = test_example_dataset.map(lambda x, y : (tf.expand_dims(x,-1), tf.expand_dims(y,-1)))\n\ntest_label_dataset = tf.data.Dataset.from_tensor_slices((test_label))\ntest_dataset = tf.data.Dataset.zip((test_example_dataset, test_label_dataset)).batch(32).repeat().shuffle(416)\nfrom tensorflow.keras import backend, regularizers, models\nL2_WEIGHT_DECAY = 0.01\nBATCH_NORM_DECAY = 0.99\nBATCH_NORM_EPSILON = 0.001\n\ndef identity_block(input_tensor, kernel_size, filters):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of\n middle conv layer at main path\n filters: list of integers, the filters of 3 conv layer at main path\n \"\"\"\n filters1, filters2, filters3 = filters\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n \n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n \n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n \n x = layers.Conv2D(filters2, kernel_size,\n padding='same', use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n \n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n \n x = layers.Activation('relu')(x)\n \n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n \n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n \n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n return x\n\ndef conv_block(input_tensor, kernel_size, filters, strides=(2, 2)):\n \"\"\"A block that has a conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of\n middle conv layer at main path\n filters: list of integers, the 
filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n # Returns\n Output tensor for the block.\n Note that from stage 3,\n the second conv layer at main path is with strides=(2, 2)\n And the shortcut should have strides=(2, 2) as well\n \"\"\"\n \n filters1, filters2, filters3 = filters\n \n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n \n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n \n \n x = layers.Conv2D(filters2, kernel_size, strides=strides, padding='same',\n use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n \n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n \n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n shortcut = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(shortcut)\n \n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x\n\ndef resnet50(num_classes, input_shape):\n img_input = layers.Input(shape=input_shape)\n inputA = Input(shape=input_shape)\n inputB = Input(shape=input_shape)\n \n if backend.image_data_format() == 'channels_first':\n x1 = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),name='transpose')(inputA)\n bn_axis = 1\n else: # channels_last\n x1 = inputA\n bn_axis = 3\n # Conv1 (7x7,64,stride=2)\n x1 = layers.ZeroPadding2D(padding=(3, 3))(x1)\n x1 = layers.Conv2D(32, (7, 7),\n strides=(2, 2),\n padding='valid', use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x1)\n x1 = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x1)\n x1 = layers.Activation('relu')(x1)\n x1 = layers.ZeroPadding2D(padding=(1, 1))(x1)\n \n # 3x3 max pool,stride=2\n x1 = layers.MaxPooling2D((3, 3), strides=(2, 2))(x1)\n \n if backend.image_data_format() == 'channels_first':\n x2 = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),name='transpose')(inputB)\n bn_axis = 1\n else: # channels_last\n x2 = inputB\n bn_axis = 3\n # Conv1 (7x7,64,stride=2)\n x2 = layers.ZeroPadding2D(padding=(3, 3))(x2)\n x2 = layers.Conv2D(32, (7, 7),\n strides=(2, 2),\n padding='valid', use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x2)\n x2 = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x2)\n x2 = layers.Activation('relu')(x2)\n x2 = layers.ZeroPadding2D(padding=(1, 1))(x2)\n \n # 3x3 max pool,stride=2\n x2 = layers.MaxPooling2D((3, 3), strides=(2, 2))(x2)\n \n \n combined = concatenate([x1, x2])\n\n # Conv2_x\n # 1×1, 64\n # 3×3, 64\n # 1×1, 256\n \n x = conv_block(combined, 3, [64, 64, 256], strides=(1, 1))\n x = 
identity_block(x, 3, [64, 64, 256])\n x = identity_block(x, 3, [64, 64, 256])\n \n # Conv3_x\n #\n # 1×1, 128\n # 3×3, 128\n # 1×1, 512\n \n x = conv_block(x, 3, [128, 128, 512])\n x = identity_block(x, 3, [128, 128, 512])\n x = identity_block(x, 3, [128, 128, 512])\n x = identity_block(x, 3, [128, 128, 512])\n \n # Conv4_x\n # 1×1, 256\n # 3×3, 256\n # 1×1, 1024\n x = conv_block(x, 3, [256, 256, 1024])\n x = identity_block(x, 3, [256, 256, 1024])\n x = identity_block(x, 3, [256, 256, 1024])\n x = identity_block(x, 3, [256, 256, 1024])\n x = identity_block(x, 3, [256, 256, 1024])\n x = identity_block(x, 3, [256, 256, 1024])\n \n # 1×1, 512\n # 3×3, 512\n # 1×1, 2048\n x = conv_block(x, 3, [512, 512, 2048])\n x = identity_block(x, 3, [512, 512, 2048])\n x = identity_block(x, 3, [512, 512, 2048])\n \n # average pool, 1000-d fc, softmax\n x = layers.GlobalAveragePooling2D()(x)\n x = layers.Dense(\n num_classes, activation='softmax',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),\n bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n # Create model.\n return models.Model([inputA,inputB], x, name='resnet50')\n\nresnet50 = resnet50(132,(32,32,1))\ntf.keras.utils.plot_model(resnet50, show_shapes=True, dpi=64)\nresnet50.compile(optimizer=keras.optimizers.Adam() , # Optimizer\n # Loss function to minimize\n loss=keras.losses.CosineSimilarity(),\n # List of metrics to monitor\n metrics=['accuracy'])\nresnet50.fit(train_dataset, epochs=30, steps_per_epoch = 1000, validation_steps = 200, validation_data=test_dataset)\ninputA = Input(shape=(32,32,1))\ninputB = Input(shape=(32,32,1))\n\nfilters = (32, 64)\nx=inputA\ny=inputB\nchanDim = -1\n# define the model input\n# loop over the number of filters\nx = Conv2D(16, (3, 3), padding=\"same\")(x)\nx = Activation(\"relu\")(x)\nx = BatchNormalization(axis=chanDim)(x)\nx = MaxPooling2D(pool_size=(2, 2))(x)\nx = Model(inputs=inputA, outputs=x)\ny = Conv2D(16, (3, 3), padding=\"same\")(y)\ny = Activation(\"relu\")(y)\ny = BatchNormalization(axis=chanDim)(y)\ny = MaxPooling2D(pool_size=(2, 2))(y)\ny = Model(inputs=inputB, outputs=y)\n\ncombined = concatenate([x.output, y.output])\nfor (i, f) in enumerate(filters):\n # if this is the first CONV layer then set the input\n # appropriately\n # CONV => RELU => BN => POOL\n if i == 0:\n first = Conv2D(f, (3, 3), padding=\"same\")(combined)\n else :\n first = Conv2D(f, (3, 3), padding=\"same\")(z)\n\n z = Activation(\"relu\")(first)\n z = BatchNormalization(axis=chanDim)(z)\n z = MaxPooling2D(pool_size=(2, 2))(z)\n\n# flatten the volume, then FC => RELU => BN => DROPOUT\nz = Flatten()(z)\nz = Dense(512)(z)\nz = Activation(\"relu\")(z)\nz = BatchNormalization(axis=chanDim)(z)\nz = Dropout(0.5)(z)\n# apply another FC layer, this one to match the number of nodes\n# coming out of the MLP\nz = Dense(132)(z)\nz = Activation(\"relu\")(z)\nmodel = Model([x.input, y.input], z)\ntf.keras.utils.plot_model(model, show_shapes=True, dpi=64)\nmodel.compile(optimizer=keras.optimizers.Adam() , # Optimizer\n # Loss function to minimize\n loss=keras.losses.CosineSimilarity(),\n # List of metrics to monitor\n metrics=['accuracy'])\nmodel.fit(train_dataset, epochs=30, steps_per_epoch = 1000, validation_steps = 200, validation_data=test_dataset)\nprint(training_path)\nfor file in os.listdir(training_path):\n print('\\n',file)\n with open(os.path.join(training_path, file)) as f:\n load = json.load(f)\n preprocess(load, \"CORNER\")\n result = model((tf.expand_dims(tf.expand_dims(load[\"train\"][0][\"input\"], -1),0), 
tf.expand_dims(tf.expand_dims(load[\"train\"][0][\"output\"], -1),0)))\n print([total_skills[i] for i in np.nonzero(result)[1]],'\\n', sorted(skill_series[file]))\ndef downsample(filters, size, apply_batchnorm=True):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n result.add(\n tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',\n kernel_initializer=initializer, use_bias=False))\n\n if apply_batchnorm:\n result.add(tf.keras.layers.BatchNormalization())\n\n result.add(tf.keras.layers.LeakyReLU())\n\n return result\n\ndef upsample(filters, size, apply_dropout=False):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n result.add(\n tf.keras.layers.Conv2DTranspose(filters, size, strides=2,\n padding='same',\n kernel_initializer=initializer,\n use_bias=False))\n\n result.add(tf.keras.layers.BatchNormalization())\n\n if apply_dropout:\n result.add(tf.keras.layers.Dropout(0.5))\n\n result.add(tf.keras.layers.ReLU())\n\n return result\ndef Generator():\n OUTPUT_CHANNELS = 9\n inputs = tf.keras.layers.Input(shape=[32,32,9])\n\n down_stack = [\n downsample(4, 4, apply_batchnorm=False), # (bs, 128, 128, 64)\n downsample(8, 4), # (bs, 64, 64, 128)\n downsample(16, 4), # (bs, 32, 32, 256)\n downsample(32, 4), # (bs, 16, 16, 512)\n ]\n\n up_stack = [\n upsample(32, 4, apply_dropout=True), # (bs, 8, 8, 1024)\n upsample(16, 4), # (bs, 16, 16, 1024)\n upsample(8, 4), # (bs, 32, 32, 512)\n upsample(4, 4), # (bs, 64, 64, 256)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,\n strides=2,\n padding='same',\n kernel_initializer=initializer,\n activation='tanh') # (bs, 256, 256, 3)\n\n x = inputs\n\n # Downsampling through the model\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n # Upsampling and establishing the skip connections\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = tf.keras.layers.Concatenate()([x, skip])\n x = tf.keras.layers.Conv2D(32,(1,1),padding='same')(x)\n x = tf.keras.layers.Dropout(0.5)(x)\n\n x = last(x)\n\n return tf.keras.Model(inputs=inputs, outputs=x)\n\ngenerator = Generator()\ntf.keras.utils.plot_model(generator, show_shapes=True, dpi=64)\nLOGIC_TO_LOAD = [4]\nfor LOGIC in LOGIC_TO_LOAD:\n print(len(load_task_with_logic(\"pattern_expansion\")[LOGIC][\"train\"]))\n for task in load_task_with_logic(\"pattern_expansion\")[LOGIC][\"train\"]:\n to_plot = preprocess_in_out(task, 'CORNER',True)\n gen_output = generator(tf.expand_dims(tf.one_hot(np.array(to_plot[\"input\"], dtype=np.int64),9),0), training=False)\n plot(to_plot[\"input\"])\n plot(to_plot[\"output\"])\n plot(tf.math.argmax(gen_output[0,...],axis=-1))\n #print([gen_output[0][i] for i in range(len(tf.math.argmax(gen_output,axis=-1)[0]))])\n\ninput_train = []\noutput_train = []\nfor LOGIC in LOGIC_TO_LOAD:\n for task in load_task_with_logic(\"pattern_expansion\")[LOGIC][\"train\"]:\n for i in range(100):\n # data augment 20 times\n preprocessed_task_train = preprocess_in_out(task,\"CORNER\", True)\n input_train.append(tf.one_hot(np.array(preprocessed_task_train[\"input\"], dtype=np.int64),9))\n output_train.append(tf.one_hot(np.array(preprocessed_task_train[\"output\"], dtype=np.int64),9))\nprint(len(input_train), len(output_train))\ninput_test = []\noutput_test = []\nfor LOGIC in LOGIC_TO_LOAD:\n for task in load_task_with_logic(\"pattern_expansion\")[LOGIC][\"test\"]:\n for i in range(100):\n 
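# Aside (illustrative sketch, not part of the original script): the augmentation
# loops here depend on a one-hot round trip -- a grid of colour indices 0-8
# becomes a (H, W, 9) tensor via tf.one_hot, and argmax over the last axis
# recovers the grid, which is also how gen_output is decoded for plotting.
import numpy as np
import tensorflow as tf

grid = np.array([[0, 3], [8, 1]], dtype=np.int64)  # toy 2x2 grid of colour ids
encoded = tf.one_hot(grid, 9)                      # shape (2, 2, 9)
decoded = tf.math.argmax(encoded, axis=-1)         # shape (2, 2) again
assert np.array_equal(decoded.numpy(), grid)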
# data augment 20 times\n preprocessed_task_test = preprocess_in_out(task,\"CORNER\", True)\n input_test.append(tf.one_hot(np.array(preprocessed_task_test[\"input\"], dtype=np.int64),9))\n output_test.append(tf.one_hot(np.array(preprocessed_task_test[\"output\"], dtype=np.int64),9))\n\ntrain_input_dataset = tf.data.Dataset.from_tensor_slices(input_train)\n#train_input_dataset = train_input_dataset.map(lambda x: (tf.math.divide(x,9)))\n#train_input_dataset = train_input_dataset.map(lambda x : (tf.expand_dims(x,-1)))\n\ntrain_output_dataset = tf.data.Dataset.from_tensor_slices(output_train)\n#train_output_dataset = train_output_dataset.map(lambda x: (tf.math.divide(x,9)))\n#train_output_dataset = train_output_dataset.map(lambda x : (tf.expand_dims(x,-1)))\n\ntrain_dataset = tf.data.Dataset.zip((train_input_dataset, train_output_dataset)).batch(5).repeat()\n\ntest_input_dataset = tf.data.Dataset.from_tensor_slices(input_test)\n#test_input_dataset = test_input_dataset.map(lambda x: (tf.math.divide(x,9)))\n#test_input_dataset = test_input_dataset.map(lambda x : (tf.expand_dims(x,-1)))\n\ntest_output_dataset = tf.data.Dataset.from_tensor_slices(output_test)\n#test_output_dataset = test_output_dataset.map(lambda x: (tf.math.divide(x,9)))\n#test_output_dataset = test_output_dataset.map(lambda x : (tf.expand_dims(x,-1)))\n\ntest_dataset = tf.data.Dataset.zip((test_input_dataset, test_output_dataset)).batch(5).repeat()\n\ngenerator.compile(optimizer=keras.optimizers.Adam() , # Optimizer\n # Loss function to minimize\n loss=keras.losses.CosineSimilarity(),\n # List of metrics to monitor\n metrics=['accuracy'])\ngenerator.fit(train_dataset, epochs=5, steps_per_epoch = 2000, validation_steps = 500, validation_data=test_dataset)\nfor elemn in train_dataset:\n [plot(inner) for inner in [tf.squeeze(elem, 0) for elem in tf.argmax(elemn,axis=-1)]]\n break\n","repo_name":"aorursy/new-nb-3","sub_path":"florianval_task-tagging-classification.py","file_name":"florianval_task-tagging-classification.py","file_ext":"py","file_size_in_byte":61092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22007831848","text":"import os\nimport webapp2\nimport jinja2\nfrom google.appengine.ext import db\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)\n\nclass Handler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\nclass Post(db.Model):\n subject = db.StringProperty(required = True)\n content = db.TextProperty(required = True)\n created = db.DateTimeProperty(auto_now_add = True)\n\n\nclass MainPage(Handler):\n def render_front(self, subject=\"\", content=\"\", error=\"\"):\n posts = db.GqlQuery(\"Select * from Post order by created DESC\")\n self.render(\"front.html\", subject=subject, content=content, error = error, posts = posts)\n\n def get(self):\n self.render_front()\n\n\nclass NewPost(Handler):\n def render_front(self, subject=\"\", content=\"\", error=\"\"):\n posts = db.GqlQuery(\"Select * from Post order by created DESC\")\n self.render(\"newpost.html\", subject=subject, content=content, error = error, posts = posts)\n\n def get(self):\n self.render_front()\n\n def post(self):\n subject = 
self.request.get(\"subject\")\n        content = self.request.get(\"content\")\n        if subject and content:\n            p = Post(subject = subject, content = content)\n            p.put()\n            last_id = str(p.key().id())\n            self.redirect(\"/blog/post/\" + last_id)\n        else:\n            error = \"we need both a subject and some content!\"\n            self.render_front(subject,content,error)\n\nclass show_single_post(Handler):\n    \n    def get(self, post_id):\n        blog_entry = Post.get_by_id(int(post_id))\n        if blog_entry:\n            self.render(\"post.html\", blog_entry=blog_entry)\n        else:\n            self.render(\"post.html\",error=\"Blog post %s not found!\" %post_id)\n\n\napp = webapp2.WSGIApplication([('/blog', MainPage), \n                               ('/blog/newpost', NewPost),\n                               ('/blog/post/(\\d+)', show_single_post)],\n                              debug=True)\n","repo_name":"molex/Python_Scripts","sub_path":"Web Development/helloworld/Blog.py","file_name":"Blog.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12220245251","text":"import inflection\nfrom sqlalchemy import MetaData, Integer\nfrom sqlalchemy.ext.declarative import declared_attr\nfrom sqlalchemy.orm import DeclarativeBase, declarative_mixin, mapped_column, Mapped\n\n\n@declarative_mixin\nclass BaseModel(DeclarativeBase):\n    \"\"\"Base db model class.\"\"\"\n    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True, autoincrement=True)\n\n\nPOSTGRES_INDEXES_NAMING_CONVENTION = {\n    \"ix\": \"%(column_0_label)s_idx\",\n    \"uq\": \"%(table_name)s_%(column_0_name)s_key\",\n    \"ck\": \"%(table_name)s_%(constraint_name)s_check\",\n    \"fk\": \"%(table_name)s_%(column_0_name)s_fkey\",\n    \"pk\": \"%(table_name)s_pkey\",\n}\n\nBaseModel.metadata = MetaData(naming_convention=POSTGRES_INDEXES_NAMING_CONVENTION)\n","repo_name":"MuratovER/data_aggregator","sub_path":"src/db/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10951864522","text":"# Name: \n# COMP 347 - Machine Learning\n# HW No. 3\n\n# Libraries\nimport csv\nimport math\nfrom telnetlib import X3PAD\nfrom tkinter import W, Y\nimport pandas as pd\nimport numpy as np\nfrom scipy import linalg as LA\nimport matplotlib.pyplot as plt\nimport random\nimport time\n#------------------------------------------------------------------------------\n\n# Problem 1 - Gradient Descent Using Athens Temperature Data\n#------------------------------------------------------------------------------\n# For this problem you will be implementing various forms of gradient descent \n# using the Athens temperature data.  Feel free to copy over any functions you \n# wrote in HW #2 for this.  WARNING: In order to get gradient descent to work\n# well, I highly recommend rewriting your cost function so that you are dividing\n# by N (i.e. the number of data points there are). 
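# A minimal sketch of the 1/N-normalised cost and its gradient described above.
# Names are illustrative; the assignment's own LLS_func/LLS_deriv appear below.
import numpy as np

def lls_cost(A, y, w):
    # f(w) = (1/N) * ||A w - y||^2
    r = A @ w - y
    return float(r.T @ r) / len(y)

def lls_grad(A, y, w):
    # the same 1/N factor carries into the derivative: (2/N) * A^T (A w - y)
    return (2.0 / len(y)) * (A.T @ (A @ w - y))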
Carry this over into all \n# corresponding expression where this would appear (the derivative being one of them).\n\n\ndef A_mat(x, deg):\n \"\"\"Create the matrix A part of the least squares problem.\n x: vector of input data.\n deg: degree of the polynomial fit.\"\"\"\n N = len(x)\n ones = np.ones(N, dtype=float )\n A = np.c_[np.power(x, deg)]\n for i in range(deg-1):\n newCol = np.power(x, (deg-1) -i)\n A = np.c_[A, newCol]\n A = np.c_[A, ones]\n return A\ndef LLS_Solve(x,y, deg):\n \"\"\"Find the vector w that solves the least squares regression.\n x: vector of input data.\n y: vector of output data.\n deg: degree of the polynomial fit.\"\"\"\n A = A_mat(x, deg)\n temp = (LA.inv(A.T@A))@A.T\n w = temp@y\n\n return(w)\ndef poly_func(data, coeffs):\n \"\"\"Produce the vector of output data for a polynomial.\n data: x-values of the polynomial.\n coeffs: vector of coefficients for the polynomial.\"\"\"\n y = data@coeffs\n return(y)\ndef LLS_func(x,y,w,deg):\n \"\"\"The linear least squares objective function.\n x: vector of input data.\n y: vector of output data.\n w: vector of weights.\n deg: degree of the polynomial.\"\"\"\n A = A_mat(x, deg)\n N = len(x)\n f = (1/N) * (LA.norm(A@w - y) **2)\n return f\n# 1a. Fill out the function for the derivative of the least-squares cost function:\ndef LLS_deriv(x,y,w,deg):\n \"\"\"Computes the derivative of the least squares cost function with input\n data x, output data y, coefficient vector w, and deg (the degree of the\n least squares model).\"\"\"\n A = A_mat(x, deg) \n N = len(x)\n if len(np.shape(A)) == 1: np.outer(A,A)\n return (2/N) * A.T@(A@w-y)\n\n\n# 1b. Implement gradient descent as a means of optimizing the least squares cost\n# function. Your method should include the following:\n# a. initial vector w that you are optimizing,\n# b. a tolerance K signifying the acceptable derivative norm for stopping\n# the descent method,\n# c. initial derivative vector D (initialization at least for the sake of \n# starting the loop),\n# d. an empty list called d_hist which captures the size (i.e. norm) of your\n# derivative vector at each iteration, \n# e. an empty list called c_hist which captures the cost (i.e. value of\n# the cost function) at each iteration,\n# f. implement backtracking line search as part of your steepest descent\n# algorithm. You can implement this on your own if you're feeling \n# cavalier, or if you'd like here's a snippet of what I used in mine:\n#\n# eps = 1 \n# m = LA.norm(D)**2\n# t = 0.5*m\n# while LLS_func(a_min, a_max, w - eps*D, 1) > LLS_func(a_min, a_max, w, 1) - eps*t:\n# eps *= 0.9\n\n# Plot curves showing the derivative size (i.e. d_hist) and cost (i.e. 
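# The Armijo backtracking rule from the snippet above, factored into a
# self-contained helper; f is any cost callable, names are illustrative.
import numpy as np

def backtrack(f, w, D, eps=1.0, shrink=0.9, c=0.5):
    t = c * np.linalg.norm(D) ** 2
    # shrink the step until the sufficient-decrease condition holds
    while f(w - eps * D) > f(w) - eps * t:
        eps *= shrink
    return eps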
c_hist)\n# with respect to the number of iterations.\n# \n\ndata = np.genfromtxt('athens_ww2_weather.csv', delimiter=',')\ndf = pd.read_csv('athens_ww2_weather.csv', delimiter=',')\nx = df.iloc[0:, 8:9]\ny = df.iloc[0:, 7:8]\n# x = data[1:, 8:9] # min\n# y = data[1:, 7:8] # max\n\n\ndeg = 1\nK = 0.01 \nw = np.array([[100],[-100]])\nD = np.array([[-1], [1]])\n# d_hist = []\n# c_hist = []\ncount = 0\ndef backtrackingLineSearch(w, D):\n eps = 1 \n m = LA.norm(D)**2\n t = 0.5*m\n while LLS_func(x, y, w - eps*D, 1) > LLS_func(x, y, w, 1) - eps*t:\n eps *= 0.9\n return eps\n\ndef min_gd(x,y,K, w, D):\n # iterative step size: Armijo-Goldstein condition\n global count\n count = 0\n # global w_new\n d_hist = []\n c_hist=[]\n K = 0.01 \n w = np.array([[100],[-100]])\n D = np.array([[-1], [1]])\n start = time.time()\n while(LA.norm(D) >= K):\n eps = backtrackingLineSearch(w, D)\n w = w - (eps*D)\n D = LLS_deriv(x, y, w, deg)\n\n d_hist.append(LA.norm(D))\n c_hist.append(LLS_func(x,y,w,deg))\n count+=1\n\n # print(count)\n # print('eps = ', eps)\n # print('D = ', D)\n # print('eps*D = ', eps*D)\n # print('w = ', w)\n # print('w_new = ', w)\n # print('w = ', w)\n # print('D norm' , LA.norm(D))\n # print('C val', LLS_func(x,y,w,deg))\n # return min_gd(K, w, D)\n\n #FIXMEEE: add conv_speed as output of function \n end = time.time()\n conv_speed = end-start\n\n return w, d_hist, c_hist, count, D, conv_speed\n# w, d_hist, c_hist, count, D, conv_speed = min_gd(x,y,K, w, D)\ndef batch_gd(K, batch_size):\n\n d_hist = []\n c_hist = []\n\n data = np.c_[x, y]\n np.random.shuffle(data)\n N = len(data)\n # total number of batches of input size\n binQuantity = int(N/batch_size)\n remainder = N - (binQuantity*batch_size)\n currBin=0\n\n # not applicable if only training on one batch\n # accounts for different batchSize of last bin \n if(remainder != 0 and currBin == (binQuantity -1)):\n batch_size = batch_size + remainder\n \n \n i = 0 \n while LA.norm(D) >= K:\n # add these (batch_size) points to list\n miniBatchData.append(data[i: i + batch_size, :])\n # convert list to array and reshape array\n miniBatchData = np.reshape(np.array(miniBatchData), (batch_size,2))\n # segment data into x and y data\n mini_x = np.reshape((miniBatchData[:,0]), (batch_size, 1))\n mini_y = np.reshape((miniBatchData[:,1]), (batch_size, 1))\n # pass x and y into gd function \n #FIXME\n # mini_w, mini_d_hist, mini_c_hist, mini_count, D, conv_speed = min_gd(mini_x,mini_y, K, mini_w, D)\n\n cost = LLS_func(mini_x, mini_y, w, deg)\n d_hist.append(LA.norm(D))\n c_hist.append(cost)\n\n eps = 1\n m = LA.norm(D) ** 2\n t = 0.5 * m\n while LLS_func(mini_x, mini_y, w-eps*D, 1) > LLS_func(mini_x, mini_y, w, 1) - eps* t:\n eps *= 0.9\n w = w-(eps*D)\n\n D = LLS_deriv(mini_x, mini_y, w, deg)\n return w, d_hist, c_hist, iterations\n\n \n\n\n\n# plot will be spikey, batch gradients descent and schcastic will be smooth,\n# # randomize will make loinger process cs cutting complexity f each iterastion\n# newtons limits num of iterations, but complexity of each iteration is bigger\n# converge faster, find optimaum in more math\ndef plot(count, d_hist, c_hist):\n iterations = np.linspace(1,count, count)\n plt.plot(iterations, d_hist, color = 'b', marker = (5, 1))\n plt.title('Derivative Size with Respect to Iterations')\n plt.ylabel(\"Derivative Size\")\n plt.xlabel(\"Iterations\")\n plt.show()\n plt.clf()\n\n plt.plot(iterations, c_hist, color = 'b', marker = (5, 1))\n plt.title('Cost Size with Respect to Iterations')\n plt.ylabel(\"Cost Size\")\n 
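# A compact sketch of the mini-batch loop that part 1c below asks for, reusing
# LLS_deriv from above; the fixed step size and epoch count are illustrative.
def minibatch_gd_sketch(x, y, batch_size, deg=1, eps=0.01, n_epochs=10):
    w = np.zeros((deg + 1, 1))
    data = np.c_[x, y]
    for _ in range(n_epochs):
        np.random.shuffle(data)                      # fresh batches every epoch
        for start in range(0, len(data), batch_size):
            batch = data[start:start + batch_size]
            bx, by = batch[:, :1], batch[:, 1:]
            w = w - eps * LLS_deriv(bx, by, w, deg)  # gradient on this batch only
    return w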
plt.xlabel(\"Iterations\")\n plt.show()\n plt.clf()\n\n\n# 1c. Repeat part 1b, but now implement mini-batch gradient descent by randomizing\n# the data points used in each iteration. Perform mini-batch descent for batches\n# of size 5, 10, 25, and 50 data points. For each one, plot the curves\n# for d_hist and c_hist. Plot these all on one graph showing the relationship\n# between batch size and convergence speed (at least for the least squares \n# problem). Feel free to adjust the transparency of your curves so that \n# they are easily distinguishable.\n\n#for loop that goes through range (5,10,25,50)\n\ndef gd(x, y, K, w, D):\n\n # with fixed step size\n deg = 1\n K = 0.01 \n w = np.array([[100],[-100]])\n D = np.array([[-1], [1]])\n eps = 0.09\n d_hist = []\n c_hist = []\n #FIXME: add timer. set conv_time = time\n while(LA.norm(D) >= K):\n #FIXME: why does the derivative keep increasing --> goes to infinity \n # need deriv to get closer to 0 \n w = w - (eps*D)\n D = LLS_deriv(x, y, w, deg)\n d_hist.append(LA.norm(D))\n c_hist.append(LLS_func(x,y,w,deg))\n print(LA.norm(D))\n print('cost: ', LLS_func(x,y,w,deg))\n return w\n# gd(x,y, K, w, D)\n\ndef mini(batch_size, x, y):\n # FIXMEEEE: is the gd function being called bathc_size times?\n\n\n # randomize the data, but keep x,y pairs together\n batch_sizes = [5, 10, 25, 50]\n data = np.c_[x, y]\n np.random.shuffle(data)\n N = len(data)\n # total number of batches of input size\n binQuantity = int(N/batch_size)\n remainder = N - (binQuantity*batch_size)\n # create (batch_size) num bins \n miniBatchData = []\n w = []\n mini_w = np.array([[100],[-100]])\n D = []\n d_hist = []\n c_hist = []\n count = 0\n i = 0\n currBin=0\n # for each miniBatch, calculate 1 step of gd using miniBatch \n while(i [pos_1, pos_2]\n if self.channel.type == 'quantitative':\n\n first_box, last_box = self.label_boxes[0], self.label_boxes[-1]\n x1, y1 = first_box['xc'], first_box['yc']\n x2, y2 = last_box['xc'], last_box['yc']\n t1, t2 = first_box['number'], last_box['number']\n\n if self.channel.orientation == 'x':\n self.domain = [\n t1, t2\n ]\n self.range = [\n int(util.continuous_mapping(t1, t1, t2, x1, x2)),\n int(util.continuous_mapping(t2, t1, t2, x1, x2))\n ]\n else:\n self.domain = [t1, t2]\n self.range = [\n int(util.continuous_mapping(t1, t1, t2, y1, y2)),\n int(util.continuous_mapping(t2, t1, t2, y1, y2))\n ]\n\n # if ordinal : [] -> []\n\n # if temporal: [] -> []\n if self.channel.type == 'temporal':\n self.domain = [box['text'] for box in self.label_boxes]\n self.range = []\n for box in self.label_boxes:\n self.range.append(int(box['xc']) if self.channel.orientation == 'x' else box['yc'])\n\n # if nominal: [label1, .., labeln] -> [pos_1, ..., pos_n]\n if self.channel.type == 'nominal':\n self.domain = [box['text'] for box in self.label_boxes]\n self.range = []\n for box in self.label_boxes:\n self.range.append(int(box['xc']) if self.channel.orientation=='x' else box['yc'])\n\n def infer_scale_type(self):\n if self.channel.type == 'quantitative':\n scales = [\n {\n 'name': 'linear',\n # 'func': lambda x: x,\n 'ifunc': lambda x, m, b: m * x + b\n },\n {\n 'name': 'log',\n # 'func': lambda x: np.ma.log10(x),\n 'ifunc': lambda x, m, b: m * np.power(10, x) + b\n },\n {\n 'name': 'pow',\n # 'func': lambda x: np.power(x, 2),\n 'ifunc': lambda x, m, b: m * np.power(x, 0.5) + b\n },\n {\n 'name': 'sqrt',\n # 'func': lambda x: np.sqrt(x),\n 'ifunc': lambda x, m, b: m * np.power(x, 2) + b\n }\n ]\n\n if self.channel.orientation == 'x':\n xdata = np.array([box['xc'] 
for box in self.label_boxes])\n else:\n length = self.channel.spec_heigth\n xdata = np.array([length - box['yc'] for box in self.label_boxes])\n\n ydata = np.array([box['number'] for box in self.label_boxes])\n\n # hack to avoid errors with log scale\n mapping = np.vectorize(util.continuous_mapping, otypes=[np.float])\n xdata = mapping(xdata, np.min(xdata), np.max(xdata), 1, 10)\n\n pred_scale = 'linear'\n min_error = np.inf\n for scale in scales:\n ifunc = scale['ifunc']\n\n # reset data\n x = xdata\n y = ydata\n\n # removing zeros if log scale\n if scale['name'] == 'log':\n valid_idx = x > 0\n x = x[valid_idx]\n y = y[valid_idx]\n\n try:\n # fit a line\n popt, pcov = curve_fit(ifunc, x, y)\n error = mean_squared_error(y, ifunc(x, *popt))\n except Exception:\n error = np.inf\n\n # saving best option\n if error < min_error:\n pred_scale = scale['name']\n min_error = error\n\n self.type = pred_scale\n\n if self.channel.type == 'nominal':\n self.type = 'nominal'\n\n def gen(self):\n scl = OrderedDict()\n scl['type'] = self.type\n scl['labels'] = [b['text'] for b in self.label_boxes]\n if self.channel.type == 'quantitative':\n scl['values'] = [b['number'] for b in self.label_boxes]\n if self.channel.type == 'temporal':\n scl['values'] = [b['number'] for b in self.label_boxes]\n scl['domain'] = self.domain\n scl['range'] = self.range\n return scl","repo_name":"visual-ds/rev","sub_path":"rev/spec/scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"29365753805","text":"import shutil\nimport subprocess\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), 'src', 'math_py'))\nfrom prob_utils import rename_tags\n\ndef main(): \n if len(sys.argv) != 3:\n print('Usage: python3 rename.py tag_from tag_to') \n sys.exit(0)\n tag_from = sys.argv[1]\n tag_to = sys.argv[2]\n rename_tags.rename_all(tag_from,tag_to)\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"kapsitis/math","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18396542133","text":"from .models import Photo\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Profile, matches, prompt,dayte\nfrom django.contrib.auth.models import User\nfrom .utils import getSuggestions,get_unseen_matches,get_all_matches\nfrom django.utils import timezone\nimport os \nimport json\nfrom django.core.files.base import ContentFile\n# Create your views here.\n\n@api_view(['patch'])\n@permission_classes([IsAuthenticated])\ndef update_location(request):\n user=request.user\n profile=user.profile\n profile.lat=request.data.get('lat')\n profile.lng=request.data.get('lng')\n #profile.location_name=request.data.get('location')\n #to be added location name---------------------------------\n profile.save()\n return Response({'message': 'Location updated'}, status=status.HTTP_200_OK)\n\n\n\n@api_view(['get'])\n@permission_classes([IsAuthenticated])\ndef home(request):\n user=request.user\n profile = Profile.objects.filter(user=user).first() \n if not profile:\n return Response({'message': 'Please finish setting up your profile'}, status=status.HTTP_400_BAD_REQUEST)\n \n 
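# The scale-type inference in infer_scale_type above boils down to: fit each
# candidate inverse mapping with curve_fit and keep the lowest-MSE one. A toy,
# self-contained version (the x/4 scaling is synthetic, not from the original):
import numpy as np
from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error

x = np.linspace(1, 10, 20)
y = 3.0 * np.power(10, x / 4.0) + 1.0   # axis labels that grow log-scale-like

candidates = {
    'linear': lambda x, m, b: m * x + b,
    'log': lambda x, m, b: m * np.power(10, x / 4.0) + b,
}
best_name, best_err = None, np.inf
for name, ifunc in candidates.items():
    popt, _ = curve_fit(ifunc, x, y, maxfev=10000)
    err = mean_squared_error(y, ifunc(x, *popt))
    if err < best_err:
        best_name, best_err = name, err
print(best_name)  # expected: 'log'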
suggestions=getSuggestions(user,profile)\n data=[]\n for suggestion in suggestions:\n #id of the suggestion\n id = suggestion.id\n #get the name of the suggestion\n name = suggestion.first_name\n\n # create a dictionary for each suggestion\n suggestion_dict = {}\n # get pictures in a list with the profile picture the first one\n pictures=[]\n for photo in suggestion.profile.photo_set.all():\n pictures.append(photo.image.url)\n suggestion_dict['pictures'] = pictures\n # get list of interests\n interests=[]\n for interest in suggestion.profile.interests.all():\n interests.append(interest.name)\n suggestion_dict['interests'] = interests\n # get age from birth date\n age=None\n if suggestion.profile.birth_date:\n age=timezone.now().year-suggestion.profile.birth_date.year\n suggestion_dict['age'] = age\n # get prompts from the class prompts where the user == suggestion.user\n prompts=[]\n for p in prompt.objects.filter(user=suggestion):\n prompt_dict={}\n prompt_dict['prompt']=p.prompt\n prompt_dict['answer']=p.answer\n prompts.append(prompt_dict)\n suggestion_dict['id'] = id\n suggestion_dict['name'] = name\n suggestion_dict['prompts'] = prompts\n # get gender\n suggestion_dict['gender'] = suggestion.profile.gender\n # get location\n suggestion_dict['location'] = suggestion.profile.location\n # check if the user has liked the suggestion\n suggestion_dict['matching'] = matches.objects.filter(user1=user, user2=suggestion,matched=True).exists()\n #check if the user has liked the user\n suggestion_dict['liked'] =matches.objects.filter(user1=user, user2=suggestion,matched=False).exists()\n # append the suggestion dictionary to the data list\n data.append(suggestion_dict)\n\n\n user_dict={}\n user_dict['name']=user.first_name\n user_dict['plan'] = profile.plan\n user_dict['birth_date'] = profile.birth_date\n #get the user's pictures\n pictures=[]\n for photo in profile.photo_set.all():\n pictures.append(photo.image.url)\n user_dict['pictures'] = pictures\n #get the user's interests\n interests=[]\n for interest in profile.interests.all():\n interests.append(interest.name)\n user_dict['interests'] = interests\n #get the user's prompts\n prompts=[]\n for p in prompt.objects.filter(user=user):\n prompt_dict={}\n prompt_dict['prompt']=p.prompt\n prompt_dict['answer']=p.answer\n prompts.append(prompt_dict)\n user_dict['prompts'] = prompts\n # GET THE USERS gender\n user_dict['gender']=user.profile.gender\n \n return Response({'user':user_dict,'suggestions': data,'matches':get_unseen_matches(user)}, status=status.HTTP_200_OK)\n \n\n\n\n\n@api_view(['patch'])\n@permission_classes([IsAuthenticated])\ndef updateProfile(request):\n data = request.data\n user = request.user\n profile = user.profile\n if data.get('name'):\n user.name = data.get('name')\n if data.get('phone_number'):\n profile.phone_number = data.get('phone_number')\n if data.get('birth_date'):\n profile.birth_date = data.get('birth_date')\n if data.get('interests'):\n profile.interests = data.get('interests')\n if data.get('bio'):\n profile.bio = data.get('bio')\n if data.get('prompts'):\n profile.prompts = data.get('prompts')\n\n # set pictures or rearrange them\n photos = data.get('photos', [])\n for i, photo in enumerate(photos):\n # Create a new Photo object with the current user's profile\n if i == 0:\n photo_obj = Photo.objects.create(profile=profile, profile_picture=True)\n else:\n photo_obj = Photo.objects.create(profile=profile, profile_picture=False)\n \n # Save the image data to the image field of the Photo object if 
photo.startswith('http'):\n # Check if the URL exists\n response = request.get(photo)\n if response.status_code == 200:\n photo_obj.image.save(os.path.basename(photo), ContentFile(response.content))\n else:\n photo_obj.save_picture_from_base64(photo)\n\n\n@api_view(['post'])\n@permission_classes([IsAuthenticated])\ndef like(request):\n user=request.user\n profile=user.profile\n suggestion_id=request.data.get('id')\n suggestion=User.objects.get(id=suggestion_id)\n #check if the other user has liked me and we did not match then we alert him that they matched\n if matches.objects.filter(user1=suggestion, user2=user,matched=False).exists() :\n match=matches.objects.get(user1=suggestion, user2=user,matched=False)\n match.matched=True\n match.save()\n #return both users profile pictures (the images field profilepicture is true)\n picture = ''\n for photo in suggestion.profile.photo_set.all():\n if photo.profile_picture:\n picture = photo.image.url\n return Response({'message': 'match','picture':picture,'id':match.id}, status=status.HTTP_200_OK)\n if matches.objects.filter(user1=user, user2=suggestion,matched=False).exists():\n print(\"You already liked this user\")\n return Response({'message': 'You already liked this user'}, status=status.HTTP_400_BAD_REQUEST)\n else:\n \n match=matches.objects.create(user1=user, user2=suggestion,matched=False)\n match.save()\n return Response({'message': 'You liked this user'}, status=status.HTTP_200_OK)\n\n\n@api_view(['post'])\n@permission_classes([IsAuthenticated])\ndef set_dayte_day(request):\n user=request.user\n profile=user.profile\n match_id=request.data.get('match_id')\n print (match_id)\n days_times=(request.data.get('days_times')) #a map of days and times\n days=[]\n times=[]\n for day, time in days_times.items():\n days.append(day)\n times.append(time)\n #convert the map to string\n days_str = ','.join(days)\n times_str = ','.join(times)\n\n\n match=matches.objects.get(id=match_id)\n if match.user1==user:\n match.user1_pref_days=days_str\n match.user1_pref_times=times_str\n match.seen_user1=True\n\n else:\n match.user2_pref_days=days_str\n match.user2_pref_times=times_str\n match.seen_user2=True\n\n match.save()\n #check if the other user has set their days and if so create a dayte object\n if match.user1_pref_days!='' and match.user2_pref_days!='':\n dayte_obj=dayte.objects.create(match=match)\n dayte_day = dayte_obj.calc_mid()\n dayte_obj.save()\n day_of_week = dayte_day.strftime('%A')\n date = dayte_day.strftime('%Y-%m-%d')\n time = dayte_day.strftime('%H:%M') \n return Response({'message': 'You have a confirmed date on next '+ day_of_week + ' ' + str(date)+' at '+time}, status=status.HTTP_200_OK)\n else:\n return Response({'message': 'You have to wait for the other user to choose the date'}, status=status.HTTP_200_OK)\n\n@api_view(['get'])\n@permission_classes([IsAuthenticated])\ndef get_all_user_matches(request):\n user=request.user\n profile=user.profile\n matches=get_all_matches(user)\n return Response({'matches':matches}, status=status.HTTP_200_OK)\n","repo_name":"sharkoyd/dayte-backend","sub_path":"dayte/base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"39170629429","text":"import streamlit as st\nimport cv2\nimport numpy as np\nimport pickle\nfrom PIL import Image, ImageTk\n\n\n# Define labels for the different fruit and vegetable categories\nlabels = [\"apple\", \"banana\", \"beetroot\", \"bell 
pepper\", \"cabbage\", \"capsicum\", \"carrot\", \"cauliflower\", \"chilli pepper\", \"corn\", \"cucumber\", \"eggplant\", \"garlic\", \"ginger\", \"grapes\", \"jalepeno\", \"kiwi\", \"lemon\", \"lettuce\", \"mango\", \"onion\", \"orange\", \"paprika\", \"pear\", \"peas\", \"pineapple\", \"pomegranate\", \"potato\", \"raddish\", \"soy beans\", \"spinach\", \"sweetcorn\", \"sweetpotato\", \"tomato\", \"turnip\", \"watermelon\"]\n\nwith open('../logistic/model.pkl', 'rb') as f:\n model = pickle.load(f)\n\n\n\n# Define CSS style for label\n\n\n\n\ndef predict(image):\n # Preprocess image\n img = cv2.resize(image, (32, 32))\n img_vector = img.flatten()\n # Use the trained model to predict the label of the input image\n label_number = model.predict([img_vector])[0]\n label = labels[label_number]\n a = f\"Đây là {label} và nó thuộc fruit. 🍍 \"\n b = f\"Đây là {label} và nó thuộc rau củ. 🍅\"\n # Determine whether the input image belongs to fruits or vegetables\n if label in ['apple', 'banana', 'kiwi', 'lemon', 'mango', 'orange', 'pear', 'pineapple', 'pomegranate', 'watermelon']:\n return a\n else:\n return b\n\n\nst.markdown('
', unsafe_allow_html=True)\nst.markdown('

Nhận Dạng Và Phân Loại Rau Củ🍅 và Quả 🍍

', unsafe_allow_html=True)\nst.markdown('

Chọn ảnh của bạn

', unsafe_allow_html=True)\n# Create a file uploader in Streamlit\nuploaded_file = st.file_uploader(\"Upload Image\", type=[\"jpg\", \"jpeg\", \"png\"])\n\n# Check if file is uploaded\nif uploaded_file is not None:\n # Load image using PIL\n image = Image.open(uploaded_file)\n # Display the uploaded image\n st.image(image, caption=\"Uploaded\", use_column_width=True)\n # Convert PIL image to OpenCV format\n img = np.array(image)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n # Use the predict function to get the label of the uploaded image\n result = predict(img)\n # Display the label\n button = st.button(\"Dự Đoán\")\n if button:\n st.write(f\"

{result}

\", unsafe_allow_html=True)\n else:\n st.write(\"Please upload an image\")\n \n \n\n\nimport streamlit as st\n\n","repo_name":"HOAhufi2002/nhandien_phanloai_raucu_qua","sub_path":"Logistic/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"7995587324","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, redirect, HttpResponse, get_object_or_404\nfrom django.views.generic import View, ListView, DeleteView\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.urls import reverse_lazy\nfrom django.http import JsonResponse\nfrom django.db.models import Count\nfrom django.db.models import Q\n\nimport operator\nfrom functools import reduce\n\nfrom .models import TweetTemplate, Hashtag\nfrom .forms import RegisterForm\n\nfrom accounts.models import CustomUser\n\n\nclass TweetTemplateListView(View):\n def get(self, request, *args, **kwargs):\n queryset = TweetTemplate.objects.all()\n context = {\n 'templates': queryset,\n 'form': RegisterForm(),\n }\n return render(request, 'tweet_template/index.html', context)\n\n def post(self, request, *args, **kwargs):\n # リクエストからフォームを作成\n form = RegisterForm(request.POST)\n # バリデーション\n if not form.is_valid():\n # バリデーションNGの場合はアカウント登録画面のテンプレートを再表示\n return render(request, 'tweet_template/create.html', {'form': form})\n\n form.instance.author = request.user\n\n ht_list = self.request.POST.get('hashtag', None).split()\n\n # 保存する前に一旦取り出す\n template = form.save(commit=False)\n\n # ユーザーオブジェクトを保存\n template.save()\n\n for ht in ht_list:\n try:\n the_tag = Hashtag.objects.get(name=ht)\n template.hashtag.add(the_tag)\n except ObjectDoesNotExist:\n new_tag = Hashtag(name=ht)\n new_tag.save()\n template.hashtag.add(new_tag)\n\n return redirect('tweet_template:index')\n\nindex = TweetTemplateListView.as_view()\n\n\ndef delete_template(request, pk):\n TweetTemplate.objects.filter(pk=pk).delete()\n return redirect('accounts:profile')\n\n\nclass Explore(View):\n def get(self, request, *args, **kwargs): \n return render(request, 'tweet_template/explore.html')\n\nexplore = Explore.as_view()\n\n\nclass Search(View):\n def get(self, request, *args, **kwargs):\n if self.request.GET.get('query'): \n q_word = self.request.GET.get('query').split()\n\n queryset = TweetTemplate.objects.filter(\n reduce(operator.and_, (Q(name__icontains=q) for q in q_word)) |\n reduce(operator.and_, (Q(content__icontains=q) for q in q_word)) |\n reduce(operator.or_, (Q(hashtag__name=q) for q in q_word))\n ).distinct()\n\n context = {\n 'q_word': q_word,\n 'object_list': queryset\n }\n\n return render(request, 'tweet_template/search_result.html', context)\n else:\n return render(request, 'tweet_template/search_result.html')\n\nsearch = Search.as_view()\n","repo_name":"sonokr/templa","sub_path":"tweet_template/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"41024719917","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 5 23:26:58 2017\n\n@author: syedrahman\n\"\"\"\n\nimport os\n\nos.chdir('/Users/syedrahman/Documents/Summer2017/Insight/Project')\n\n\nimport numpy as np\nimport sys, re\nimport itertools\nfrom collections import Counter\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom 
sklearn.feature_extraction.text import CountVectorizer\n\nfrom tqdm import tqdm\ntqdm.pandas(desc=\"progress-bar\")\n\nfrom matplotlib import pyplot\n\n# CNN for the humor dataset\nimport numpy\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.models import load_model\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n\ndata = pd.read_csv('humor_dataset.csv') \n\ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\ndef clear(string):\n try:\n sen = clean_str(string) \n return sen\n except:\n return 'NC'\n\ndef postprocess(data, n=100000):\n data = data.sample(n)\n data['text1'] = data['text'].progress_map(clear) ## progress_map is a variant of the map function plus a progress bar. 
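# Note on the features used below: CountVectorizer rows are bag-of-words counts,
# while keras' Embedding layer expects sequences of integer token ids. A sketch
# of the conventional pairing (hyper-parameters illustrative, not this script's):
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence

def to_padded_sequences(texts, top_words=5000, max_words=500):
    tok = Tokenizer(num_words=top_words)
    tok.fit_on_texts(texts)               # build the vocabulary
    seqs = tok.texts_to_sequences(texts)  # words -> integer indices
    return sequence.pad_sequences(seqs, maxlen=max_words)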
Handy to monitor DataFrame creations.\n data = data[data.text1 != 'NC']\n data.reset_index(inplace=True)\n data.drop('index', inplace=True, axis=1)\n return data\n\nnewdata = postprocess(data)\n\nx = np.array(newdata.text1)\ny = np.array(newdata.funny)\n\ntop_words = 5000\n\nvectorizer = CountVectorizer(analyzer = \"word\", \\\n tokenizer = None, \\\n preprocessor = None, \\\n stop_words = None, \\\n max_features = top_words) \n\ndata_features = vectorizer.fit_transform(x)\ndata_features = data_features.toarray()\n\nvocab = vectorizer.get_feature_names()\nprint(vocab)\n\nprint(data_features.shape)\n\ntype(data_features)\n\n# sorting the data according to max column frequency \nb = np.sum(data_features, axis = 0)\nidx = b.argsort()\ndata_features = data_features[:,idx]\n\nX_train = data_features[0:80000,:]\nX_test = data_features[80000:100000,:]\ny_train = np.array(newdata.funny)[0:80000] \ny_test = np.array(newdata.funny)[80000:100000]\n\n\t\nmax_words = 500\nX_train = sequence.pad_sequences(X_train, maxlen=max_words)\nX_test = sequence.pad_sequences(X_test, maxlen=max_words)\n\n########################################################################\n### Simple Multi-Layer Perceptron Model ###\n########################################################################\n\t\n# create the model\nmodel = Sequential()\nmodel.add(Embedding(top_words, 32, input_length=max_words))\nmodel.add(Flatten())\nmodel.add(Dense(250, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=2)\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\n\n##############################################################################\n# One-Dimensional Convolutional Neural Network Model #\n##############################################################################\n# create the model\nmodel = Sequential()\nmodel.add(Embedding(top_words, 32, input_length=max_words))\nmodel.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Flatten())\nmodel.add(Dense(250, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary()) \n\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=2)\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\nmodel.save('model.h5') \n\nprediction = model.predict(X_test[0])\nprint(prediction)","repo_name":"shr264/Python","sub_path":"Humor/humor-cnn.py","file_name":"humor-cnn.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"9754747795","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom calender.models import Reservation, RegularReservation\nfrom manager.utils import AdminCalendar\nfrom django.views import generic\nfrom calender.views import *\nfrom calender.models import *\n# Create your views here.\ndef manager_index(request):\n if request.user.is_authenticated:\n return render(request, 'manager/index.html')\n else:\n return 
redirect('manager:login')\n\n\nclass AdminCalendarView(generic.ListView):\n model = Reservation\n template_name = 'manager/calendarAdmin.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # use today's date for the calendar\n d = get_date(self.request.GET.get('month', None))\n context['prev_month'] = prev_month(d)\n context['next_month'] = next_month(d) \n # Instantiate our calendar class with today's year and date\n cal = AdminCalendar(d.year, d.month)\n\n # Call the formatmonth method, which returns our calendar as a table\n html_cal = cal.formatmonth(withyear=True)\n context['calendar'] = mark_safe(html_cal)\n\n return context\n\ndef admin_save(request):\n if request.user.is_authenticated:\n if 'reset' in request.POST :\n for a in request.POST:\n if a == 'csrfmiddlewaretoken' or a == 'reason' or a == 'reset':\n continue\n try:\n date = a.split('-', -1)\n yearmonthdate = date[0]+'-'+date[1]+'-'+date[2]\n if date[3] == 'am' :\n time = '10'\n else :\n time = '14'\n instance = get_object_or_404(Reservation, date=yearmonthdate, time=time)\n instance.delete()\n except:\n continue\n elif 'h_reset' in request.POST :\n for a in request.POST:\n if a == 'csrfmiddlewaretoken' or a == 'reason' or a == 'h_reset':\n continue\n try :\n date = a.split('-', -1)\n yearmonthdate = date[0]+'-'+date[1]+'-'+date[2]\n date[3]\n continue\n except:\n instance = get_object_or_404(Reservation, date=yearmonthdate, status='5')\n instance.delete()\n elif 'holiday' in request.POST :\n name = request.POST['reason']\n for a in request.POST:\n if a == 'csrfmiddlewaretoken' or a == 'holiday' or a == 'reason' :\n continue \n try :\n date = a.split('-', -1)\n yearmonthdate = date[0]+'-'+date[1]+'-'+date[2] \n date[3]\n continue\n except :\n try : \n get_object_or_404(Reservation, date=yearmonthdate, status='5')\n except:\n instance = Reservation(date=yearmonthdate, name=name, status='5')\n instance.save() \n else :\n for a in request.POST:\n if a == 'csrfmiddlewaretoken' or a == 'reason':\n continue\n try :\n date = a.split('-', -1)\n yearmonthdate = date[0]+'-'+date[1]+'-'+date[2]\n if date[3] == 'am' :\n time = '10'\n else :\n time = '14'\n try : \n get_object_or_404(Reservation, date=yearmonthdate, time=time, status='0')\n except :\n instance = Reservation(date=yearmonthdate, time=time, status='0')\n instance.save()\n except:\n pass\n change_status()\n try :\n return redirect(reverse('manager:calendarAdmin')+'?month='+date[0]+'-'+date[1])\n except : \n return redirect('manager:calendarAdmin')\n else :\n return redirect('manager:login')\n\n\ndef group_form(request, reservation_id):\n try:\n instance = get_object_or_404(Reservation, pk=reservation_id)\n if instance.status == '1':\n status = '신청대기'\n elif instance.status == '2':\n status = '검토중'\n elif instance.status == '3':\n status = '승인완료'\n date = str(instance.date).split('-')\n date = date[0] + '년 ' + date[1] + '월 ' + date[2] + '일'\n time = instance.time + ':00'\n datetime = date + ' / ' + time\n if instance.grade == 'n':\n grade = '기타'\n else :\n grade = instance.grade + '학년'\n if instance.major == 0:\n major = '공통'\n elif instance.major == 1:\n major = '문과'\n elif instance.major == 2:\n major = '이과'\n length = str(instance.length)+'0분'\n\n places = Place.objects.all()\n return render(request, 'manager/groupform_admin.html', \n {\n 'reservation' : instance,\n 'status': status,\n 'datetime': datetime,\n 'grade': grade,\n 'major': major,\n 'length': length,\n 'places': places,\n })\n except:\n return 
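# The three branches of admin_save above repeat the same checkbox-key parsing;
# a small helper in this style (illustrative, not part of the original file)
# makes the intent clearer:
def parse_slot_key(key):
    """Split a 'YYYY-MM-DD-am' / 'YYYY-MM-DD-pm' checkbox name into (date, time)."""
    parts = key.split('-')
    ymd = '-'.join(parts[:3])
    time = '10' if parts[3] == 'am' else '14'
    return ymd, time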
redirect('calender:reservation_edit', reservation_id = reservation_id)\n\ndef group_confirm(request, reservation_id):\n if request.user.is_authenticated:\n instance = get_object_or_404(Reservation, pk=reservation_id)\n \n if request.method == 'GET':\n if instance.status == '1':\n instance.status = '2'\n instance.save()\n \n elif request.method == 'POST':\n place_name = request.POST['place']\n if place_name == '':\n try :\n place = get_object_or_404(Place, name='정각원 앞 백년비')\n except : \n place = Place()\n place.name = '정각원 앞 백년비'\n place.save()\n else :\n try :\n place = get_object_or_404(Place, name=place_name)\n except :\n place = Place()\n place.name = place_name\n place.save()\n instance.place = place\n instance.admin_comment = request.POST['comment']\n \n instance.status = '3'\n instance.save()\n statusMail('단체', instance, '승인')\n return redirect('manager:group_form', reservation_id = reservation_id)\n else :\n return redirect('manager:login')\n\ndef admin_regular_list(request):\n if request.user.is_authenticated:\n instances = RegularReservation.objects.all().order_by('-pk')\n page = request.GET.get('page', '1')\n paginator = Paginator(instances, '10')\n page_obj = paginator.page(page)\n\n tourdates = RegularDate.objects.filter(date__range=[datetime.datetime.today(), datetime.datetime.today() + datetime.timedelta(days=91)]).order_by('date')\n return render(request, 'manager/regular_list_admin.html', \n {\n 'page_obj': page_obj,\n 'tourdates': tourdates,\n })\n else:\n return redirect('manager:login')\n\ndef admin_regular_form(request, reservation_id):\n instance = get_object_or_404(RegularReservation, pk = reservation_id)\n if instance.age == 'u':\n age = '14세이상'\n else :\n age = '14세미만'\n if instance.grade == 'n':\n grade = '기타'\n else :\n grade = str(instance.grade) + '학년'\n places = Place.objects.all()\n return render(request, 'manager/regular_form.html', \n {\n 'reservation' : instance,\n 'age' : age,\n 'grade' : grade,\n 'places' : places\n })\n\ndef regular_status_change(request, reservation_id):\n if request.user.is_authenticated:\n instance = get_object_or_404(RegularReservation, pk=reservation_id)\n if request.method == \"POST\":\n instance.status = request.POST['status']\n if request.POST['status'] == '2':\n place_name = request.POST['place']\n if place_name == '':\n try :\n place = get_object_or_404(Place, name='팔정도 코끼리상 앞')\n except : \n place = Place()\n place.name = '팔정도 코끼리상 앞'\n place.save()\n else :\n try :\n place = get_object_or_404(Place, name=place_name)\n except :\n place = Place()\n place.name = place_name\n place.save()\n instance.place = place\n statusMail('정기', instance, '승인')\n else :\n instance.place = None\n statusMail('정기', instance, '거부')\n instance.admin_comment = request.POST['comment']\n instance.save()\n return redirect('manager:admin_regular_form', reservation_id=reservation_id)\n else:\n return redirect('manager:login')\n\ndef regulardate_cud(request, date_id=None):\n if request.user.is_authenticated:\n if request.method == 'POST':\n if request.POST['pk'] == 'none':\n instance = RegularDate()\n year = request.POST['year']\n month = request.POST['month']\n day = request.POST['day'] \n else:\n instance = get_object_or_404(RegularDate, pk=request.POST['pk'])\n if request.POST['year'] != '':\n year = request.POST['year']\n else :\n year = str(instance.date.year)\n if request.POST['month'] != '':\n month = request.POST['month']\n else :\n month = str(instance.date.month)\n if request.POST['day'] != '':\n day = request.POST['day'] \n else :\n day = 
str(instance.date.day)\n instance.date = year+'-'+month+'-'+day\n instance.save()\n return redirect('manager:admin_regular_list')\n else:\n return redirect('manager:login')","repo_name":"ustar1210/Donggam","sub_path":"donggam/manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"35848137841","text":"# common.py\nfrom .core import *\nfrom .helpers import DelimitedList, any_open_tag, any_close_tag\nfrom datetime import datetime\n\n\n# some other useful expressions - using lower-case class name since we are really using this as a namespace\nclass pyparsing_common:\n \"\"\"Here are some common low-level expressions that may be useful in\n jump-starting parser development:\n\n - numeric forms (:class:`integers`, :class:`reals`,\n :class:`scientific notation`)\n - common :class:`programming identifiers`\n - network addresses (:class:`MAC`,\n :class:`IPv4`, :class:`IPv6`)\n - ISO8601 :class:`dates` and\n :class:`datetime`\n - :class:`UUID`\n - :class:`comma-separated list`\n - :class:`url`\n\n Parse actions:\n\n - :class:`convert_to_integer`\n - :class:`convert_to_float`\n - :class:`convert_to_date`\n - :class:`convert_to_datetime`\n - :class:`strip_html_tags`\n - :class:`upcase_tokens`\n - :class:`downcase_tokens`\n\n Example::\n\n pyparsing_common.number.run_tests('''\n # any int or real number, returned as the appropriate type\n 100\n -100\n +100\n 3.14159\n 6.02e23\n 1e-12\n ''')\n\n pyparsing_common.fnumber.run_tests('''\n # any int or real number, returned as float\n 100\n -100\n +100\n 3.14159\n 6.02e23\n 1e-12\n ''')\n\n pyparsing_common.hex_integer.run_tests('''\n # hex numbers\n 100\n FF\n ''')\n\n pyparsing_common.fraction.run_tests('''\n # fractions\n 1/2\n -3/4\n ''')\n\n pyparsing_common.mixed_integer.run_tests('''\n # mixed fractions\n 1\n 1/2\n -3/4\n 1-3/4\n ''')\n\n import uuid\n pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID))\n pyparsing_common.uuid.run_tests('''\n # uuid\n 12345678-1234-5678-1234-567812345678\n ''')\n\n prints::\n\n # any int or real number, returned as the appropriate type\n 100\n [100]\n\n -100\n [-100]\n\n +100\n [100]\n\n 3.14159\n [3.14159]\n\n 6.02e23\n [6.02e+23]\n\n 1e-12\n [1e-12]\n\n # any int or real number, returned as float\n 100\n [100.0]\n\n -100\n [-100.0]\n\n +100\n [100.0]\n\n 3.14159\n [3.14159]\n\n 6.02e23\n [6.02e+23]\n\n 1e-12\n [1e-12]\n\n # hex numbers\n 100\n [256]\n\n FF\n [255]\n\n # fractions\n 1/2\n [0.5]\n\n -3/4\n [-0.75]\n\n # mixed fractions\n 1\n [1]\n\n 1/2\n [0.5]\n\n -3/4\n [-0.75]\n\n 1-3/4\n [1.75]\n\n # uuid\n 12345678-1234-5678-1234-567812345678\n [UUID('12345678-1234-5678-1234-567812345678')]\n \"\"\"\n\n convert_to_integer = token_map(int)\n \"\"\"\n Parse action for converting parsed integers to Python int\n \"\"\"\n\n convert_to_float = token_map(float)\n \"\"\"\n Parse action for converting parsed numbers to Python float\n \"\"\"\n\n integer = Word(nums).set_name(\"integer\").set_parse_action(convert_to_integer)\n \"\"\"expression that parses an unsigned integer, returns an int\"\"\"\n\n hex_integer = (\n Word(hexnums).set_name(\"hex integer\").set_parse_action(token_map(int, 16))\n )\n \"\"\"expression that parses a hexadecimal integer, returns an int\"\"\"\n\n signed_integer = (\n Regex(r\"[+-]?\\d+\")\n .set_name(\"signed integer\")\n .set_parse_action(convert_to_integer)\n )\n \"\"\"expression that parses an integer with optional leading sign, returns an 
int\"\"\"\n\n fraction = (\n signed_integer().set_parse_action(convert_to_float)\n + \"/\"\n + signed_integer().set_parse_action(convert_to_float)\n ).set_name(\"fraction\")\n \"\"\"fractional expression of an integer divided by an integer, returns a float\"\"\"\n fraction.add_parse_action(lambda tt: tt[0] / tt[-1])\n\n mixed_integer = (\n fraction | signed_integer + Opt(Opt(\"-\").suppress() + fraction)\n ).set_name(\"fraction or mixed integer-fraction\")\n \"\"\"mixed integer of the form 'integer - fraction', with optional leading integer, returns float\"\"\"\n mixed_integer.add_parse_action(sum)\n\n real = (\n Regex(r\"[+-]?(?:\\d+\\.\\d*|\\.\\d+)\")\n .set_name(\"real number\")\n .set_parse_action(convert_to_float)\n )\n \"\"\"expression that parses a floating point number and returns a float\"\"\"\n\n sci_real = (\n Regex(r\"[+-]?(?:\\d+(?:[eE][+-]?\\d+)|(?:\\d+\\.\\d*|\\.\\d+)(?:[eE][+-]?\\d+)?)\")\n .set_name(\"real number with scientific notation\")\n .set_parse_action(convert_to_float)\n )\n \"\"\"expression that parses a floating point number with optional\n scientific notation and returns a float\"\"\"\n\n # streamlining this expression makes the docs nicer-looking\n number = (sci_real | real | signed_integer).setName(\"number\").streamline()\n \"\"\"any numeric expression, returns the corresponding Python type\"\"\"\n\n fnumber = (\n Regex(r\"[+-]?\\d+\\.?\\d*([eE][+-]?\\d+)?\")\n .set_name(\"fnumber\")\n .set_parse_action(convert_to_float)\n )\n \"\"\"any int or real number, returned as float\"\"\"\n\n identifier = Word(identchars, identbodychars).set_name(\"identifier\")\n \"\"\"typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')\"\"\"\n\n ipv4_address = Regex(\n r\"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}\"\n ).set_name(\"IPv4 address\")\n \"IPv4 address (``0.0.0.0 - 255.255.255.255``)\"\n\n _ipv6_part = Regex(r\"[0-9a-fA-F]{1,4}\").set_name(\"hex_integer\")\n _full_ipv6_address = (_ipv6_part + (\":\" + _ipv6_part) * 7).set_name(\n \"full IPv6 address\"\n )\n _short_ipv6_address = (\n Opt(_ipv6_part + (\":\" + _ipv6_part) * (0, 6))\n + \"::\"\n + Opt(_ipv6_part + (\":\" + _ipv6_part) * (0, 6))\n ).set_name(\"short IPv6 address\")\n _short_ipv6_address.add_condition(\n lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8\n )\n _mixed_ipv6_address = (\"::ffff:\" + ipv4_address).set_name(\"mixed IPv6 address\")\n ipv6_address = Combine(\n (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(\n \"IPv6 address\"\n )\n ).set_name(\"IPv6 address\")\n \"IPv6 address (long, short, or mixed form)\"\n\n mac_address = Regex(\n r\"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\\1[0-9a-fA-F]{2}){4}\"\n ).set_name(\"MAC address\")\n \"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)\"\n\n @staticmethod\n def convert_to_date(fmt: str = \"%Y-%m-%d\"):\n \"\"\"\n Helper to create a parse action for converting parsed date string to Python datetime.date\n\n Params -\n - fmt - format to be passed to datetime.strptime (default= ``\"%Y-%m-%d\"``)\n\n Example::\n\n date_expr = pyparsing_common.iso8601_date.copy()\n date_expr.set_parse_action(pyparsing_common.convert_to_date())\n print(date_expr.parse_string(\"1999-12-31\"))\n\n prints::\n\n [datetime.date(1999, 12, 31)]\n \"\"\"\n\n def cvt_fn(ss, ll, tt):\n try:\n return datetime.strptime(tt[0], fmt).date()\n except ValueError as ve:\n raise ParseException(ss, ll, str(ve))\n\n return cvt_fn\n\n @staticmethod\n def convert_to_datetime(fmt: str = \"%Y-%m-%dT%H:%M:%S.%f\"):\n \"\"\"Helper to create a parse action for converting parsed\n datetime string to Python datetime.datetime\n\n Params -\n - fmt - format to be passed to datetime.strptime (default= ``\"%Y-%m-%dT%H:%M:%S.%f\"``)\n\n Example::\n\n dt_expr = pyparsing_common.iso8601_datetime.copy()\n dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())\n print(dt_expr.parse_string(\"1999-12-31T23:59:59.999\"))\n\n prints::\n\n [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]\n \"\"\"\n\n def cvt_fn(s, l, t):\n try:\n return datetime.strptime(t[0], fmt)\n except ValueError as ve:\n raise ParseException(s, l, str(ve))\n\n return cvt_fn\n\n iso8601_date = Regex(\n r\"(?P\\d{4})(?:-(?P\\d\\d)(?:-(?P\\d\\d))?)?\"\n ).set_name(\"ISO8601 date\")\n \"ISO8601 date (``yyyy-mm-dd``)\"\n\n iso8601_datetime = Regex(\n r\"(?P\\d{4})-(?P\\d\\d)-(?P\\d\\d)[T ](?P\\d\\d):(?P\\d\\d)(:(?P\\d\\d(\\.\\d*)?)?)?(?PZ|[+-]\\d\\d:?\\d\\d)?\"\n ).set_name(\"ISO8601 datetime\")\n \"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``\"\n\n uuid = Regex(r\"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}\").set_name(\"UUID\")\n \"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)\"\n\n _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()\n\n @staticmethod\n def strip_html_tags(s: str, l: int, tokens: ParseResults):\n \"\"\"Parse action to remove HTML tags from web page HTML source\n\n Example::\n\n # strip HTML links from normal text\n text = 'More info at the pyparsing wiki page'\n td, td_end = make_html_tags(\"TD\")\n table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)(\"body\") + td_end\n print(table_text.parse_string(text).body)\n\n Prints::\n\n More info at the pyparsing wiki page\n \"\"\"\n return pyparsing_common._html_stripper.transform_string(tokens[0])\n\n _commasepitem = (\n Combine(\n OneOrMore(\n ~Literal(\",\")\n + ~LineEnd()\n + Word(printables, exclude_chars=\",\")\n + Opt(White(\" \\t\") + ~FollowedBy(LineEnd() | \",\"))\n )\n )\n .streamline()\n .set_name(\"commaItem\")\n )\n comma_separated_list = DelimitedList(\n Opt(quoted_string.copy() | _commasepitem, default=\"\")\n ).set_name(\"comma separated list\")\n \"\"\"Predefined expression of 1 or more printable words or quoted strings, separated by commas.\"\"\"\n\n upcase_tokens = staticmethod(token_map(lambda t: t.upper()))\n \"\"\"Parse action to convert tokens to upper case.\"\"\"\n\n downcase_tokens = staticmethod(token_map(lambda t: t.lower()))\n \"\"\"Parse action to convert tokens to lower case.\"\"\"\n\n # fmt: off\n url = Regex(\n # https://mathiasbynens.be/demo/url-regex\n # https://gist.github.com/dperini/729294\n r\"(?P\" +\n # protocol identifier 
 # fmt: off\n url = Regex(\n # https://mathiasbynens.be/demo/url-regex\n # https://gist.github.com/dperini/729294\n r\"(?P<url>\" +\n # protocol identifier (optional)\n # short syntax // still required\n r\"(?:(?:(?P<scheme>https?|ftp):)?\\/\\/)\" +\n # user:pass BasicAuth (optional)\n r\"(?:(?P<auth>\\S+(?::\\S*)?)@)?\" +\n r\"(?P<host>\" +\n # IP address exclusion\n # private & local networks\n r\"(?!(?:10|127)(?:\\.\\d{1,3}){3})\" +\n r\"(?!(?:169\\.254|192\\.168)(?:\\.\\d{1,3}){2})\" +\n r\"(?!172\\.(?:1[6-9]|2\\d|3[0-1])(?:\\.\\d{1,3}){2})\" +\n # IP address dotted notation octets\n # excludes loopback network 0.0.0.0\n # excludes reserved space >= 224.0.0.0\n # excludes network & broadcast addresses\n # (first & last IP address of each class)\n r\"(?:[1-9]\\d?|1\\d\\d|2[01]\\d|22[0-3])\" +\n r\"(?:\\.(?:1?\\d{1,2}|2[0-4]\\d|25[0-5])){2}\" +\n r\"(?:\\.(?:[1-9]\\d?|1\\d\\d|2[0-4]\\d|25[0-4]))\" +\n r\"|\" +\n # host & domain names, may end with dot\n # can be replaced by a shortest alternative\n # (?![-_])(?:[-\\w\\u00a1-\\uffff]{0,63}[^-_]\\.)+\n r\"(?:\" +\n r\"(?:\" +\n r\"[a-z0-9\\u00a1-\\uffff]\" +\n r\"[a-z0-9\\u00a1-\\uffff_-]{0,62}\" +\n r\")?\" +\n r\"[a-z0-9\\u00a1-\\uffff]\\.\" +\n r\")+\" +\n # TLD identifier name, may end with dot\n r\"(?:[a-z\\u00a1-\\uffff]{2,}\\.?)\" +\n r\")\" +\n # port number (optional)\n r\"(:(?P<port>\\d{2,5}))?\" +\n # resource path (optional)\n r\"(?P<path>\\/[^?# ]*)?\" +\n # query string (optional)\n r\"(\\?(?P<query>[^#]*))?\" +\n # fragment (optional)\n r\"(#(?P<fragment>\\S*))?\" +\n r\")\"\n ).set_name(\"url\")\n \"\"\"URL (http/https/ftp scheme)\"\"\"\n # fmt: on\n\n # pre-PEP8 compatibility names\n convertToInteger = convert_to_integer\n \"\"\"Deprecated - use :class:`convert_to_integer`\"\"\"\n convertToFloat = convert_to_float\n \"\"\"Deprecated - use :class:`convert_to_float`\"\"\"\n convertToDate = convert_to_date\n \"\"\"Deprecated - use :class:`convert_to_date`\"\"\"\n convertToDatetime = convert_to_datetime\n \"\"\"Deprecated - use :class:`convert_to_datetime`\"\"\"\n stripHTMLTags = strip_html_tags\n \"\"\"Deprecated - use :class:`strip_html_tags`\"\"\"\n upcaseTokens = upcase_tokens\n \"\"\"Deprecated - use :class:`upcase_tokens`\"\"\"\n downcaseTokens = downcase_tokens\n \"\"\"Deprecated - use :class:`downcase_tokens`\"\"\"\n\n\n_builtin_exprs = [\n v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)\n]\n","repo_name":"pypa/pipenv","sub_path":"pipenv/patched/pip/_vendor/pyparsing/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":13387,"program_lang":"python","lang":"en","doc_type":"code","stars":24273,"dataset":"github-code","pt":"34"}
{"seq_id":"22554198149","text":"from django.test import TestCase\n\nfrom scorings.models import Team, Match\n\n\nclass TestScoring(TestCase):\n\n def setUp(self):\n # CREATE TEAMS\n self.team_fantastics = Team.objects.create(name=\"Fantastics\")\n self.team_crazy_ones = Team.objects.create(name=\"Crazy Ones\")\n self.team_rebels = Team.objects.create(name=\"Rebels\")\n self.team_fc_super = Team.objects.create(name=\"FC Super\")\n self.team_misfits = Team.objects.create(name=\"Misfits\")\n\n # CREATE MATCHES\n self.list_match = [\n {\n \"team_a\": self.team_crazy_ones,\n \"score_a\": 3,\n \"team_b\": self.team_rebels,\n \"score_b\": 3\n },\n {\n \"team_a\": self.team_fantastics,\n \"score_a\": 1,\n \"team_b\": self.team_fc_super,\n \"score_b\": 0\n },\n {\n \"team_a\": self.team_crazy_ones,\n \"score_a\": 1,\n \"team_b\": self.team_fc_super,\n \"score_b\": 1\n },\n {\n \"team_a\": self.team_fantastics,\n \"score_a\": 3,\n \"team_b\": self.team_rebels,\n \"score_b\": 1\n },\n {\n \"team_a\": self.team_crazy_ones,\n \"score_a\": 4,\n \"team_b\": 
self.team_rebels,\n \"score_b\": 0\n }\n ]\n for match in self.list_match:\n new_match = Match.objects.create(**match)\n\n def test_team_models(self):\n team_count = Team.objects.count()\n self.assertEqual(team_count, 5)\n match_count = Match.objects.count()\n self.assertEqual(match_count, 5)\n list_team = Team.objects.all().values_list('name', flat=True)\n self.assertEqual(list_team[0], \"Fantastics\")\n self.assertEqual(list_team[1], \"Crazy Ones\")\n self.assertEqual(list_team[2], \"FC Super\")\n self.assertEqual(list_team[3], \"Rebels\")\n self.assertEqual(list_team[4], \"Misfits\")\n\n def test_total_points(self):\n self.assertEqual(self.team_fantastics.get_total_points(), 6)\n self.assertEqual(self.team_crazy_ones.get_total_points(), 5)\n self.assertEqual(self.team_fc_super.get_total_points(), 1)\n self.assertEqual(self.team_rebels.get_total_points(), 1)\n self.assertEqual(self.team_misfits.get_total_points(), 0)\n\n def test_add_new_match(self):\n new_match_score = {\n \"team_a\": self.team_misfits,\n \"score_a\": 2,\n \"team_b\": self.team_rebels,\n \"score_b\": 0\n }\n new_match = Match.objects.create(**new_match_score)\n list_team = Team.objects.all().values_list('name', flat=True)\n self.assertEqual(list_team[0], \"Fantastics\")\n self.assertEqual(list_team[1], \"Crazy Ones\")\n self.assertEqual(list_team[2], \"Misfits\")\n self.assertEqual(list_team[3], \"FC Super\")\n self.assertEqual(list_team[4], \"Rebels\")\n\n\n\n\n\n\n\n","repo_name":"maxysilaen/core-games","sub_path":"games/scorings/tests/test_scorings_models.py","file_name":"test_scorings_models.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70787589218","text":"# USAGE\n# python train_succulents_detector.py --dataset dataset\n\n# import the necessary packages\n\nTF_CPP_MIN_LOG_LEVEL=2\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n# from tensorflow.keras.applications import MobileNetV2\n# from tensorflow.keras.layers import AveragePooling2D\n# from tensorflow.keras.layers import Dropout\n# from tensorflow.keras.layers import Flatten\n# from tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\n# from tensorflow.keras.models import Model\n# from tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing.image import load_img\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nfrom tensorflow.keras.models import load_model\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sn\nimport pandas as pd\n\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport os\n\n\n \n \n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", default = 'dataset',\n\thelp=\"path to input dataset\")\nap.add_argument(\"-p\", \"--plot\", type=str, default=\"plot.png\",\n\thelp=\"path to output loss/accuracy plot\")\nap.add_argument(\"-m\", \"--model\", type=str,\n\tdefault=\"succulents2.model\",\n\thelp=\"path to output succulents detector model\")\nargs = 
vars(ap.parse_args())\n\n# initialize the initial learning rate, number of epochs to train for,\n# and batch size\nINIT_LR = 1e-5\nEPOCHS = 2\nBS = 12\n\n# grab the list of images in our dataset directory, then initialize\n# the list of data (i.e., images) and class images\nprint(\"[INFO] loading images...\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\ndata = []\nlabels = []\n\n# loop over the image paths\nfor imagePath in imagePaths:\n\t# extract the class label from the filename\n\tlabel = imagePath.split(os.path.sep)[-2]\n\n\t# load the input image (224x224) and preprocess it\n\timage = load_img(imagePath, target_size=(224, 224))\n\timage = img_to_array(image)\n\timage = preprocess_input(image)\n\n\t# update the data and labels lists, respectively\n\tdata.append(image)\n\tlabels.append(label)\n\n\n\n\n# convert the data and labels to NumPy arrays\ndata = np.array(data, dtype=\"float32\")\nlabels = np.array(labels)\n\n# perform one-hot encoding on the labels\nlb = LabelEncoder()\nlabels = lb.fit_transform(labels)\n#labels = lb.transform(labels)\nlabels = to_categorical(labels)\n\n# partition the data into training and testing splits using 75% of\n# the data for training and the remaining 25% for testing\n\n\n(trainX, testX, trainY, testY) = train_test_split(data, labels,\n\ttest_size=0.20, stratify=labels, random_state=42)\ndel data\ndel labels\ndel imagePaths\n# construct the training image generator for data augmentation\naug = ImageDataGenerator(\n\trotation_range=20,\n\tzoom_range=0.15,\n\twidth_shift_range=0.2,\n\theight_shift_range=0.2,\n\tshear_range=0.15,\n \thorizontal_flip=True,\n\tfill_mode=\"nearest\")\n\n#modelo artigo\n\n\n# base = tf.keras.applications.VGG19(input_shape=(224, 224, 3),\n# include_top = False,\n# weights ='imagenet',\n# pooling = 'max')\n\nbase = tf.keras.applications.MobileNetV2(weights=\"imagenet\",\n include_top=False,\n input_tensor=Input(shape=(224, 224, 3)))\n\n\nbase.summary()\nfor layer in base.layers:\n layer.trainable = False\ncnn = tf.keras.models.Sequential()\n\ncnn.add(base)\n\n# cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding=\"same\", activation=\"relu\", input_shape=[224, 224, 3]))\n# cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))\n\n# cnn.add(tf.keras.layers.Dropout(.25))\n\n\n# cnn.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding=\"same\", activation=\"relu\"))\n# cnn.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding=\"same\", activation=\"relu\"))\n# cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))\n\n# cnn.add(tf.keras.layers.Dropout(.25))\n\n# cnn.add(tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding=\"same\", activation=\"relu\"))\n# cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))\n\n# cnn.add(tf.keras.layers.Dropout(.25))\n\n# cnn.add(tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding=\"same\", activation=\"relu\"))\n# cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))\n\n# cnn.add(tf.keras.layers.Dropout(.25))\ncnn.add(tf.keras.layers.Flatten(input_shape=base.output_shape[1:]))\n\ncnn.add(tf.keras.layers.Dense(units=128*2, activation='relu'))\ncnn.add(tf.keras.layers.Dropout(.5))\ncnn.add(tf.keras.layers.Dense(units=128, activation='relu'))\ncnn.add(tf.keras.layers.Dropout(.5))\n\ncnn.add(tf.keras.layers.Dense(units=10, activation='softmax'))\n\n\ncnn.summary()\n\n# compile our model\nprint(\"[INFO] compiling model...\")\n# opt = Adam(lr=INIT_LR, 
decay=INIT_LR / EPOCHS)\n\nopt = tf.keras.optimizers.SGD(lr=INIT_LR, momentum=0.9)\n# categorical (not binary) cross-entropy: the head is a 10-way softmax trained on one-hot labels\ncnn.compile(loss=\"categorical_crossentropy\", optimizer=opt,\tmetrics=[\"accuracy\"])\n\n# train the head of the network\nprint(\"[INFO] training head...\")\ncallback = EarlyStopping(monitor='loss', patience=3)\n\nH = cnn.fit(\n\taug.flow(trainX, trainY, batch_size=BS),\n\tsteps_per_epoch=len(trainX) // BS,\n\tvalidation_data=(testX, testY),\n\tvalidation_steps=len(testX) // BS,\n\tepochs=EPOCHS, \n callbacks = [callback]#,\n # workers =4,\n # use_multiprocessing= True\n )\n\n\n\n# make predictions on the testing set\nprint(\"[INFO] evaluating network...\")\npredIdxs = cnn.predict(testX, batch_size=BS)\n\n\n\n# for each image in the testing set we need to find the index of the\n# label with corresponding largest predicted probability\npredIdxs = np.argmax(predIdxs, axis=1)\n\n\n\n# show a nicely formatted classification report\nprint(classification_report(testY.argmax(axis=1), predIdxs,\n\ttarget_names=lb.classes_))\n\n\ncm = confusion_matrix(testY.argmax(axis=1), predIdxs)\ndf_cm = pd.DataFrame(cm, range(10), range(10))\ndf_cm.index = lb.classes_\ndf_cm.columns= lb.classes_\nsn.set(font_scale=1.4) # for label size\nsn.heatmap(df_cm, annot=True, annot_kws={\"size\": 16})\nplt.show()\nplt.clf()\n\n# serialize the model to disk\nprint(\"[INFO] saving succulents detector model...\")\ncnn.save(args[\"model\"], save_format=\"h5\")\n\n# reload the saved file only to verify it; keep H (the History returned by fit),\n# which holds the per-epoch metrics plotted below\nsaved_model = load_model(args[\"model\"])\n\n# plot the training loss and accuracy\nN = EPOCHS\nplt.style.use(\"ggplot\")\n\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\nplt.title(\"Training Loss\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss\")\nplt.legend(loc=\"lower left\")\nplt.show()\nplt.clf()\n\n\n\nplt.style.use(\"ggplot\")\nplt.plot(np.arange(0, N), H.history[\"accuracy\"], label=\"train_acc\")\nplt.plot(np.arange(0, N), H.history[\"val_accuracy\"], label=\"val_acc\")\nplt.title(\"Training Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Accuracy\")\nplt.legend(loc=\"lower left\")\nplt.show()\n\n# plt.savefig(args[\"plot\"])\n","repo_name":"PDunice/SuculentasAI","sub_path":"train_succulents_detector_artigo.py","file_name":"train_succulents_detector_artigo.py","file_ext":"py","file_size_in_byte":7344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"13011966576","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\"\"\"\r\nMORPHOLOGICAL OPERATIONS\r\n\r\nWe will learn what morphological operations such as erosion, dilation,\r\nopening, closing and the morphological gradient are.\r\n\r\nErosion:\r\nThe basic idea of erosion is just like soil erosion: it erodes away the boundaries of the foreground object.\r\n\r\nDilation:\r\nIt is the exact opposite of erosion.\r\nIt enlarges the white region in the image.\r\n\r\nOpening:\r\nOpening is erosion followed by dilation.\r\nIt is useful for removing noise.\r\n\r\nClosing:\r\nClosing is the exact opposite of opening.\r\nIt is dilation followed by erosion (dilation is applied first, then erosion).\r\nIt is useful for closing small holes inside foreground objects, or small black points on an object.\r\n\r\nMorphological Gradient:\r\nIt is the difference between the dilation and the erosion of an image.\r\n\r\n\"\"\"\r\n\r\n# load the image\r\nimg = cv2.imread(\"pictures/logo.jpg\",0)\r\n
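# Illustrative aside (an addition, not part of the original tutorial): besides\r\n# the plain all-ones kernel built below with np.ones, OpenCV can also generate\r\n# shaped structuring elements, e.g. an elliptical 5x5 kernel:\r\nkernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\r\n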
\"gray\"), plt.axis(\"off\"), plt.title(\"original image\")\r\n\r\n#erozyon\r\nkernel = np.ones((5,5), dtype=np.uint8)\r\nresult = cv2.erode(img, kernel, iterations=6)\r\nplt.figure(), plt.imshow(result, cmap= \"gray\"), plt.axis(\"off\"), plt.title(\"Erozyon image\")\r\n\r\n#Genişleme (dilation)\r\nresult2 = cv2.dilate(img, kernel, iterations=6)\r\nplt.figure(), plt.imshow(result2, cmap= \"gray\"), plt.axis(\"off\"), plt.title(\"genisleme image\")\r\n\r\n#white noise\r\nwhiteNoise = np.random.randint(0,2, size = img.shape[:2])\r\nwhiteNoise = whiteNoise*255\r\nplt.figure(), plt.imshow(whiteNoise, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"white noise\")\r\n\r\nnoise_img = whiteNoise + img\r\nplt.figure(), plt.imshow(noise_img, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"img whit white noise\")\r\n\r\n#açılma\r\nopenning = cv2.morphologyEx(noise_img.astype(np.float32), cv2.MORPH_OPEN, kernel)\r\nplt.figure(), plt.imshow(openning, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"acilma\")\r\n\r\n#black noise\r\nblackNoise = np.random.randint(0,2, size = img.shape[:2])\r\nblackNoise = blackNoise*-255\r\nplt.figure(), plt.imshow(blackNoise, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"black noise\")\r\n\r\nblack_noise_img = blackNoise + img\r\nblack_noise_img[black_noise_img <= -245] = 0\r\nplt.figure(), plt.imshow(black_noise_img, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"black noise image \")\r\n\r\n# kapatma\r\nclosing = cv2.morphologyEx(black_noise_img.astype(np.float32), cv2.MORPH_CLOSE, kernel)\r\nplt.figure(), plt.imshow(closing, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"kapanma\")\r\n\r\n#gradient \r\ngradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\r\nplt.figure(), plt.imshow(gradient, cmap=\"gray\"), plt.axis(\"off\"), plt.title(\"grad\"), plt.show() \r\n\r\n\r\n\r\n","repo_name":"oguzhan3s/OpenCV","sub_path":"Tutorial/opencv_11.py","file_name":"opencv_11.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"40284714364","text":"\"\"\"\nThis module creates the gym environment wrapped arround the microgrid core\n\"\"\"\n\nfrom abc import abstractmethod\nfrom typing import Tuple, Union\n\nimport gym\nimport numpy as np\nfrom gym import spaces\n\nfrom easygrid.microgrid import Microgrid\nfrom easygrid.types import MicrogridConfig\n\n\nclass GridEnv(gym.Env):\n \"\"\"\n Gym Environment wrapper of the Microgrid env.\n\n ...\n\n Attributes\n ----------\n microgrid : easygrid.Microgrid\n The underlyind microgrid object which will actually handle the \\\n computations. 
See this class for more details\n Methods\n -------\n step : executes the action given by the agent (or something else) and \\\n returns information about the state of the environment and reward\n reset : resets the environment to an initial state and returns this state.\n render : displays the environment (not supported atm)\n close : TBD\n \"\"\"\n\n metadata = {\"render.modes\": [\"human\"]}\n\n def __init__(self, config: Union[MicrogridConfig, dict]) -> None:\n\n \"\"\"\n Creates the relevant attributes based on the config\n\n Args:\n config (dict): Configuration for the underlying microgrid.\n \"\"\"\n super().__init__()\n self.microgrid = Microgrid(MicrogridConfig.parse_obj(config))\n self.observation_space = spaces.Box(\n low=self.microgrid.min_values,\n high=self.microgrid.max_values,\n dtype=np.float32,\n )\n # Actions are normalized to -1,1 and scaled back in the microgrid\n self.action_space = spaces.Box(\n low=-1,\n high=1,\n shape=(len(self.microgrid.max_actions),),\n dtype=np.float32,\n )\n\n @property\n def config(self) -> MicrogridConfig:\n \"\"\"\n Returns:\n MicrogridConfig: The current underlying config\n \"\"\"\n return self.microgrid.config\n\n def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, str]:\n \"\"\"\n Executes the action given by the agent (or something else) and \\\n returns information about the state of the environment and reward\n\n Args:\n action (np.ndarray): The action to be processed by the environment,\\\n it should contain:\n - How much to store/discharge in the battery (float)\n - How much to sell/buy from the grid (float)\n\n Returns:\n Tuple[np.ndarray, float, bool, NoneType]: The observation, reward,\\\n done and info following gym template\n \"\"\"\n observation, done, costs = self.microgrid.run_timestep(action)\n reward = GridEnv.compute_reward(costs)\n info = \"\"\n return observation, reward, done, info\n\n def reset(self) -> np.ndarray:\n \"\"\"\n Resets the environment to an initial state and returns this state.\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n return self.microgrid.reset()\n\n def render(self, mode=\"human\"):\n \"\"\"TBD\n\n Args:\n mode (str, optional): _description_. 
Defaults to \"human\".\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n raise NotImplementedError\n\n def close(self):\n \"\"\"\n TBD\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def compute_reward(costs: Tuple[float, float, float]) -> float:\n \"\"\"\n Computes the reward/cost based on the precomputed costs.\n TBD : add the possibility to integrate the state of the microgrid\n\n Args:\n costs (Tuple[float, float, float]): The overcharge, grid and\n error costs\n\n Returns:\n float: The reward/cost\n \"\"\"\n reward = sum(costs)\n return reward\n","repo_name":"YannBerthelot/easygrid","sub_path":"src/easygrid/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"33726400339","text":"spam = ['bans', 'bags', 'potato', 'yam']\n\ndef co(spam):\n rom = \"\"\n spams = spam[0:3]\n \n for i in spams:\n rom += str(i + ', ')\n \n rom += 'and ' + str(spam[len(spam)-1])\n\n return rom\nprint(co(spam))","repo_name":"thedarknative/pythonjourney","sub_path":"slice.py","file_name":"slice.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"32278873636","text":"#this file contains the GamePiece, Pawn, and King classes\n\n#*******************************************************\nfrom config import redMoveDir, blackMoveDir\nfrom boardCheck import BoardCheck\n#*******************************************************\n\nclass GamePiece:\n def __init__(self, pColorIn, locIn, pieceGrid):\n self.type = \"None\"\n self.pColor = pColorIn\n self.loc = locIn\n self.lastRow = -2\n self.moveDir = -10\n self.boardCheck = BoardCheck()\n\n @property\n def type(self):\n return self._type\n @type.setter\n def type(self, val):\n self._type = val\n @property\n def pColor(self):\n return self._pColor\n @pColor.setter\n def pColor(self, val):\n self._pColor = val\n @property\n def loc(self):\n return self._loc\n @loc.setter\n def loc(self, val):\n self._loc = val\n @property\n def lastRow(self):\n return self._lastRow\n @lastRow.setter\n def lastRow(self, val):\n self._lastRow = val\n @property\n def moveDir(self):\n return self._moveDir\n @moveDir.setter\n def moveDir(self, val):\n self._moveDir = val\n\n\nclass Pawn(GamePiece):\n def __init__(self, pColorIn, locIn, pieceGrid):\n super().__init__(pColorIn, locIn, pieceGrid)\n self.type = \"pawn\"\n if pColorIn == \"red\":\n if redMoveDir == -1:\n self.lastRow = 0\n else:\n self.lastRow = 7\n self.moveDir = redMoveDir\n elif pColorIn == \"black\":\n if blackMoveDir == 1:\n self.lastRow = 7\n else:\n self.lastRow = 0\n self.moveDir = blackMoveDir\n else:\n self.lastRow = -2\n self.moveDir = -10\n\n if (self.boardCheck.isOpen(pieceGrid, self.loc) == False) or (self.boardCheck.isInvalid(self.loc) == True):\n print('error')\n\n\nclass King(GamePiece):\n def __init__(self, pColorIn, locIn, pieceGrid):\n super().__init__(pColorIn, locIn, pieceGrid)\n self.moveDir = 1 #1 vs -1 doesn't matter but picked one to make life easier\n self.type = \"king\"\n if pColorIn == \"red\":\n if redMoveDir == -1:\n self.lastRow = 0\n else:\n self.lastRow = 7\n elif pColorIn == \"black\":\n if blackMoveDir == 1:\n self.lastRow = 7\n else:\n self.lastRow = 0\n else:\n self.lastRow = -2\n self.moveDir = -10\n\n if (self.boardCheck.isOpen(pieceGrid, self.loc) == False) or (self.boardCheck.isInvalid(self.loc) 
== True):\n # report the invalid placement (this implementation prints instead of raising)\n print('error')\n\n\n","repo_name":"alyssabarth2237/Checkers_Game_Validation","sub_path":"gamePiece.py","file_name":"gamePiece.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"45771421371","text":"# encoding:utf-8\nimport copy\nimport random\nimport bisect # bisect_left handles binary-search lookups over a sorted list\nimport fractions # least-common-multiple (and gcd) helpers live here\nimport math\nimport sys\n\nmod = 10**9+7\nsys.setrecursionlimit(mod) # the recursion limit defaults to 1000\n\ndef LI(): return list(map(int, sys.stdin.readline().split()))\n\nN = int(input())\nA = [0 for i in range(N)]\nfor i in range(N):\n A[i] = int(input())\n\nA_c = copy.deepcopy(A)\nA_c.sort()\nfor i in range(N):\n if A_c[-1] == A[i]:\n print(A_c[-2])\n else:\n print(A_c[-1])\n","repo_name":"seven320/AtCoder","sub_path":"134/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"40241615179","text":"### Stack: based on built-in list\n# stack is LIFO\nmystack = [3, 4, 6, 7, 2, 1, 5]\nmystack\n\n# push - use append()\nmystack.append(9)\nmystack.append(0)\nmystack\n\n# pop - use pop()\nmystack.pop()\nmystack\n\n### Queue: based on deque() in collections\n# queue is FIFO\nfrom collections import deque\nmyqueue = deque([3, 4, 5, 6, 7])\nmyqueue\n\n# enqueue\nmyqueue.append(2)\nmyqueue\n\n# dequeue\nmyqueue.popleft()\nmyqueue\n","repo_name":"bangdasun/data-structures","sub_path":"python-implementation/stack-queue.py","file_name":"stack-queue.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"34"}
{"seq_id":"33240311962","text":"# print(\"Hello World\")\n\n# n1 = 5\n# n2 = 2\n# sum = n1 * n2\n# print(n1*n2)\n\n# number1 = int(input(\"enter first number: \"))\n# number2 = int(input(\"enter second number: \"))\n# sum = number1+number2\n# print(\"{0}+{1}={2}\".format(number1, number2, sum))\n\n# target = int(input(\"Enter the target number\"))\n# exponent = int(input(\"Enter the exponent\"))\n# result = 1\n# i = 0\n# while i < exponent:\n# \tresult *= target\n# \ti+=1\n# print(\"{0}^{1}={2}\".format(target, exponent, result))\n\n# ^^^ getting used to python ^^^\n\n#-----------SOLVE ARITHMETIC START HERE-------------\n\n#start input validation loop\nwhile True:\n\t#get equation from user\n\tequation = input(\"Enter a simple arithmetic equation: \") \n\t\n\t#prepare variables\n\toperatorLoc = -1\n\toperator = ' '\n\tvalidated = True\n\n\t#find the operator\n\toperatorLoc = equation.find(\"+\")\n\tif operatorLoc == -1:\n\t\toperatorLoc = equation.find(\"-\")\n\tif operatorLoc == -1:\n\t\toperatorLoc = equation.find(\"*\")\n\tif operatorLoc == -1:\n\t\toperatorLoc = equation.find(\"/\")\n\tif operatorLoc == -1: #validated input condition\n\t\tvalidated = False\n\t\tcontinue #no operator found: restart the loop (indexing with -1 below would misread, or crash on, the input)\n\n\n\t#capturing operator\n\toperator = equation[operatorLoc]\n\n\t#substring out the parts before and after the operator\n\tfirstPart = \"\"\n\tsecondPart = \"\"\n\tfirstPart = equation[0: operatorLoc]\n\ttry:\n\t\tfirstPart = int(firstPart)\n\texcept ValueError:\n\t\tprint(\"Please enter an Integer\")\n\t\tvalidated = False\n\tsecondPart = equation[operatorLoc+1:len(equation)]\n\ttry:\n\t\tsecondPart = int(secondPart)\n\texcept ValueError:\n\t\tprint(\"Please enter an Integer\")\n\t\tvalidated = False\n\n\tif validated == True:\n\t\tbreak\n#end input validation loop\n\n#prepare result variable\nresult = 0
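\n\n#illustrative aside (an addition, not in the original script): the if/elif chain\n#below could be replaced by a dictionary dispatch; it is left as a comment because\n#the script's own 'operator' variable would shadow Python's operator module:\n#\timport operator as op\n#\tops = {\"+\": op.add, \"-\": op.sub, \"*\": op.mul, \"/\": op.truediv}\n#\tresult = ops[operator](firstPart, secondPart)\n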
\n#perform arithmetic\nif operator == \"+\":\n\tresult = firstPart + secondPart\nelif operator == \"-\":\n\tresult = firstPart - secondPart\nelif operator == \"*\":\n\tresult = firstPart * secondPart\nelif operator == \"/\":\n\tresult = firstPart / secondPart\n\n#print result\nprint(\"{0}={1}\".format(equation, result))","repo_name":"spensers55/FirstPythonCode","sub_path":"Hello World/Hello_World.py","file_name":"Hello_World.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"73553514018","text":"import time\nimport subprocess\nimport yaml\nelapsed_time=0\nconfig=yaml.safe_load(open('wrangle_data.yaml'))\noutput_dir=config['output_folder']+'/'+config['analysis_dir']+'_run_stats'\nsubprocess.call(['mkdir', '-p', output_dir])\nstart_time=time.time()\nstill_running=True\noutput_path=output_dir+'/run_stats.txt'\noutput_file=open(output_path, 'w')\noutput_file.write(f'local_time\\telapsed_time\\tmemory_avail\\tstorage_avail\\t%cpu_iowait\\n')\n\ndef get_storage():\n\ttest=subprocess.check_output(['df', '-h']).decode()\n\ttest=test.split('\\n')\n\tfor line in test:\n\t\tline=line.split()\n\t\tif len(line)>1 and line[-1]=='/':\n\t\t\tstorage=line[-3]\n\t\t\tprint('available storage is', storage)\n\t\t\treturn(storage)\n\treturn 'storage not retrievable'\n\ndef get_memory():\n\ttest=subprocess.check_output(['free', '-g']).decode()\n\ttest=test.split('\\n')\n\tfor line in test:\n\t\tline=line.split()\n\t\tif len(line)>2 and line[0]=='Mem:':\n\t\t\tmemory=line[-1]+'G'\n\t\t\tprint('available memory is', memory)\n\t\t\treturn(memory)\n\treturn 'memory not retrievable'\n\ndef get_io():\n\ttest=subprocess.check_output(['iostat']).decode()\n\ttest=test.split('\\n')\n\tif len(test)>3:\n\t\tiowait=test[3].strip().split()[3]\n\t\tprint('percent of CPU time spent waiting for io is currently', iowait)\n\t\treturn iowait\n\treturn 'iowait not retrievable'\n\nwhile still_running:\n\tlocal=time.ctime().replace(' ', '_')\n\tcurrent=time.time()\n\telapsed_time=current-start_time\n\ttry:\n\t\toutput_file=open(output_path, 'a')\n\t\tsubprocess.check_output('pgrep -u '+'asimkin'+' snakemake', shell=True)\n\t\tmemory=get_memory()\n\t\tstorage=get_storage()\n\t\tiowait=get_io()\n\t\toutput_file.write(f'{local}\\t{elapsed_time}\\t{memory}\\t{storage}\\t{iowait}\\n')\n\t\toutput_file.close()\n\texcept Exception:\n\t\tprint('not running')\n\t\tstill_running=False\n\ttime.sleep(60)\n","repo_name":"bailey-lab/wrangler_snakemake","sub_path":"monitor_run.py","file_name":"monitor_run.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"14974383310","text":"\nfrom bparser import BParser\nfrom intbase import InterpreterBase, ErrorType\nfrom enum import Enum\n\n\nclass EnvironmentManager:\n \"\"\"\n The EnvironmentManager class maintains the lexical environment for a construct.\n In project 1, this is just a mapping between each variable (aka symbol)\n in a brewin program and the value of that variable - the value that's passed in can be\n anything you like. 
In our implementation we pass in a Value object which holds a type\n and a value (e.g., Int, 10).\n \"\"\"\n\n def __init__(self):\n self.environment = {}\n\n def get(self, symbol):\n \"\"\"\n Get data associated with variable name.\n \"\"\"\n if symbol in self.environment:\n return self.environment[symbol]\n\n return None\n\n def set(self, symbol, value):\n \"\"\"\n Set data associated with a variable name.\n \"\"\"\n self.environment[symbol] = value\n\n\nclass Type(Enum):\n \"\"\"Enum for all possible Brewin types.\"\"\"\n\n INT = 1\n BOOL = 2\n STRING = 3\n CLASS = 4\n NOTHING = 5\n\n\n# Represents a value, which has a type and its value\nclass Value:\n \"\"\"A representation for a value that contains a type tag.\"\"\"\n\n def __init__(self, value_type, value=None):\n self.__type = value_type\n self.__value = value\n\n def type(self):\n return self.__type\n\n def value(self):\n return self.__value\n\n def set(self, other):\n self.__type = other.type()\n self.__value = other.value()\n\n\ndef create_value(val):\n \"\"\"\n Create a Value object from a Python value.\n \"\"\"\n if val == InterpreterBase.TRUE_DEF:\n return Value(Type.BOOL, True)\n if val == InterpreterBase.FALSE_DEF:\n return Value(Type.BOOL, False)\n if val[0] == '\"':\n return Value(Type.STRING, val.strip('\"'))\n if val.lstrip('-').isnumeric():\n return Value(Type.INT, int(val))\n if val == InterpreterBase.NULL_DEF:\n return Value(Type.CLASS, None)\n if val == InterpreterBase.NOTHING_DEF:\n return Value(Type.NOTHING, None)\n return None\n\n\nclass MethodDef:\n \"\"\"\n Wrapper struct for the definition of a member method.\n \"\"\"\n\n def __init__(self, method_def):\n self.method_name = method_def[1]\n self.formal_params = method_def[2]\n self.code = method_def[3]\n\n\nclass FieldDef:\n \"\"\"\n Wrapper struct for the definition of a member field.\n \"\"\"\n\n def __init__(self, field_def):\n self.field_name = field_def[1]\n self.default_field_value = field_def[2]\n\n\nclass ClassDef:\n \"\"\"\n Holds definition for a class:\n - list of fields (and default values)\n - list of methods\n\n class definition: [class classname [field1 field2 ... 
method1 method2 ...]]\n \"\"\"\n\n def __init__(self, class_def, interpreter):\n self.interpreter = interpreter\n self.name = class_def[1]\n self.__create_field_list(class_def[2:])\n self.__create_method_list(class_def[2:])\n\n def get_fields(self):\n \"\"\"\n Get a list of FieldDefs for *all* fields in the class.\n \"\"\"\n return self.fields\n\n def get_methods(self):\n \"\"\"\n Get a list of MethodDefs for *all* fields in the class.\n \"\"\"\n return self.methods\n\n def __create_field_list(self, class_body):\n self.fields = []\n fields_defined_so_far = set()\n for member in class_body:\n if member[0] == InterpreterBase.FIELD_DEF:\n if member[1] in fields_defined_so_far: # redefinition\n self.interpreter.error(\n ErrorType.NAME_ERROR,\n \"duplicate field \" + member[1],\n member[0].line_num,\n )\n self.fields.append(FieldDef(member))\n fields_defined_so_far.add(member[1])\n\n def __create_method_list(self, class_body):\n self.methods = []\n methods_defined_so_far = set()\n for member in class_body:\n if member[0] == InterpreterBase.METHOD_DEF:\n if member[1] in methods_defined_so_far: # redefinition\n self.interpreter.error(\n ErrorType.NAME_ERROR,\n \"duplicate method \" + member[1],\n member[0].line_num,\n )\n self.methods.append(MethodDef(member))\n methods_defined_so_far.add(member[1])\n\n\nclass ObjectDef:\n STATUS_PROCEED = 0\n STATUS_RETURN = 1\n STATUS_NAME_ERROR = 2\n STATUS_TYPE_ERROR = 3\n\n def __init__(self, interpreter, class_def, trace_output):\n self.interpreter = interpreter # objref to interpreter object. used to report errors, get input, produce output\n self.class_def = class_def # take class body from 3rd+ list elements, e.g., [\"class\",classname\", [classbody]]\n self.trace_output = trace_output\n self.__map_fields_to_values()\n self.__map_method_names_to_method_definitions()\n self.__create_map_of_operations_to_lambdas() # sets up maps to facilitate binary and unary operations, e.g., (+ 5 6)\n\n def call_method(self, method_name, actual_params, line_num_of_caller):\n \"\"\"\n actual_params is a list of Value objects (all parameters are passed by value).\n\n The caller passes in the line number so we can properly generate an error message.\n The error is then generated at the source (i.e., where the call is initiated).\n \"\"\"\n if method_name not in self.methods:\n self.interpreter.error(\n ErrorType.NAME_ERROR,\n \"unknown method \" + method_name,\n line_num_of_caller,\n )\n method_info = self.methods[method_name]\n if len(actual_params) != len(method_info.formal_params):\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"invalid number of parameters in call to \" + method_name,\n line_num_of_caller,\n )\n env = (\n EnvironmentManager()\n ) # maintains lexical environment for function; just params for now\n for formal, actual in zip(method_info.formal_params, actual_params):\n env.set(formal, actual)\n # since each method has a single top-level statement, execute it.\n status, return_value = self.__execute_statement(env, method_info.code)\n # if the method explicitly used the (return expression) statement to return a value, then return that\n # value back to the caller\n if status == ObjectDef.STATUS_RETURN:\n return return_value\n # The method didn't explicitly return a value, so return a value of type nothing\n return Value(InterpreterBase.NOTHING_DEF)\n\n def __execute_statement(self, env, code):\n \"\"\"\n returns (status_code, return_value) where:\n - status_code indicates if the next statement includes a return\n - if so, the current method should 
terminate\n - otherwise, the next statement in the method should run normally\n - return_value is a Value containing the returned value from the function\n \"\"\"\n if self.trace_output:\n print(f\"{code[0].line_num}: {code}\")\n tok = code[0]\n if tok == InterpreterBase.BEGIN_DEF:\n return self.__execute_begin(env, code)\n if tok == InterpreterBase.SET_DEF:\n return self.__execute_set(env, code)\n if tok == InterpreterBase.IF_DEF:\n return self.__execute_if(env, code)\n if tok == InterpreterBase.CALL_DEF:\n return self.__execute_call(env, code)\n if tok == InterpreterBase.WHILE_DEF:\n return self.__execute_while(env, code)\n if tok == InterpreterBase.RETURN_DEF:\n return self.__execute_return(env, code)\n if tok == InterpreterBase.INPUT_STRING_DEF:\n return self.__execute_input(env, code, True)\n if tok == InterpreterBase.INPUT_INT_DEF:\n return self.__execute_input(env, code, False)\n if tok == InterpreterBase.PRINT_DEF:\n return self.__execute_print(env, code)\n\n self.interpreter.error(\n ErrorType.SYNTAX_ERROR, \"unknown statement \" + tok, tok.line_num\n )\n\n # (begin (statement1) (statement2) ... (statementn))\n def __execute_begin(self, env, code):\n for statement in code[1:]:\n status, return_value = self.__execute_statement(env, statement)\n if status == ObjectDef.STATUS_RETURN:\n return (\n status,\n return_value,\n ) # could be a valid return of a value or an error\n # if we run thru the entire block without a return, then just return proceed\n # we don't want the calling block to exit with a return\n return ObjectDef.STATUS_PROCEED, None\n\n # (call object_ref/me methodname param1 param2 param3)\n # where params are expressions, and expresion could be a value, or a (+ ...)\n # statement version of a method call; there's also an expression version of a method call below\n def __execute_call(self, env, code):\n return ObjectDef.STATUS_PROCEED, self.__execute_call_aux(\n env, code, code[0].line_num\n )\n\n # (set varname expression), where expresion could be a value, or a (+ ...)\n def __execute_set(self, env, code):\n val = self.__evaluate_expression(env, code[2], code[0].line_num)\n self.__set_variable_aux(env, code[1], val, code[0].line_num)\n return ObjectDef.STATUS_PROCEED, None\n\n # (return expression) where expresion could be a value, or a (+ ...)\n def __execute_return(self, env, code):\n if len(code) == 1:\n # [return] with no return expression\n return ObjectDef.STATUS_RETURN, create_value(InterpreterBase.NOTHING_DEF)\n return ObjectDef.STATUS_RETURN, self.__evaluate_expression(\n env, code[1], code[0].line_num\n )\n\n # (print expression1 expression2 ...) 
where expresion could be a variable, value, or a (+ ...)\n def __execute_print(self, env, code):\n output = \"\"\n for expr in code[1:]:\n # TESTING NOTE: Will not test printing of object references\n term = self.__evaluate_expression(env, expr, code[0].line_num)\n val = term.value()\n typ = term.type()\n if typ == Type.BOOL:\n val = \"true\" if val else \"false\"\n # document - will never print out an object ref\n output += str(val)\n self.interpreter.output(output)\n return ObjectDef.STATUS_PROCEED, None\n\n # (inputs target_variable) or (inputi target_variable) sets target_variable to input string/int\n def __execute_input(self, env, code, get_string):\n inp = self.interpreter.get_input()\n if get_string:\n val = Value(Type.STRING, inp)\n else:\n val = Value(Type.INT, int(inp))\n\n self.__set_variable_aux(env, code[1], val, code[0].line_num)\n return ObjectDef.STATUS_PROCEED, None\n\n # helper method used to set either parameter variables or member fields; parameters currently shadow\n # member fields\n def __set_variable_aux(self, env, var_name, value, line_num):\n # parameter shadows fields\n if value.type() == Type.NOTHING:\n self.interpreter.error(\n ErrorType.TYPE_ERROR, \"can't assign to nothing \" + var_name, line_num\n )\n param_val = env.get(var_name)\n if param_val is not None:\n env.set(var_name, value)\n return\n\n if var_name not in self.fields:\n self.interpreter.error(\n ErrorType.NAME_ERROR, \"unknown variable \" + var_name, line_num\n )\n self.fields[var_name] = value\n\n # (if expression (statement) (statement) ) where expresion could be a boolean constant (e.g., true), member\n # variable without ()s, or a boolean expression in parens, like (> 5 a)\n def __execute_if(self, env, code):\n condition = self.__evaluate_expression(env, code[1], code[0].line_num)\n if condition.type() != Type.BOOL:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"non-boolean if condition \" + ' '.join(x for x in code[1]),\n code[0].line_num,\n )\n if condition.value():\n status, return_value = self.__execute_statement(\n env, code[2]\n ) # if condition was true\n return status, return_value\n if len(code) == 4:\n status, return_value = self.__execute_statement(\n env, code[3]\n ) # if condition was false, do else\n return status, return_value\n return ObjectDef.STATUS_PROCEED, None\n\n # (while expression (statement) ) where expresion could be a boolean value, boolean member variable,\n # or a boolean expression in parens, like (> 5 a)\n def __execute_while(self, env, code):\n while True:\n condition = self.__evaluate_expression(env, code[1], code[0].line_num)\n if condition.type() != Type.BOOL:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"non-boolean while condition \" + ' '.join(x for x in code[1]),\n code[0].line_num,\n )\n if not condition.value(): # condition is false, exit loop immediately\n return ObjectDef.STATUS_PROCEED, None\n # condition is true, run body of while loop\n status, return_value = self.__execute_statement(env, code[2])\n if status == ObjectDef.STATUS_RETURN:\n return (\n status,\n return_value,\n ) # could be a valid return of a value or an error\n\n # given an expression, return a Value object with the expression's evaluated result\n # expressions could be: constants (true, 5, \"blah\"), variables (e.g., x), arithmetic/string/logical expressions\n # like (+ 5 6), (+ \"abc\" \"def\"), (> a 5), method calls (e.g., (call me foo)), or instantiations (e.g., new dog_class)\n def __evaluate_expression(self, env, expr, line_num_of_statement):\n if not 
isinstance(expr, list):\n # locals shadow member variables\n val = env.get(expr)\n if val is not None:\n return val\n if expr in self.fields:\n return self.fields[expr]\n # need to check for variable name and get its value too\n value = create_value(expr)\n if value is not None:\n return value\n self.interpreter.error(\n ErrorType.NAME_ERROR,\n \"invalid field or parameter \" + expr,\n line_num_of_statement,\n )\n\n operator = expr[0]\n if operator in self.binary_op_list:\n operand1 = self.__evaluate_expression(env, expr[1], line_num_of_statement)\n operand2 = self.__evaluate_expression(env, expr[2], line_num_of_statement)\n if operand1.type() == operand2.type() and operand1.type() == Type.INT:\n if operator not in self.binary_ops[Type.INT]:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"invalid operator applied to ints\",\n line_num_of_statement,\n )\n return self.binary_ops[Type.INT][operator](operand1, operand2)\n if operand1.type() == operand2.type() and operand1.type() == Type.STRING:\n if operator not in self.binary_ops[Type.STRING]:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"invalid operator applied to strings\",\n line_num_of_statement,\n )\n return self.binary_ops[Type.STRING][operator](operand1, operand2)\n if operand1.type() == operand2.type() and operand1.type() == Type.BOOL:\n if operator not in self.binary_ops[Type.BOOL]:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"invalid operator applied to bool\",\n line_num_of_statement,\n )\n return self.binary_ops[Type.BOOL][operator](operand1, operand2)\n if operand1.type() == operand2.type() and operand1.type() == Type.CLASS:\n if operator not in self.binary_ops[Type.CLASS]:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"invalid operator applied to class\",\n line_num_of_statement,\n )\n return self.binary_ops[Type.CLASS][operator](operand1, operand2)\n # error what about an obj reference and null\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n f\"operator {operator} applied to two incompatible types\",\n line_num_of_statement,\n )\n if operator in self.unary_op_list:\n operand = self.__evaluate_expression(env, expr[1], line_num_of_statement)\n if operand.type() == Type.BOOL:\n if operator not in self.unary_ops[Type.BOOL]:\n self.interpreter.error(\n ErrorType.TYPE_ERROR,\n \"invalid unary operator applied to bool\",\n line_num_of_statement,\n )\n return self.unary_ops[Type.BOOL][operator](operand)\n\n # handle call expression: (call objref methodname p1 p2 p3)\n if operator == InterpreterBase.CALL_DEF:\n return self.__execute_call_aux(env, expr, line_num_of_statement)\n # handle new expression: (new classname)\n if operator == InterpreterBase.NEW_DEF:\n return self.__execute_new_aux(env, expr, line_num_of_statement)\n\n # (new classname)\n def __execute_new_aux(self, _, code, line_num_of_statement):\n obj = self.interpreter.instantiate(code[1], line_num_of_statement)\n return Value(Type.CLASS, obj)\n\n # this method is a helper used by call statements and call expressions\n # (call object_ref/me methodname p1 p2 p3)\n def __execute_call_aux(self, env, code, line_num_of_statement):\n # determine which object we want to call the method on\n obj_name = code[1]\n if obj_name == InterpreterBase.ME_DEF:\n obj = self\n else:\n obj = self.__evaluate_expression(\n env, obj_name, line_num_of_statement\n ).value()\n # prepare the actual arguments for passing\n if obj is None:\n self.interpreter.error(\n ErrorType.FAULT_ERROR, \"null dereference\", line_num_of_statement\n )\n actual_args = []\n for expr in 
code[3:]:\n actual_args.append(\n self.__evaluate_expression(env, expr, line_num_of_statement)\n )\n return obj.call_method(code[2], actual_args, line_num_of_statement)\n\n def __map_method_names_to_method_definitions(self):\n self.methods = {}\n for method in self.class_def.get_methods():\n self.methods[method.method_name] = method\n\n def __map_fields_to_values(self):\n self.fields = {}\n for field in self.class_def.get_fields():\n self.fields[field.field_name] = create_value(field.default_field_value)\n\n def __create_map_of_operations_to_lambdas(self):\n self.binary_op_list = [\n \"+\",\n \"-\",\n \"*\",\n \"/\",\n \"%\",\n \"==\",\n \"!=\",\n \"<\",\n \"<=\",\n \">\",\n \">=\",\n \"&\",\n \"|\",\n ]\n self.unary_op_list = [\"!\"]\n self.binary_ops = {}\n self.binary_ops[Type.INT] = {\n \"+\": lambda a, b: Value(Type.INT, a.value() + b.value()),\n \"-\": lambda a, b: Value(Type.INT, a.value() - b.value()),\n \"*\": lambda a, b: Value(Type.INT, a.value() * b.value()),\n \"/\": lambda a, b: Value(\n Type.INT, a.value() // b.value()\n ), # // for integer ops\n \"%\": lambda a, b: Value(Type.INT, a.value() % b.value()),\n \"==\": lambda a, b: Value(Type.BOOL, a.value() == b.value()),\n \"!=\": lambda a, b: Value(Type.BOOL, a.value() != b.value()),\n \">\": lambda a, b: Value(Type.BOOL, a.value() > b.value()),\n \"<\": lambda a, b: Value(Type.BOOL, a.value() < b.value()),\n \">=\": lambda a, b: Value(Type.BOOL, a.value() >= b.value()),\n \"<=\": lambda a, b: Value(Type.BOOL, a.value() <= b.value()),\n }\n self.binary_ops[Type.STRING] = {\n \"+\": lambda a, b: Value(Type.STRING, a.value() + b.value()),\n \"==\": lambda a, b: Value(Type.BOOL, a.value() == b.value()),\n \"!=\": lambda a, b: Value(Type.BOOL, a.value() != b.value()),\n \">\": lambda a, b: Value(Type.BOOL, a.value() > b.value()),\n \"<\": lambda a, b: Value(Type.BOOL, a.value() < b.value()),\n \">=\": lambda a, b: Value(Type.BOOL, a.value() >= b.value()),\n \"<=\": lambda a, b: Value(Type.BOOL, a.value() <= b.value()),\n }\n self.binary_ops[Type.BOOL] = {\n \"&\": lambda a, b: Value(Type.BOOL, a.value() and b.value()),\n \"|\": lambda a, b: Value(Type.BOOL, a.value() or b.value()),\n \"==\": lambda a, b: Value(Type.BOOL, a.value() == b.value()),\n \"!=\": lambda a, b: Value(Type.BOOL, a.value() != b.value()),\n }\n self.binary_ops[Type.CLASS] = {\n \"==\": lambda a, b: Value(Type.BOOL, a.value() == b.value()),\n \"!=\": lambda a, b: Value(Type.BOOL, a.value() != b.value()),\n }\n\n self.unary_ops = {}\n self.unary_ops[Type.BOOL] = {\n \"!\": lambda a: Value(Type.BOOL, not a.value()),\n }\n\n\nclass Interpreter(InterpreterBase):\n \"\"\"\n Main interpreter class that subclasses InterpreterBase.\n \"\"\"\n\n def __init__(self, console_output=True, inp=None, trace_output=False):\n super().__init__(console_output, inp)\n self.trace_output = trace_output\n self.main_object = None\n self.class_index = {}\n\n def run(self, program):\n \"\"\"\n Run a program (an array of strings, where each item is a line of source code).\n Delegates parsing to the provided BParser class in bparser.py.\n \"\"\"\n status, parsed_program = BParser.parse(program)\n if not status:\n super().error(\n ErrorType.SYNTAX_ERROR, f\"Parse error on program: {parsed_program}\"\n )\n self.__map_class_names_to_class_defs(parsed_program)\n\n # instantiate main class\n invalid_line_num_of_caller = None\n self.main_object = self.instantiate(\n InterpreterBase.MAIN_CLASS_DEF, invalid_line_num_of_caller\n )\n\n # call main function in main class; return value is ignored 
from main\n        self.main_object.call_method(\n            InterpreterBase.MAIN_FUNC_DEF, [], invalid_line_num_of_caller\n        )\n\n        # program terminates!\n\n    def instantiate(self, class_name, line_num_of_statement):\n        \"\"\"\n        Instantiate a new class. The line number is necessary to properly generate an error\n        if a `new` is called with a class name that does not exist.\n        This reports the error where `new` is called.\n        \"\"\"\n        if class_name not in self.class_index:\n            super().error(\n                ErrorType.TYPE_ERROR,\n                f\"No class named {class_name} found\",\n                line_num_of_statement,\n            )\n        class_def = self.class_index[class_name]\n        obj = ObjectDef(\n            self, class_def, self.trace_output\n        )  # Create an object based on this class definition\n        return obj\n\n    def __map_class_names_to_class_defs(self, program):\n        self.class_index = {}\n        for item in program:\n            if item[0] == InterpreterBase.CLASS_DEF:\n                if item[1] in self.class_index:\n                    super().error(\n                        ErrorType.TYPE_ERROR,\n                        f\"Duplicate class name {item[1]}\",\n                        item[0].line_num,\n                    )\n                self.class_index[item[1]] = ClassDef(item, self)\n","repo_name":"joseph082/brewin-interpreter","sub_path":"interpreterv2 - original.py","file_name":"interpreterv2 - original.py","file_ext":"py","file_size_in_byte":24569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"16037517176","text":"def check(r,c):\n    arr[r][c] = 1\n\n\nN = int(input())\n# build a 100 x 100 matrix filled with 0s\narr = [[0] * 100 for _ in range(100)]\nfor i in range(N):\n    # read the position of one sheet of colored paper\n    a, b = map(int, input().split())\n    for j in range(10):\n        for k in range(10):\n            # set 1 in every cell covered by the sheet\n            arr[a+j][b+k] = 1\ncnt = 0\nfor i in range(100):\n    # walk the rows and count the 1s\n    cnt += arr[i].count(1)\nprint(cnt)\n
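\n# Hedged alternative (an addition, not part of the original solution): the\n# covered area can also be counted without allocating the full grid, by\n# collecting the covered cells in a set:\n# covered = set()\n# for a, b in positions:  # 'positions' is a hypothetical list of the N inputs\n#     covered |= {(a + j, b + k) for j in range(10) for k in range(10)}\n# print(len(covered))\n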
","repo_name":"jenu8628/TIL","sub_path":"algorithm/beakjoon/2563 색종이.py","file_name":"2563 색종이.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"73062358817","text":"from django.urls import path\nfrom .views import (\nHomeView,\nCourseView,\nCourseCreateView,\ncourse_single,\nAssignmentCreateView,\nAssignmentView,\nAssignmentDeleteView,\nAssignmentSubmissionView,\nAssignmentSubmissionListView,\nAssignmentSubmissionDelete,\nAssSubmissionEnterMarks\n)\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = \"core\"\n\n# NOTE: the URL converters below (e.g. <int:pk>) were stripped as HTML-like tags\n# during extraction; they are restored here as an assumption based on the view names.\nurlpatterns = [\n    path('', HomeView.as_view(), name='home'),\n    path('course/', CourseView.as_view(), name='course'),\n    path('course-create/', CourseCreateView.as_view(), name='course-create'),\n    path('assignment-create/', AssignmentCreateView.as_view(), name='assignment-create'),\n    path('assignment/', AssignmentView.as_view(), name='assignment-list'),\n    path('<int:pk>/delete/', AssignmentDeleteView.as_view(), name='delete-assignment'),\n    path('<int:pk>/course-view/', course_single, name='course-view'),\n    path('assignment-submission/', AssignmentSubmissionView.as_view(), name='assignment-submission'),\n    path('assignment-submission-list', AssignmentSubmissionListView.as_view(), name='assignment-submission-list'),\n    path('<int:pk>/delete/', AssignmentSubmissionDelete.as_view(), name='assignment-submission-delete'),\n    path('assignment-submission-list/<int:pk>',AssSubmissionEnterMarks.as_view(),name=\"enter-mark\")\n    ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Prago2001/DBMS","sub_path":"Online_Assignment_Submission_System_Project_Django/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"}
{"seq_id":"40791410837","text":"import random\nimport sys\nfrom maxHeap import buildMaxHeap, maxHeapSort\nfrom minHeap import buildMinHeap, minHeapSort\n\n\ndef main():\n    '''\n    main function that runs a test run on a random array\n    '''\n    if len(sys.argv) != 3:\n        print(\"Usage: python3 main.py size max/min\")\n        return\n    size = int(sys.argv[1])\n    type = sys.argv[2]\n    # print([*range(10)])\n    array = [*range(size)]\n    random.shuffle(array)\n    print(f'array to be sorted as a {type}heap: {array}')\n    if type == 'max':\n        buildMaxHeap(array, size)\n        print(maxHeapSort(array, size))\n    elif type == 'min':\n        buildMinHeap(array, size)\n        print(minHeapSort(array, size))\n    else:\n        quit(1)\n\nif __name__ == '__main__':\n    main()","repo_name":"kiankyars/heapSort","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"22997601523","text":"import socket\r\n\r\nip = '127.0.0.1'\r\nport = 8001\r\naddress = (ip, port)\r\nprint(\"Server hosted on\", ip + \", listening on port\", str(port) + \"...\")\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind(address)\r\ns.listen(5)\r\n\r\nwhile(1):\r\n    buf = b''\r\n    connection, addr = s.accept()\r\n    print(\"Receive data from\", addr)\r\n    \r\n    buf = connection.recv(1024)\r\n    \r\n    print(buf.decode())\r\n    connection.send(buf) # send() expects bytes; echoing the decoded str (as the original did) raises a TypeError\r\n    \r\n    connection.close()\r\ns.close()\r\n","repo_name":"phoebebai/CMPUT404Lab2","sub_path":"echo_server.py","file_name":"echo_server.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"}
{"seq_id":"15616478326","text":"from module_part4 import *\r\nfrom p5 import *\r\n\r\n\r\ndef preload() :\r\n    global FLECHE_JAUNE,FLECHE_ROUGE\r\n    # define the FLECHE_JAUNE variable, which will hold an image object\r\n    FLECHE_JAUNE = loadImage(\"https://capytale2.ac-paris.fr/web/sites/default/files/2022/06-18/15-34-01/flehce_jaune.jpg\")\r\n    FLECHE_ROUGE = loadImage(\"https://capytale2.ac-paris.fr/web/sites/default/files/2022/06-18/15-19-31/flehce_rouge.jpg\")\r\n\r\ndef setup() :\r\n    createCanvas(300, 400) # create a 300x400-pixel window\r\n    background(\"#ivory\") # choose a colour for the background\r\n\r\ndef image_cliquee(x,y) :\r\n    \"\"\"\r\n    IN: x and y: coordinates of the click\r\n    OUT:\r\n    - if the click landed in the image strip: the function returns the index of the clicked image\r\n    - otherwise: it returns -1\r\n    \"\"\"\r\n    global LARGE,HAUT,DECAL_X,DECAL_Y,NB_IMAGES\r\n    \r\n    if DECAL_X <= x <= DECAL_X + NB_IMAGES * LARGE and DECAL_Y <= y <= DECAL_Y + HAUT :\r\n        return int(x-DECAL_X)//LARGE\r\n    return -1\r\n\r\ndef clic() :\r\n    global CLIC_EN_COURS\r\n    global JOUEUR,GRILLE\r\n    \r\n    if mouseIsPressed :\r\n        if not CLIC_EN_COURS : # if a click is not already in progress\r\n            CLIC_EN_COURS = True # then set CLIC_EN_COURS to True to block repeated triggering\r\n            num = image_cliquee(mouseX,mouseY)\r\n            if num >= 0 :\r\n                GRILLE = joue_jeton(GRILLE,JOUEUR,num)\r\n                JOUEUR = 3 - JOUEUR\r\n    else :\r\n        CLIC_EN_COURS = False # the button was released, reset CLIC_EN_COURS to False\r\n\r\n
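# Worked example (added for illustration): with DECAL_X = 10 and LARGE = 40 as set\r\n# in the main program below, a click at x = 95 inside the arrow strip gives\r\n# int(95 - 10) // 40 = 2, i.e. the third column (columns are numbered from 0).\r\n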
def affiche_grille(x,y) :\r\n \"\"\"\r\n Draws a grid of 6 rows, each containing 7 squares.\r\n The squares are coloured (0,0,200) and their size is given by the global variable LARGE.\r\n The first square is drawn at the coordinates x,y received as parameters.\r\n \"\"\"\r\n global LARGE,HAUT\r\n \r\n for ligne in range(6) :\r\n for colonne in range(7) :\r\n fill(0,0,200)\r\n square(x+colonne*LARGE, y+ligne*LARGE, LARGE)\r\n pion(x+colonne*LARGE , y+ligne*LARGE, ligne, colonne)\r\n \r\ndef pion(x,y,i,j) :\r\n global GRILLE,LARGE\r\n centre_x = x + LARGE//2\r\n centre_y = y + LARGE//2\r\n if GRILLE[i][j] == 0 : color=\"white\"\r\n elif GRILLE[i][j] == 1 : color=\"red\"\r\n else : color=\"yellow\"\r\n \r\n fill(color)\r\n circle(centre_x,centre_y,LARGE * 3 //4,)\r\n \r\ndef draw() :\r\n global LARGE,DECAL_X,DECAL_Y,NB_IMAGES\r\n global FLECHE_JAUNE,FLECHE_ROUGE,JOUEUR\r\n \r\n if JOUEUR == 1 :\r\n fleche = FLECHE_ROUGE\r\n else :\r\n fleche = FLECHE_JAUNE\r\n \r\n for i in range(NB_IMAGES) :\r\n # draw the image\r\n image(fleche, DECAL_X + i*LARGE,DECAL_Y)\r\n \r\n affiche_grille(DECAL_X,DECAL_Y+56)\r\n \r\n clic()\r\n \r\n####################################################\r\n### MAIN PROGRAM ####\r\n####################################################\r\n\r\n# initialise the grid\r\nGRILLE = creation_grille_vierge()\r\n\r\n# width/height of the images:\r\nLARGE = 40\r\nHAUT = 56\r\nNB_IMAGES = 7\r\n\r\n# position of the images\r\nDECAL_X = 10\r\nDECAL_Y = 40\r\n\r\n# start:\r\nJOUEUR = 1\r\nCLIC_EN_COURS = False\r\n\r\nrun()","repo_name":"Jthirot/NSI-thirot-JL","sub_path":"NSI/Première/projets/puissance4/code_part4_1.py","file_name":"code_part4_1.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"6871715951","text":"# Credits: @mrismanaziz\n# FROM File-Sharing-Man \n# t.me/SharingUserbot & t.me/Lunatic0de\n\nimport os\n\nfrom bot import Bot\nfrom config import (\n ADMINS,\n API_HASH,\n APP_ID,\n CHANNEL_ID,\n DB_URI,\n FORCE_MSG,\n FORCE_SUB_CHANNEL,\n FORCE_SUB_GROUP,\n HEROKU_API_KEY,\n HEROKU_APP_NAME,\n LOGGER,\n OWNER,\n PROTECT_CONTENT,\n START_MSG,\n TG_BOT_TOKEN,\n)\nfrom pyrogram import filters\nfrom pyrogram.types import Message\n\n\n@Bot.on_message(filters.command(\"logs\") & filters.user(ADMINS))\nasync def get_bot_logs(client: Bot, m: Message):\n bot_log_path = \"logs.txt\"\n if os.path.exists(bot_log_path):\n try:\n await m.reply_document(\n bot_log_path,\n quote=True,\n caption=\"Here are this bot's logs\",\n )\n except Exception as e:\n os.remove(bot_log_path)\n LOGGER(__name__).warning(e)\n else:\n await m.reply_text(\"❌ No logs found!\")\n\n\n@Bot.on_message(filters.command(\"vars\") & filters.user(ADMINS))\nasync def varsFunc(client: Bot, message: Message):\n Man = await message.reply_text(\"One moment...\")\n text = f\"\"\"CONFIG VARS @{client.username}\nAPP_ID = {APP_ID}\nAPI_HASH = {API_HASH}\nTG_BOT_TOKEN = {TG_BOT_TOKEN}\nDATABASE_URL = {DB_URI}\nOWNER = {OWNER}\nADMINS = {ADMINS}\n \nCUSTOM VARS\nCHANNEL_ID = {CHANNEL_ID}\nFORCE_SUB_CHANNEL = {FORCE_SUB_CHANNEL}\nFORCE_SUB_GROUP = {FORCE_SUB_GROUP}\nPROTECT_CONTENT = {PROTECT_CONTENT}\nSTART_MSG = {START_MSG}\nFORCE_MSG = {FORCE_MSG}\n\nHEROKU CONFIGVARS\nHEROKU_APP_NAME = {HEROKU_APP_NAME}\nHEROKU_API_KEY = {HEROKU_API_KEY}\n \"\"\"\n
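 # Editing the placeholder message shows the rendered vars in place instead of pushing a second message into the chat.\n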
 await Man.edit_text(text)\n","repo_name":"mrismanaziz/File-Sharing-Man","sub_path":"plugins/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"34"} +{"seq_id":"18345026069","text":"\"\"\"vod URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom rest_framework import routers, serializers, viewsets\n\n\n# Serializers define the API representation.\nfrom movies.models import Movie\n\n\nclass MovieSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Movie\n fields = ('id', 'title', 'description', 'image', 'url')\n\n\n# ViewSets define the view behavior.\nclass MovieViewSet(viewsets.ModelViewSet):\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.DefaultRouter()\nrouter.register(r'movies', MovieViewSet)\n\nurlpatterns = [\n url(r'^', include('publics.urls')),\n url(r'^api/', include(router.urls)),\n url(r'^admin/', admin.site.urls),\n]\n","repo_name":"takutico/vod","sub_path":"vod/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"71915787298","text":"from rest_framework import serializers\nfrom todo.models import Todo\n\n\nclass TodoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Todo\n fields = '__all__' # GET all data\n # fields = ['title', 'completed'] # GET only title and completed value\n #exclude = ['title']\n read_only_fields = ['id']\n\n\n\n# -------------- STANDARD SERIALIZER -------------- #\n\nclass TodoDefaultSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n title = serializers.CharField()\n completed = serializers.BooleanField()\n\n def create(self, validated_data):\n print(validated_data)\n return Todo.objects.create(**validated_data) # ** is required here; passing the dict itself would raise an error\n\n def update(self, instance, validated_data):\n instance.title= validated_data.get('title', instance.title)\n instance.completed= validated_data.get('completed', instance.completed)\n instance.save()\n return instance\n\n# -------------- STANDARD SERIALIZER -------------- #\n\n # -------------- VALIDATION -------------- # \n\n def validate(self, data): # object-level validation\n if data['title'] == data['completed']:\n raise serializers.ValidationError('The title and status fields cannot be the same. Please enter a different title.')\n return data\n
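 # DRF calls validate_<field_name>() automatically for each declared field, so the hook below runs on every submitted title.\n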
 def validate_title(self, value): # field-level validation\n if len(value) < 4:\n raise serializers.ValidationError(f'The title must be at least 4 characters long. Characters entered: {len(value)}')\n return value \n\n # -------------- VALIDATION -------------- # ","repo_name":"ardilsercan/Django-Samples","sub_path":"djangotodoapp/todo/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23673622730","text":"#!/usr/bin/env Python\nfrom sys import argv\nimport itertools\n\nwith open(argv[1],'r') as in_file:\n in_data = in_file.read().splitlines()\nin_file.closed\n\nmonoisotopic_mass_table = {'A':71.03711,'C':103.00919,'D':115.02694,\\\n 'E':129.04259,'F':147.06841,'G':57.02146,\\\n 'H':137.05891,'I':113.08406,'K':128.09496,\\\n 'L':113.08406,'M':131.04049,'N':114.04293,\\\n 'P':97.05276,'Q':128.05858,'R':156.10111,\\\n 'S':87.03203,'T':101.04768,'V':99.06841,\\\n 'W':186.07931,'Y':163.06333}\namino_acids = [key for key in monoisotopic_mass_table.keys()]\nreverse_mass_table = dict((reversed(item) for item in monoisotopic_mass_table.items()))\n\ntotal_mass = float(in_data[0])\npeaks = []\nfor i in range(1,len(in_data)):\n peaks.append(float(in_data[i]))\npeaks.sort()\nlength = ((len(peaks) - 2) / 2)\n\n\n#Collect pairs of peaks\ndo_not_add = []\ncol_peaks = []\nfor i in peaks:\n for j in peaks:\n pair_sum = i + j # renamed from 'sum' so the builtin is not shadowed\n if (((pair_sum < (total_mass + 0.01)) and (pair_sum > (total_mass - 0.01))) and not((i in do_not_add) or (j in do_not_add))):\n col_peaks.append((i,j))\n col_peaks.append((j,i))\n do_not_add.append(i)\n do_not_add.append(j)\ncol_peaks.sort()\n\ndo_not_add = []\ni = 0\nj = 1\nm = len(col_peaks)\n \n\n\n'''\ndo_not_add = []\ncollected = []\nfor i in range(len(peaks)-1):\n for j in range(i+1,len(peaks)):\n diff = peaks[j] - peaks[i]\n summ = peaks[j] + peaks[i]\n for k in reverse_mass_table:\n if (((diff > (k - 0.01)) and (diff < (k + 0.01))) and not((summ < (total_mass + 0.1)) and (summ > (total_mass - 0.1)))):\n if not((peaks[i] in do_not_add) or (peaks[j] in do_not_add)):\n collected.append((peaks[i],peaks[j],reverse_mass_table[k]))\n do_not_add.append(peaks[i])\n do_not_add.append(peaks[j])\nfor i in collected:\n print i\n'''\n\n#with open('peptide_full_spectrum.txt','w') as out_file:\n# out_file.write(\"\".join(seq))\n#out_file.closed()\n","repo_name":"cookjmatt/Rosalind-Bioinformatics-Scripts","sub_path":"peptide_full_spectrum.py","file_name":"peptide_full_spectrum.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"36723781222","text":"import numpy as np\nimport pandas as pd\n\n# Naive Bayes classifier\nfrom sklearn.naive_bayes import GaussianNB\n# Decision tree\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import LabelEncoder\n\n# Usually one part of the data is used for training and the other for testing\nfrom sklearn.model_selection import train_test_split\n\n# Confusion matrix\nfrom sklearn.metrics import confusion_matrix, classification_report\n\n\ndf = pd.read_csv(r'E:\\python_files\\csv\\Iris.csv')\nle = LabelEncoder()\ndf['labels'] = le.fit_transform(df.Species)\n\nX = df[ ['SepalLengthCm', \n 'SepalWidthCm', \n 'PetalLengthCm', \n 'PetalWidthCm'\n ] ]\ny = df['labels']\n\n# Usually one part of the data is used for training and the other for testing\nX_train, X_test, y_train, y_test = train_test_split(X, y, \n train_size=0.6, # defaults to 0.75\n test_size=0.4) # defaults to 0.25\n # train_size + test_size must not exceed 1\n\nclf = DecisionTreeClassifier()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test) # y_pred holds the predictions for the X_test split\ncm = confusion_matrix(y_test, y_pred)\n
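# Rows of the confusion matrix are the true labels and columns the predictions; classification_report adds per-class precision, recall and F1.\n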
report = classification_report(y_test, y_pred)\nprint(report)\n\nclf = GaussianNB()\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test) # y_pred holds the predictions for the X_test split\ncm = confusion_matrix(y_test, y_pred)\nreport = classification_report(y_test, y_pred)\nprint(report)\n\n\n\n\n","repo_name":"wzgdavid/python_kecheng","sub_path":"samples/chapter10/python机器学习/fenleis.py","file_name":"fenleis.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"34"} +{"seq_id":"7408247215","text":"from users.models import Profile\nfrom .models import Post\n\ndef user(request): # registered as a context processor in settings\n if request.user.is_authenticated:\n profile = Profile.objects.filter(user = request.user)[0]\n else :\n profile = None \n \n return{'g_profile':profile}\n\ndef last_posts(request):\n post = Post.objects.all().order_by('-date_posted')[:5]\n return {'g_last_posts':post}","repo_name":"jazijazi/django-blog","sub_path":"django_blog/blog/blog_processors.py","file_name":"blog_processors.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"23526132726","text":"\"\"\"1358 Hockey / Silver IV\"\"\"\r\n\r\n\r\ncnt = 0\r\nw, h, x, y, p = map(int, input().split())\r\nr = h / 2\r\npositions = [list(map(int, input().split())) for _ in range(p)]\r\n\r\n\r\ndef in_rink(px: int, py: int):\r\n if x <= px <= x + w and y <= py <= y + h:\r\n return 1\r\n if r**2 >= (x - px) ** 2 + (y + r - py) ** 2:\r\n return 1\r\n if r**2 >= (x + w - px) ** 2 + (y + r - py) ** 2:\r\n return 1\r\n return 0\r\n\r\n\r\nfor px, py in positions:\r\n cnt += in_rink(px, py)\r\nprint(cnt)\r\n","repo_name":"Kdelphinus/Python_study","sub_path":"Baekjoon/silver/silver IV/1358_hockey.py","file_name":"1358_hockey.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"8100876481","text":"\n######################################################################################################################\n# This is a lambda function to provide \"billing amount\" information stored in billing dynamodb table, 'billing_aggrs'\n######################################################################################################################\n\nimport os\nimport boto3\nimport json\n\nimport decimal\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nfrom dateutil import tz\nfrom dateutil import parser\n\nregion = os.environ.get('AWS_DEFAULT_REGION')\namount_table_name = os.environ.get('DYNAMODB_AGGR_TABLE_NAME')\nclient = boto3.resource('dynamodb', region_name=region)\namount_table = client.Table(amount_table_name)\n\n\ndef lambda_handler(event, context):\n\n given_date = event.get('date')\n current_date = datetime.datetime.utcnow()\n if given_date:\n given_date = parser.parse(given_date)\n if given_date.year != current_date.year or given_date.month != current_date.month:\n current_date = datetime.datetime(given_date.year, given_date.month, 1) + relativedelta(months=1) + relativedelta(days=-1)\n print(\"current_date = %s\" % current_date)\n\n report_type = event['type']\n if report_type == 'summary':\n return get_summary_message(current_date)\n elif report_type == 'detail': # assumes the non-summary report type is named 'detail'\n return get_detail_messages(current_date, event['account_id'])\n raise ValueError(\"Not supported type, %s\" % report_type) # raising a bare string is invalid in Python 3\n\n\ndef get_summary_message(current_date):\n\n
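 # Monthly summary rows live under the wildcard partition key '*_YYYY-MM'; per-account rows use '<account_id>_YYYY-MM' (see get_detail_messages below).\n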
 response = amount_table.query(\n KeyConditionExpression=\"id = :id\",\n ExpressionAttributeValues={\n ':id': '*_%s' % current_date.strftime('%Y-%m')\n }\n )\n #print(\"there are %d items found\" % len(response['Items']))\n\n return response['Items']\n\n\ndef get_detail_messages(current_date, account_id):\n\n response = amount_table.query(\n KeyConditionExpression=\"id = :id\",\n ExpressionAttributeValues={\n ':id': '%s_%s' % (account_id, current_date.strftime('%Y-%m'))\n }\n )\n #print(\"there are %d items found\" % len(response['Items']))\n\n return response['Items']\n","repo_name":"SungardAS/aws-services-billing","sub_path":"batch/amount_handler.py","file_name":"amount_handler.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"40462415894","text":"'''\nClass Model: model for the deep clustering speech separation\n'''\nimport numpy as np\nimport ipdb\nimport tensorflow as tf\n\nfrom GlobalConstont import *\n\n# from ln_lstm import LayerNormalizedLSTMCell\n# from bnlstm import BNLSTMCell\n\n\nclass Model(object):\n def __init__(self, n_hidden, batch_size, p_keep_ff, p_keep_rc):\n '''n_hidden: number of hidden states\n p_keep_ff: forward keep probability\n p_keep_rc: recurrent keep probability'''\n self.n_hidden = n_hidden\n self.batch_size = batch_size\n # if training:\n # self.p_keep_ff = 1 - P_DROPOUT_FF\n # self.p_keep_rc = 1 - P_DROPOUT_RC\n # else:\n # self.p_keep_ff = 1\n # self.p_keep_rc = 1\n self.p_keep_ff = p_keep_ff\n self.p_keep_rc = p_keep_rc\n # biases and weights for the last layer\n self.weights = {\n 'out': tf.Variable(\n tf.random_normal([2 * n_hidden, EMBBEDDING_D * NEFF]))\n }\n self.biases = {\n 'out': tf.Variable(\n tf.random_normal([EMBBEDDING_D * NEFF]))\n }\n\n def inference(self, x):\n '''The structure of the network'''\n # ipdb.set_trace()\n # four layers of BLSTM cell blocks\n with tf.variable_scope('BLSTM1') as scope:\n # lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(\n # self.n_hidden)\n # lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(\n # self.n_hidden)\n lstm_fw_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_fw_cell = tf.nn.rnn_cell.DropoutWrapper(\n lstm_fw_cell, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n lstm_bw_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_bw_cell = tf.nn.rnn_cell.DropoutWrapper(\n lstm_bw_cell, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(\n lstm_fw_cell, lstm_bw_cell, x,\n sequence_length=[FRAMES_PER_SAMPLE] * self.batch_size,\n dtype=tf.float32)\n state_concate = tf.concat(outputs, 2) # TF 1.x argument order: values first, then axis\n with tf.variable_scope('BLSTM2') as scope:\n # lstm_fw_cell2 = tf.nn.rnn_cell.LSTMCell(\n # self.n_hidden)\n # lstm_bw_cell2 = tf.nn.rnn_cell.LSTMCell(\n # self.n_hidden)\n lstm_fw_cell2 = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_fw_cell2 = tf.nn.rnn_cell.DropoutWrapper(\n lstm_fw_cell2, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n lstm_bw_cell2 = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_bw_cell2 = tf.nn.rnn_cell.DropoutWrapper(\n lstm_bw_cell2, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n outputs2, _ = tf.nn.bidirectional_dynamic_rnn(\n lstm_fw_cell2, lstm_bw_cell2, state_concate,\n sequence_length=[FRAMES_PER_SAMPLE] * self.batch_size,\n dtype=tf.float32)\n
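 # Each BLSTM block consumes the concatenated forward/backward states of the previous one, so its input width is 2 * n_hidden.\n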
 state_concate2 = tf.concat(outputs2, 2)\n with tf.variable_scope('BLSTM3') as scope:\n lstm_fw_cell3 = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_fw_cell3 = tf.nn.rnn_cell.DropoutWrapper(\n lstm_fw_cell3, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n lstm_bw_cell3 = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_bw_cell3 = tf.nn.rnn_cell.DropoutWrapper(\n lstm_bw_cell3, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n outputs3, _ = tf.nn.bidirectional_dynamic_rnn(\n lstm_fw_cell3, lstm_bw_cell3, state_concate2,\n sequence_length=[FRAMES_PER_SAMPLE] * self.batch_size,\n dtype=tf.float32)\n state_concate3 = tf.concat(outputs3, 2)\n with tf.variable_scope('BLSTM4') as scope:\n lstm_fw_cell4 = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_fw_cell4 = tf.nn.rnn_cell.DropoutWrapper(\n lstm_fw_cell4, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n lstm_bw_cell4 = tf.contrib.rnn.LayerNormBasicLSTMCell(\n self.n_hidden, layer_norm=False,\n dropout_keep_prob=self.p_keep_rc)\n lstm_bw_cell4 = tf.nn.rnn_cell.DropoutWrapper(\n lstm_bw_cell4, input_keep_prob=1,\n output_keep_prob=self.p_keep_ff)\n outputs4, _ = tf.nn.bidirectional_dynamic_rnn(\n lstm_fw_cell4, lstm_bw_cell4, state_concate3,\n sequence_length=[FRAMES_PER_SAMPLE] * self.batch_size,\n dtype=tf.float32)\n state_concate4 = tf.concat(outputs4, 2)\n # one layer of embedding output with tanh activation function\n out_concate = tf.reshape(state_concate4, [-1, self.n_hidden * 2])\n emb_out = tf.matmul(out_concate,\n self.weights['out']) + self.biases['out']\n emb_out = tf.nn.tanh(emb_out)\n reshaped_emb = tf.reshape(emb_out, [-1, NEFF, EMBBEDDING_D])\n # normalization before output\n normalized_emb = tf.nn.l2_normalize(reshaped_emb, 2)\n return normalized_emb\n\n def loss(self, embeddings, Y, VAD):\n '''Defining the loss function'''\n embeddings_rs = tf.reshape(embeddings, shape=[-1, EMBBEDDING_D])\n VAD_rs = tf.reshape(VAD, shape=[-1])\n # get the embeddings with active VAD\n embeddings_rsv = tf.transpose(\n tf.multiply(tf.transpose(embeddings_rs), VAD_rs))\n embeddings_v = tf.reshape(\n embeddings_rsv, [-1, FRAMES_PER_SAMPLE * NEFF, EMBBEDDING_D])\n # get the Y(speaker indicator function) with active VAD\n Y_rs = tf.reshape(Y, shape=[-1, 2])\n Y_rsv = tf.transpose(\n tf.multiply(tf.transpose(Y_rs), VAD_rs))\n Y_v = tf.reshape(Y_rsv, shape=[-1, FRAMES_PER_SAMPLE * NEFF, 2])\n # fast computation format of the embedding loss function:\n # ||E^T E||^2 - 2 ||E^T Y||^2 + ||Y^T Y||^2, which avoids forming the full affinity matrix\n loss_batch = tf.nn.l2_loss(\n tf.matmul(tf.transpose(\n embeddings_v, [0, 2, 1]), embeddings_v)) - \\\n 2 * tf.nn.l2_loss(\n tf.matmul(tf.transpose(\n embeddings_v, [0, 2, 1]), Y_v)) + \\\n tf.nn.l2_loss(\n tf.matmul(tf.transpose(\n Y_v, [0, 2, 1]), Y_v))\n loss_v = (loss_batch) / self.batch_size\n tf.summary.scalar('loss', loss_v)\n return loss_v\n\n def train(self, loss, lr):\n '''Optimizer'''\n optimizer = tf.train.AdamOptimizer(\n learning_rate=lr,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8)\n # optimizer = tf.train.MomentumOptimizer(lr, 0.9)\n gradients, v = zip(*optimizer.compute_gradients(loss))\n gradients, _ = tf.clip_by_global_norm(gradients, 200)\n train_op = optimizer.apply_gradients(\n zip(gradients, v))\n
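 # The returned op applies the norm-clipped gradients; clipping the global norm to 200 guards against exploding gradients in the deep BLSTM stack.\n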
 return train_op\n","repo_name":"zhr1201/deep-clustering","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"34"} +{"seq_id":"23810406633","text":"# Greatest common divisor and least common multiple (Baekjoon 2609)\n\na, b = map(int, input().split())\n\nstep = 1\nbucket = []\nmaxValue = max(a, b)\nwhile True:\n if step > maxValue:\n break\n if a%step == 0 and b%step == 0 and step != 1:\n a //= step\n b //= step\n bucket.append(step)\n else:\n step += 1\n#print(bucket, a, b)\nlcm = gcf = 1\nfor i in bucket:\n gcf *= i\nlcm = gcf*a*b\n\nprint(gcf, lcm)","repo_name":"myomu/backjoonCT","sub_path":"정수론 및 조합론/bj2609.py","file_name":"bj2609.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"26795765851","text":"\"\"\"\nFunction that determines the smallest element of a cyclically shifted (rotated) sorted array.\n\nInput: [4, 5, 6, 7, 9, 1, 3]\nOutput: 1\n\"\"\"\ndef smallest_cyclic_array(arr):\n\tlow = 0\n\thigh = len(arr) - 1\n\n\twhile low < high:\n\t\tmid = (low + high) // 2\n\n\t\t# If the mid value is greater than the high value,\n\t\t# the smallest value must be in the right half.\n\t\tif arr[mid] > arr[high]:\n\t\t\tlow = mid + 1\n\t\t# Otherwise the smallest value is at mid or in the left half.\n\t\telif arr[mid] <= arr[high]:\n\t\t\thigh = mid\n\n\treturn arr[low]\n\nprint(smallest_cyclic_array([4, 5, 6, 7, 9, 1, 3]))\n","repo_name":"neerajp99/algorithms","sub_path":"binary_search/cyclic_array.py","file_name":"cyclic_array.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"4478756645","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 10 19:11:40 2019\n\n@author: Clément\n\"\"\"\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\ndef main():\n print(\"Predicting the digit values with a random forest\")\n # Load the data\n digits = datasets.load_digits()\n \n # Split features and targets\n data = digits['data']\n target = digits['target']\n \n # Build a training set and a test set\n x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.33, random_state=42)\n print(\"Training on\", len(x_train), \"images\")\n \n # Build the classifier\n rfc = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n \n # Training\n rfc.fit(x_train, y_train)\n \n # Prediction on the test set\n predict(rfc, x_test, y_test)\n \n \ndef predict(rfc, x_test, y_test):\n \n # Initialise the statistics\n nb_predictions = 0\n nb_predictions_correctes = 0\n \n # Run predictions over the test set\n for key, x in enumerate(x_test):\n if rfc.predict([x]) == y_test[key]:\n nb_predictions_correctes += 1\n nb_predictions += 1\n \n # Report the accuracy\n print(\"Correct predictions:\", nb_predictions_correctes, \"/\", nb_predictions)\n print(\"Accuracy: \", round((nb_predictions_correctes / nb_predictions) * 100, 2), \"%\")\n \nif __name__ == \"__main__\":\n main() ","repo_name":"ClementCaillaud/MachineLearning_ynov","sub_path":"06-Random 
forest/randomForest.py","file_name":"randomForest.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"27605165330","text":"import pandas as pd\nimport cv2\nimport urllib.request\nimport numpy as np\nimport os\nfrom datetime import datetime\nimport face_recognition\nfrom PIL import Image\nimport csv\npath = r'D:\\COPY TEST\\image_folder'\nurl='http://192.168.29.58/cam-hi.jpg'\n##'''cam.bmp / cam-lo.jpg /cam-hi.jpg / cam.mjpeg '''\n \n# if 'students.csv' in os.listdir(os.path.join(r'D:\\testing')):\nif os.path.exists('D:\\\\COPY TEST\\\\Students.csv'):\n print(\"Students.csv already exists, removing it...\")\n os.remove(\"Students.csv\") # assumes the script runs from D:\\COPY TEST, so this is the same file checked above\nelse:\n df=pd.DataFrame(list())\n df.to_csv(\"Students.csv\")\n \n \nimages = []\nclassNames = []\nmyList = os.listdir(path)\nprint(myList)\nfor cl in myList:\n curImg = cv2.imread(f'{path}/{cl}')\n images.append(curImg)\n classNames.append(os.path.splitext(cl)[0])\nprint(classNames)\n \n \ndef findEncodings(images):\n encodeList = []\n for img in images:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n encode = face_recognition.face_encodings(img)[0]\n encodeList.append(encode)\n return encodeList\n \n \ndef detailStudents(name, reg_no, programee, course, batch):\n now = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n try:\n with open(\"Students.csv\", \"x\") as f:\n f.write(\"name,reg_no,programme,course,batch,time\\n\")\n except FileExistsError:\n pass # file already exists, do nothing\n\n # open the file for appending\n with open(\"Students.csv\", 'a') as f:\n writer = csv.writer(f)\n writer.writerow([name, reg_no, programee, course, batch,now])\n print(\"Student details added successfully!\")\n\ndef get_current_student(matches, classNames, faceLoc, img):\n for matchIndex, match in enumerate(matches):\n if match:\n name = classNames[matchIndex].upper()\n reg_no = \"12345\" # replace with actual registration number\n programee = \"B.Tech\" # replace with actual program name\n course = \"Computer Science\" # replace with actual course name\n batch = \"2022\" # replace with actual batch year\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n return name, reg_no, programee, course, batch\n return None, None, None, None, None\n \nencodeListKnown = findEncodings(images)\nprint('Encoding Complete')\n \n#cap = cv2.VideoCapture(0)\n \n\nwhile True:\n #success, img = cap.read()\n img_resp = urllib.request.urlopen(url)\n imgnp = np.array(bytearray(img_resp.read()), dtype=np.uint8)\n img = cv2.imdecode(imgnp, -1)\n if img is not None and img.shape[0] > 0 and img.shape[1] > 0:\n cv2.imshow('Webcam', img)\n print(img.shape)\n else:\n print(\"Error reading image from URL\")\n# img = captureScreen()\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25) # type: ignore\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\n \n facesCurFrame = face_recognition.face_locations(imgS)\n encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)\n \n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\n print(\"Face Detected\")\n
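 # compare_faces thresholds face_distance into booleans; np.argmin over the raw distances below then picks the closest enrolled face.\n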
 matches = face_recognition.compare_faces(encodeListKnown, encodeFace)\n faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)\n matchIndex = np.argmin(faceDis)\n if matches[matchIndex]:\n name = classNames[matchIndex].upper()\n reg_no = \"12345\" # replace with actual registration number\n programee = \"B.Tech\" # replace with actual program name\n course = \"Computer Science\" # replace with actual course name\n batch = \"2022\" # replace with actual batch year\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n detailStudents(name, reg_no, programee, course, batch)\n\n \n else:\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 0, 255), cv2.FILLED)\n cv2.putText(img, \"Intruder Alert\", (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n\n cv2.imshow('Webcam', img)\n key=cv2.waitKey(5)\n if key==ord('q'):\n break\ncv2.destroyAllWindows()\n","repo_name":"pratheekjs/project1","sub_path":"face_detection_students.py","file_name":"face_detection_students.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"28354045321","text":"import os\nimport shutil\nfrom src.config import DEFAULT_MAIN, MAIN_PATH\n\n\nif __name__ == \"__main__\":\n while True:\n answer = input('Do you really want to clear main? [y/n] ')\n if answer == 'y':\n with open(MAIN_PATH, 'w+') as f:\n for line in DEFAULT_MAIN:\n print(line, file=f)\n break\n elif answer == 'n':\n exit(0)\n else:\n print('Unrecognized answer \"{}\".'.format(answer))\n","repo_name":"VoIlAlex/object-detection-algorithms","sub_path":"clear_main.py","file_name":"clear_main.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"34"} +{"seq_id":"2489587721","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nr\"\"\"\n.. codeauthor:: Riley Smith & Wilfried Mercier - IRAP \n\nA custom Self Organising Map which can write and read its values into and from an external file.\n\nMost of the code comes from the Riley Smith implementation found in the sklearn-som python library. \n\"\"\"\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom typing import Optional, Union, List, Tuple\n\nclass SOM():\n \"\"\"\n .. codeauthor:: Riley Smith\n \n The 2-D, rectangular grid self-organizing map class using Numpy.\n \n :param int m: (**Optional**) shape along dimension 0 (vertical) of the SOM\n :param int n: (**Optional**) shape along dimension 1 (horizontal) of the SOM\n :param int dim: (**Optional**) dimensionality (number of features) of the input space\n :param float lr: (**Optional**) initial step size for updating the SOM weights.\n :param float sigma: (**Optional**) magnitude of change to each weight. Does not update over training (as does learning rate). Higher values mean more aggressive updates to weights.\n :param int max_iter: (**Optional**) parameter to stop training if you reach this many iterations.\n :param int random_state: (**Optional**) integer seed to the random number generator for weight initialization. 
This will be used to create a new instance of Numpy's default random number generator (it will not call np.random.seed()). Specify an integer for deterministic results.\n \"\"\"\n \n def __init__(self, m: int = 3, n: int = 3, dim: int = 3, lr: float = 1, sigma: float = 1, max_iter: int = 3000, random_state: Optional[int] = None) -> None:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Init method.\n \"\"\"\n \n # Initialize descriptive features of SOM\n self.m = m\n self.n = n\n self.dim = dim\n self.shape = (m, n)\n self.initial_lr = lr\n self.lr = lr\n self.sigma = sigma\n self.max_iter = max_iter\n \n # Physical parameters associated to each cell in the SOM\n self.phys = {}\n\n # Initialize weights\n self.random_state = random_state\n rng = np.random.default_rng(random_state)\n self.weights = rng.normal(size=(m * n, dim))\n self._locations = self._get_locations(m, n)\n\n # Set after fitting (note the trailing underscore: the inertia_ property reads _inertia_)\n self._inertia_ = None\n self._n_iter_ = None\n self._trained = False\n\n def _get_locations(self, m: int, n: int) -> np.ndarray:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Return the indices of an m by n array.\n \n :param int m: shape along dimension 0 (vertical) of the SOM\n :param int n: shape along dimension 1 (horizontal) of the SOM\n \n :returns: indices of the array\n :rtype: ndarray[int]\n \"\"\"\n \n return np.argwhere(np.ones(shape=(m, n))).astype(np.int64)\n\n def _find_bmu(self, x: Union[List[float], np.ndarray]) -> int:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Find the index of the best matching unit for the input vector x.\n \n :param x: input vector (1D)\n :type x: list or ndarray\n \n :returns: index of the best matching unit\n :rtype: int\n \"\"\"\n \n diff = self.weights-x\n distance = np.sum(diff*diff, axis=1)\n \n return np.argmin(distance)\n\n def step(self, x: Union[List[float], np.ndarray]) -> None:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Do one step of training on the given input vector.\n \n :param x: input vector (1D)\n :type x: list or ndarray\n \"\"\"\n \n # Get index of best matching unit\n bmu_index = self._find_bmu(x)\n\n # Find location of best matching unit\n bmu_location = self._locations[bmu_index, :]\n\n # Find square distance from each weight to the BMU\n stacked_bmu = np.stack([bmu_location]*(self.m*self.n), axis=0)\n bmu_distance = np.sum(np.power(self._locations.astype(np.float64) - stacked_bmu.astype(np.float64), 2), axis=1)\n \n # Compute update neighborhood\n neighborhood = np.exp((bmu_distance / (self.sigma ** 2)) * -1)\n local_step = self.lr * neighborhood\n\n # Stack local step to be proper shape for update\n local_multiplier = np.stack([local_step]*(self.dim), axis=1)\n\n # Multiply by difference between input and weights\n delta = local_multiplier * (x - self.weights)\n\n # Update weights\n self.weights += delta\n \n return\n\n def _compute_point_inertia(self, x: Union[List[float], np.ndarray]) -> float:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Compute the inertia of a single point. Inertia is defined as the squared distance from the point to the closest cluster center (BMU).\n \n :param x: input vector (1D)\n :type x: list or ndarray\n \"\"\"\n # Find BMU\n bmu_index = self._find_bmu(x)\n bmu = self.weights[bmu_index]\n \n # Compute sum of squared distance (just euclidean distance) from x to bmu\n inertia = np.sum(np.square(x - bmu))\n \n return inertia\n\n
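 # Inertia mirrors scikit-learn's KMeans attribute of the same name: the sum of squared distances from each sample to its best matching unit.\n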
 def fit(self, X: np.ndarray, epochs: int = 1, shuffle: bool = True) -> None:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Take data (a tensor of type float64) as input and fit the SOM to that data for the specified number of epochs.\n\n :param ndarray X: training data. Must have shape (n, self.dim) where n is the number of training samples.\n :param int epochs: (**Optional**) number of times to loop through the training data when fitting\n :param bool shuffle: (**Optional**) whether or not to randomize the order of train data when fitting. Can be seeded with np.random.seed() prior to calling fit.\n \"\"\"\n \n # Count total number of iterations\n global_iter_counter = 0\n n_samples = X.shape[0]\n total_iterations = np.minimum(epochs * n_samples, self.max_iter)\n\n for epoch in range(epochs):\n \n # Break if past max number of iterations\n if global_iter_counter > self.max_iter:\n break\n\n if shuffle:\n rng = np.random.default_rng(self.random_state)\n indices = rng.permutation(n_samples)\n else:\n indices = np.arange(n_samples)\n\n # Train\n for idx in indices:\n \n # Break if past max number of iterations\n if global_iter_counter > self.max_iter:\n break\n \n # Do one step of training\n inp = X[idx]\n self.step(inp)\n \n \n # Update learning rate\n global_iter_counter += 1\n self.lr = (1 - (global_iter_counter / total_iterations)) * self.initial_lr\n\n # Compute inertia\n inertia = np.sum(np.array([float(self._compute_point_inertia(x)) for x in X]))\n self._inertia_ = inertia\n\n # Set n_iter_ attribute\n self._n_iter_ = global_iter_counter\n\n # Set trained flag\n self._trained = True\n\n return\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Predict cluster for each element in X.\n\n :param ndarray X: training data. Must have shape (n, self.dim) where n is the number of training samples.\n\n :returns: an ndarray of shape (n,). The predicted cluster index for each item in X.\n :rtype: ndarray[int]\n \n :raises NotImplementedError: if fit() method has not been called already\n \"\"\"\n \n # Check to make sure SOM has been fit\n if not self._trained:\n raise NotImplementedError('SOM object has no predict() method until after calling fit().')\n\n # Make sure X has proper shape\n assert len(X.shape) == 2, f'X should have two dimensions, not {len(X.shape)}'\n assert X.shape[1] == self.dim, f'This SOM has dimension {self.dim}. Received input with dimension {X.shape[1]}'\n\n labels = np.array([self._find_bmu(x) for x in X])\n return labels\n\n def transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Transform the data X into cluster distance space.\n\n :param ndarray X: training data. Must have shape (n, self.dim) where n is the number of training samples.\n\n :returns: transformed data of shape (n, self.n*self.m). The Euclidean distance from each item in X to each cluster center.\n :rtype: ndarray[float]\n \"\"\"\n # Stack data and cluster centers\n X_stack = np.stack([X]*(self.m*self.n), axis=1)\n cluster_stack = np.stack([self.weights]*X.shape[0], axis=0)\n\n # Compute difference\n diff = X_stack - cluster_stack\n\n return np.linalg.norm(diff, axis=2)\n\n def fit_predict(self, X: np.ndarray, **kwargs) -> np.ndarray:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Convenience method for calling fit(X) followed by predict(X).\n\n :param ndarray X: data of shape (n, self.dim). 
The data to fit and then predict.\n :param **kwargs: optional keyword arguments for the .fit() method\n\n :returns: ndarray of shape (n,). The index of the predicted cluster for each item in X (after fitting the SOM to the data in X).\n :rtype: ndarray[int]\n \"\"\"\n # Fit to data\n self.fit(X, **kwargs)\n\n # Return predictions\n return self.predict(X)\n\n def fit_transform(self, X: np.ndarray, **kwargs) -> np.ndarray:\n \"\"\"\n .. codeauthor:: Riley Smith\n \n Convenience method for calling fit(X) followed by transform(X). Unlike in sklearn, this is not implemented more efficiently (the efficiency is the same as calling fit(X) directly followed by transform(X)).\n\n :param ndarray X: data of shape (n, self.dim) where n is the number of samples\n :param **kwargs: optional keyword arguments for the .fit() method\n\n :returns: ndarray of shape (n, self.m*self.n). The Euclidean distance from each item in X to each cluster center.\n :rtype: ndarray[float]\n \"\"\"\n # Fit to data\n self.fit(X, **kwargs)\n\n # Return points in cluster distance space\n return self.transform(X)\n \n ######################################\n # IO methods #\n ######################################\n \n def read(self, fname: str, *args, **kwargs) -> None:\n '''\n .. codeauthor:: Wilfried Mercier - IRAP \n \n Read the result of a SOM written into a FITS file with the .write() method.\n \n :param str fname: input file. Must be a FITS file.\n \n :raises TypeError: if **fname** is not of type str\n '''\n \n if not isinstance(fname, str):\n raise TypeError(f'fname is of type {type(fname)} but it must be of type str.')\n \n with fits.open(fname) as hdul:\n hdr = hdul[1].header\n table = np.asarray(hdul[1].data)\n \n # Recover parameters required for the SOM\n self.m = hdr['DIM1']\n self.n = hdr['DIM2']\n self._n_iter_ = hdr['NITER']\n self.dim = hdr['NFEAT']\n \n # List of fields corresponding to weights\n fields = [f'F{pos}' for pos in range(self.dim)]\n \n # Extract weights\n self.weights = np.asarray(table[fields].tolist())\n \n # Extract physical parameter\n for name in table.dtype.names:\n if name[:2] == 'P_':\n self.phys[name[2:]] = table[name]\n \n self._trained = True\n return\n \n \n def write(self, fname: str, colnames: Union[List[str], Tuple[str]] = [], **kwargs) -> None:\n '''\n .. codeauthor:: Wilfried Mercier - IRAP \n \n Write the result of the SOM into a FITS file.\n \n :param str fname: output filename. 
Output file will always be a FITS file.\n \n :param colnames: list of column names to add into the header of the VOtable\n :type colnames: list[str] or tuple[str]\n :param **kwargs: (**Optional**) additional columns to add to the table\n \n :raises NotImplementedError: if the fit method has not been used yet\n :raises TypeError: if **fname** is not of type str or **colnames** is neither a list nor a tuple\n '''\n \n # Check to make sure SOM has been fit\n if not self._trained:\n raise NotImplementedError('SOM object has no write() method until after calling fit().')\n \n if not isinstance(fname, str):\n raise TypeError(f'fname is of type {type(fname)} but it must be of type str.')\n \n if not isinstance(colnames, (list, tuple)):\n raise TypeError(f'colnames is of type {type(colnames)} but it must be of type list.')\n \n # Create an empty astropy Table\n table = Table()\n \n # Add features\n for pos, weight in enumerate(self.weights.T):\n table[f'F{pos}'] = weight\n \n # Add physical parameters\n for key, value in kwargs.items():\n table[f'P_{key.upper()}'] = value\n \n # Header to add into the FITS object\n hdr = fits.Header({'DIM1' : self.m,\n 'DIM2' : self.n,\n 'NITER' : self.n_iter_,\n 'NFEAT' : self.dim\n })\n \n # Add column names into header for information purposes only\n for pos, col in enumerate(colnames):\n hdr[f'COL{pos}'] = col\n \n # Convert to a FITS object and write\n hdu0 = fits.PrimaryHDU()\n hdu1 = fits.BinTableHDU(data=table, header=hdr, name='SOM')\n hdulist = fits.HDUList([hdu0, hdu1])\n hdulist.writeto(fname, overwrite=True)\n \n return\n \n #################################################\n # Physical parameters methods #\n #################################################\n \n def get(self, param: str) -> np.ndarray:\n '''\n .. codeauthor:: Wilfried Mercier - IRAP \n \n Return the given physical parameters if it exists.\n \n :param str param: parameter to return\n :returns: array of physical parameter value associated to each node\n :rtype: ndarray\n \n :raises KeyError: if **param** is not found\n '''\n \n if param not in self.phys:\n raise KeyError(f'physical parameter {param} not found.')\n \n return self.phys[param]\n \n ##########################################\n # Properties #\n ##########################################\n\n @property\n def cluster_centers_(self) -> np.ndarray:\n '''\n .. codeauthor:: Riley Smith\n \n Give the coordinates of each cluster centre as an array of shape (m, n, dim).\n \n :returns: cluster centres\n :rtype: ndarray[int]\n '''\n \n return self.weights.reshape(self.m, self.n, self.dim)\n\n @property\n def inertia_(self) -> np.ndarray:\n '''\n .. codeauthor:: Riley Smith\n \n Inertia.\n \n :returns: computed inertia\n :rtype: ndarray[float]\n \n :raises AttributeError: if the SOM does not have the inertia already computed\n '''\n \n if self._inertia_ is None:\n raise AttributeError('SOM does not have inertia until after calling fit()')\n \n return self._inertia_\n\n @property\n def n_iter_(self) -> int:\n '''\n .. 
codeauthor:: Riley Smith\n \n Number of iterations.\n \n :returns: number of iterations\n :rtype: int\n \n :raises AttributeError: if the number of iterations is not initialised yet\n '''\n if self._n_iter_ is None:\n raise AttributeError('SOM does not have n_iter_ attribute until after calling fit()')\n \n return self._n_iter_\n\n","repo_name":"WilfriedMercier/wilfried","sub_path":"utilities/som.py","file_name":"som.py","file_ext":"py","file_size_in_byte":17227,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"34"} +{"seq_id":"20914339093","text":"import array\ndef insertion(arr):\n for i in range(0,len(arr)-1):\n while i>=0 and arr[i]>arr[i+1]: # check the index first so arr[-1] is never touched\n arr[i],arr[i+1] = arr[i+1],arr[i]\n i-=1\n print(arr)\n\nnums = array.array(\"i\",[2, 10, 9, 6, 5, 1, 4, 3]) # renamed so the array module is not shadowed\ninsertion(nums)\n\n","repo_name":"ThunHour/learn_python","sub_path":"data_strcture/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"20270918077","text":"import pandas as pd \nimport pandas.io.data as web # NOTE: pandas.io.data has since moved to the separate pandas-datareader package\nfrom datetime import datetime \n\n# (1) Get underlying data\n\ndef get_tickers(filename):\n\treturn open(filename,'rb').read().splitlines()\n\ndef save_ts(ticker_file):\n\tfor stock in get_tickers(ticker_file):\n\t\tif stock != 'FB': continue # debug filter: only fetch FB\n\t\tstart = datetime.strptime(\"1/1/1975\", \"%m/%d/%Y\")\n\t\tend = datetime.today()\n\t\ttry:\n\t\t\tf = web.DataReader(stock, 'yahoo', start, end)\n\t\t\tf['Returns'] = (f['Close'][1:] - f['Open'][:-1]) / f['Open'][:-1]\n\t\t\tstock = stock.replace('/','-')\n\t\t\tf.to_csv('../data/underlying/' + stock + '.csv')\n\t\texcept:\n\t\t\tcontinue\n\t\n\nif __name__ == '__main__':\n\tsave_ts('../data/sp500_tickers.txt')","repo_name":"vishalv95/MaxPain","sub_path":"scripts/sp500_scraper.py","file_name":"sp500_scraper.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"34"} +{"seq_id":"27944196921","text":"# Walk every list node to collect the values, then reassign them in order; a bit clumsy\n# Runtime: 104 ms, beats 89.55%\n# Memory: 22.7 MB, beats 25.05%\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n if head:\n node = head\n val = []\n while node.next:\n node = node.next\n val.append(node.val)\n node = head.next\n for i in range(len(val)):\n if i%2 == 0:\n node.val = val[-i//2-1]\n else:\n node.val = val[i//2]\n node = node.next\n\n\n# A stack also works:\n# push the nodes onto a stack, then pop them off\n\nclass Solution(object):\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: None Do not return anything, modify head in-place instead.\n \"\"\"\n if not head: return None\n p = head\n stack = []\n # push every node onto the stack\n while p:\n stack.append(p)\n p = p.next\n # length\n n = len(stack)\n # position just before the midpoint\n count = (n - 1) // 2\n p = head\n while count:\n # pop the top of the stack\n tmp = stack.pop()\n # splice it in after the current node\n tmp.next = p.next\n p.next = tmp\n # advance one position\n p = tmp.next\n count -= 1\n stack.pop().next = None\n\n\n","repo_name":"MrQ722/leetcode-notes","sub_path":"Python3/#143 重排链表.py","file_name":"#143 重排链表.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"70234547619","text":"from random import randint\n\nfrom dagster import In, OpExecutionContext, Out, ResourceDefinition, String, graph, op\nfrom 
workspaces.config import POSTGRES\nfrom workspaces.resources import postgres_resource\n\n\n@op(\n config_schema={\"table_name\": String},\n out={\"stocks\": Out(dagster_type=String)},\n required_resource_keys={\"database\"},\n tags={\"kind\": \"postgres\"},\n)\ndef create_table(context: OpExecutionContext):\n table_name = context.op_config[\"table_name\"]\n sql = f\"CREATE TABLE IF NOT EXISTS {table_name} (column_1 VARCHAR(100));\"\n context.resources.database.execute_query(sql)\n return table_name\n\n\n@op(\n required_resource_keys={\"database\"},\n ins={\"table_name\": In(dagster_type=String)},\n tags={\"kind\": \"postgres\"},\n)\ndef insert_into_table(context: OpExecutionContext, table_name):\n sql = f\"INSERT INTO {table_name} (column_1) VALUES (1);\"\n\n number_of_rows = randint(1, 10)\n for _ in range(number_of_rows):\n context.resources.database.execute_query(sql)\n context.log.info(\"Inserted a row\")\n\n context.log.info(\"Batch inserted\")\n\n\n@graph\ndef etl():\n table = create_table()\n insert_into_table(table)\n\n\nlocal = {\"ops\": {\"create_table\": {\"config\": {\"table_name\": \"fake_table\"}}}}\n\ndocker = {\n \"resources\": {\"database\": {\"config\": POSTGRES}},\n \"ops\": {\"create_table\": {\"config\": {\"table_name\": \"postgres_table\"}}},\n}\n\netl_local = etl.to_job(\n name=\"etl_local\",\n config=local,\n resource_defs={\"database\": ResourceDefinition.mock_resource()},\n)\n\netl_docker = etl.to_job(\n name=\"etl_docker\",\n config=docker,\n resource_defs={\"database\": postgres_resource},\n)\n","repo_name":"dehume/corise-dagster","sub_path":"week_2/workspaces/content/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"34"} +{"seq_id":"17963640563","text":"from typing import Optional\n\nfrom sqlalchemy.orm import Session\n\nfrom app import crud, models, schemas\nfrom app.tests.utils.user import create_random_user\nfrom app.tests.utils.utils import random_integer, random_lower_string\nfrom app.tests.utils.workflow import create_random_workflow\n\n\ndef create_random_instruction(\n db: Session,\n *,\n owner_id: Optional[int] = None,\n workflow_id: Optional[int] = None,\n) -> models.Instruction:\n if owner_id is None:\n user = create_random_user(db)\n owner_id = user.id\n if workflow_id is None:\n workflow = create_random_workflow(db, owner_id=owner_id)\n workflow_id = workflow.id\n category = random_lower_string()\n data = random_lower_string()\n trial = random_integer()\n instruction_in = schemas.InstructionCreate(\n category=category, data=data, trial=trial\n )\n return crud.instruction.create(\n db=db,\n obj_in=instruction_in,\n owner_id=owner_id,\n workflow_id=workflow_id,\n )\n","repo_name":"JBEI/dnada","sub_path":"backend/app/app/tests/utils/instruction.py","file_name":"instruction.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"34"} +{"seq_id":"72605622177","text":"#first output\nprint(\"\"\"**************************************\n** Welcome to the Snakes Cafe! **\n** Please see our menu below. 
**\n**\n** To quit at any time, type \"quit\" **\n**************************************\"\"\")\n#second\nprint(\"\"\"\n\nAppetizers\n----------\nWings\nCookies\nSpring Rolls\n\nEntrees\n-------\nSalmon\nSteak\nMeat Tornado\nA Literal Garden\n\nDesserts\n--------\nIce Cream\nCake\nPie\n\nDrinks\n------\nCoffee\nTea\nUnicorn Tears\n\"\"\")\n#third\nprint(\"\"\"\n***********************************\n** What would you like to order? **\n***********************************\n\"\"\")\n#menu lists\n\nAppetizers = [\"Wings\" , \"Cookies\" , \"Spring Rolls\"]\nEntrees = [ \"Salmon\" ,\"Steak\" ,\"Meat Tornado\", \"A Literal Garden\"]\nDesserts = [\"Ice Cream\" , \"Cake\" ,\"Pie\"]\nDrinks = [\"Coffee\" ,\"Tea\" ,\"Unicorn Tears\"]\nMenu = Appetizers + Entrees + Desserts + Drinks\n#input from user\nthe_order=[]\nwhile(True):\n order=input(\">\")\n if order == 'quit':\n break\n \n elif order in Menu:\n the_order.append(order)\n counter=the_order.count(order) # count() gives how many times this item has been ordered so far\n\n\n if counter == 1:\n print( f\"** 1 order of {order} has been added to your meal **\")\n\n else:\n print(f\"** {counter} orders of {order} have been added to your meal **\")\n\n print('\\n', \"Everything you have ordered so far:\", '\\n')\n print(the_order)\n else:\n print(f\"** Sorry, {order} is not on the menu **\")\n ","repo_name":"layanabushaweesh/snakes_cafe","sub_path":"snakes_cafe/snakes_cafe.py","file_name":"snakes_cafe.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"25818082790","text":"import pytest\nimport base64\nfrom shortenme.app import app, init_db\n\n\n@pytest.fixture\ndef client():\n app.config[\"TESTING\"] = True\n\n # Setup database - run schema to clear out data and recreate table\n runner = app.test_cli_runner()\n runner.invoke(init_db)\n\n yield app.test_client()\n\n # Teardown\n\n\ndef test_index(client):\n \"\"\"Test that the index returns HTML with status 200\"\"\"\n response = client.get(\"/\", content_type=\"html/text\")\n assert response.status_code == 200\n\n\ndef test_analytics_page(client):\n \"\"\"Test that the admin analytics page returns 200 when passed valid HTTP Basic credentials\"\"\"\n credentials = base64.b64encode(b\"admin:adminpass\").decode(\"utf-8\")\n response = client.get(\n \"/analytics/\",\n content_type=\"html/text\",\n headers={\"Authorization\": \"Basic \" + credentials},\n )\n assert response.status_code == 200\n\n\ndef test_analytics_page_unauthorized(client):\n \"\"\"Test that the admin analytics page returns unauthorized without credentials\"\"\"\n response = client.get(\"/analytics/\", content_type=\"html/text\")\n assert response.status_code == 401\n\n\ndef test_api_create_shorturl_random(client):\n \"\"\"Ensure that random short URLs can be created by only providing a URL\"\"\"\n response = client.post(\"/api/create\", json={\"url\": \"google.com\"})\n assert response.status_code == 200\n assert response.json[\"result\"] == \"success\"\n assert \"short_url\" in response.json and \"url\" in response.json\n\n\ndef test_api_create_shorturl(client):\n \"\"\"Ensure that short URLs can be created with custom short url and expiry\"\"\"\n response = client.post(\n \"/api/create\",\n json={\n \"url\": \"google.com\",\n \"shorturl\": \"short7\",\n \"expiry\": \"2022-02-14T23:35:00\",\n },\n )\n assert response.status_code == 200\n assert response.json[\"result\"] == \"success\"\n assert response.json[\"short_url\"] == \"short7\"\n assert \"url\" in response.json\n\n\n
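# The duplicate-create case below should surface as HTTP 409 Conflict, the standard status for a resource that already exists.\n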
def test_api_reject_create_duplicate(client):\n \"\"\"Ensure that duplicates cannot be inserted\"\"\"\n client.post(\n \"/api/create\",\n json={\n \"url\": \"google.com\",\n \"shorturl\": \"short7\",\n \"expiry\": \"2022-02-14T23:35:00\",\n },\n )\n response = client.post(\n \"/api/create\",\n json={\n \"url\": \"google.com\",\n \"shorturl\": \"short7\",\n \"expiry\": \"2022-02-14T23:35:00\",\n },\n )\n assert response.status_code == 409\n\n\ndef test_api_create_bad_payload_failure(client):\n \"\"\"Ensure that payloads missing the url or other required data fail\"\"\"\n response = client.post(\"/api/create\", json={})\n assert response.status_code == 400\n assert \"error\" in response.json\n\n\ndef test_api_delete_shorturl(client):\n \"\"\"Ensure that URLs can be deleted\"\"\"\n client.post(\n \"/api/create\",\n json={\n \"url\": \"google.com\",\n \"shorturl\": \"short7\",\n \"expiry\": \"2022-02-14T23:35:00\",\n },\n )\n response = client.delete(\"/api/delete\", json={\"shorturl\": \"short7\"})\n assert response.status_code == 200\n assert response.json[\"deleted\"] == \"success\"\n\n\ndef test_api_delete_failure(client):\n \"\"\"Ensure an error response on deletion of a URL that does not exist\"\"\"\n response = client.delete(\"/api/delete\", json={\"shorturl\": \"short7\"})\n assert response.status_code == 404\n assert \"error\" in response.json\n\n\ndef test_api_analytics(client):\n \"\"\"Ensure that analytics for a given short URL can be retrieved\"\"\"\n client.post(\n \"/api/create\",\n json={\n \"url\": \"google.com\",\n \"shorturl\": \"short7\",\n \"expiry\": \"2022-02-14T23:35:00\",\n },\n )\n response = client.get(\"/api/analytics/short7\")\n assert response.status_code == 200\n assert response.json[\"source_url\"] == \"http://google.com\"\n assert response.json[\"views\"] == 0\n assert \"created_utc\" in response.json and \"expiry\" in response.json\n","repo_name":"mcescalante/shortenme","sub_path":"tests/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"34"} +{"seq_id":"22507658376","text":"from django.shortcuts import render, redirect\nfrom .models import Product, Contact, Orders\nfrom math import ceil\nfrom django.contrib import messages, auth\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.db import connection\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm\nfrom shop.forms import UserCreateForm\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom shop.models import Signup\nfrom django.contrib.auth.models import User\n# import the logging library\nimport logging\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n# Create your views here.\n\n\ndef index(request):\n\n allProds = []\n catprods = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catprods}\n for cat in cats:\n prod = Product.objects.filter(category=cat)\n n = len(prod)\n nSlides = n // 4 + ceil((n / 4) - (n // 4))\n allProds.append([prod, range(1, nSlides), nSlides])\n
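 # nSlides works out to ceil(n / 4): one carousel slide per group of four products.\n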
{'allProds': allProds}\n    return render(request, 'shop/index.html', params)\n\ndef about(request):\n    return render(request, 'shop/about.html')\n\ndef contact(request):\n    if request.method == \"POST\":\n        name = request.POST.get('name', '')\n        email = request.POST.get('email', '')\n        phone = request.POST.get('phone', '')\n        desc = request.POST.get('desc', '')\n        contact = Contact(name=name, email=email, phone=phone, desc=desc)\n        contact.save()\n    return render(request, 'shop/contact.html')\n\ndef tracker(request):\n    return render(request, 'shop/tracker.html')\n\ndef search(request):\n    return render(request, 'shop/search.html')\n\ndef productView(request, myid):\n    # Fetch the product using the id\n    product = Product.objects.filter(id=myid)\n    return render(request, 'shop/prodView.html', {'product': product[0]})\n\n@login_required\ndef checkout(request):\n    if request.method == \"POST\":\n        items_json = request.POST.get('itemsJson', '')\n        name = request.POST.get('name', '')\n        email = request.POST.get('email', '')\n        address = request.POST.get('address1', '') + \" \" + request.POST.get('address2', '')\n        year = request.POST.get('year', '')\n        phone = request.POST.get('phone', '')\n        product_name = request.POST.get('product', '')\n        order = Orders(items_json=items_json, name=name, email=email, room_no=address, year=year, phone=phone, product_name=product_name)\n        order.save()\n        thank = True\n        id = order.order_id\n        return render(request, 'shop/checkout.html', {'thank': thank, 'id': id})\n    return render(request, 'shop/checkout.html')\n\n@login_required\ndef product(request):\n    if request.method == \"POST\" and request.FILES['imag']:\n        user = User.objects.get(username=request.user.username)\n        image = request.FILES['imag']\n        # save the uploaded image to the default media storage\n        fs = FileSystemStorage()\n        fs.save(image.name, image)\n        name = request.POST.get('name', '')\n        category = request.POST.get('category', '')\n        price = request.POST.get('price', '')\n        desc = request.POST.get('desc', '')\n        product = Product(product_name=name, category=category, price=price, desc=desc, image=image, user=user)\n        product.save()\n        sel = True\n        con = {\"sel\": sel}\n        return render(request, 'shop/product.html', con)\n    return render(request, 'shop/product.html')\n\ndef signup(request):\n    form = UserCreateForm()\n    if request.method == 'POST':\n        form = UserCreateForm(request.POST)\n        if form.is_valid():\n            form.save()\n            # account created; send the user to the login page\n            return redirect('/login')\n        else:\n            form = UserCreateForm()\n            messages.error(request, \"Passwords do not match or the username already exists\")\n            return render(request, 'shop/signup.html', {'form': form})\n    return render(request, 'shop/signup.html', {'form': form})\n\ndef log_in(request):\n    if request.method == \"POST\":\n        username = request.POST.get('username', '')\n        password = request.POST.get('password', '')\n        user = authenticate(username=username, password=password)\n        if user:\n            auth.login(request, user)\n            return redirect('/')\n        else:\n            messages.error(request, \"Incorrect username or password\")\n            return render(request, 'shop/login.html')\n    else:\n        return render(request, 'shop/login.html')\n\ndef log_out(request):\n    logout(request)\n    return redirect('/')\n\ndef account(request):\n    # show only the products listed by the logged-in user\n    user = User.objects.get(username=request.user.username)\n    product = Product.objects.filter(user=user)\n    return render(request, 'shop/account.html', {'product': product})\n
","repo_name":"avanish981/KnitCart","sub_path":"sbook/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"34"}